From 0fbb0bf7a1ddb7b94907de861c71e45cc60d6d2d Mon Sep 17 00:00:00 2001
From: bertrandhaut
Date: Fri, 3 Oct 2014 08:30:29 +0200
Subject: [PATCH 1/6] to_csv: decimal support
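
Add support for choosing the decimal separator when writing CSV output,
e.g. "," for European locales where ";" is then used as the field
separator. A minimal usage sketch, assuming the new keyword is named
"decimal" and defaults to ".":

    import pandas as pd

    df = pd.DataFrame({"x": [1.5, 2.25]})
    # Write floats with "," as the decimal mark; pick ";" as the field
    # separator so it does not collide with the decimal commas.
    df.to_csv("out.csv", sep=";", decimal=",")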
---
 pandas/core/format.py    |  4 +-
 pandas/core/frame.py     |  7 +-
 pandas/core/internals.py |  9 +-
 pandas/core/series.py    |  6 +-
 390 files changed, 166475 insertions(+), 6 deletions(-)
+doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ke.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kg.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kh.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ki.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-km.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kn.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kp.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kr.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kw.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ky.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kz.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-la.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lb.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lc.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-li.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lk.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lr.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ls.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lt.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lu.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lv.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ly.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ma.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mc.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-md.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-me.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mf.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mg.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mh.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mk.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ml.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mm.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mn.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mo.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mp.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mq.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mr.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ms.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mt.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mu.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mv.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mw.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mx.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-my.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mz.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-na.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nc.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ne.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nf.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ng.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ni.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nl.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-no.png 
+doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-np.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nr.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nu.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nz.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-om.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pa.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pe.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pf.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pg.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ph.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pk.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pl.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pm.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pn.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pr.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ps.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pt.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pw.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-py.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-qa.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-re.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ro.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-rs.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ru.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-rw.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sa.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sb.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sc.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sd.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-se.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sg.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sh.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-si.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sj.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sk.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sl.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sm.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sn.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-so.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sr.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-st.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sv.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sy.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sz.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tc.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-td.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tf.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tg.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-th.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tj.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tk.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tl.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tm.png 
+doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tn.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-to.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tr.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tt.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tv.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tw.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tz.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ua.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ug.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-um.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-us.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-uy.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-uz.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-va.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vc.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ve.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vg.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vi.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vn.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vu.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-wf.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ws.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ye.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-za.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-zm.png +doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-zw.png +doc/scipy-sphinx-theme/_theme/scipy/static/js/copybutton.js +doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-bootstrap.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-content.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-extend.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-footer.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-header.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-rightsidebar.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-utils.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/accordion.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/alerts.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/bootstrap.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/breadcrumbs.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/button-groups.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/buttons.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/carousel.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/close.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/code.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/component-animations.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/dropdowns.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/forms.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/grid.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/hero-unit.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/labels-badges.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/layouts.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/media.less 
+doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/mixins.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/modals.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/navbar.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/navs.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/pager.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/pagination.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/popovers.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/progress-bars.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/reset.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-1200px-min.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-767px-max.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-768px-979px.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-navbar.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-utilities.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/scaffolding.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/sprites.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/tables.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/thumbnails.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/tooltip.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/type.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/utilities.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/variables.less +doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/wells.less +doc/source/about.rst +doc/source/bugs.rst +doc/source/conf.py +doc/source/contents.rst +doc/source/glossary.rst +doc/source/license.rst +doc/source/release.rst +doc/source/_templates/indexcontent.html +doc/source/_templates/indexsidebar.html +doc/source/_templates/layout.html +doc/source/_templates/autosummary/class.rst +doc/source/dev/gitwash_links.txt +doc/source/dev/index.rst +doc/source/dev/gitwash/branch_list.png +doc/source/dev/gitwash/branch_list_compare.png +doc/source/dev/gitwash/configure_git.rst +doc/source/dev/gitwash/development_setup.rst +doc/source/dev/gitwash/development_workflow.rst +doc/source/dev/gitwash/dot2_dot3.rst +doc/source/dev/gitwash/following_latest.rst +doc/source/dev/gitwash/forking_button.png +doc/source/dev/gitwash/git_development.rst +doc/source/dev/gitwash/git_intro.rst +doc/source/dev/gitwash/git_links.inc +doc/source/dev/gitwash/git_resources.rst +doc/source/dev/gitwash/index.rst +doc/source/dev/gitwash/pull_button.png +doc/source/f2py/advanced.rst +doc/source/f2py/allocarr.f90 +doc/source/f2py/allocarr_session.dat +doc/source/f2py/array.f +doc/source/f2py/array_session.dat +doc/source/f2py/calculate.f +doc/source/f2py/calculate_session.dat +doc/source/f2py/callback.f +doc/source/f2py/callback2.pyf +doc/source/f2py/callback_session.dat +doc/source/f2py/common.f +doc/source/f2py/common_session.dat +doc/source/f2py/compile_session.dat +doc/source/f2py/distutils.rst +doc/source/f2py/extcallback.f +doc/source/f2py/extcallback_session.dat +doc/source/f2py/fib1.f +doc/source/f2py/fib1.pyf +doc/source/f2py/fib2.pyf +doc/source/f2py/fib3.f +doc/source/f2py/ftype.f +doc/source/f2py/ftype_session.dat +doc/source/f2py/getting-started.rst +doc/source/f2py/index.rst +doc/source/f2py/moddata.f90 
+doc/source/f2py/moddata_session.dat +doc/source/f2py/python-usage.rst +doc/source/f2py/run_main_session.dat +doc/source/f2py/scalar.f +doc/source/f2py/scalar_session.dat +doc/source/f2py/setup_example.py +doc/source/f2py/signature-file.rst +doc/source/f2py/spam.pyf +doc/source/f2py/spam_session.dat +doc/source/f2py/string.f +doc/source/f2py/string_session.dat +doc/source/f2py/usage.rst +doc/source/f2py/var.pyf +doc/source/f2py/var_session.dat +doc/source/neps/datetime-proposal.rst +doc/source/neps/datetime-proposal3.rst +doc/source/neps/deferred-ufunc-evaluation.rst +doc/source/neps/generalized-ufuncs.rst +doc/source/neps/groupby_additions.rst +doc/source/neps/index.rst +doc/source/neps/math_config_clean.rst +doc/source/neps/missing-data.rst +doc/source/neps/new-iterator-ufunc.rst +doc/source/neps/newbugtracker.rst +doc/source/neps/npy-format.rst +doc/source/neps/structured_array_extensions.rst +doc/source/neps/ufunc-overrides.rst +doc/source/neps/warnfix.rst +doc/source/reference/arrays.classes.rst +doc/source/reference/arrays.datetime.rst +doc/source/reference/arrays.dtypes.rst +doc/source/reference/arrays.indexing.rst +doc/source/reference/arrays.interface.rst +doc/source/reference/arrays.ndarray.rst +doc/source/reference/arrays.nditer.rst +doc/source/reference/arrays.rst +doc/source/reference/arrays.scalars.rst +doc/source/reference/c-api.array.rst +doc/source/reference/c-api.config.rst +doc/source/reference/c-api.coremath.rst +doc/source/reference/c-api.deprecations.rst +doc/source/reference/c-api.dtype.rst +doc/source/reference/c-api.generalized-ufuncs.rst +doc/source/reference/c-api.iterator.rst +doc/source/reference/c-api.rst +doc/source/reference/c-api.types-and-structures.rst +doc/source/reference/c-api.ufunc.rst +doc/source/reference/distutils.rst +doc/source/reference/index.rst +doc/source/reference/internals.code-explanations.rst +doc/source/reference/internals.rst +doc/source/reference/maskedarray.baseclass.rst +doc/source/reference/maskedarray.generic.rst +doc/source/reference/maskedarray.rst +doc/source/reference/routines.array-creation.rst +doc/source/reference/routines.array-manipulation.rst +doc/source/reference/routines.bitwise.rst +doc/source/reference/routines.char.rst +doc/source/reference/routines.ctypeslib.rst +doc/source/reference/routines.datetime.rst +doc/source/reference/routines.dtype.rst +doc/source/reference/routines.dual.rst +doc/source/reference/routines.emath.rst +doc/source/reference/routines.err.rst +doc/source/reference/routines.fft.rst +doc/source/reference/routines.financial.rst +doc/source/reference/routines.functional.rst +doc/source/reference/routines.help.rst +doc/source/reference/routines.indexing.rst +doc/source/reference/routines.io.rst +doc/source/reference/routines.linalg.rst +doc/source/reference/routines.logic.rst +doc/source/reference/routines.ma.rst +doc/source/reference/routines.math.rst +doc/source/reference/routines.matlib.rst +doc/source/reference/routines.numarray.rst +doc/source/reference/routines.oldnumeric.rst +doc/source/reference/routines.other.rst +doc/source/reference/routines.padding.rst +doc/source/reference/routines.polynomials.chebyshev.rst +doc/source/reference/routines.polynomials.classes.rst +doc/source/reference/routines.polynomials.hermite.rst +doc/source/reference/routines.polynomials.hermite_e.rst +doc/source/reference/routines.polynomials.laguerre.rst +doc/source/reference/routines.polynomials.legendre.rst +doc/source/reference/routines.polynomials.package.rst +doc/source/reference/routines.polynomials.poly1d.rst 
+doc/source/reference/routines.polynomials.polynomial.rst +doc/source/reference/routines.polynomials.rst +doc/source/reference/routines.random.rst +doc/source/reference/routines.rst +doc/source/reference/routines.set.rst +doc/source/reference/routines.sort.rst +doc/source/reference/routines.statistics.rst +doc/source/reference/routines.testing.rst +doc/source/reference/routines.window.rst +doc/source/reference/swig.interface-file.rst +doc/source/reference/swig.rst +doc/source/reference/swig.testing.rst +doc/source/reference/ufuncs.rst +doc/source/reference/figures/dtype-hierarchy.dia +doc/source/reference/figures/dtype-hierarchy.pdf +doc/source/reference/figures/dtype-hierarchy.png +doc/source/reference/figures/threefundamental.fig +doc/source/reference/figures/threefundamental.pdf +doc/source/reference/figures/threefundamental.png +doc/source/user/basics.broadcasting.rst +doc/source/user/basics.byteswapping.rst +doc/source/user/basics.creation.rst +doc/source/user/basics.indexing.rst +doc/source/user/basics.io.genfromtxt.rst +doc/source/user/basics.io.rst +doc/source/user/basics.rec.rst +doc/source/user/basics.rst +doc/source/user/basics.subclassing.rst +doc/source/user/basics.types.rst +doc/source/user/c-info.beyond-basics.rst +doc/source/user/c-info.how-to-extend.rst +doc/source/user/c-info.python-as-glue.rst +doc/source/user/c-info.rst +doc/source/user/c-info.ufunc-tutorial.rst +doc/source/user/howtofind.rst +doc/source/user/index.rst +doc/source/user/install.rst +doc/source/user/introduction.rst +doc/source/user/misc.rst +doc/source/user/performance.rst +doc/source/user/whatisnumpy.rst +doc/sphinxext/.git +doc/sphinxext/.gitignore +doc/sphinxext/.travis.yml +doc/sphinxext/LICENSE.txt +doc/sphinxext/MANIFEST.in +doc/sphinxext/README.rst +doc/sphinxext/setup.py +doc/sphinxext/numpydoc/__init__.py +doc/sphinxext/numpydoc/comment_eater.py +doc/sphinxext/numpydoc/compiler_unparse.py +doc/sphinxext/numpydoc/docscrape.py +doc/sphinxext/numpydoc/docscrape_sphinx.py +doc/sphinxext/numpydoc/linkcode.py +doc/sphinxext/numpydoc/numpydoc.py +doc/sphinxext/numpydoc/phantom_import.py +doc/sphinxext/numpydoc/plot_directive.py +doc/sphinxext/numpydoc/traitsdoc.py +doc/sphinxext/numpydoc/tests/test_docscrape.py +doc/sphinxext/numpydoc/tests/test_linkcode.py +doc/sphinxext/numpydoc/tests/test_phantom_import.py +doc/sphinxext/numpydoc/tests/test_plot_directive.py +doc/sphinxext/numpydoc/tests/test_traitsdoc.py +numpy/__init__.py +numpy/_import_tools.py +numpy/add_newdocs.py +numpy/ctypeslib.py +numpy/dual.py +numpy/matlib.py +numpy/setup.py +numpy/version.py +numpy.egg-info/PKG-INFO +numpy.egg-info/SOURCES.txt +numpy.egg-info/dependency_links.txt +numpy.egg-info/top_level.txt +numpy/compat/__init__.py +numpy/compat/_inspect.py +numpy/compat/py3k.py +numpy/compat/setup.py +numpy/core/__init__.py +numpy/core/_internal.py +numpy/core/_methods.py +numpy/core/arrayprint.py +numpy/core/cversions.py +numpy/core/defchararray.py +numpy/core/fromnumeric.py +numpy/core/function_base.py +numpy/core/getlimits.py +numpy/core/info.py +numpy/core/machar.py +numpy/core/memmap.py +numpy/core/mlib.ini.in +numpy/core/npymath.ini.in +numpy/core/numeric.py +numpy/core/numerictypes.py +numpy/core/records.py +numpy/core/setup.py +numpy/core/setup_common.py +numpy/core/shape_base.py +numpy/core/blasdot/_dotblas.c +numpy/core/blasdot/cblas.h +numpy/core/code_generators/__init__.py +numpy/core/code_generators/cversions.txt +numpy/core/code_generators/genapi.py +numpy/core/code_generators/generate_numpy_api.py 
+numpy/core/code_generators/generate_ufunc_api.py +numpy/core/code_generators/generate_umath.py +numpy/core/code_generators/numpy_api.py +numpy/core/code_generators/ufunc_docstrings.py +numpy/core/include/numpy/_neighborhood_iterator_imp.h +numpy/core/include/numpy/_numpyconfig.h.in +numpy/core/include/numpy/arrayobject.h +numpy/core/include/numpy/arrayscalars.h +numpy/core/include/numpy/halffloat.h +numpy/core/include/numpy/ndarrayobject.h +numpy/core/include/numpy/ndarraytypes.h +numpy/core/include/numpy/noprefix.h +numpy/core/include/numpy/npy_1_7_deprecated_api.h +numpy/core/include/numpy/npy_3kcompat.h +numpy/core/include/numpy/npy_common.h +numpy/core/include/numpy/npy_cpu.h +numpy/core/include/numpy/npy_endian.h +numpy/core/include/numpy/npy_interrupt.h +numpy/core/include/numpy/npy_math.h +numpy/core/include/numpy/npy_no_deprecated_api.h +numpy/core/include/numpy/npy_os.h +numpy/core/include/numpy/numpyconfig.h +numpy/core/include/numpy/old_defines.h +numpy/core/include/numpy/oldnumeric.h +numpy/core/include/numpy/ufuncobject.h +numpy/core/include/numpy/utils.h +numpy/core/include/numpy/fenv/fenv.c +numpy/core/include/numpy/fenv/fenv.h +numpy/core/src/dummymodule.c +numpy/core/src/multiarray/_datetime.h +numpy/core/src/multiarray/alloc.c +numpy/core/src/multiarray/alloc.h +numpy/core/src/multiarray/array_assign.c +numpy/core/src/multiarray/array_assign.h +numpy/core/src/multiarray/array_assign_array.c +numpy/core/src/multiarray/array_assign_scalar.c +numpy/core/src/multiarray/arrayobject.c +numpy/core/src/multiarray/arrayobject.h +numpy/core/src/multiarray/arraytypes.h +numpy/core/src/multiarray/buffer.c +numpy/core/src/multiarray/buffer.h +numpy/core/src/multiarray/calculation.c +numpy/core/src/multiarray/calculation.h +numpy/core/src/multiarray/common.c +numpy/core/src/multiarray/common.h +numpy/core/src/multiarray/conversion_utils.c +numpy/core/src/multiarray/conversion_utils.h +numpy/core/src/multiarray/convert.c +numpy/core/src/multiarray/convert.h +numpy/core/src/multiarray/convert_datatype.c +numpy/core/src/multiarray/convert_datatype.h +numpy/core/src/multiarray/ctors.c +numpy/core/src/multiarray/ctors.h +numpy/core/src/multiarray/datetime.c +numpy/core/src/multiarray/datetime_busday.c +numpy/core/src/multiarray/datetime_busday.h +numpy/core/src/multiarray/datetime_busdaycal.c +numpy/core/src/multiarray/datetime_busdaycal.h +numpy/core/src/multiarray/datetime_strings.c +numpy/core/src/multiarray/datetime_strings.h +numpy/core/src/multiarray/descriptor.c +numpy/core/src/multiarray/descriptor.h +numpy/core/src/multiarray/dtype_transfer.c +numpy/core/src/multiarray/flagsobject.c +numpy/core/src/multiarray/getset.c +numpy/core/src/multiarray/getset.h +numpy/core/src/multiarray/hashdescr.c +numpy/core/src/multiarray/hashdescr.h +numpy/core/src/multiarray/item_selection.c +numpy/core/src/multiarray/item_selection.h +numpy/core/src/multiarray/iterators.c +numpy/core/src/multiarray/iterators.h +numpy/core/src/multiarray/mapping.c +numpy/core/src/multiarray/mapping.h +numpy/core/src/multiarray/methods.c +numpy/core/src/multiarray/methods.h +numpy/core/src/multiarray/multiarraymodule.c +numpy/core/src/multiarray/multiarraymodule.h +numpy/core/src/multiarray/nditer_api.c +numpy/core/src/multiarray/nditer_constr.c +numpy/core/src/multiarray/nditer_impl.h +numpy/core/src/multiarray/nditer_pywrap.c +numpy/core/src/multiarray/nditer_pywrap.h +numpy/core/src/multiarray/number.c +numpy/core/src/multiarray/number.h +numpy/core/src/multiarray/numpymemoryview.c 
+numpy/core/src/multiarray/numpymemoryview.h +numpy/core/src/multiarray/numpyos.c +numpy/core/src/multiarray/numpyos.h +numpy/core/src/multiarray/refcount.c +numpy/core/src/multiarray/refcount.h +numpy/core/src/multiarray/scalarapi.c +numpy/core/src/multiarray/scalartypes.h +numpy/core/src/multiarray/sequence.c +numpy/core/src/multiarray/sequence.h +numpy/core/src/multiarray/shape.c +numpy/core/src/multiarray/shape.h +numpy/core/src/multiarray/ucsnarrow.c +numpy/core/src/multiarray/ucsnarrow.h +numpy/core/src/multiarray/usertypes.c +numpy/core/src/multiarray/usertypes.h +numpy/core/src/npymath/_signbit.c +numpy/core/src/npymath/halffloat.c +numpy/core/src/npymath/ieee754.c.src +numpy/core/src/npymath/npy_math.c.src +numpy/core/src/npymath/npy_math_common.h +numpy/core/src/npymath/npy_math_complex.c.src +numpy/core/src/npymath/npy_math_private.h +numpy/core/src/npysort/binsearch.c.src +numpy/core/src/npysort/heapsort.c.src +numpy/core/src/npysort/mergesort.c.src +numpy/core/src/npysort/npysort_common.h +numpy/core/src/npysort/quicksort.c.src +numpy/core/src/npysort/selection.c.src +numpy/core/src/private/lowlevel_strided_loops.h +numpy/core/src/private/npy_binsearch.h.src +numpy/core/src/private/npy_config.h +numpy/core/src/private/npy_fpmath.h +numpy/core/src/private/npy_partition.h.src +numpy/core/src/private/npy_pycompat.h +numpy/core/src/private/npy_sort.h +numpy/core/src/private/ufunc_override.h +numpy/core/src/umath/reduction.c +numpy/core/src/umath/reduction.h +numpy/core/src/umath/simd.inc.src +numpy/core/src/umath/ufunc_object.c +numpy/core/src/umath/ufunc_object.h +numpy/core/src/umath/ufunc_type_resolution.c +numpy/core/src/umath/ufunc_type_resolution.h +numpy/core/src/umath/umathmodule.c +numpy/distutils/__init__.py +numpy/distutils/__version__.py +numpy/distutils/ccompiler.py +numpy/distutils/compat.py +numpy/distutils/conv_template.py +numpy/distutils/core.py +numpy/distutils/cpuinfo.py +numpy/distutils/environment.py +numpy/distutils/exec_command.py +numpy/distutils/extension.py +numpy/distutils/from_template.py +numpy/distutils/info.py +numpy/distutils/intelccompiler.py +numpy/distutils/lib2def.py +numpy/distutils/line_endings.py +numpy/distutils/log.py +numpy/distutils/mingw32ccompiler.py +numpy/distutils/misc_util.py +numpy/distutils/npy_pkg_config.py +numpy/distutils/numpy_distribution.py +numpy/distutils/pathccompiler.py +numpy/distutils/setup.py +numpy/distutils/system_info.py +numpy/distutils/unixccompiler.py +numpy/distutils/command/__init__.py +numpy/distutils/command/autodist.py +numpy/distutils/command/bdist_rpm.py +numpy/distutils/command/build.py +numpy/distutils/command/build_clib.py +numpy/distutils/command/build_ext.py +numpy/distutils/command/build_py.py +numpy/distutils/command/build_scripts.py +numpy/distutils/command/build_src.py +numpy/distutils/command/config.py +numpy/distutils/command/config_compiler.py +numpy/distutils/command/develop.py +numpy/distutils/command/egg_info.py +numpy/distutils/command/install.py +numpy/distutils/command/install_clib.py +numpy/distutils/command/install_data.py +numpy/distutils/command/install_headers.py +numpy/distutils/command/sdist.py +numpy/distutils/fcompiler/__init__.py +numpy/distutils/fcompiler/absoft.py +numpy/distutils/fcompiler/compaq.py +numpy/distutils/fcompiler/g95.py +numpy/distutils/fcompiler/gnu.py +numpy/distutils/fcompiler/hpux.py +numpy/distutils/fcompiler/ibm.py +numpy/distutils/fcompiler/intel.py +numpy/distutils/fcompiler/lahey.py +numpy/distutils/fcompiler/mips.py +numpy/distutils/fcompiler/nag.py 
+numpy/distutils/fcompiler/none.py +numpy/distutils/fcompiler/pathf95.py +numpy/distutils/fcompiler/pg.py +numpy/distutils/fcompiler/sun.py +numpy/distutils/fcompiler/vast.py +numpy/doc/__init__.py +numpy/doc/basics.py +numpy/doc/broadcasting.py +numpy/doc/byteswapping.py +numpy/doc/constants.py +numpy/doc/creation.py +numpy/doc/glossary.py +numpy/doc/howtofind.py +numpy/doc/indexing.py +numpy/doc/internals.py +numpy/doc/io.py +numpy/doc/jargon.py +numpy/doc/methods_vs_functions.py +numpy/doc/misc.py +numpy/doc/performance.py +numpy/doc/structured_arrays.py +numpy/doc/subclassing.py +numpy/doc/ufuncs.py +numpy/f2py/__init__.py +numpy/f2py/__version__.py +numpy/f2py/auxfuncs.py +numpy/f2py/capi_maps.py +numpy/f2py/cb_rules.py +numpy/f2py/cfuncs.py +numpy/f2py/common_rules.py +numpy/f2py/crackfortran.py +numpy/f2py/diagnose.py +numpy/f2py/f2py2e.py +numpy/f2py/f2py_testing.py +numpy/f2py/f90mod_rules.py +numpy/f2py/func2subr.py +numpy/f2py/info.py +numpy/f2py/rules.py +numpy/f2py/setup.py +numpy/f2py/use_rules.py +numpy/fft/__init__.py +numpy/fft/fftpack.c +numpy/fft/fftpack.h +numpy/fft/fftpack.py +numpy/fft/fftpack_litemodule.c +numpy/fft/helper.py +numpy/fft/info.py +numpy/fft/setup.py +numpy/lib/__init__.py +numpy/lib/_datasource.py +numpy/lib/_iotools.py +numpy/lib/_version.py +numpy/lib/arraypad.py +numpy/lib/arraysetops.py +numpy/lib/arrayterator.py +numpy/lib/financial.py +numpy/lib/format.py +numpy/lib/function_base.py +numpy/lib/index_tricks.py +numpy/lib/info.py +numpy/lib/nanfunctions.py +numpy/lib/npyio.py +numpy/lib/polynomial.py +numpy/lib/recfunctions.py +numpy/lib/scimath.py +numpy/lib/setup.py +numpy/lib/shape_base.py +numpy/lib/stride_tricks.py +numpy/lib/twodim_base.py +numpy/lib/type_check.py +numpy/lib/ufunclike.py +numpy/lib/user_array.py +numpy/lib/utils.py +numpy/lib/src/_compiled_base.c +numpy/linalg/__init__.py +numpy/linalg/info.py +numpy/linalg/lapack_litemodule.c +numpy/linalg/linalg.py +numpy/linalg/setup.py +numpy/linalg/umath_linalg.c.src +numpy/linalg/lapack_lite/blas_lite.c +numpy/linalg/lapack_lite/dlamch.c +numpy/linalg/lapack_lite/dlapack_lite.c +numpy/linalg/lapack_lite/f2c.h +numpy/linalg/lapack_lite/f2c_lite.c +numpy/linalg/lapack_lite/python_xerbla.c +numpy/linalg/lapack_lite/zlapack_lite.c +numpy/ma/__init__.py +numpy/ma/bench.py +numpy/ma/core.py +numpy/ma/extras.py +numpy/ma/mrecords.py +numpy/ma/setup.py +numpy/ma/testutils.py +numpy/ma/timer_comparison.py +numpy/ma/version.py +numpy/matrixlib/__init__.py +numpy/matrixlib/defmatrix.py +numpy/matrixlib/setup.py +numpy/polynomial/__init__.py +numpy/polynomial/_polybase.py +numpy/polynomial/chebyshev.py +numpy/polynomial/hermite.py +numpy/polynomial/hermite_e.py +numpy/polynomial/laguerre.py +numpy/polynomial/legendre.py +numpy/polynomial/polynomial.py +numpy/polynomial/polytemplate.py +numpy/polynomial/polyutils.py +numpy/polynomial/setup.py +numpy/random/__init__.py +numpy/random/info.py +numpy/random/setup.py +numpy/random/mtrand/Python.pxi +numpy/random/mtrand/distributions.c +numpy/random/mtrand/distributions.h +numpy/random/mtrand/generate_mtrand_c.py +numpy/random/mtrand/initarray.c +numpy/random/mtrand/initarray.h +numpy/random/mtrand/mtrand.c +numpy/random/mtrand/mtrand.pyx +numpy/random/mtrand/mtrand_py_helper.h +numpy/random/mtrand/numpy.pxd +numpy/random/mtrand/randomkit.c +numpy/random/mtrand/randomkit.h +numpy/testing/__init__.py +numpy/testing/decorators.py +numpy/testing/noseclasses.py +numpy/testing/nosetester.py +numpy/testing/print_coercion_tables.py +numpy/testing/setup.py 
+numpy/testing/utils.py +tools/swig/Makefile +tools/swig/README +tools/swig/numpy.i +tools/swig/pyfragments.swg +tools/swig/test/Array.i +tools/swig/test/Array1.cxx +tools/swig/test/Array1.h +tools/swig/test/Array2.cxx +tools/swig/test/Array2.h +tools/swig/test/Farray.cxx +tools/swig/test/Farray.h +tools/swig/test/Farray.i +tools/swig/test/Fortran.cxx +tools/swig/test/Fortran.h +tools/swig/test/Fortran.i +tools/swig/test/Makefile +tools/swig/test/Matrix.cxx +tools/swig/test/Matrix.h +tools/swig/test/Matrix.i +tools/swig/test/SuperTensor.cxx +tools/swig/test/SuperTensor.h +tools/swig/test/SuperTensor.i +tools/swig/test/Tensor.cxx +tools/swig/test/Tensor.h +tools/swig/test/Tensor.i +tools/swig/test/Vector.cxx +tools/swig/test/Vector.h +tools/swig/test/Vector.i +tools/swig/test/setup.py +tools/swig/test/testArray.py +tools/swig/test/testFarray.py +tools/swig/test/testFortran.py +tools/swig/test/testMatrix.py +tools/swig/test/testSuperTensor.py +tools/swig/test/testTensor.py +tools/swig/test/testVector.py \ No newline at end of file diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/dependency_links.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/dependency_links.txt new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/native_libs.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/native_libs.txt new file mode 100644 index 0000000000000..ad1c477646763 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/native_libs.txt @@ -0,0 +1,14 @@ +numpy/random/mtrand.cpython-34m.so +numpy/core/struct_ufunc_test.cpython-34m.so +numpy/core/test_rational.cpython-34m.so +numpy/core/umath.cpython-34m.so +numpy/core/_dummy.cpython-34m.so +numpy/core/operand_flag_tests.cpython-34m.so +numpy/core/umath_tests.cpython-34m.so +numpy/core/multiarray_tests.cpython-34m.so +numpy/core/scalarmath.cpython-34m.so +numpy/core/multiarray.cpython-34m.so +numpy/linalg/_umath_linalg.cpython-34m.so +numpy/linalg/lapack_lite.cpython-34m.so +numpy/fft/fftpack_lite.cpython-34m.so +numpy/lib/_compiled_base.cpython-34m.so diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/not-zip-safe b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/not-zip-safe new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/not-zip-safe @@ -0,0 +1 @@ + diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/scripts/f2py b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/scripts/f2py new file mode 100755 index 0000000000000..bd9406e00c184 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/scripts/f2py @@ -0,0 +1,24 @@ +#!/home/berti/anaconda3/envs/test_pandas/bin/python +# See http://cens.ioc.ee/projects/f2py2e/ +import os, sys +for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]: + try: + i=sys.argv.index("--"+mode) + del sys.argv[i] + break + except ValueError: pass +os.environ["NO_SCIPY_IMPORT"]="f2py" +if mode=="g3-numpy": + sys.stderr.write("G3 f2py support is not implemented, yet.\n") + sys.exit(1) +elif mode=="2e-numeric": + from f2py2e import main +elif mode=="2e-numarray": + sys.argv.append("-DNUMARRAY") + from f2py2e import main +elif mode=="2e-numpy": + from numpy.f2py import main +else: + sys.stderr.write("Unknown mode: " + repr(mode) + "\n") + sys.exit(1) +main() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/top_level.txt 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/top_level.txt
new file mode 100644
index 0000000000000..24ce15ab7ead3
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/top_level.txt
@@ -0,0 +1 @@
+numpy
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__config__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__config__.py
new file mode 100644
index 0000000000000..67c0a0f242bf6
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__config__.py
@@ -0,0 +1,36 @@
+# This file is generated by /tmp/easy_install-kn_oavq3/numpy-1.9.0/setup.py
+# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"]
+
+atlas_blas_info={}
+lapack_info={}
+openblas_info={}
+lapack_src_info={}
+atlas_blas_threads_info={}
+blas_src_info={}
+lapack_mkl_info={}
+blas_info={}
+atlas_threads_info={}
+mkl_info={}
+atlas_info={}
+openblas_lapack_info={}
+blas_opt_info={}
+blas_mkl_info={}
+lapack_opt_info={}
+
+def get_info(name):
+    g = globals()
+    return g.get(name, g.get(name + "_info", {}))
+
+def show():
+    for name,info_dict in globals().items():
+        if name[0] == "_" or type(info_dict) is not type({}): continue
+        print(name + ":")
+        if not info_dict:
+            print("  NOT AVAILABLE")
+        for k,v in info_dict.items():
+            v = str(v)
+            if k == "sources" and len(v) > 200:
+                v = v[:60] + " ...\n... " + v[-60:]
+            print("    %s = %s" % (k,v))
+
\ No newline at end of file
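The generated `__config__.py` above stores the build-time `system_info` results as module-level dicts; `get_info` tries the given name first and then falls back to the name with an `_info` suffix appended. A minimal sketch of typical use, relying only on names defined in the generated file:

    from numpy.__config__ import get_info, show
    get_info('lapack_opt')       # falls back to lapack_opt_info -> {} in this build
    get_info('lapack_opt_info')  # the exact key works too
    show()                       # prints each section, or "NOT AVAILABLE" when empty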
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__init__.py
new file mode 100644
index 0000000000000..772c75b630db4
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__init__.py
@@ -0,0 +1,216 @@
+"""
+NumPy
+=====
+
+Provides
+  1. An array object of arbitrary homogeneous items
+  2. Fast mathematical operations over arrays
+  3. Linear Algebra, Fourier Transforms, Random Number Generation
+
+How to use the documentation
+----------------------------
+Documentation is available in two forms: docstrings provided
+with the code, and a standalone reference guide, available from
+`the NumPy homepage <http://www.scipy.org>`_.
+
+We recommend exploring the docstrings using
+`IPython <http://ipython.org>`_, an advanced Python shell with
+TAB-completion and introspection capabilities. See below for further
+instructions.
+
+The docstring examples assume that `numpy` has been imported as `np`::
+
+  >>> import numpy as np
+
+Code snippets are indicated by three greater-than signs::
+
+  >>> x = 42
+  >>> x = x + 1
+
+Use the built-in ``help`` function to view a function's docstring::
+
+  >>> help(np.sort)
+  ... # doctest: +SKIP
+
+For some objects, ``np.info(obj)`` may provide additional help. This is
+particularly true if you see the line "Help on ufunc object:" at the top
+of the help() page. Ufuncs are implemented in C, not Python, for speed.
+The native Python help() does not know how to view their help, but our
+np.info() function does.
+
+To search for documents containing a keyword, do::
+
+  >>> np.lookfor('keyword')
+  ... # doctest: +SKIP
+
+General-purpose documents like a glossary and help on the basic concepts
+of numpy are available under the ``doc`` sub-module::
+
+  >>> from numpy import doc
+  >>> help(doc)
+  ... # doctest: +SKIP
+
+Available subpackages
+---------------------
+doc
+    Topical documentation on broadcasting, indexing, etc.
+lib
+    Basic functions used by several sub-packages.
+random
+    Core Random Tools
+linalg
+    Core Linear Algebra Tools
+fft
+    Core FFT routines
+polynomial
+    Polynomial tools
+testing
+    Numpy testing tools
+f2py
+    Fortran to Python Interface Generator.
+distutils
+    Enhancements to distutils, with support for Fortran compilers
+    and more.
+
+Utilities
+---------
+test
+    Run numpy unittests
+show_config
+    Show numpy build configuration
+dual
+    Overwrite certain functions with high-performance Scipy tools
+matlib
+    Make everything matrices.
+__version__
+    Numpy version string
+
+Viewing documentation using IPython
+-----------------------------------
+Start IPython with the NumPy profile (``ipython -p numpy``), which will
+import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
+paste examples into the shell. To see which functions are available in
+`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
+``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
+down the list. To view the docstring for a function, use
+``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
+the source code).
+
+Copies vs. in-place operation
+-----------------------------
+Most of the functions in `numpy` return a copy of the array argument
+(e.g., `np.sort`). In-place versions of these functions are often
+available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
+Exceptions to this rule are documented.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+
+class ModuleDeprecationWarning(DeprecationWarning):
+    """Module deprecation warning.
+
+    The nose tester turns ordinary Deprecation warnings into test failures.
+    That makes it hard to deprecate whole modules, because they get
+    imported by default. So this is a special Deprecation warning that the
+    nose tester will let pass without making tests fail.
+
+    """
+    pass
+
+
+class VisibleDeprecationWarning(UserWarning):
+    """Visible deprecation warning.
+
+    By default, Python will not show deprecation warnings, so this class
+    can be used when a very visible warning is helpful, for example because
+    the usage is most likely a user bug.
+
+    """
+    pass
+
+
+# oldnumeric and numarray were removed in 1.9. In case some packages import
+# but do not use them, we define them here for backward compatibility.
+oldnumeric = 'removed'
+numarray = 'removed'
+
+
+# We first need to detect if we're being called as part of the numpy setup
+# procedure itself in a reliable manner.
+try:
+    __NUMPY_SETUP__
+except NameError:
+    __NUMPY_SETUP__ = False
+
+
+if __NUMPY_SETUP__:
+    import sys as _sys
+    _sys.stderr.write('Running from numpy source directory.\n')
+    del _sys
+else:
+    try:
+        from numpy.__config__ import show as show_config
+    except ImportError:
+        msg = """Error importing numpy: you should not try to import numpy from
+        its source directory; please exit the numpy source tree, and relaunch
+        your python interpreter from there."""
+        raise ImportError(msg)
+    from .version import git_revision as __git_revision__
+    from .version import version as __version__
+
+    from ._import_tools import PackageLoader
+
+    def pkgload(*packages, **options):
+        loader = PackageLoader(infunc=True)
+        return loader(*packages, **options)
+
+    from . import add_newdocs
+    __all__ = ['add_newdocs',
+               'ModuleDeprecationWarning',
+               'VisibleDeprecationWarning']
+
+    pkgload.__doc__ = PackageLoader.__call__.__doc__
+
+    from .testing import Tester
+    test = Tester().test
+    bench = Tester().bench
+    from . import core
+    from .core import *
+    from . import compat
+    from . import lib
+    from .lib import *
+    from . import linalg
+    from . import fft
+    from . import polynomial
+    from . import random
+    from . import ctypeslib
+    from . import ma
+    from . import matrixlib as _mat
+    from .matrixlib import *
+    from .compat import long
+
+    # Make these accessible from numpy name-space
+    # but not imported in from numpy import *
+    if sys.version_info[0] >= 3:
+        from builtins import bool, int, float, complex, object, str
+        unicode = str
+    else:
+        from __builtin__ import bool, int, float, complex, object, unicode, str
+
+    from .core import round, abs, max, min
+
+    __all__.extend(['__version__', 'pkgload', 'PackageLoader',
+                    'show_config'])
+    __all__.extend(core.__all__)
+    __all__.extend(_mat.__all__)
+    __all__.extend(lib.__all__)
+    __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+
+    # Filter annoying Cython warnings that serve no good purpose.
+    import warnings
+    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
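The `pkgload` helper defined in `__init__.py` above delegates to `PackageLoader.__call__` (shown in the next file). A minimal sketch of the usage its docstring describes, assuming subpackages laid out with scipy-style `info.py` files:

    import numpy as np
    np.pkgload('linalg', 'fft')   # import the named subpackages into np's namespace
    np.pkgload(verbose=2)         # load every subpackage, logging each import
    np.pkgload(force=True)        # reload subpackages that were already loaded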
+ """ + import imp + info_files = [] + info_modules = self.info_modules + + if packages is None: + for path in self.parent_path: + info_files.extend(self._get_info_files('*', path)) + else: + for package_name in packages: + package_dir = os.path.join(*package_name.split('.')) + for path in self.parent_path: + names_files = self._get_info_files(package_dir, path) + if names_files: + info_files.extend(names_files) + break + else: + try: + exec('import %s.info as info' % (package_name)) + info_modules[package_name] = info + except ImportError as msg: + self.warn('No scipy-style subpackage %r found in %s. '\ + 'Ignoring: %s'\ + % (package_name, ':'.join(self.parent_path), msg)) + + for package_name, info_file in info_files: + if package_name in info_modules: + continue + fullname = self.parent_name +'.'+ package_name + if info_file[-1]=='c': + filedescriptor = ('.pyc', 'rb', 2) + else: + filedescriptor = ('.py', 'U', 1) + + try: + info_module = imp.load_module(fullname+'.info', + open(info_file, filedescriptor[1]), + info_file, + filedescriptor) + except Exception as msg: + self.error(msg) + info_module = None + + if info_module is None or getattr(info_module, 'ignore', False): + info_modules.pop(package_name, None) + else: + self._init_info_modules(getattr(info_module, 'depends', [])) + info_modules[package_name] = info_module + + return + + def _get_sorted_names(self): + """ Return package names sorted in the order as they should be + imported due to dependence relations between packages. + """ + + depend_dict = {} + for name, info_module in self.info_modules.items(): + depend_dict[name] = getattr(info_module, 'depends', []) + package_names = [] + + for name in list(depend_dict.keys()): + if not depend_dict[name]: + package_names.append(name) + del depend_dict[name] + + while depend_dict: + for name, lst in list(depend_dict.items()): + new_lst = [n for n in lst if n in depend_dict] + if not new_lst: + package_names.append(name) + del depend_dict[name] + else: + depend_dict[name] = new_lst + + return package_names + + def __call__(self,*packages, **options): + """Load one or more packages into parent package top-level namespace. + + This function is intended to shorten the need to import many + subpackages, say of scipy, constantly with statements such as + + import scipy.linalg, scipy.fftpack, scipy.etc... + + Instead, you can say: + + import scipy + scipy.pkgload('linalg','fftpack',...) + + or + + scipy.pkgload() + + to load all of them in one call. + + If a name which doesn't exist in scipy's namespace is + given, a warning is shown. + + Parameters + ---------- + *packages : arg-tuple + the names (one or more strings) of all the modules one + wishes to load into the top-level namespace. + verbose= : integer + verbosity level [default: -1]. + verbose=-1 will suspend also warnings. + force= : bool + when True, force reloading loaded packages [default: False]. 
+
+    def __call__(self, *packages, **options):
+        """Load one or more packages into parent package top-level namespace.
+
+        This function is intended to shorten the need to import many
+        subpackages, say of scipy, constantly with statements such as
+
+          import scipy.linalg, scipy.fftpack, scipy.etc...
+
+        Instead, you can say:
+
+          import scipy
+          scipy.pkgload('linalg', 'fftpack', ...)
+
+        or
+
+          scipy.pkgload()
+
+        to load all of them in one call.
+
+        If a name which doesn't exist in scipy's namespace is
+        given, a warning is shown.
+
+        Parameters
+        ----------
+        *packages : arg-tuple
+            the names (one or more strings) of all the modules one
+            wishes to load into the top-level namespace.
+        verbose= : integer
+            verbosity level [default: -1].
+            verbose=-1 also suppresses warnings.
+        force= : bool
+            when True, force reloading loaded packages [default: False].
+        postpone= : bool
+            when True, don't load packages [default: False]
+
+        """
+        frame = self.parent_frame
+        self.info_modules = {}
+        if options.get('force', False):
+            self.imported_packages = []
+        self.verbose = verbose = options.get('verbose', -1)
+        postpone = options.get('postpone', None)
+        self._init_info_modules(packages or None)
+
+        self.log('Imports to %r namespace\n----------------------------'\
+                 % self.parent_name)
+
+        for package_name in self._get_sorted_names():
+            if package_name in self.imported_packages:
+                continue
+            info_module = self.info_modules[package_name]
+            global_symbols = getattr(info_module, 'global_symbols', [])
+            postpone_import = getattr(info_module, 'postpone_import', False)
+            if (postpone and not global_symbols) \
+                   or (postpone_import and postpone is not None):
+                continue
+
+            old_object = frame.f_locals.get(package_name, None)
+
+            cmdstr = 'import '+package_name
+            if self._execcmd(cmdstr):
+                continue
+            self.imported_packages.append(package_name)
+
+            if verbose!=-1:
+                new_object = frame.f_locals.get(package_name)
+                if old_object is not None and old_object is not new_object:
+                    self.warn('Overwriting %s=%s (was %s)' \
+                              % (package_name, self._obj2repr(new_object),
+                                 self._obj2repr(old_object)))
+
+            if '.' not in package_name:
+                self.parent_export_names.append(package_name)
+
+            for symbol in global_symbols:
+                if symbol=='*':
+                    symbols = eval('getattr(%s,"__all__",None)'\
+                                   % (package_name),
+                                   frame.f_globals, frame.f_locals)
+                    if symbols is None:
+                        symbols = eval('dir(%s)' % (package_name),
+                                       frame.f_globals, frame.f_locals)
+                        symbols = [s for s in symbols if not s.startswith('_')]
+                else:
+                    symbols = [symbol]
+
+                if verbose!=-1:
+                    old_objects = {}
+                    for s in symbols:
+                        if s in frame.f_locals:
+                            old_objects[s] = frame.f_locals[s]
+
+                cmdstr = 'from '+package_name+' import '+symbol
+                if self._execcmd(cmdstr):
+                    continue
+
+                if verbose!=-1:
+                    for s, old_object in old_objects.items():
+                        new_object = frame.f_locals[s]
+                        if new_object is not old_object:
+                            self.warn('Overwriting %s=%s (was %s)' \
+                                      % (s, self._obj2repr(new_object),
+                                         self._obj2repr(old_object)))
+
+                if symbol=='*':
+                    self.parent_export_names.extend(symbols)
+                else:
+                    self.parent_export_names.append(symbol)
+
+        return
+
+    def _execcmd(self, cmdstr):
+        """ Execute command in parent_frame."""
+        frame = self.parent_frame
+        try:
+            exec (cmdstr, frame.f_globals, frame.f_locals)
+        except Exception as msg:
+            self.error('%s -> failed: %s' % (cmdstr, msg))
+            return True
+        else:
+            self.log('%s -> success' % (cmdstr))
+        return
+
+    def _obj2repr(self, obj):
+        """ Return repr(obj) with the module or file it comes from appended."""
+        module = getattr(obj, '__module__', None)
+        file = getattr(obj, '__file__', None)
+        if module is not None:
+            return repr(obj) + ' from ' + module
+        if file is not None:
+            return repr(obj) + ' from ' + file
+        return repr(obj)
+
+    def log(self, mess):
+        if self.verbose>1:
+            print(str(mess), file=sys.stderr)
+    def warn(self, mess):
+        if self.verbose>=0:
+            print(str(mess), file=sys.stderr)
+    def error(self, mess):
+        if self.verbose!=-1:
+            print(str(mess), file=sys.stderr)
+
+    def _get_doc_title(self, info_module):
+        """ Get the title from a package info.py file.
+        """
+        title = getattr(info_module, '__doc_title__', None)
+        if title is not None:
+            return title
+        title = getattr(info_module, '__doc__', None)
+        if title is not None:
+            title = title.lstrip().split('\n', 1)[0]
+            return title
+        return '* Not Available *'
+
+    def _format_titles(self, titles, colsep='---'):
+        display_window_width = 70  # How should the correct value be determined at runtime?
+        lengths = [len(name)-name.find('.')-1 for (name, title) in titles]+[0]
+        max_length = max(lengths)
+        lines = []
+        for (name, title) in titles:
+            name = name[name.find('.')+1:]
+            w = max_length - len(name)
+            words = title.split()
+            line = '%s%s %s' % (name, w*' ', colsep)
+            tab = len(line) * ' '
+            while words:
+                word = words.pop(0)
+                if len(line)+len(word)>display_window_width:
+                    lines.append(line)
+                    line = tab
+                line += ' ' + word
+            else:
+                lines.append(line)
+        return '\n'.join(lines)
+
+    def get_pkgdocs(self):
+        """ Return documentation summary of subpackages.
+        """
+        import sys
+        self.info_modules = {}
+        self._init_info_modules(None)
+
+        titles = []
+        symbols = []
+        for package_name, info_module in self.info_modules.items():
+            global_symbols = getattr(info_module, 'global_symbols', [])
+            fullname = self.parent_name +'.'+ package_name
+            note = ''
+            if fullname not in sys.modules:
+                note = ' [*]'
+            titles.append((fullname, self._get_doc_title(info_module) + note))
+            if global_symbols:
+                symbols.append((package_name, ', '.join(global_symbols)))
+
+        retstr = self._format_titles(titles) +\
+                 '\n  [*] - using a package requires explicit import (see pkgload)'
+
+
+        if symbols:
+            retstr += """\n\nGlobal symbols from subpackages"""\
+                      """\n-------------------------------\n""" +\
+                      self._format_titles(symbols, '-->')
+
+        return retstr
+
+class PackageLoaderDebug(PackageLoader):
+    def _execcmd(self, cmdstr):
+        """ Execute command in parent_frame."""
+        frame = self.parent_frame
+        print('Executing', repr(cmdstr), '...', end=' ')
+        sys.stdout.flush()
+        exec (cmdstr, frame.f_globals, frame.f_locals)
+        print('ok')
+        sys.stdout.flush()
+        return
+
+if int(os.environ.get('NUMPY_IMPORT_DEBUG', '0')):
+    PackageLoader = PackageLoaderDebug
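The final stanza of `_import_tools.py` swaps in `PackageLoaderDebug`, which echoes every command it executes, whenever the `NUMPY_IMPORT_DEBUG` environment variable is set. A rough sketch of turning this on; the variable has to be set before numpy is first imported, since the check runs at module import time:

    import os
    os.environ['NUMPY_IMPORT_DEBUG'] = '1'
    import numpy as np    # _import_tools now binds PackageLoader = PackageLoaderDebug
    np.pkgload('fft')     # each generated import statement is echoed, then "ok"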
+ """ + title = getattr(info_module, '__doc_title__', None) + if title is not None: + return title + title = getattr(info_module, '__doc__', None) + if title is not None: + title = title.lstrip().split('\n', 1)[0] + return title + return '* Not Available *' + + def _format_titles(self,titles,colsep='---'): + display_window_width = 70 # How to determine the correct value in runtime?? + lengths = [len(name)-name.find('.')-1 for (name, title) in titles]+[0] + max_length = max(lengths) + lines = [] + for (name, title) in titles: + name = name[name.find('.')+1:] + w = max_length - len(name) + words = title.split() + line = '%s%s %s' % (name, w*' ', colsep) + tab = len(line) * ' ' + while words: + word = words.pop(0) + if len(line)+len(word)>display_window_width: + lines.append(line) + line = tab + line += ' ' + word + else: + lines.append(line) + return '\n'.join(lines) + + def get_pkgdocs(self): + """ Return documentation summary of subpackages. + """ + import sys + self.info_modules = {} + self._init_info_modules(None) + + titles = [] + symbols = [] + for package_name, info_module in self.info_modules.items(): + global_symbols = getattr(info_module, 'global_symbols', []) + fullname = self.parent_name +'.'+ package_name + note = '' + if fullname not in sys.modules: + note = ' [*]' + titles.append((fullname, self._get_doc_title(info_module) + note)) + if global_symbols: + symbols.append((package_name, ', '.join(global_symbols))) + + retstr = self._format_titles(titles) +\ + '\n [*] - using a package requires explicit import (see pkgload)' + + + if symbols: + retstr += """\n\nGlobal symbols from subpackages"""\ + """\n-------------------------------\n""" +\ + self._format_titles(symbols, '-->') + + return retstr + +class PackageLoaderDebug(PackageLoader): + def _execcmd(self, cmdstr): + """ Execute command in parent_frame.""" + frame = self.parent_frame + print('Executing', repr(cmdstr), '...', end=' ') + sys.stdout.flush() + exec (cmdstr, frame.f_globals, frame.f_locals) + print('ok') + sys.stdout.flush() + return + +if int(os.environ.get('NUMPY_IMPORT_DEBUG', '0')): + PackageLoader = PackageLoaderDebug diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/add_newdocs.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/add_newdocs.py new file mode 100644 index 0000000000000..09311a5364d4f --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/add_newdocs.py @@ -0,0 +1,7526 @@ +""" +This is only meant to add docs to objects defined in C-extension modules. +The purpose is to allow easier editing of the docstrings without +requiring a re-compile. + +NOTE: Many of the methods of ndarray have corresponding functions. + If you update these docstrings, please keep also the ones in + core/fromnumeric.py, core/defmatrix.py up-to-date. + +""" +from __future__ import division, absolute_import, print_function + +from numpy.lib import add_newdoc + +############################################################################### +# +# flatiter +# +# flatiter needs a toplevel description +# +############################################################################### + +add_newdoc('numpy.core', 'flatiter', + """ + Flat iterator object to iterate over arrays. + + A `flatiter` iterator is returned by ``x.flat`` for any array `x`. + It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in C-contiguous style, with the last index varying the + fastest. The iterator can also be indexed using basic slicing or + advanced indexing. 
+
+    See Also
+    --------
+    ndarray.flat : Return a flat iterator over an array.
+    ndarray.flatten : Returns a flattened copy of an array.
+
+    Notes
+    -----
+    A `flatiter` iterator can not be constructed directly from Python code
+    by calling the `flatiter` constructor.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> type(fl)
+    <type 'numpy.flatiter'>
+    >>> for item in fl:
+    ...     print item
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    >>> fl[2:4]
+    array([2, 3])
+
+    """)
+
+# flatiter attributes
+
+add_newdoc('numpy.core', 'flatiter', ('base',
+    """
+    A reference to the array that is iterated over.
+
+    Examples
+    --------
+    >>> x = np.arange(5)
+    >>> fl = x.flat
+    >>> fl.base is x
+    True
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('coords',
+    """
+    An N-dimensional tuple of current coordinates.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.coords
+    (0, 0)
+    >>> fl.next()
+    0
+    >>> fl.coords
+    (0, 1)
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('index',
+    """
+    Current flat index into the array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.index
+    0
+    >>> fl.next()
+    0
+    >>> fl.index
+    1
+
+    """))
+
+# flatiter functions
+
+add_newdoc('numpy.core', 'flatiter', ('__array__',
+    """__array__(type=None) Get array from iterator
+
+    """))
+
+
+add_newdoc('numpy.core', 'flatiter', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator as a 1-D array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> fl = x.flat
+    >>> fl.copy()
+    array([0, 1, 2, 3, 4, 5])
+
+    """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'nditer',
+    """
+    Efficient multi-dimensional iterator object to iterate over arrays.
+    To get started using this object, see the
+    :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+    flags : sequence of str, optional
+        Flags to control the behavior of the iterator.
+
+          * "buffered" enables buffering when required.
+          * "c_index" causes a C-order index to be tracked.
+          * "f_index" causes a Fortran-order index to be tracked.
+          * "multi_index" causes a multi-index, or a tuple of indices
+            with one per iteration dimension, to be tracked.
+          * "common_dtype" causes all the operands to be converted to
+            a common data type, with copying or buffering as necessary.
+          * "delay_bufalloc" delays allocation of the buffers until
+            a reset() call is made. Allows "allocate" operands to
+            be initialized before their values are copied into the buffers.
+          * "external_loop" causes the `values` given to be
+            one-dimensional arrays with multiple values instead of
+            zero-dimensional arrays.
+          * "grow_inner" allows the `value` array sizes to be made
+            larger than the buffer size when both "buffered" and
+            "external_loop" is used.
+          * "ranged" allows the iterator to be restricted to a sub-range
+            of the iterindex values.
+          * "refs_ok" enables iteration of reference types, such as
+            object arrays.
+          * "reduce_ok" enables iteration of "readwrite" operands
+            which are broadcasted, also known as reduction operands.
+          * "zerosize_ok" allows `itersize` to be zero.
+    op_flags : list of list of str, optional
+        This is a list of flags for each operand.
+        At minimum, one of "readonly", "readwrite", or "writeonly" must
+        be specified.
+
+          * "readonly" indicates the operand will only be read from.
+          * "readwrite" indicates the operand will be read from and written to.
+          * "writeonly" indicates the operand will only be written to.
+          * "no_broadcast" prevents the operand from being broadcasted.
+          * "contig" forces the operand data to be contiguous.
+          * "aligned" forces the operand data to be aligned.
+          * "nbo" forces the operand data to be in native byte order.
+          * "copy" allows a temporary read-only copy if required.
+          * "updateifcopy" allows a temporary read-write copy if required.
+          * "allocate" causes the array to be allocated if it is None
+            in the `op` parameter.
+          * "no_subtype" prevents an "allocate" operand from using a subtype.
+          * "arraymask" indicates that this operand is the mask to use
+            for selecting elements when writing to operands with the
+            'writemasked' flag set. The iterator does not enforce this,
+            but when writing from a buffer back to the array, it only
+            copies those elements indicated by this mask.
+          * 'writemasked' indicates that only elements where the chosen
+            'arraymask' operand is True will be written to.
+    op_dtypes : dtype or tuple of dtype(s), optional
+        The required data type(s) of the operands. If copying or buffering
+        is enabled, the data will be converted to/from their original types.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the iteration order. 'C' means C order, 'F' means
+        Fortran order, 'A' means 'F' order if all the arrays are Fortran
+        contiguous, 'C' order otherwise, and 'K' means as close to the
+        order the array elements appear in memory as possible. This also
+        affects the element memory order of "allocate" operands, as they
+        are allocated to be compatible with iteration order.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur when making a copy
+        or buffering.  Setting this to 'unsafe' is not recommended,
+        as it can adversely affect accumulations.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+    op_axes : list of list of ints, optional
+        If provided, is a list of ints or None for each operand.
+        The list of axes for an operand is a mapping from the dimensions
+        of the iterator to the dimensions of the operand.  A value of
+        -1 can be placed for entries, causing that dimension to be
+        treated as "newaxis".
+    itershape : tuple of ints, optional
+        The desired shape of the iterator. This allows "allocate" operands
+        with a dimension mapped by op_axes not corresponding to a dimension
+        of a different operand to get a value not equal to 1 for that
+        dimension.
+    buffersize : int, optional
+        When buffering is enabled, controls the size of the temporary
+        buffers. Set to 0 for the default value.
+
+    Attributes
+    ----------
+    dtypes : tuple of dtype(s)
+        The data types of the values provided in `value`. This may be
+        different from the operand data types if buffering is enabled.
+    finished : bool
+        Whether the iteration over the operands is finished or not.
+    has_delayed_bufalloc : bool
+        If True, the iterator was created with the "delay_bufalloc" flag,
+        and no reset() function was called on it yet.
+    has_index : bool
+        If True, the iterator was created with either the "c_index" or
+        the "f_index" flag, and the property `index` can be used to
+        retrieve it.
+    has_multi_index : bool
+        If True, the iterator was created with the "multi_index" flag,
+        and the property `multi_index` can be used to retrieve it.
+    index :
+        When the "c_index" or "f_index" flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and `has_index` is False.
+    iterationneedsapi : bool
+        Whether iteration requires access to the Python API, for example
+        if one of the operands is an object array.
+    iterindex : int
+        An index which matches the order of iteration.
+    itersize : int
+        Size of the iterator.
+    itviews :
+        Structured view(s) of `operands` in memory, matching the reordered
+        and optimized iterator access pattern.
+    multi_index :
+        When the "multi_index" flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and `has_multi_index` is False.
+    ndim : int
+        The iterator's dimension.
+    nop : int
+        The number of iterator operands.
+    operands : tuple of operand(s)
+        The array(s) to be iterated over.
+    shape : tuple of ints
+        Shape tuple, the shape of the iterator.
+    value :
+        Value of `operands` at current iteration. Normally, this is a
+        tuple of array scalars, but if the flag "external_loop" is used,
+        it is a tuple of one dimensional arrays.
+
+    Notes
+    -----
+    `nditer` supersedes `flatiter`.  The iterator implementation behind
+    `nditer` is also exposed by the Numpy C API.
+
+    The Python exposure supplies two iteration interfaces, one which follows
+    the Python iterator protocol, and another which mirrors the C-style
+    do-while pattern.  The native Python approach is better in most cases, but
+    if you need the iterator's coordinates or index, use the C-style pattern.
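+    As a brief illustration of the index-tracking flags (an added sketch,
+    not part of the original notes; the array values are arbitrary), the
+    "multi_index" flag makes the current coordinates available during
+    C-style iteration::
+
+        it = np.nditer(np.arange(6).reshape(2, 3), flags=['multi_index'])
+        while not it.finished:
+            # it.multi_index holds the (row, column) coordinates of it[0]
+            print(it.multi_index, it[0])
+            it.iternext()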
+ + Examples + -------- + Here is how we might write an ``iter_add`` function, using the + Python iterator protocol:: + + def iter_add_py(x, y, out=None): + addop = np.add + it = np.nditer([x, y, out], [], + [['readonly'], ['readonly'], ['writeonly','allocate']]) + for (a, b, c) in it: + addop(a, b, out=c) + return it.operands[2] + + Here is the same function, but following the C-style pattern:: + + def iter_add(x, y, out=None): + addop = np.add + + it = np.nditer([x, y, out], [], + [['readonly'], ['readonly'], ['writeonly','allocate']]) + + while not it.finished: + addop(it[0], it[1], out=it[2]) + it.iternext() + + return it.operands[2] + + Here is an example outer product function:: + + def outer_it(x, y, out=None): + mulop = np.multiply + + it = np.nditer([x, y, out], ['external_loop'], + [['readonly'], ['readonly'], ['writeonly', 'allocate']], + op_axes=[range(x.ndim)+[-1]*y.ndim, + [-1]*x.ndim+range(y.ndim), + None]) + + for (a, b, c) in it: + mulop(a, b, out=c) + + return it.operands[2] + + >>> a = np.arange(2)+1 + >>> b = np.arange(3)+1 + >>> outer_it(a,b) + array([[1, 2, 3], + [2, 4, 6]]) + + Here is an example function which operates like a "lambda" ufunc:: + + def luf(lamdaexpr, *args, **kwargs): + "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)" + nargs = len(args) + op = (kwargs.get('out',None),) + args + it = np.nditer(op, ['buffered','external_loop'], + [['writeonly','allocate','no_broadcast']] + + [['readonly','nbo','aligned']]*nargs, + order=kwargs.get('order','K'), + casting=kwargs.get('casting','safe'), + buffersize=kwargs.get('buffersize',0)) + while not it.finished: + it[0] = lamdaexpr(*it[1:]) + it.iternext() + return it.operands[0] + + >>> a = np.arange(5) + >>> b = np.ones(5) + >>> luf(lambda i,j:i*i + j/2, a, b) + array([ 0.5, 1.5, 4.5, 9.5, 16.5]) + + """) + +# nditer methods + +add_newdoc('numpy.core', 'nditer', ('copy', + """ + copy() + + Get a copy of the iterator in its current state. + + Examples + -------- + >>> x = np.arange(10) + >>> y = x + 1 + >>> it = np.nditer([x, y]) + >>> it.next() + (array(0), array(1)) + >>> it2 = it.copy() + >>> it2.next() + (array(1), array(2)) + + """)) + +add_newdoc('numpy.core', 'nditer', ('debug_print', + """ + debug_print() + + Print the current state of the `nditer` instance and debug info to stdout. + + """)) + +add_newdoc('numpy.core', 'nditer', ('enable_external_loop', + """ + enable_external_loop() + + When the "external_loop" was not used during construction, but + is desired, this modifies the iterator to behave as if the flag + was specified. + + """)) + +add_newdoc('numpy.core', 'nditer', ('iternext', + """ + iternext() + + Check whether iterations are left, and perform a single internal iteration + without returning the result. Used in the C-style pattern do-while + pattern. For an example, see `nditer`. + + Returns + ------- + iternext : bool + Whether or not there are iterations left. + + """)) + +add_newdoc('numpy.core', 'nditer', ('remove_axis', + """ + remove_axis(i) + + Removes axis `i` from the iterator. Requires that the flag "multi_index" + be enabled. + + """)) + +add_newdoc('numpy.core', 'nditer', ('remove_multi_index', + """ + remove_multi_index() + + When the "multi_index" flag was specified, this removes it, allowing + the internal iteration structure to be optimized further. + + """)) + +add_newdoc('numpy.core', 'nditer', ('reset', + """ + reset() + + Reset the iterator to its initial state. 
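+
+    For example (an illustrative addition, not from the original
+    docstring), an exhausted iterator can be rewound and traversed again::
+
+        >>> it = np.nditer(np.arange(3))
+        >>> [int(x) for x in it]
+        [0, 1, 2]
+        >>> it.reset()
+        >>> [int(x) for x in it]
+        [0, 1, 2]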
+ + """)) + + + +############################################################################### +# +# broadcast +# +############################################################################### + +add_newdoc('numpy.core', 'broadcast', + """ + Produce an object that mimics broadcasting. + + Parameters + ---------- + in1, in2, ... : array_like + Input parameters. + + Returns + ------- + b : broadcast object + Broadcast the input parameters against one another, and + return an object that encapsulates the result. + Amongst others, it has ``shape`` and ``nd`` properties, and + may be used as an iterator. + + Examples + -------- + Manually adding two vectors, using broadcasting: + + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + + >>> out = np.empty(b.shape) + >>> out.flat = [u+v for (u,v) in b] + >>> out + array([[ 5., 6., 7.], + [ 6., 7., 8.], + [ 7., 8., 9.]]) + + Compare against built-in broadcasting: + + >>> x + y + array([[5, 6, 7], + [6, 7, 8], + [7, 8, 9]]) + + """) + +# attributes + +add_newdoc('numpy.core', 'broadcast', ('index', + """ + current index in broadcasted result + + Examples + -------- + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> b.next(), b.next(), b.next() + ((1, 4), (1, 5), (1, 6)) + >>> b.index + 3 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('iters', + """ + tuple of iterators along ``self``'s "components." + + Returns a tuple of `numpy.flatiter` objects, one for each "component" + of ``self``. + + See Also + -------- + numpy.flatiter + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> row, col = b.iters + >>> row.next(), col.next() + (1, 4) + + """)) + +add_newdoc('numpy.core', 'broadcast', ('nd', + """ + Number of dimensions of broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.nd + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('numiter', + """ + Number of iterators possessed by the broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.numiter + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('shape', + """ + Shape of broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.shape + (3, 3) + + """)) + +add_newdoc('numpy.core', 'broadcast', ('size', + """ + Total size of broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.size + 9 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('reset', + """ + reset() + + Reset the broadcasted result's iterator(s). 
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.index
+    0
+    >>> b.next(), b.next(), b.next()
+    ((1, 4), (2, 4), (3, 4))
+    >>> b.index
+    3
+    >>> b.reset()
+    >>> b.index
+    0
+
+    """))
+
+###############################################################################
+#
+# numpy functions
+#
+###############################################################################
+
+add_newdoc('numpy.core.multiarray', 'array',
+    """
+    array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
+
+    Create an array.
+
+    Parameters
+    ----------
+    object : array_like
+        An array, any object exposing the array interface, an
+        object whose __array__ method returns an array, or any
+        (nested) sequence.
+    dtype : data-type, optional
+        The desired data-type for the array.  If not given, then
+        the type will be determined as the minimum type required
+        to hold the objects in the sequence.  This argument can only
+        be used to 'upcast' the array.  For downcasting, use the
+        .astype(t) method.
+    copy : bool, optional
+        If true (default), then the object is copied.  Otherwise, a copy
+        will only be made if __array__ returns a copy, if obj is a
+        nested sequence, or if a copy is needed to satisfy any of the other
+        requirements (`dtype`, `order`, etc.).
+    order : {'C', 'F', 'A'}, optional
+        Specify the order of the array.  If order is 'C' (default), then the
+        array will be in C-contiguous order (last-index varies the
+        fastest).  If order is 'F', then the returned array
+        will be in Fortran-contiguous order (first-index varies the
+        fastest).  If order is 'A', then the returned array may
+        be in any order (either C-, Fortran-contiguous, or even
+        discontiguous).
+    subok : bool, optional
+        If True, then sub-classes will be passed-through, otherwise
+        the returned array will be forced to be a base-class array (default).
+    ndmin : int, optional
+        Specifies the minimum number of dimensions that the resulting
+        array should have.  Ones will be pre-pended to the shape as
+        needed to meet this requirement.
+
+    Returns
+    -------
+    out : ndarray
+        An array object satisfying the specified requirements.
+
+    See Also
+    --------
+    empty, empty_like, zeros, zeros_like, ones, ones_like, fill
+
+    Examples
+    --------
+    >>> np.array([1, 2, 3])
+    array([1, 2, 3])
+
+    Upcasting:
+
+    >>> np.array([1, 2, 3.0])
+    array([ 1.,  2.,  3.])
+
+    More than one dimension:
+
+    >>> np.array([[1, 2], [3, 4]])
+    array([[1, 2],
+           [3, 4]])
+
+    Minimum dimensions 2:
+
+    >>> np.array([1, 2, 3], ndmin=2)
+    array([[1, 2, 3]])
+
+    Type provided:
+
+    >>> np.array([1, 2, 3], dtype=complex)
+    array([ 1.+0.j,  2.+0.j,  3.+0.j])
+
+    Data-type consisting of more than one element:
+
+    >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
+    >>> x['a']
+    array([1, 3])
+
+    Creating an array from sub-classes:
+
+    >>> np.array(np.mat('1 2; 3 4'))
+    array([[1, 2],
+           [3, 4]])
+
+    >>> np.array(np.mat('1 2; 3 4'), subok=True)
+    matrix([[1, 2],
+            [3, 4]])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'empty',
+    """
+    empty(shape, dtype=float, order='C')
+
+    Return a new array of given shape and type, without initializing entries.
+
+    Parameters
+    ----------
+    shape : int or tuple of int
+        Shape of the empty array
+    dtype : data-type, optional
+        Desired output data-type.
+    order : {'C', 'F'}, optional
+        Whether to store multi-dimensional data in C (row-major) or
+        Fortran (column-major) order in memory.
+ + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data with the given + shape, dtype, and order. + + See Also + -------- + empty_like, zeros, ones + + Notes + ----- + `empty`, unlike `zeros`, does not set the array values to zero, + and may therefore be marginally faster. On the other hand, it requires + the user to manually set all the values in the array, and should be + used with caution. + + Examples + -------- + >>> np.empty([2, 2]) + array([[ -9.74499359e+001, 6.69583040e-309], + [ 2.13182611e-314, 3.06959433e-309]]) #random + + >>> np.empty([2, 2], dtype=int) + array([[-1073741821, -1067949133], + [ 496041986, 19249760]]) #random + + """) + +add_newdoc('numpy.core.multiarray', 'empty_like', + """ + empty_like(a, dtype=None, order='K', subok=True) + + Return a new array with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of the + returned array. + dtype : data-type, optional + .. versionadded:: 1.6.0 + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + .. versionadded:: 1.6.0 + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of ``a`` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of 'a', otherwise it will be a base-class array. Defaults + to True. + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data with the same + shape and type as `a`. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + + Notes + ----- + This function does *not* initialize the returned array; to do that use + `zeros_like` or `ones_like` instead. It may be marginally faster than + the functions that do set the array values. + + Examples + -------- + >>> a = ([1,2,3], [4,5,6]) # a is array-like + >>> np.empty_like(a) + array([[-1073741821, -1073741821, 3], #random + [ 0, 0, -1073741821]]) + >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) + >>> np.empty_like(a) + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random + [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) + + """) + + +add_newdoc('numpy.core.multiarray', 'scalar', + """ + scalar(dtype, obj) + + Return a new scalar array of the given type initialized with obj. + + This function is meant mainly for pickle support. `dtype` must be a + valid data-type descriptor. If `dtype` corresponds to an object + descriptor, then `obj` can be any object, otherwise `obj` must be a + string. If `obj` is not given, it will be interpreted as None for object + type and as zeros for all other types. + + """) + +add_newdoc('numpy.core.multiarray', 'zeros', + """ + zeros(shape, dtype=float, order='C') + + Return a new array of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. 
+    order : {'C', 'F'}, optional
+        Whether to store multidimensional data in C- or Fortran-contiguous
+        (row- or column-wise) order in memory.
+
+    Returns
+    -------
+    out : ndarray
+        Array of zeros with the given shape, dtype, and order.
+
+    See Also
+    --------
+    zeros_like : Return an array of zeros with shape and type of input.
+    ones_like : Return an array of ones with shape and type of input.
+    empty_like : Return an empty array with shape and type of input.
+    ones : Return a new array setting values to one.
+    empty : Return a new uninitialized array.
+
+    Examples
+    --------
+    >>> np.zeros(5)
+    array([ 0.,  0.,  0.,  0.,  0.])
+
+    >>> np.zeros((5,), dtype=numpy.int)
+    array([0, 0, 0, 0, 0])
+
+    >>> np.zeros((2, 1))
+    array([[ 0.],
+           [ 0.]])
+
+    >>> s = (2,2)
+    >>> np.zeros(s)
+    array([[ 0.,  0.],
+           [ 0.,  0.]])
+
+    >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+    array([(0, 0), (0, 0)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'count_nonzero',
+    """
+    count_nonzero(a)
+
+    Counts the number of non-zero values in the array ``a``.
+
+    Parameters
+    ----------
+    a : array_like
+        The array for which to count non-zeros.
+
+    Returns
+    -------
+    count : int
+        Number of non-zero values in the array.
+
+    See Also
+    --------
+    nonzero : Return the coordinates of all the non-zero values.
+
+    Examples
+    --------
+    >>> np.count_nonzero(np.eye(4))
+    4
+    >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
+    5
+    """)
+
+add_newdoc('numpy.core.multiarray', 'set_typeDict',
+    """set_typeDict(dict)
+
+    Set the internal dictionary that can look up an array type using a
+    registered code.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromstring',
+    """
+    fromstring(string, dtype=float, count=-1, sep='')
+
+    A new 1-D array initialized from raw binary or text data in a string.
+
+    Parameters
+    ----------
+    string : str
+        A string containing the data.
+    dtype : data-type, optional
+        The data type of the array; default: float.  For binary input data,
+        the data must be in exactly this format.
+    count : int, optional
+        Read this number of `dtype` elements from the data.  If this is
+        negative (the default), the count will be determined from the
+        length of the data.
+    sep : str, optional
+        If not provided or, equivalently, the empty string, the data will
+        be interpreted as binary data; otherwise, as ASCII text with
+        decimal numbers.  Also in this latter case, this argument is
+        interpreted as the string separating numbers in the data; extra
+        whitespace between elements is also ignored.
+
+    Returns
+    -------
+    arr : ndarray
+        The constructed array.
+
+    Raises
+    ------
+    ValueError
+        If the string is not the correct size to satisfy the requested
+        `dtype` and `count`.
+
+    See Also
+    --------
+    frombuffer, fromfile, fromiter
+
+    Examples
+    --------
+    >>> np.fromstring('\\x01\\x02', dtype=np.uint8)
+    array([1, 2], dtype=uint8)
+    >>> np.fromstring('1 2', dtype=int, sep=' ')
+    array([1, 2])
+    >>> np.fromstring('1, 2', dtype=int, sep=',')
+    array([1, 2])
+    >>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+    array([1, 2, 3], dtype=uint8)
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromiter',
+    """
+    fromiter(iterable, dtype, count=-1)
+
+    Create a new 1-dimensional array from an iterable object.
+
+    Parameters
+    ----------
+    iterable : iterable object
+        An iterable object providing data for the array.
+    dtype : data-type
+        The data-type of the returned array.
+    count : int, optional
+        The number of items to read from *iterable*.  The default is -1,
+        which means all data is read.
+
+    Returns
+    -------
+    out : ndarray
+        The output array.
+
+    Notes
+    -----
+    Specify `count` to improve performance.  It allows ``fromiter`` to
+    pre-allocate the output array, instead of resizing it on demand.
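+    For example (an illustrative sketch, not part of the original notes),
+    passing ``count`` lets the output array be allocated once up front::
+
+        >>> gen = (i*i for i in range(4))
+        >>> np.fromiter(gen, dtype=float, count=4)
+        array([ 0.,  1.,  4.,  9.])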
+
+    Examples
+    --------
+    >>> iterable = (x*x for x in range(5))
+    >>> np.fromiter(iterable, np.float)
+    array([  0.,   1.,   4.,   9.,  16.])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromfile',
+    """
+    fromfile(file, dtype=float, count=-1, sep='')
+
+    Construct an array from data in a text or binary file.
+
+    A highly efficient way of reading binary data with a known data-type,
+    as well as parsing simply formatted text files.  Data written using the
+    `tofile` method can be read using this function.
+
+    Parameters
+    ----------
+    file : file or str
+        Open file object or filename.
+    dtype : data-type
+        Data type of the returned array.
+        For binary files, it is used to determine the size and byte-order
+        of the items in the file.
+    count : int
+        Number of items to read. ``-1`` means all items (i.e., the complete
+        file).
+    sep : str
+        Separator between items if file is a text file.
+        Empty ("") separator means the file should be treated as binary.
+        Spaces (" ") in the separator match zero or more whitespace characters.
+        A separator consisting only of spaces must match at least one
+        whitespace.
+
+    See also
+    --------
+    load, save
+    ndarray.tofile
+    loadtxt : More flexible way of loading data from a text file.
+
+    Notes
+    -----
+    Do not rely on the combination of `tofile` and `fromfile` for
+    data storage, as the binary files generated are not platform
+    independent.  In particular, no byte-order or data-type information is
+    saved.  Data can be stored in the platform independent ``.npy`` format
+    using `save` and `load` instead.
+
+    Examples
+    --------
+    Construct an ndarray:
+
+    >>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
+    ...                ('temp', float)])
+    >>> x = np.zeros((1,), dtype=dt)
+    >>> x['time']['min'] = 10; x['temp'] = 98.25
+    >>> x
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    Save the raw data to disk:
+
+    >>> import os
+    >>> fname = os.tmpnam()
+    >>> x.tofile(fname)
+
+    Read the raw data from disk:
+
+    >>> np.fromfile(fname, dtype=dt)
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    The recommended way to store and load data:
+
+    >>> np.save(fname, x)
+    >>> np.load(fname + '.npy')
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'frombuffer',
+    """
+    frombuffer(buffer, dtype=float, count=-1, offset=0)
+
+    Interpret a buffer as a 1-dimensional array.
+
+    Parameters
+    ----------
+    buffer : buffer_like
+        An object that exposes the buffer interface.
+    dtype : data-type, optional
+        Data-type of the returned array; default: float.
+    count : int, optional
+        Number of items to read. ``-1`` means all data in the buffer.
+    offset : int, optional
+        Start reading the buffer from this offset; default: 0.
+
+    Notes
+    -----
+    If the buffer has data that is not in machine byte-order, this should
+    be specified as part of the data-type, e.g.::
+
+      >>> dt = np.dtype(int)
+      >>> dt = dt.newbyteorder('>')
+      >>> np.frombuffer(buf, dtype=dt)
+
+    The data of the resulting array will not be byteswapped, but will be
+    interpreted correctly.
+
+    Examples
+    --------
+    >>> s = 'hello world'
+    >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+    array(['w', 'o', 'r', 'l', 'd'],
+          dtype='|S1')
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'concatenate',
+    """
+    concatenate((a1, a2, ...), axis=0)
+
+    Join a sequence of arrays together.
+
+    Parameters
+    ----------
+    a1, a2, ... : sequence of array_like
+        The arrays must have the same shape, except in the dimension
+        corresponding to `axis` (the first, by default).
+    axis : int, optional
+        The axis along which the arrays will be joined.  Default is 0.
+
+    Returns
+    -------
+    res : ndarray
+        The concatenated array.
+
+    See Also
+    --------
+    ma.concatenate : Concatenate function that preserves input masks.
+    array_split : Split an array into multiple sub-arrays of equal or
+                  near-equal size.
+    split : Split array into a list of multiple sub-arrays of equal size.
+    hsplit : Split array into multiple sub-arrays horizontally (column wise)
+    vsplit : Split array into multiple sub-arrays vertically (row wise)
+    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+    hstack : Stack arrays in sequence horizontally (column wise)
+    vstack : Stack arrays in sequence vertically (row wise)
+    dstack : Stack arrays in sequence depth wise (along third dimension)
+
+    Notes
+    -----
+    When one or more of the arrays to be concatenated is a MaskedArray,
+    this function will return a MaskedArray object instead of an ndarray,
+    but the input masks are *not* preserved. In cases where a MaskedArray
+    is expected as input, use the ma.concatenate function from the masked
+    array module instead.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> b = np.array([[5, 6]])
+    >>> np.concatenate((a, b), axis=0)
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+    >>> np.concatenate((a, b.T), axis=1)
+    array([[1, 2, 5],
+           [3, 4, 6]])
+
+    This function will not preserve masking of MaskedArray inputs.
+
+    >>> a = np.ma.arange(3)
+    >>> a[1] = np.ma.masked
+    >>> b = np.arange(2, 5)
+    >>> a
+    masked_array(data = [0 -- 2],
+                 mask = [False  True False],
+           fill_value = 999999)
+    >>> b
+    array([2, 3, 4])
+    >>> np.concatenate([a, b])
+    masked_array(data = [0 1 2 2 3 4],
+                 mask = False,
+           fill_value = 999999)
+    >>> np.ma.concatenate([a, b])
+    masked_array(data = [0 -- 2 2 3 4],
+                 mask = [False  True False False False False],
+           fill_value = 999999)
+
+    """)
+
+add_newdoc('numpy.core', 'inner',
+    """
+    inner(a, b)
+
+    Inner product of two arrays.
+
+    Ordinary inner product of vectors for 1-D arrays (without complex
+    conjugation), in higher dimensions a sum product over the last axes.
+
+    Parameters
+    ----------
+    a, b : array_like
+        If `a` and `b` are nonscalar, their last dimensions must match.
+
+    Returns
+    -------
+    out : ndarray
+        `out.shape = a.shape[:-1] + b.shape[:-1]`
+
+    Raises
+    ------
+    ValueError
+        If the last dimension of `a` and `b` has different size.
+
+    See Also
+    --------
+    tensordot : Sum products over arbitrary axes.
+    dot : Generalised matrix product, using second last dimension of `b`.
+    einsum : Einstein summation convention.
+
+    Notes
+    -----
+    For vectors (1-D arrays) it computes the ordinary inner-product::
+
+        np.inner(a, b) = sum(a[:]*b[:])
+
+    More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
+
+        np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
+
+    or explicitly::
+
+        np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
+             = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
+
+    In addition `a` or `b` may be scalars, in which case::
+
+       np.inner(a,b) = a*b
+
+    Examples
+    --------
+    Ordinary inner product for vectors:
+
+    >>> a = np.array([1,2,3])
+    >>> b = np.array([0,1,0])
+    >>> np.inner(a, b)
+    2
+
+    A multidimensional example:
+
+    >>> a = np.arange(24).reshape((2,3,4))
+    >>> b = np.arange(4)
+    >>> np.inner(a, b)
+    array([[ 14,  38,  62],
+           [ 86, 110, 134]])
+
+    An example where `b` is a scalar:
+
+    >>> np.inner(np.eye(2), 7)
+    array([[ 7.,  0.],
+           [ 0.,  7.]])
+
+    """)
+
+add_newdoc('numpy.core', 'fastCopyAndTranspose',
+    """_fastCopyAndTranspose(a)""")
+
+add_newdoc('numpy.core.multiarray', 'correlate',
+    """cross_correlate(a,v, mode=0)""")
+
+add_newdoc('numpy.core.multiarray', 'arange',
+    """
+    arange([start,] stop[, step,], dtype=None)
+
+    Return evenly spaced values within a given interval.
+
+    Values are generated within the half-open interval ``[start, stop)``
+    (in other words, the interval including `start` but excluding `stop`).
+    For integer arguments the function is equivalent to the Python built-in
+    `range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
+    but returns an ndarray rather than a list.
+
+    When using a non-integer step, such as 0.1, the results will often not
+    be consistent.
It is better to use ``linspace`` for these cases. + + Parameters + ---------- + start : number, optional + Start of interval. The interval includes this value. The default + start value is 0. + stop : number + End of interval. The interval does not include this value, except + in some cases where `step` is not an integer and floating point + round-off affects the length of `out`. + step : number, optional + Spacing between values. For any output `out`, this is the distance + between two adjacent values, ``out[i+1] - out[i]``. The default + step size is 1. If `step` is specified, `start` must also be given. + dtype : dtype + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + + Returns + ------- + arange : ndarray + Array of evenly spaced values. + + For floating point arguments, the length of the result is + ``ceil((stop - start)/step)``. Because of floating point overflow, + this rule may result in the last element of `out` being greater + than `stop`. + + See Also + -------- + linspace : Evenly spaced numbers with careful handling of endpoints. + ogrid: Arrays of evenly spaced numbers in N-dimensions. + mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. + + Examples + -------- + >>> np.arange(3) + array([0, 1, 2]) + >>> np.arange(3.0) + array([ 0., 1., 2.]) + >>> np.arange(3,7) + array([3, 4, 5, 6]) + >>> np.arange(3,7,2) + array([3, 5]) + + """) + +add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', + """_get_ndarray_c_version() + + Return the compile time NDARRAY_VERSION number. + + """) + +add_newdoc('numpy.core.multiarray', '_reconstruct', + """_reconstruct(subtype, shape, dtype) + + Construct an empty array. Used by Pickles. + + """) + + +add_newdoc('numpy.core.multiarray', 'set_string_function', + """ + set_string_function(f, repr=1) + + Internal method to set a function to be used when pretty printing arrays. + + """) + +add_newdoc('numpy.core.multiarray', 'set_numeric_ops', + """ + set_numeric_ops(op1=func1, op2=func2, ...) + + Set numerical operators for array objects. + + Parameters + ---------- + op1, op2, ... : callable + Each ``op = func`` pair describes an operator to be replaced. + For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace + addition by modulus 5 addition. + + Returns + ------- + saved_ops : list of callables + A list of all operators, stored before making replacements. + + Notes + ----- + .. WARNING:: + Use with care! Incorrect usage may lead to memory errors. + + A function replacing an operator cannot make use of that operator. + For example, when replacing add, you may not use ``+``. Instead, + directly call ufuncs. + + Examples + -------- + >>> def add_mod5(x, y): + ... return np.add(x, y) % 5 + ... + >>> old_funcs = np.set_numeric_ops(add=add_mod5) + + >>> x = np.arange(12).reshape((3, 4)) + >>> x + x + array([[0, 2, 4, 1], + [3, 0, 2, 4], + [1, 3, 0, 2]]) + + >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators + + """) + +add_newdoc('numpy.core.multiarray', 'where', + """ + where(condition, [x, y]) + + Return elements, either from `x` or `y`, depending on `condition`. + + If only `condition` is given, return ``condition.nonzero()``. + + Parameters + ---------- + condition : array_like, bool + When True, yield `x`, otherwise yield `y`. + x, y : array_like, optional + Values from which to choose. `x` and `y` need to have the same + shape as `condition`. 
+ + Returns + ------- + out : ndarray or tuple of ndarrays + If both `x` and `y` are specified, the output array contains + elements of `x` where `condition` is True, and elements from + `y` elsewhere. + + If only `condition` is given, return the tuple + ``condition.nonzero()``, the indices where `condition` is True. + + See Also + -------- + nonzero, choose + + Notes + ----- + If `x` and `y` are given and input arrays are 1-D, `where` is + equivalent to:: + + [xv if c else yv for (c,xv,yv) in zip(condition,x,y)] + + Examples + -------- + >>> np.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + array([[1, 8], + [3, 4]]) + + >>> np.where([[0, 1], [1, 0]]) + (array([0, 1]), array([1, 0])) + + >>> x = np.arange(9.).reshape(3, 3) + >>> np.where( x > 5 ) + (array([2, 2, 2]), array([0, 1, 2])) + >>> x[np.where( x > 3.0 )] # Note: result is 1D. + array([ 4., 5., 6., 7., 8.]) + >>> np.where(x < 5, x, -1) # Note: broadcasting. + array([[ 0., 1., 2.], + [ 3., 4., -1.], + [-1., -1., -1.]]) + + Find the indices of elements of `x` that are in `goodvalues`. + + >>> goodvalues = [3, 4, 7] + >>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape) + >>> ix + array([[False, False, False], + [ True, True, False], + [False, True, False]], dtype=bool) + >>> np.where(ix) + (array([1, 1, 2]), array([0, 1, 1])) + + """) + + +add_newdoc('numpy.core.multiarray', 'lexsort', + """ + lexsort(keys, axis=-1) + + Perform an indirect sort using a sequence of keys. + + Given multiple sorting keys, which can be interpreted as columns in a + spreadsheet, lexsort returns an array of integer indices that describes + the sort order by multiple columns. The last key in the sequence is used + for the primary sort order, the second-to-last key for the secondary sort + order, and so on. The keys argument must be a sequence of objects that + can be converted to arrays of the same shape. If a 2D array is provided + for the keys argument, it's rows are interpreted as the sorting keys and + sorting is according to the last row, second last row etc. + + Parameters + ---------- + keys : (k, N) array or tuple containing k (N,)-shaped sequences + The `k` different "columns" to be sorted. The last column (or row if + `keys` is a 2D array) is the primary sort key. + axis : int, optional + Axis to be indirectly sorted. By default, sort over the last axis. + + Returns + ------- + indices : (N,) ndarray of ints + Array of indices that sort the keys along the specified axis. + + See Also + -------- + argsort : Indirect sort. + ndarray.sort : In-place sort. + sort : Return a sorted copy of an array. + + Examples + -------- + Sort names: first by surname, then by name. + + >>> surnames = ('Hertz', 'Galilei', 'Hertz') + >>> first_names = ('Heinrich', 'Galileo', 'Gustav') + >>> ind = np.lexsort((first_names, surnames)) + >>> ind + array([1, 2, 0]) + + >>> [surnames[i] + ", " + first_names[i] for i in ind] + ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] + + Sort two columns of numbers: + + >>> a = [1,5,1,4,3,4,4] # First column + >>> b = [9,4,0,4,0,2,1] # Second column + >>> ind = np.lexsort((b,a)) # Sort by a, then by b + >>> print ind + [2 0 4 6 5 3 1] + + >>> [(a[i],b[i]) for i in ind] + [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] + + Note that sorting is first according to the elements of ``a``. + Secondary sorting is according to the elements of ``b``. 
+
+    A normal ``argsort`` would have yielded:
+
+    >>> [(a[i],b[i]) for i in np.argsort(a)]
+    [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
+
+    Structured arrays are sorted lexically by ``argsort``:
+
+    >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
+    ...              dtype=np.dtype([('x', int), ('y', int)]))
+
+    >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
+    array([2, 0, 4, 6, 5, 3, 1])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'can_cast',
+    """
+    can_cast(from, totype, casting = 'safe')
+
+    Returns True if cast between data types can occur according to the
+    casting rule.  If from is a scalar or array scalar, also returns
+    True if the scalar value can be cast without overflow or truncation
+    to an integer.
+
+    Parameters
+    ----------
+    from : dtype, dtype specifier, scalar, or array
+        Data type, scalar, or array to cast from.
+    totype : dtype or dtype specifier
+        Data type to cast to.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+
+    Returns
+    -------
+    out : bool
+        True if cast can occur according to the casting rule.
+
+    Notes
+    -----
+    Starting in NumPy 1.9, can_cast function now returns False in 'safe'
+    casting mode for integer/float dtype and string dtype if the string dtype
+    length is not long enough to store the max integer/float value converted
+    to a string. Previously can_cast in 'safe' mode returned True for
+    integer/float dtype and a string dtype of any length.
+
+    See also
+    --------
+    dtype, result_type
+
+    Examples
+    --------
+    Basic examples
+
+    >>> np.can_cast(np.int32, np.int64)
+    True
+    >>> np.can_cast(np.float64, np.complex)
+    True
+    >>> np.can_cast(np.complex, np.float)
+    False
+
+    >>> np.can_cast('i8', 'f8')
+    True
+    >>> np.can_cast('i8', 'f4')
+    False
+    >>> np.can_cast('i4', 'S4')
+    False
+
+    Casting scalars
+
+    >>> np.can_cast(100, 'i1')
+    True
+    >>> np.can_cast(150, 'i1')
+    False
+    >>> np.can_cast(150, 'u1')
+    True
+
+    >>> np.can_cast(3.5e100, np.float32)
+    False
+    >>> np.can_cast(1000.0, np.float32)
+    True
+
+    Array scalar checks the value, array does not
+
+    >>> np.can_cast(np.array(1000.0), np.float32)
+    True
+    >>> np.can_cast(np.array([1000.0]), np.float32)
+    False
+
+    Using the casting rules
+
+    >>> np.can_cast('i8', 'i8', 'no')
+    True
+    >>> np.can_cast('<i8', '>i8', 'no')
+    False
+
+    >>> np.can_cast('<i8', '>i8', 'equiv')
+    True
+    >>> np.can_cast('<i4', '>i8', 'equiv')
+    False
+
+    >>> np.can_cast('<i4', '>i8', 'safe')
+    True
+    >>> np.can_cast('<i8', '>i4', 'safe')
+    False
+
+    >>> np.can_cast('<i8', '>i4', 'same_kind')
+    True
+    >>> np.can_cast('<i8', '>u4', 'same_kind')
+    False
+
+    >>> np.can_cast('<i8', '>u4', 'unsafe')
+    True
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'promote_types',
+    """
+    promote_types(type1, type2)
+
+    Returns the data type with the smallest size and smallest scalar
+    kind to which both ``type1`` and ``type2`` may be safely cast.
+    The returned data type is always in native byte order.
+
+    This function is symmetric and associative.
+
+    Parameters
+    ----------
+    type1 : dtype or dtype specifier
+        First data type.
+    type2 : dtype or dtype specifier
+        Second data type.
+
+    Returns
+    -------
+    out : dtype
+        The promoted data type.
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    Starting in NumPy 1.9, promote_types function now returns a valid string
+    length when given an integer or float dtype as one argument and a string
+    dtype as another argument. Previously it always returned the input string
+    dtype, even if it wasn't long enough to store the max integer/float value
+    converted to a string.
+
+    See Also
+    --------
+    result_type, dtype, can_cast
+
+    Examples
+    --------
+    >>> np.promote_types('f4', 'f8')
+    dtype('float64')
+
+    >>> np.promote_types('i8', 'f4')
+    dtype('float64')
+
+    >>> np.promote_types('>i8', '<c8')
+    dtype('complex128')
+
+    >>> np.promote_types('i4', 'S8')
+    dtype('S11')
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'min_scalar_type',
+    """
+    min_scalar_type(a)
+
+    For scalar ``a``, returns the data type with the smallest size
+    and smallest scalar kind which can hold its value.  For non-scalar
+    array ``a``, returns the vector's dtype unmodified.
+
+    Floating point values are not demoted to integers,
+    and complex values are not demoted to floats.
+
+    Parameters
+    ----------
+    a : scalar or array_like
+        The value whose minimal data type is to be found.
+
+    Returns
+    -------
+    out : dtype
+        The minimal data type.
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    See Also
+    --------
+    result_type, promote_types, dtype, can_cast
+
+    Examples
+    --------
+    >>> np.min_scalar_type(10)
+    dtype('uint8')
+
+    >>> np.min_scalar_type(-260)
+    dtype('int16')
+
+    >>> np.min_scalar_type(3.1)
+    dtype('float16')
+
+    >>> np.min_scalar_type(1e50)
+    dtype('float64')
+
+    >>> np.min_scalar_type(np.arange(4,dtype='f8'))
+    dtype('float64')
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'result_type',
+    """
+    result_type(*arrays_and_dtypes)
+
+    Returns the type that results from applying the NumPy
+    type promotion rules to the arguments.
+
+    Type promotion in NumPy works similarly to the rules in languages
+    like C++, with some slight differences.  When both scalars and
+    arrays are used, the array's type takes precedence and the actual value
+    of the scalar is taken into account.
+
+    For example, calculating 3*a, where a is an array of 32-bit floats,
+    intuitively should result in a 32-bit float output.  If the 3 is a
+    32-bit integer, the NumPy rules indicate it can't convert losslessly
+    into a 32-bit float, so a 64-bit float should be the result type.
+    By examining the value of the constant, '3', we see that it fits in
+    an 8-bit integer, which can be cast losslessly into the 32-bit float.
+
+    Parameters
+    ----------
+    arrays_and_dtypes : list of arrays and dtypes
+        The operands of some operation whose result type is needed.
+
+    Returns
+    -------
+    out : dtype
+        The result type.
+
+    See also
+    --------
+    dtype, promote_types, min_scalar_type, can_cast
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The specific algorithm used is as follows.
+
+    Categories are determined by first checking which of boolean,
+    integer (int/uint), or floating point (float/complex) the maximum
+    kind of all the arrays and the scalars are.
+
+    If there are only scalars or the maximum category of the scalars
+    is higher than the maximum category of the arrays,
+    the data types are combined with :func:`promote_types`
+    to produce the return value.
+
+    Otherwise, `min_scalar_type` is called on each array, and
+    the resulting data types are all combined with :func:`promote_types`
+    to produce the return value.
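+    As an added illustration of the two branches above (not from the
+    original notes; the int64 result assumes a platform whose default
+    integer is 64-bit)::
+
+        >>> np.result_type(3, np.arange(7, dtype='i1'))  # scalar: value is examined
+        dtype('int8')
+        >>> np.result_type(np.array([3]), np.arange(7, dtype='i1'))  # arrays: dtypes promote
+        dtype('int64')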
+ + The set of int values is not a subset of the uint values for types + with the same number of bits, something not reflected in + :func:`min_scalar_type`, but handled as a special case in `result_type`. + + Examples + -------- + >>> np.result_type(3, np.arange(7, dtype='i1')) + dtype('int8') + + >>> np.result_type('i4', 'c8') + dtype('complex128') + + >>> np.result_type(3.0, -2) + dtype('float64') + + """) + +add_newdoc('numpy.core.multiarray', 'newbuffer', + """ + newbuffer(size) + + Return a new uninitialized buffer object. + + Parameters + ---------- + size : int + Size in bytes of returned buffer object. + + Returns + ------- + newbuffer : buffer object + Returned, uninitialized buffer object of `size` bytes. + + """) + +add_newdoc('numpy.core.multiarray', 'getbuffer', + """ + getbuffer(obj [,offset[, size]]) + + Create a buffer object from the given object referencing a slice of + length size starting at offset. + + Default is the entire buffer. A read-write buffer is attempted followed + by a read-only buffer. + + Parameters + ---------- + obj : object + + offset : int, optional + + size : int, optional + + Returns + ------- + buffer_obj : buffer + + Examples + -------- + >>> buf = np.getbuffer(np.ones(5), 1, 3) + >>> len(buf) + 3 + >>> buf[0] + '\\x00' + >>> buf + + + """) + +add_newdoc('numpy.core', 'dot', + """ + dot(a, b, out=None) + + Dot product of two arrays. + + For 2-D arrays it is equivalent to matrix multiplication, and for 1-D + arrays to inner product of vectors (without complex conjugation). For + N dimensions it is a sum product over the last axis of `a` and + the second-to-last of `b`:: + + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) + + Parameters + ---------- + a : array_like + First argument. + b : array_like + Second argument. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of `a` and `b`. If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + If `out` is given, then it is returned. + + Raises + ------ + ValueError + If the last dimension of `a` is not the same size as + the second-to-last dimension of `b`. + + See Also + -------- + vdot : Complex-conjugating dot product. + tensordot : Sum products over arbitrary axes. + einsum : Einstein summation convention. + + Examples + -------- + >>> np.dot(3, 4) + 12 + + Neither argument is complex-conjugated: + + >>> np.dot([2j, 3j], [2j, 3j]) + (-13+0j) + + For 2-D arrays it's the matrix product: + + >>> a = [[1, 0], [0, 1]] + >>> b = [[4, 1], [2, 2]] + >>> np.dot(a, b) + array([[4, 1], + [2, 2]]) + + >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) + >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) + >>> np.dot(a, b)[2,3,2,1,2,2] + 499128 + >>> sum(a[2,3,2,:] * b[1,2,:,2]) + 499128 + + """) + +add_newdoc('numpy.core', 'einsum', + """ + einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe') + + Evaluates the Einstein summation convention on the operands. + + Using the Einstein summation convention, many common multi-dimensional + array operations can be represented in a simple fashion. 
+    This function
+    provides a way to compute such summations. The best way to understand
+    this function is to try the examples below, which show how many common
+    NumPy functions can be implemented as calls to `einsum`.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation.
+    operands : list of array_like
+        These are the arrays for the operation.
+    out : ndarray, optional
+        If provided, the calculation is done into this array.
+    dtype : data-type, optional
+        If provided, forces the calculation to use the data type specified.
+        Note that you may have to also give a more liberal `casting`
+        parameter to allow the conversions.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the output. 'C' means it should
+        be C contiguous. 'F' means it should be Fortran contiguous,
+        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+        'K' means it should be as close to the layout of the inputs as
+        possible, including arbitrarily permuted axes.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur.  Setting this to
+        'unsafe' is not recommended, as it can adversely affect accumulations.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+
+    Returns
+    -------
+    output : ndarray
+        The calculation based on the Einstein summation convention.
+
+    See Also
+    --------
+    dot, inner, outer, tensordot
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The subscripts string is a comma-separated list of subscript labels,
+    where each label refers to a dimension of the corresponding operand.
+    Repeated subscript labels in one operand take the diagonal.  For example,
+    ``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
+
+    Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to ``np.inner(a,b)``.  If a label appears only once,
+    it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
+    with no changes.
+
+    The order of labels in the output is by default alphabetical.  This
+    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+    ``np.einsum('ji', a)`` takes its transpose.
+
+    The output can be controlled by specifying output subscript labels
+    as well.  This specifies the label order, and allows summing to
+    be disallowed or forced when desired.  The call ``np.einsum('i->', a)``
+    is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
+    is like ``np.diag(a)``.  The difference is that `einsum` does not
+    allow broadcasting by default.
+
+    To enable and control broadcasting, use an ellipsis.  Default
+    NumPy-style broadcasting is done by adding an ellipsis
+    to the left of each term, like ``np.einsum('...ii->...i', a)``.
+    To take the trace along the first and last axes,
+    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+    product with the left-most indices instead of rightmost, you can do
+    ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+    When there is only one operand, no axes are summed, and no output
+    parameter is provided, a view into the operand is returned instead
+    of a new array.  Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+    produces a view.
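+    For example (an added illustration, not part of the original text),
+    because ``np.einsum('ii->i', a)`` returns a view, assigning to it
+    modifies the diagonal of ``a`` in place::
+
+        >>> a = np.arange(9).reshape(3, 3)
+        >>> d = np.einsum('ii->i', a)
+        >>> d[:] = 0
+        >>> a
+        array([[0, 1, 2],
+               [3, 0, 5],
+               [6, 7, 0]])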
+ + An alternative way to provide the subscripts and operands is as + ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples + below have corresponding `einsum` calls with the two parameter methods. + + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> c.T + array([[0, 3], + [1, 4], + [2, 5]]) + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + >>> np.einsum('i...->...', a) + array([50, 55, 60, 65, 70]) + >>> np.einsum(a, [0,Ellipsis], [Ellipsis]) + array([50, 55, 60, 65, 70]) + >>> np.sum(a, axis=0) + array([50, 55, 60, 65, 70]) + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + """) + +add_newdoc('numpy.core', 'alterdot', + """ + Change `dot`, `vdot`, and `inner` to use accelerated BLAS functions. + + Typically, as a user of Numpy, you do not explicitly call this function. If + Numpy is built with an accelerated BLAS, this function is automatically + called when Numpy is imported. + + When Numpy is built with an accelerated BLAS like ATLAS, these functions + are replaced to make use of the faster implementations. The faster + implementations only affect float32, float64, complex64, and complex128 + arrays. Furthermore, the BLAS API only includes matrix-matrix, + matrix-vector, and vector-vector products. Products of arrays with larger + dimensionalities use the built in functions and are not accelerated. + + See Also + -------- + restoredot : `restoredot` undoes the effects of `alterdot`. 
+ + """) + +add_newdoc('numpy.core', 'restoredot', + """ + Restore `dot`, `vdot`, and `innerproduct` to the default non-BLAS + implementations. + + Typically, the user will only need to call this when troubleshooting and + installation problem, reproducing the conditions of a build without an + accelerated BLAS, or when being very careful about benchmarking linear + algebra operations. + + See Also + -------- + alterdot : `restoredot` undoes the effects of `alterdot`. + + """) + +add_newdoc('numpy.core', 'vdot', + """ + vdot(a, b) + + Return the dot product of two vectors. + + The vdot(`a`, `b`) function handles complex numbers differently than + dot(`a`, `b`). If the first argument is complex the complex conjugate + of the first argument is used for the calculation of the dot product. + + Note that `vdot` handles multidimensional arrays differently than `dot`: + it does *not* perform a matrix product, but flattens input arguments + to 1-D vectors first. Consequently, it should only be used for vectors. + + Parameters + ---------- + a : array_like + If `a` is complex the complex conjugate is taken before calculation + of the dot product. + b : array_like + Second argument to the dot product. + + Returns + ------- + output : ndarray + Dot product of `a` and `b`. Can be an int, float, or + complex depending on the types of `a` and `b`. + + See Also + -------- + dot : Return the dot product without using the complex conjugate of the + first argument. + + Examples + -------- + >>> a = np.array([1+2j,3+4j]) + >>> b = np.array([5+6j,7+8j]) + >>> np.vdot(a, b) + (70-8j) + >>> np.vdot(b, a) + (70+8j) + + Note that higher-dimensional arrays are flattened! + + >>> a = np.array([[1, 4], [5, 6]]) + >>> b = np.array([[4, 1], [2, 2]]) + >>> np.vdot(a, b) + 30 + >>> np.vdot(b, a) + 30 + >>> 1*4 + 4*1 + 5*2 + 6*2 + 30 + + """) + + +############################################################################## +# +# Documentation for ndarray attributes and methods +# +############################################################################## + + +############################################################################## +# +# ndarray object +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', + """ + ndarray(shape, dtype=float, buffer=None, offset=0, + strides=None, order=None) + + An array object represents a multidimensional, homogeneous array + of fixed-size items. An associated data-type object describes the + format of each element in the array (its byte-order, how many bytes it + occupies in memory, whether it is an integer, a floating point number, + or something else, etc.) + + Arrays should be constructed using `array`, `zeros` or `empty` (refer + to the See Also section below). The parameters given here refer to + a low-level method (`ndarray(...)`) for instantiating an array. + + For more information, refer to the `numpy` module and examine the + the methods and attributes of an array. + + Parameters + ---------- + (for the __new__ method; see Notes below) + + shape : tuple of ints + Shape of created array. + dtype : data-type, optional + Any object that can be interpreted as a numpy data type. + buffer : object exposing buffer interface, optional + Used to fill the array with data. + offset : int, optional + Offset of array data in buffer. + strides : tuple of ints, optional + Strides of data in memory. + order : {'C', 'F'}, optional + Row-major or column-major order. 
+ + Attributes + ---------- + T : ndarray + Transpose of the array. + data : buffer + The array's elements, in memory. + dtype : dtype object + Describes the format of the elements in the array. + flags : dict + Dictionary containing information related to memory use, e.g., + 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. + flat : numpy.flatiter object + Flattened version of the array as an iterator. The iterator + allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for + assignment examples; TODO). + imag : ndarray + Imaginary part of the array. + real : ndarray + Real part of the array. + size : int + Number of elements in the array. + itemsize : int + The memory use of each array element in bytes. + nbytes : int + The total number of bytes required to store the array data, + i.e., ``itemsize * size``. + ndim : int + The array's number of dimensions. + shape : tuple of ints + Shape of the array. + strides : tuple of ints + The step-size required to move from one element to the next in + memory. For example, a contiguous ``(3, 4)`` array of type + ``int16`` in C-order has strides ``(8, 2)``. This implies that + to move from element to element in memory requires jumps of 2 bytes. + To move from row-to-row, one needs to jump 8 bytes at a time + (``2 * 4``). + ctypes : ctypes object + Class containing properties of the array needed for interaction + with ctypes. + base : ndarray + If the array is a view into another array, that array is its `base` + (unless that array is also a view). The `base` array is where the + array data is actually stored. + + See Also + -------- + array : Construct an array. + zeros : Create an array, each element of which is zero. + empty : Create an array, but leave its allocated memory unchanged (i.e., + it contains "garbage"). + dtype : Create a data-type. + + Notes + ----- + There are two modes of creating an array using ``__new__``: + + 1. If `buffer` is None, then only `shape`, `dtype`, and `order` + are used. + 2. If `buffer` is an object exposing the buffer interface, then + all keywords are interpreted. + + No ``__init__`` method is needed because the array is fully initialized + after the ``__new__`` method. + + Examples + -------- + These examples illustrate the low-level `ndarray` constructor. Refer + to the `See Also` section above for easier ways of constructing an + ndarray. + + First mode, `buffer` is None: + + >>> np.ndarray(shape=(2,2), dtype=float, order='F') + array([[ -1.13698227e+002, 4.25087011e-303], + [ 2.88528414e-306, 3.27025015e-309]]) #random + + Second mode: + + >>> np.ndarray((2,), buffer=np.array([1,2,3]), + ... offset=np.int_().itemsize, + ... dtype=int) # offset = 1*itemsize, i.e. 
skip first element + array([2, 3]) + + """) + + +############################################################################## +# +# ndarray attributes +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', + """Array protocol: Python side.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', + """None.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', + """Array priority.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', + """Array protocol: C-struct side.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_', + """Allow the array to be interpreted as a ctypes object by returning the + data-memory location as an integer + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('base', + """ + Base object if memory is from some other object. + + Examples + -------- + The base of an array that owns its memory is None: + + >>> x = np.array([1,2,3,4]) + >>> x.base is None + True + + Slicing creates a view, whose memory is shared with x: + + >>> y = x[2:] + >>> y.base is x + True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', + """ + An object to simplify the interaction of the array with the ctypes + module. + + This attribute creates an object that makes it easier to use arrays + when calling shared libraries with the ctypes module. The returned + object has, among others, data, shape, and strides attributes (see + Notes below) which themselves return ctypes objects that can be used + as arguments to a shared library. + + Parameters + ---------- + None + + Returns + ------- + c : Python object + Possessing attributes data, shape, strides, etc. + + See Also + -------- + numpy.ctypeslib + + Notes + ----- + Below are the public attributes of this object which were documented + in "Guide to NumPy" (we have omitted undocumented public attributes, + as well as documented private attributes): + + * data: A pointer to the memory area of the array as a Python integer. + This memory area may contain data that is not aligned, or not in correct + byte-order. The memory area may not even be writeable. The array + flags and data-type of this array should be respected when passing this + attribute to arbitrary C-code to avoid trouble that can include Python + crashing. User Beware! The value of this attribute is exactly the same + as self._array_interface_['data'][0]. + + * shape (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the C-integer corresponding to dtype('p') on this + platform. This base-type could be c_int, c_long, or c_longlong + depending on the platform. The c_intp type is defined accordingly in + numpy.ctypeslib. The ctypes array contains the shape of the underlying + array. + + * strides (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the same as for the shape attribute. This ctypes array + contains the strides information from the underlying array. This strides + information is important for showing how many bytes must be jumped to + get to the next element in the array. + + * data_as(obj): Return the data pointer cast to a particular c-types object. + For example, calling self._as_parameter_ is equivalent to + self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a + pointer to a ctypes array of floating-point data: + self.data_as(ctypes.POINTER(ctypes.c_double)). 
+ + * shape_as(obj): Return the shape tuple as an array of some other c-types + type. For example: self.shape_as(ctypes.c_short). + + * strides_as(obj): Return the strides tuple as an array of some other + c-types type. For example: self.strides_as(ctypes.c_longlong). + + Be careful using the ctypes attribute - especially on temporary + arrays or arrays constructed on the fly. For example, calling + ``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory + that is invalid because the array created as (a+b) is deallocated + before the next Python statement. You can avoid this problem using + either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will + hold a reference to the array until ct is deleted or re-assigned. + + If the ctypes module is not available, then the ctypes attribute + of array objects still returns something useful, but ctypes objects + are not returned and errors may be raised instead. In particular, + the object will still have the as parameter attribute which will + return an integer equal to the data attribute. + + Examples + -------- + >>> import ctypes + >>> x + array([[0, 1], + [2, 3]]) + >>> x.ctypes.data + 30439712 + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)) + + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents + c_long(0) + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents + c_longlong(4294967296L) + >>> x.ctypes.shape + + >>> x.ctypes.shape_as(ctypes.c_long) + + >>> x.ctypes.strides + + >>> x.ctypes.strides_as(ctypes.c_longlong) + + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('data', + """Python buffer object pointing to the start of the array's data.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', + """ + Data-type of the array's elements. + + Parameters + ---------- + None + + Returns + ------- + d : numpy dtype object + + See Also + -------- + numpy.dtype + + Examples + -------- + >>> x + array([[0, 1], + [2, 3]]) + >>> x.dtype + dtype('int32') + >>> type(x.dtype) + + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', + """ + The imaginary part of the array. + + Examples + -------- + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.imag + array([ 0. , 0.70710678]) + >>> x.imag.dtype + dtype('float64') + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', + """ + Length of one array element in bytes. + + Examples + -------- + >>> x = np.array([1,2,3], dtype=np.float64) + >>> x.itemsize + 8 + >>> x = np.array([1,2,3], dtype=np.complex128) + >>> x.itemsize + 16 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', + """ + Information about the memory layout of the array. + + Attributes + ---------- + C_CONTIGUOUS (C) + The data is in a single, C-style contiguous segment. + F_CONTIGUOUS (F) + The data is in a single, Fortran-style contiguous segment. + OWNDATA (O) + The array owns the memory it uses or borrows it from another object. + WRITEABLE (W) + The data area can be written to. Setting this to False locks + the data, making it read-only. A view (slice, etc.) inherits WRITEABLE + from its base array at creation time, but a view of a writeable + array may be subsequently locked while the base array remains writeable. + (The opposite is not true, in that a view of a locked array may not + be made writeable. 
However, currently, locking a base object does not + lock any views that already reference it, so under that circumstance it + is possible to alter the contents of a locked array via a previously + created writeable view onto it.) Attempting to change a non-writeable + array raises a RuntimeError exception. + ALIGNED (A) + The data and all elements are aligned appropriately for the hardware. + UPDATEIFCOPY (U) + This array is a copy of some other array. When this array is + deallocated, the base array will be updated with the contents of + this array. + FNC + F_CONTIGUOUS and not C_CONTIGUOUS. + FORC + F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). + BEHAVED (B) + ALIGNED and WRITEABLE. + CARRAY (CA) + BEHAVED and C_CONTIGUOUS. + FARRAY (FA) + BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. + + Notes + ----- + The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), + or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag + names are only supported in dictionary access. + + Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by + the user, via direct assignment to the attribute or dictionary entry, + or by calling `ndarray.setflags`. + + The array flags cannot be set arbitrarily: + + - UPDATEIFCOPY can only be set ``False``. + - ALIGNED can only be set ``True`` if the data is truly aligned. + - WRITEABLE can only be set ``True`` if the array owns its own memory + or the ultimate owner of the memory exposes a writeable buffer + interface or is a string. + + Arrays can be both C-style and Fortran-style contiguous simultaneously. + This is clear for 1-dimensional arrays, but can also be true for higher + dimensional arrays. + + Even for contiguous arrays a stride for a given dimension + ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` + or the array has no elements. + It does *not* generally hold that ``self.strides[-1] == self.itemsize`` + for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for + Fortran-style contiguous arrays is true. + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', + """ + A 1-D iterator over the array. + + This is a `numpy.flatiter` instance, which acts similarly to, but is not + a subclass of, Python's built-in iterator object. + + See Also + -------- + flatten : Return a copy of the array collapsed into one dimension. + + flatiter + + Examples + -------- + >>> x = np.arange(1, 7).reshape(2, 3) + >>> x + array([[1, 2, 3], + [4, 5, 6]]) + >>> x.flat[3] + 4 + >>> x.T + array([[1, 4], + [2, 5], + [3, 6]]) + >>> x.T.flat[3] + 5 + >>> type(x.flat) + + + An assignment example: + + >>> x.flat = 3; x + array([[3, 3, 3], + [3, 3, 3]]) + >>> x.flat[[1,4]] = 1; x + array([[3, 1, 3], + [3, 1, 3]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', + """ + Total bytes consumed by the elements of the array. + + Notes + ----- + Does not include memory consumed by non-element attributes of the + array object. + + Examples + -------- + >>> x = np.zeros((3,5,2), dtype=np.complex128) + >>> x.nbytes + 480 + >>> np.prod(x.shape) * x.itemsize + 480 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', + """ + Number of array dimensions. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> x.ndim + 1 + >>> y = np.zeros((2, 3, 4)) + >>> y.ndim + 3 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('real', + """ + The real part of the array. 
+ + Examples + -------- + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.real + array([ 1. , 0.70710678]) + >>> x.real.dtype + dtype('float64') + + See Also + -------- + numpy.real : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', + """ + Tuple of array dimensions. + + Notes + ----- + May be used to "reshape" the array, as long as this would not + require a change in the total number of elements + + Examples + -------- + >>> x = np.array([1, 2, 3, 4]) + >>> x.shape + (4,) + >>> y = np.zeros((2, 3, 4)) + >>> y.shape + (2, 3, 4) + >>> y.shape = (3, 8) + >>> y + array([[ 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0.]]) + >>> y.shape = (3, 6) + Traceback (most recent call last): + File "", line 1, in + ValueError: total size of new array must be unchanged + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('size', + """ + Number of elements in the array. + + Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's + dimensions. + + Examples + -------- + >>> x = np.zeros((3, 5, 2), dtype=np.complex128) + >>> x.size + 30 + >>> np.prod(x.shape) + 30 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', + """ + Tuple of bytes to step in each dimension when traversing an array. + + The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` + is:: + + offset = sum(np.array(i) * a.strides) + + A more detailed explanation of strides can be found in the + "ndarray.rst" file in the NumPy reference guide. + + Notes + ----- + Imagine an array of 32-bit integers (each 4 bytes):: + + x = np.array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]], dtype=np.int32) + + This array is stored in memory as 40 bytes, one after the other + (known as a contiguous block of memory). The strides of an array tell + us how many bytes we have to skip in memory to move to the next position + along a certain axis. For example, we have to skip 4 bytes (1 value) to + move to the next column, but 20 bytes (5 values) to get to the same + position in the next row. As such, the strides for the array `x` will be + ``(20, 4)``. + + See Also + -------- + numpy.lib.stride_tricks.as_strided + + Examples + -------- + >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) + >>> y + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + >>> y.strides + (48, 16, 4) + >>> y[1,1,1] + 17 + >>> offset=sum(y.strides * np.array((1,1,1))) + >>> offset/y.itemsize + 17 + + >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) + >>> x.strides + (32, 4, 224, 1344) + >>> i = np.array([3,5,2,2]) + >>> offset = sum(i * x.strides) + >>> x[3,5,2,2] + 813 + >>> offset / x.itemsize + 813 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('T', + """ + Same as self.transpose(), except that self is returned if + self.ndim < 2. + + Examples + -------- + >>> x = np.array([[1.,2.],[3.,4.]]) + >>> x + array([[ 1., 2.], + [ 3., 4.]]) + >>> x.T + array([[ 1., 3.], + [ 2., 4.]]) + >>> x = np.array([1.,2.,3.,4.]) + >>> x + array([ 1., 2., 3., 4.]) + >>> x.T + array([ 1., 2., 3., 4.]) + + """)) + + +############################################################################## +# +# ndarray methods +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', + """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. 
+ + Returns either a new reference to self if dtype is not given or a new array + of provided data type if dtype is different from the current dtype of the + array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', + """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', + """a.__array_wrap__(obj) -> Object of same type as ndarray object a. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', + """a.__copy__([order]) + + Return a copy of the array. + + Parameters + ---------- + order : {'C', 'F', 'A'}, optional + If order is 'C' (False) then the result is contiguous (default). + If order is 'Fortran' (True) then the result has fortran order. + If order is 'Any' (None) then the result has fortran order + only if the array already is in fortran order. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', + """a.__deepcopy__() -> Deep copy of array. + + Used if copy.deepcopy is called on an array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', + """a.__reduce__() + + For pickling. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', + """a.__setstate__(version, shape, dtype, isfortran, rawdata) + + For unpickling. + + Parameters + ---------- + version : int + optional pickle version. If omitted defaults to 0. + shape : tuple + dtype : data-type + isFortran : bool + rawdata : string or list + a binary string with the data (or a list if 'a' is an object array) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('all', + """ + a.all(axis=None, out=None) + + Returns True if all elements evaluate to True. + + Refer to `numpy.all` for full documentation. + + See Also + -------- + numpy.all : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('any', + """ + a.any(axis=None, out=None) + + Returns True if any of the elements of `a` evaluate to True. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.any : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', + """ + a.argmax(axis=None, out=None) + + Return indices of the maximum values along the given axis. + + Refer to `numpy.argmax` for full documentation. + + See Also + -------- + numpy.argmax : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', + """ + a.argmin(axis=None, out=None) + + Return indices of the minimum values along the given axis of `a`. + + Refer to `numpy.argmin` for detailed documentation. + + See Also + -------- + numpy.argmin : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', + """ + a.argsort(axis=-1, kind='quicksort', order=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. + + See Also + -------- + numpy.argsort : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', + """ + a.argpartition(kth, axis=-1, kind='introselect', order=None) + + Returns the indices that would partition this array. + + Refer to `numpy.argpartition` for full documentation. + + .. 
versionadded:: 1.8.0 + + See Also + -------- + numpy.argpartition : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', + """ + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) + + Copy of the array, cast to a specified type. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout order of the result. + 'C' means C order, 'F' means Fortran order, 'A' + means 'F' order if all the arrays are Fortran contiguous, + 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'unsafe' + for backwards compatibility. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + subok : bool, optional + If True, then sub-classes will be passed-through (default), otherwise + the returned array will be forced to be a base-class array. + copy : bool, optional + By default, astype always returns a newly allocated array. If this + is set to false, and the `dtype`, `order`, and `subok` + requirements are satisfied, the input array is returned instead + of a copy. + + Returns + ------- + arr_t : ndarray + Unless `copy` is False and the other conditions for returning the input + array are satisfied (see description for `copy` input paramter), `arr_t` + is a new array of the same shape as the input array, with dtype, order + given by `dtype`, `order`. + + Notes + ----- + Starting in NumPy 1.9, astype method now returns an error if the string + dtype to cast to is not long enough in 'safe' casting mode to hold the max + value of integer/float array that is being casted. Previously the casting + was allowed even if the result was truncated. + + Raises + ------ + ComplexWarning + When casting from complex to float or int. To avoid this, + one should use ``a.real.astype(t)``. + + Examples + -------- + >>> x = np.array([1, 2, 2.5]) + >>> x + array([ 1. , 2. , 2.5]) + + >>> x.astype(int) + array([1, 2, 2]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', + """ + a.byteswap(inplace) + + Swap the bytes of the array elements + + Toggle between low-endian and big-endian data representation by + returning a byteswapped array, optionally swapped in-place. + + Parameters + ---------- + inplace : bool, optional + If ``True``, swap bytes in-place, default is ``False``. + + Returns + ------- + out : ndarray + The byteswapped array. If `inplace` is ``True``, this is + a view to self. + + Examples + -------- + >>> A = np.array([1, 256, 8755], dtype=np.int16) + >>> map(hex, A) + ['0x1', '0x100', '0x2233'] + >>> A.byteswap(True) + array([ 256, 1, 13090], dtype=int16) + >>> map(hex, A) + ['0x100', '0x1', '0x3322'] + + Arrays of strings are not swapped + + >>> A = np.array(['ceg', 'fac']) + >>> A.byteswap() + array(['ceg', 'fac'], + dtype='|S3') + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', + """ + a.choose(choices, out=None, mode='raise') + + Use an index array to construct a new array from a set of choices. 
+ + Refer to `numpy.choose` for full documentation. + + See Also + -------- + numpy.choose : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', + """ + a.clip(a_min, a_max, out=None) + + Return an array whose values are limited to ``[a_min, a_max]``. + + Refer to `numpy.clip` for full documentation. + + See Also + -------- + numpy.clip : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', + """ + a.compress(condition, axis=None, out=None) + + Return selected slices of this array along given axis. + + Refer to `numpy.compress` for full documentation. + + See Also + -------- + numpy.compress : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', + """ + a.conj() + + Complex-conjugate all elements. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', + """ + a.conjugate() + + Return the complex conjugate, element-wise. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', + """ + a.copy(order='C') + + Return a copy of the array. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :func:numpy.copy are very + similar, but have different default values for their order= + arguments.) + + See also + -------- + numpy.copy + numpy.copyto + + Examples + -------- + >>> x = np.array([[1,2,3],[4,5,6]], order='F') + + >>> y = x.copy() + + >>> x.fill(0) + + >>> x + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y + array([[1, 2, 3], + [4, 5, 6]]) + + >>> y.flags['C_CONTIGUOUS'] + True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', + """ + a.cumprod(axis=None, dtype=None, out=None) + + Return the cumulative product of the elements along the given axis. + + Refer to `numpy.cumprod` for full documentation. + + See Also + -------- + numpy.cumprod : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', + """ + a.cumsum(axis=None, dtype=None, out=None) + + Return the cumulative sum of the elements along the given axis. + + Refer to `numpy.cumsum` for full documentation. + + See Also + -------- + numpy.cumsum : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', + """ + a.diagonal(offset=0, axis1=0, axis2=1) + + Return specified diagonals. In NumPy 1.9 the returned array is a + read-only view instead of a copy as in previous NumPy versions. In + NumPy 1.10 the read-only restriction will be removed. + + Refer to :func:`numpy.diagonal` for full documentation. + + See Also + -------- + numpy.diagonal : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', + """ + a.dot(b, out=None) + + Dot product of two arrays. + + Refer to `numpy.dot` for full documentation. 
+ + See Also + -------- + numpy.dot : equivalent function + + Examples + -------- + >>> a = np.eye(2) + >>> b = np.ones((2, 2)) * 2 + >>> a.dot(b) + array([[ 2., 2.], + [ 2., 2.]]) + + This array method can be conveniently chained: + + >>> a.dot(b).dot(b) + array([[ 8., 8.], + [ 8., 8.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', + """a.dump(file) + + Dump a pickle of the array to the specified file. + The array can be read back with pickle.load or numpy.load. + + Parameters + ---------- + file : str + A string naming the dump file. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', + """ + a.dumps() + + Returns the pickle of the array as a string. + pickle.loads or numpy.loads will convert the string back to an array. + + Parameters + ---------- + None + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', + """ + a.fill(value) + + Fill the array with a scalar value. + + Parameters + ---------- + value : scalar + All elements of `a` will be assigned this value. + + Examples + -------- + >>> a = np.array([1, 2]) + >>> a.fill(0) + >>> a + array([0, 0]) + >>> a = np.empty(2) + >>> a.fill(1) + >>> a + array([ 1., 1.]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', + """ + a.flatten(order='C') + + Return a copy of the array collapsed into one dimension. + + Parameters + ---------- + order : {'C', 'F', 'A'}, optional + Whether to flatten in C (row-major), Fortran (column-major) order, + or preserve the C/Fortran ordering from `a`. + The default is 'C'. + + Returns + ------- + y : ndarray + A copy of the input array, flattened to one dimension. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the array. + + Examples + -------- + >>> a = np.array([[1,2], [3,4]]) + >>> a.flatten() + array([1, 2, 3, 4]) + >>> a.flatten('F') + array([1, 3, 2, 4]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', + """ + a.getfield(dtype, offset=0) + + Returns a field of the given array as a certain type. + + A field is a view of the array data with a given data-type. The values in + the view are determined by the given type and the offset into the current + array in bytes. The offset needs to be such that the view dtype fits in the + array dtype; for example an array of dtype complex128 has 16-byte elements. + If taking a view with a 32-bit integer (4 bytes), the offset needs to be + between 0 and 12 bytes. + + Parameters + ---------- + dtype : str or dtype + The data type of the view. The dtype size of the view can not be larger + than that of the array itself. + offset : int + Number of bytes to skip before beginning the element view. + + Examples + -------- + >>> x = np.diag([1.+1.j]*2) + >>> x[1, 1] = 2 + 4.j + >>> x + array([[ 1.+1.j, 0.+0.j], + [ 0.+0.j, 2.+4.j]]) + >>> x.getfield(np.float64) + array([[ 1., 0.], + [ 0., 2.]]) + + By choosing an offset of 8 bytes we can select the complex part of the + array for our view: + + >>> x.getfield(np.float64, offset=8) + array([[ 1., 0.], + [ 0., 4.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('item', + """ + a.item(*args) + + Copy an element of an array to a standard Python scalar and return it. + + Parameters + ---------- + \\*args : Arguments (variable number and type) + + * none: in this case, the method only works for arrays + with one element (`a.size == 1`), which element is + copied into a standard Python scalar object and returned. 
+ + * int_type: this argument is interpreted as a flat index into + the array, specifying which element to copy and return. + + * tuple of int_types: functions as does a single int_type argument, + except that the argument is interpreted as an nd-index into the + array. + + Returns + ------- + z : Standard Python scalar object + A copy of the specified element of the array as a suitable + Python scalar + + Notes + ----- + When the data type of `a` is longdouble or clongdouble, item() returns + a scalar array object because there is no available Python scalar that + would not lose information. Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned. + + `item` is very similar to a[args], except, instead of an array scalar, + a standard Python scalar is returned. This can be useful for speeding up + access to elements of the array and doing arithmetic on elements of the + array using Python's optimized math. + + Examples + -------- + >>> x = np.random.randint(9, size=(3, 3)) + >>> x + array([[3, 1, 7], + [2, 8, 3], + [8, 5, 3]]) + >>> x.item(3) + 2 + >>> x.item(7) + 5 + >>> x.item((0, 1)) + 1 + >>> x.item((2, 2)) + 3 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', + """ + a.itemset(*args) + + Insert scalar into an array (scalar is cast to array's dtype, if possible) + + There must be at least 1 argument, and define the last argument + as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster + than ``a[args] = item``. The item should be a scalar value and `args` + must select a single item in the array `a`. + + Parameters + ---------- + \*args : Arguments + If one argument: a scalar, only used in case `a` is of size 1. + If two arguments: the last argument is the value to be set + and must be a scalar, the first argument specifies a single array + element location. It is either an int or a tuple. + + Notes + ----- + Compared to indexing syntax, `itemset` provides some speed increase + for placing a scalar into a particular location in an `ndarray`, + if you must do this. However, generally this is discouraged: + among other problems, it complicates the appearance of the code. + Also, when using `itemset` (and `item`) inside a loop, be sure + to assign the methods to a local variable to avoid the attribute + look-up at each loop iteration. + + Examples + -------- + >>> x = np.random.randint(9, size=(3, 3)) + >>> x + array([[3, 1, 7], + [2, 8, 3], + [8, 5, 3]]) + >>> x.itemset(4, 0) + >>> x.itemset((2, 2), 9) + >>> x + array([[3, 1, 7], + [2, 0, 3], + [8, 5, 9]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setasflat', + """ + a.setasflat(arr) + + Equivalent to a.flat = arr.flat, but is generally more efficient. + This function does not check for overlap, so if ``arr`` and ``a`` + are viewing the same data with different strides, the results will + be unpredictable. + + Parameters + ---------- + arr : array_like + The array to copy into a. + + Examples + -------- + >>> a = np.arange(2*4).reshape(2,4)[:,:-1]; a + array([[0, 1, 2], + [4, 5, 6]]) + >>> b = np.arange(3*3, dtype='f4').reshape(3,3).T[::-1,:-1]; b + array([[ 2., 5.], + [ 1., 4.], + [ 0., 3.]], dtype=float32) + >>> a.setasflat(b) + >>> a + array([[2, 5, 1], + [4, 0, 3]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('max', + """ + a.max(axis=None, out=None) + + Return the maximum along a given axis. + + Refer to `numpy.amax` for full documentation. 
+ + See Also + -------- + numpy.amax : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', + """ + a.mean(axis=None, dtype=None, out=None) + + Returns the average of the array elements along given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('min', + """ + a.min(axis=None, out=None) + + Return the minimum along a given axis. + + Refer to `numpy.amin` for full documentation. + + See Also + -------- + numpy.amin : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'may_share_memory', + """ + Determine if two arrays can share memory + + The memory-bounds of a and b are computed. If they overlap then + this function returns True. Otherwise, it returns False. + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. + + Parameters + ---------- + a, b : ndarray + + Returns + ------- + out : bool + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + + """) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', + """ + arr.newbyteorder(new_order='S') + + Return the array with the same data viewed with a different byte order. + + Equivalent to:: + + arr.view(arr.dtype.newbytorder(new_order)) + + Changes are also made in all fields and sub-arrays of the array data + type. + + + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order specifications + above. `new_order` codes can be any of:: + + * 'S' - swap dtype from current to opposite endian + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * {'|', 'I'} - ignore (no change to byte order) + + The default value ('S') results in swapping the current + byte order. The code does a case-insensitive check on the first + letter of `new_order` for the alternatives above. For example, + any of 'B' or 'b' or 'biggish' are valid to specify big-endian. + + + Returns + ------- + new_arr : array + New array object with the dtype reflecting given change to the + byte order. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', + """ + a.nonzero() + + Return the indices of the elements that are non-zero. + + Refer to `numpy.nonzero` for full documentation. + + See Also + -------- + numpy.nonzero : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', + """ + a.prod(axis=None, dtype=None, out=None) + + Return the product of the array elements over the given axis + + Refer to `numpy.prod` for full documentation. + + See Also + -------- + numpy.prod : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', + """ + a.ptp(axis=None, out=None) + + Peak to peak (maximum - minimum) value along a given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('put', + """ + a.put(indices, values, mode='raise') + + Set ``a.flat[n] = values[n]`` for all `n` in indices. + + Refer to `numpy.put` for full documentation. 
+ + See Also + -------- + numpy.put : equivalent function + + """)) + +add_newdoc('numpy.core.multiarray', 'copyto', + """ + copyto(dst, src, casting='same_kind', where=None, preservena=False) + + Copies values from one array to another, broadcasting as necessary. + + Raises a TypeError if the `casting` rule is violated, and if + `where` is provided, it selects which elements to copy. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dst : ndarray + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `dst`, and selects elements to copy from `src` to `dst` + wherever it contains the value True. + preservena : bool, optional + If set to True, leaves any NA values in `dst` untouched. This + is similar to the "hard mask" feature in numpy.ma. + + """) + +add_newdoc('numpy.core.multiarray', 'putmask', + """ + putmask(a, mask, values) + + Changes elements of an array based on conditional and input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + + If `values` is not the same size as `a` and `mask` then it will repeat. + This gives behavior different from ``a[mask] = values``. + + .. note:: The `putmask` functionality is also provided by `copyto`, which + can be significantly faster and in addition is NA-aware + (`preservena` keyword). Replacing `putmask` with + ``np.copyto(a, values, where=mask)`` is recommended. + + Parameters + ---------- + a : array_like + Target array. + mask : array_like + Boolean mask array. It has to be the same shape as `a`. + values : array_like + Values to put into `a` where `mask` is True. If `values` is smaller + than `a` it will be repeated. + + See Also + -------- + place, put, take, copyto + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> np.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = np.arange(5) + >>> np.putmask(x, x>1, [-33, -44]) + >>> x + array([ 0, 1, -33, -44, -33]) + + """) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', + """ + a.ravel([order]) + + Return a flattened array. + + Refer to `numpy.ravel` for full documentation. + + See Also + -------- + numpy.ravel : equivalent function + + ndarray.flat : a flat iterator on the array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', + """ + a.repeat(repeats, axis=None) + + Repeat elements of an array. + + Refer to `numpy.repeat` for full documentation. + + See Also + -------- + numpy.repeat : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', + """ + a.reshape(shape, order='C') + + Returns an array containing the same data with a new shape. + + Refer to `numpy.reshape` for full documentation. 
+ + See Also + -------- + numpy.reshape : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', + """ + a.resize(new_shape, refcheck=True) + + Change shape and size of array in-place. + + Parameters + ---------- + new_shape : tuple of ints, or `n` ints + Shape of resized array. + refcheck : bool, optional + If False, reference count will not be checked. Default is True. + + Returns + ------- + None + + Raises + ------ + ValueError + If `a` does not own its own data or references or views to it exist, + and the data memory must be changed. + + SystemError + If the `order` keyword argument is specified. This behaviour is a + bug in NumPy. + + See Also + -------- + resize : Return a new array with the specified shape. + + Notes + ----- + This reallocates space for the data area if necessary. + + Only contiguous arrays (data elements consecutive in memory) can be + resized. + + The purpose of the reference count check is to make sure you + do not use this array as a buffer for another Python object and then + reallocate the memory. However, reference counts can increase in + other ways so if you are sure that you have not shared the memory + for this array with another Python object, then you may safely set + `refcheck` to False. + + Examples + -------- + Shrinking an array: array is flattened (in the order that the data are + stored in memory), resized, and reshaped: + + >>> a = np.array([[0, 1], [2, 3]], order='C') + >>> a.resize((2, 1)) + >>> a + array([[0], + [1]]) + + >>> a = np.array([[0, 1], [2, 3]], order='F') + >>> a.resize((2, 1)) + >>> a + array([[0], + [2]]) + + Enlarging an array: as above, but missing entries are filled with zeros: + + >>> b = np.array([[0, 1], [2, 3]]) + >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple + >>> b + array([[0, 1, 2], + [3, 0, 0]]) + + Referencing an array prevents resizing... + + >>> c = a + >>> a.resize((1, 1)) + Traceback (most recent call last): + ... + ValueError: cannot resize an array that has been referenced ... + + Unless `refcheck` is False: + + >>> a.resize((1, 1), refcheck=False) + >>> a + array([[0]]) + >>> c + array([[0]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('round', + """ + a.round(decimals=0, out=None) + + Return `a` with each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. + + See Also + -------- + numpy.around : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', + """ + a.searchsorted(v, side='left', sorter=None) + + Find indices where elements of v should be inserted in a to maintain order. + + For full documentation, see `numpy.searchsorted` + + See Also + -------- + numpy.searchsorted : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', + """ + a.setfield(val, dtype, offset=0) + + Put a value into a specified place in a field defined by a data-type. + + Place `val` into `a`'s field defined by `dtype` and beginning `offset` + bytes into the field. + + Parameters + ---------- + val : object + Value to be placed in field. + dtype : dtype object + Data-type of the field in which to place `val`. + offset : int, optional + The number of bytes into the field at which to place `val`. 
+ + Returns + ------- + None + + See Also + -------- + getfield + + Examples + -------- + >>> x = np.eye(3) + >>> x.getfield(np.float64) + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> x.setfield(3, np.int32) + >>> x.getfield(np.int32) + array([[3, 3, 3], + [3, 3, 3], + [3, 3, 3]]) + >>> x + array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323], + [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323], + [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]]) + >>> x.setfield(np.eye(3), np.int32) + >>> x + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', + """ + a.setflags(write=None, align=None, uic=None) + + Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively. + + These Boolean-valued flags affect how numpy interprets the memory + area used by `a` (see Notes below). The ALIGNED flag can only + be set to True if the data is actually aligned according to the type. + The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE + can only be set to True if the array owns its own memory, or the + ultimate owner of the memory exposes a writeable buffer interface, + or is a string. (The exception for string is made so that unpickling + can be done without copying memory.) + + Parameters + ---------- + write : bool, optional + Describes whether or not `a` can be written to. + align : bool, optional + Describes whether or not `a` is aligned properly for its type. + uic : bool, optional + Describes whether or not `a` is a copy of another "base" array. + + Notes + ----- + Array flags provide information about how the memory area used + for the array is to be interpreted. There are 6 Boolean flags + in use, only three of which can be changed by the user: + UPDATEIFCOPY, WRITEABLE, and ALIGNED. + + WRITEABLE (W) the data area can be written to; + + ALIGNED (A) the data and strides are aligned appropriately for the hardware + (as determined by the compiler); + + UPDATEIFCOPY (U) this array is a copy of some other array (referenced + by .base). When this array is deallocated, the base array will be + updated with the contents of this array. + + All flags can be accessed using their first (upper case) letter as well + as the full name. + + Examples + -------- + >>> y + array([[3, 1, 7], + [2, 0, 0], + [8, 5, 9]]) + >>> y.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : True + WRITEABLE : True + ALIGNED : True + UPDATEIFCOPY : False + >>> y.setflags(write=0, align=0) + >>> y.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : True + WRITEABLE : False + ALIGNED : False + UPDATEIFCOPY : False + >>> y.setflags(uic=1) + Traceback (most recent call last): + File "", line 1, in + ValueError: cannot set UPDATEIFCOPY flag to True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', + """ + a.sort(axis=-1, kind='quicksort', order=None) + + Sort an array, in-place. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. Default is 'quicksort'. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + + See Also + -------- + numpy.sort : Return a sorted copy of an array. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. 
+ searchsorted : Find elements in sorted array. + partition: Partial sort. + + Notes + ----- + See ``sort`` for notes on the different sorting algorithms. + + Examples + -------- + >>> a = np.array([[1,4], [3,1]]) + >>> a.sort(axis=1) + >>> a + array([[1, 4], + [1, 3]]) + >>> a.sort(axis=0) + >>> a + array([[1, 3], + [1, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) + >>> a.sort(order='y') + >>> a + array([('c', 1), ('a', 2)], + dtype=[('x', '|S1'), ('y', '>> a = np.array([3, 4, 2, 1]) + >>> a.partition(a, 3) + >>> a + array([2, 1, 3, 4]) + + >>> a.partition((1, 3)) + array([1, 2, 3, 4]) + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze', + """ + a.squeeze(axis=None) + + Remove single-dimensional entries from the shape of `a`. + + Refer to `numpy.squeeze` for full documentation. + + See Also + -------- + numpy.squeeze : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('std', + """ + a.std(axis=None, dtype=None, out=None, ddof=0) + + Returns the standard deviation of the array elements along given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('sum', + """ + a.sum(axis=None, dtype=None, out=None) + + Return the sum of the array elements over the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes', + """ + a.swapaxes(axis1, axis2) + + Return a view of the array with `axis1` and `axis2` interchanged. + + Refer to `numpy.swapaxes` for full documentation. + + See Also + -------- + numpy.swapaxes : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('take', + """ + a.take(indices, axis=None, out=None, mode='raise') + + Return an array formed from the elements of `a` at the given indices. + + Refer to `numpy.take` for full documentation. + + See Also + -------- + numpy.take : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile', + """ + a.tofile(fid, sep="", format="%s") + + Write array to a file as text or binary (default). + + Data is always written in 'C' order, independent of the order of `a`. + The data produced by this method can be recovered using the function + fromfile(). + + Parameters + ---------- + fid : file or str + An open file object, or a string containing a filename. + sep : str + Separator between array items for text output. + If "" (empty), a binary file is written, equivalent to + ``file.write(a.tobytes())``. + format : str + Format string for text file output. + Each entry in the array is formatted to text by first converting + it to the closest Python type, and then using "format" % item. + + Notes + ----- + This is a convenience function for quick storage of array data. + Information on endianness and precision is lost, so this method is not a + good choice for files intended to archive data or transport data between + machines with different endianness. Some of these problems can be overcome + by outputting the data as text files, at the expense of speed and file + size. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', + """ + a.tolist() + + Return the array as a (possibly nested) list. 
+ + Return a copy of the array data as a (nested) Python list. + Data items are converted to the nearest compatible Python type. + + Parameters + ---------- + none + + Returns + ------- + y : list + The possibly nested list of array elements. + + Notes + ----- + The array may be recreated, ``a = np.array(a.tolist())``. + + Examples + -------- + >>> a = np.array([1, 2]) + >>> a.tolist() + [1, 2] + >>> a = np.array([[1, 2], [3, 4]]) + >>> list(a) + [array([1, 2]), array([3, 4])] + >>> a.tolist() + [[1, 2], [3, 4]] + + """)) + + +tobytesdoc = """ + a.{name}(order='C') + + Construct Python bytes containing the raw data bytes in the array. + + Constructs Python bytes showing a copy of the raw contents of + data memory. The bytes object can be produced in either 'C' or 'Fortran', + or 'Any' order (the default is 'C'-order). 'Any' order means C-order + unless the F_CONTIGUOUS flag in the array is set, in which case it + means 'Fortran' order. + + {deprecated} + + Parameters + ---------- + order : {{'C', 'F', None}}, optional + Order of the data for multidimensional arrays: + C, Fortran, or the same as for the original array. + + Returns + ------- + s : bytes + Python bytes exhibiting a copy of `a`'s raw data. + + Examples + -------- + >>> x = np.array([[0, 1], [2, 3]]) + >>> x.tobytes() + b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00' + >>> x.tobytes('C') == x.tobytes() + True + >>> x.tobytes('F') + b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00' + + """ + +add_newdoc('numpy.core.multiarray', 'ndarray', + ('tostring', tobytesdoc.format(name='tostring', + deprecated= + 'This function is a compatibility ' + 'alias for tobytes. Despite its ' + 'name it returns bytes not ' + 'strings.'))) +add_newdoc('numpy.core.multiarray', 'ndarray', + ('tobytes', tobytesdoc.format(name='tobytes', + deprecated='.. versionadded:: 1.9.0'))) + +add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', + """ + a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) + + Return the sum along diagonals of the array. + + Refer to `numpy.trace` for full documentation. + + See Also + -------- + numpy.trace : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', + """ + a.transpose(*axes) + + Returns a view of the array with axes transposed. + + For a 1-D array, this has no effect. (To change between column and + row vectors, first cast the 1-D array into a matrix object.) + For a 2-D array, this is the usual matrix transpose. + For an n-D array, if axes are given, their order indicates how the + axes are permuted (see Examples). If axes are not provided and + ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then + ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``. + + Parameters + ---------- + axes : None, tuple of ints, or `n` ints + + * None or no argument: reverses the order of the axes. + + * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s + `i`-th axis becomes `a.transpose()`'s `j`-th axis. + + * `n` ints: same as an n-tuple of the same ints (this form is + intended simply as a "convenience" alternative to the tuple form) + + Returns + ------- + out : ndarray + View of `a`, with axes suitably permuted. + + See Also + -------- + ndarray.T : Array property returning the array transposed. 
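+
+ Notes
+ -----
+ The transposed array is a view on the same data: no copy is made, so
+ writing through the result also modifies `a`.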
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> a.transpose()
+ array([[1, 3],
+ [2, 4]])
+ >>> a.transpose((1, 0))
+ array([[1, 3],
+ [2, 4]])
+ >>> a.transpose(1, 0)
+ array([[1, 3],
+ [2, 4]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
+ """
+ a.var(axis=None, dtype=None, out=None, ddof=0)
+
+ Returns the variance of the array elements, along given axis.
+
+ Refer to `numpy.var` for full documentation.
+
+ See Also
+ --------
+ numpy.var : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
+ """
+ a.view(dtype=None, type=None)
+
+ New view of array with the same data.
+
+ Parameters
+ ----------
+ dtype : data-type or ndarray sub-class, optional
+ Data-type descriptor of the returned view, e.g., float32 or int16. The
+ default, None, results in the view having the same data-type as `a`.
+ This argument can also be specified as an ndarray sub-class, which
+ then specifies the type of the returned object (this is equivalent to
+ setting the ``type`` parameter).
+ type : Python type, optional
+ Type of the returned view, e.g., ndarray or matrix. Again, the
+ default None results in type preservation.
+
+ Notes
+ -----
+ ``a.view()`` is used two different ways:
+
+ ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+ of the array's memory with a different data-type. This can cause a
+ reinterpretation of the bytes of memory.
+
+ ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+ returns an instance of `ndarray_subclass` that looks at the same array
+ (same shape, dtype, etc.) This does not cause a reinterpretation of the
+ memory.
+
+ For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+ bytes per entry than the previous dtype (for example, converting a
+ regular array to a structured array), then the behavior of the view
+ cannot be predicted just from the superficial appearance of ``a`` (shown
+ by ``print(a)``). It also depends on exactly how ``a`` is stored in
+ memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
+ defined as a slice or transpose, etc., the view may give different
+ results.
+
+
+ Examples
+ --------
+ >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+ Viewing array data using a different type and dtype:
+
+ >>> y = x.view(dtype=np.int16, type=np.matrix)
+ >>> y
+ matrix([[513]], dtype=int16)
+ >>> print type(y)
+ <class 'numpy.matrixlib.defmatrix.matrix'>
+
+ Creating a view on a structured array so it can be used in calculations
+
+ >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+ >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+ >>> xv
+ array([[1, 2],
+ [3, 4]], dtype=int8)
+ >>> xv.mean(0)
+ array([ 2., 3.])
+
+ Making changes to the view changes the underlying array
+
+ >>> xv[0,1] = 20
+ >>> print x
+ [(1, 20) (3, 4)]
+
+ Using a view to convert an array to a record array:
+
+ >>> z = x.view(np.recarray)
+ >>> z.a
+ array([1], dtype=int8)
+
+ Views share data:
+
+ >>> x[0] = (9, 10)
+ >>> z[0]
+ (9, 10)
+
+ Views that change the dtype size (bytes per entry) should normally be
+ avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+ >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
+ >>> y = x[:, 0:2]
+ >>> y
+ array([[1, 2],
+ [4, 5]], dtype=int16)
+ >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: new type not compatible with array.
+ >>> z = y.copy()
+ >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+ array([[(1, 2)],
+ [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+ """))
+
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.umath', 'frompyfunc',
+ """
+ frompyfunc(func, nin, nout)
+
+ Takes an arbitrary Python function and returns a Numpy ufunc.
+
+ Can be used, for example, to add broadcasting to a built-in Python
+ function (see Examples section).
+
+ Parameters
+ ----------
+ func : Python function object
+ An arbitrary Python function.
+ nin : int
+ The number of input arguments.
+ nout : int
+ The number of objects returned by `func`.
+
+ Returns
+ -------
+ out : ufunc
+ Returns a Numpy universal function (``ufunc``) object.
+
+ Notes
+ -----
+ The returned ufunc always returns PyObject arrays.
+
+ Examples
+ --------
+ Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+ >>> oct_array = np.frompyfunc(oct, 1, 1)
+ >>> oct_array(np.array((10, 30, 100)))
+ array([012, 036, 0144], dtype=object)
+ >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+ array(['012', '036', '0144'],
+ dtype='|S4')
+
+ """)
+
+add_newdoc('numpy.core.umath', 'geterrobj',
+ """
+ geterrobj()
+
+ Return the current object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in Numpy. `geterrobj` is used internally by the other
+ functions that get and set error handling behavior (`geterr`, `seterr`,
+ `geterrcall`, `seterrcall`).
+
+ Returns
+ -------
+ errobj : list
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. The information for each error type
+ is contained in three bits of the integer. If we print it in base 8, we
+ can see what treatment is set for "invalid", "under", "over", and
+ "divide" (in that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ seterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrobj() # first get the defaults
+ [10000, 0, None]
+
+ >>> def err_handler(type, flag):
+ ... print "Floating point error (%s), with flag %s" % (type, flag)
+ ...
+ >>> old_bufsize = np.setbufsize(20000)
+ >>> old_err = np.seterr(divide='raise')
+ >>> old_handler = np.seterrcall(err_handler)
+ >>> np.geterrobj()
+ [20000, 2, <function err_handler at 0x91dcaac>]
+
+ >>> old_err = np.seterr(all='ignore')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '0'
+ >>> old_err = np.seterr(divide='warn', over='log', under='call',
+ ... invalid='print')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '4351'
+
+ """)
+
+add_newdoc('numpy.core.umath', 'seterrobj',
+ """
+ seterrobj(errobj)
+
+ Set the object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in Numpy. `seterrobj` is used internally by the other
+ functions that set error handling behavior (`seterr`, `seterrcall`).
+
+ Parameters
+ ----------
+ errobj : list
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. The information for each error type
+ is contained in three bits of the integer. If we print it in base 8, we
+ can see what treatment is set for "invalid", "under", "over", and
+ "divide" (in that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ geterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
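+
+ As a concrete reading of the mask used in the Examples below, the
+ integer ``12`` is ``1*8 + 4``, i.e. octal ``14``: the lowest octal
+ digit sets divide=4 ('print') and the next digit sets over=1 ('warn').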
+
+ Examples
+ --------
+ >>> old_errobj = np.geterrobj() # first get the defaults
+ >>> old_errobj
+ [10000, 0, None]
+
+ >>> def err_handler(type, flag):
+ ... print "Floating point error (%s), with flag %s" % (type, flag)
+ ...
+ >>> new_errobj = [20000, 12, err_handler]
+ >>> np.seterrobj(new_errobj)
+ >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
+ '14'
+ >>> np.geterr()
+ {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
+ >>> np.geterrcall() is err_handler
+ True
+
+ """)
+
+
+##############################################################################
+#
+# lib._compiled_base functions
+#
+##############################################################################
+
+add_newdoc('numpy.lib._compiled_base', 'digitize',
+ """
+ digitize(x, bins, right=False)
+
+ Return the indices of the bins to which each value in input array belongs.
+
+ Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
+ `bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
+ `bins` is monotonically decreasing. If values in `x` are beyond the
+ bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If
+ `right` is True, then the right bin is closed so that the index ``i``
+ is such that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]``
+ if `bins` is monotonically increasing or decreasing, respectively.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array to be binned. It has to be 1-dimensional.
+ bins : array_like
+ Array of bins. It has to be 1-dimensional and monotonic.
+ right : bool, optional
+ Indicates whether the intervals include the right or the left bin
+ edge. The default (``right == False``) means that the interval
+ does not include the right edge; the left bin end is open in this
+ case, i.e., ``bins[i-1] <= x < bins[i]`` is the default behavior for
+ monotonically increasing bins.
+
+ Returns
+ -------
+ out : ndarray of ints
+ Output array of indices, of same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or if `bins` is not monotonic.
+ TypeError
+ If the type of the input is complex.
+
+ See Also
+ --------
+ bincount, histogram, unique
+
+ Notes
+ -----
+ If values in `x` are such that they fall outside the bin range,
+ attempting to index `bins` with the indices that `digitize` returns
+ will result in an IndexError.
+
+ Examples
+ --------
+ >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+ >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+ >>> inds = np.digitize(x, bins)
+ >>> inds
+ array([1, 4, 3, 2])
+ >>> for n in range(x.size):
+ ... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]
+ ...
+ 0.0 <= 0.2 < 1.0
+ 4.0 <= 6.4 < 10.0
+ 2.5 <= 3.0 < 4.0
+ 1.0 <= 1.6 < 2.5
+
+ >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
+ >>> bins = np.array([0,5,10,15,20])
+ >>> np.digitize(x,bins,right=True)
+ array([1, 2, 3, 4, 4])
+ >>> np.digitize(x,bins,right=False)
+ array([1, 3, 3, 4, 5])
+ """)
+
+add_newdoc('numpy.lib._compiled_base', 'bincount',
+ """
+ bincount(x, weights=None, minlength=None)
+
+ Count number of occurrences of each value in array of non-negative ints.
+
+ The number of bins (of size 1) is one larger than the largest value in
+ `x`. If `minlength` is specified, there will be at least this number
+ of bins in the output array (though it will be longer if necessary,
+ depending on the contents of `x`).
+ Each bin gives the number of occurrences of its index value in `x`.
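+ For example, ``np.bincount([0, 2, 2])`` gives ``array([1, 0, 2])``:
+ one zero, no ones, and two twos.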
+ If `weights` is specified the input array is weighted by it, i.e. if a
+ value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+ of ``out[n] += 1``.
+
+ Parameters
+ ----------
+ x : array_like, 1 dimension, nonnegative ints
+ Input array.
+ weights : array_like, optional
+ Weights, array of the same shape as `x`.
+ minlength : int, optional
+ .. versionadded:: 1.6.0
+
+ A minimum number of bins for the output array.
+
+ Returns
+ -------
+ out : ndarray of ints
+ The result of binning the input array.
+ The length of `out` is equal to ``np.amax(x)+1``.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or contains elements with negative
+ values, or if `minlength` is non-positive.
+ TypeError
+ If the type of the input is float or complex.
+
+ See Also
+ --------
+ histogram, digitize, unique
+
+ Examples
+ --------
+ >>> np.bincount(np.arange(5))
+ array([1, 1, 1, 1, 1])
+ >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+ array([1, 3, 1, 1, 0, 0, 0, 1])
+
+ >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+ >>> np.bincount(x).size == np.amax(x)+1
+ True
+
+ The input array needs to be of integer dtype, otherwise a
+ TypeError is raised:
+
+ >>> np.bincount(np.arange(5, dtype=np.float))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: array cannot be safely cast to required type
+
+ A possible use of ``bincount`` is to perform sums over
+ variable-size chunks of an array, using the ``weights`` keyword.
+
+ >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+ >>> x = np.array([0, 1, 1, 2, 2, 2])
+ >>> np.bincount(x, weights=w)
+ array([ 0.3, 0.7, 1.1])
+
+ """)
+
+add_newdoc('numpy.lib._compiled_base', 'ravel_multi_index',
+ """
+ ravel_multi_index(multi_index, dims, mode='raise', order='C')
+
+ Converts a tuple of index arrays into an array of flat
+ indices, applying boundary modes to the multi-index.
+
+ Parameters
+ ----------
+ multi_index : tuple of array_like
+ A tuple of integer arrays, one array for each dimension.
+ dims : tuple of ints
+ The shape of array into which the indices from ``multi_index`` apply.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices are handled. Can specify
+ either one mode or a tuple of modes, one mode per index.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ In 'clip' mode, a negative index which would normally
+ wrap will clip to 0 instead.
+ order : {'C', 'F'}, optional
+ Determines whether the multi-index should be viewed as indexing in
+ C (row-major) order or FORTRAN (column-major) order.
+
+ Returns
+ -------
+ raveled_indices : ndarray
+ An array of indices into the flattened version of an array
+ of dimensions ``dims``.
+
+ See Also
+ --------
+ unravel_index
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ Examples
+ --------
+ >>> arr = np.array([[3,6,6],[4,5,1]])
+ >>> np.ravel_multi_index(arr, (7,6))
+ array([22, 41, 37])
+ >>> np.ravel_multi_index(arr, (7,6), order='F')
+ array([31, 41, 13])
+ >>> np.ravel_multi_index(arr, (4,6), mode='clip')
+ array([22, 23, 19])
+ >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
+ array([12, 13, 13])
+
+ >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
+ 1621
+ """)
+
+add_newdoc('numpy.lib._compiled_base', 'unravel_index',
+ """
+ unravel_index(indices, dims, order='C')
+
+ Converts a flat index or array of flat indices into a tuple
+ of coordinate arrays.
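+
+ For example, in an array of shape ``(7, 6)`` the flat index 22 (C
+ order) corresponds to the coordinates ``(3, 4)``, since
+ ``3*6 + 4 == 22``.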
+
+ Parameters
+ ----------
+ indices : array_like
+ An integer array whose elements are indices into the flattened
+ version of an array of dimensions ``dims``. Before version 1.6.0,
+ this function accepted just one index value.
+ dims : tuple of ints
+ The shape of the array to use for unraveling ``indices``.
+ order : {'C', 'F'}, optional
+ .. versionadded:: 1.6.0
+
+ Determines whether the indices should be viewed as indexing in
+ C (row-major) order or FORTRAN (column-major) order.
+
+ Returns
+ -------
+ unraveled_coords : tuple of ndarray
+ Each array in the tuple has the same shape as the ``indices``
+ array.
+
+ See Also
+ --------
+ ravel_multi_index
+
+ Examples
+ --------
+ >>> np.unravel_index([22, 41, 37], (7,6))
+ (array([3, 6, 6]), array([4, 5, 1]))
+ >>> np.unravel_index([31, 41, 13], (7,6), order='F')
+ (array([3, 6, 6]), array([4, 5, 1]))
+
+ >>> np.unravel_index(1621, (6,7,8,9))
+ (3, 1, 4, 1)
+
+ """)
+
+add_newdoc('numpy.lib._compiled_base', 'add_docstring',
+ """
+ add_docstring(obj, docstring)
+
+ Add a docstring to a built-in obj if possible.
+ If the obj already has a docstring, raise a RuntimeError.
+ If this routine does not know how to add a docstring to the object,
+ raise a TypeError.
+ """)
+
+add_newdoc('numpy.lib._compiled_base', 'add_newdoc_ufunc',
+ """
+ add_newdoc_ufunc(ufunc, new_docstring)
+
+ Replace the docstring for a ufunc with new_docstring.
+ This method will only work if the current docstring for
+ the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+ Parameters
+ ----------
+ ufunc : numpy.ufunc
+ A ufunc whose current doc is NULL.
+ new_docstring : string
+ The new docstring for the ufunc.
+
+ Notes
+ -----
+ This method allocates memory for new_docstring on
+ the heap. Technically this creates a memory leak, since this
+ memory will not be reclaimed until the end of the program
+ even if the ufunc itself is removed. However this will only
+ be a problem if the user is repeatedly creating ufuncs with
+ no documentation, adding documentation via add_newdoc_ufunc,
+ and then throwing away the ufunc.
+ """)
+
+add_newdoc('numpy.lib._compiled_base', 'packbits',
+ """
+ packbits(myarray, axis=None)
+
+ Packs the elements of a binary-valued array into bits in a uint8 array.
+
+ The result is padded to full bytes by inserting zero bits at the end.
+
+ Parameters
+ ----------
+ myarray : array_like
+ An integer type array whose elements should be packed to bits.
+ axis : int, optional
+ The dimension over which bit-packing is done.
+ ``None`` implies packing the flattened array.
+
+ Returns
+ -------
+ packed : ndarray
+ Array of type uint8 whose elements represent bits corresponding to the
+ logical (0 or nonzero) value of the input elements. The shape of
+ `packed` has the same number of dimensions as the input (unless `axis`
+ is None, in which case the output is 1-D).
+
+ See Also
+ --------
+ unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[[1,0,1],
+ ... [0,1,0]],
+ ... [[1,1,0],
+ ... [0,0,1]]])
+ >>> b = np.packbits(a, axis=-1)
+ >>> b
+ array([[[160],[64]],[[192],[32]]], dtype=uint8)
+
+ Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+ and 32 = 0010 0000.
+
+ """)
+
+add_newdoc('numpy.lib._compiled_base', 'unpackbits',
+ """
+ unpackbits(myarray, axis=None)
+
+ Unpacks elements of a uint8 array into a binary-valued output array.
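+
+ Each byte is expanded to eight bits, most significant bit first; for
+ example, ``np.unpackbits(np.array([2], dtype=np.uint8))`` gives
+ ``array([0, 0, 0, 0, 0, 0, 1, 0], dtype=uint8)``.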
+ + Each element of `myarray` represents a bit-field that should be unpacked + into a binary-valued output array. The shape of the output array is either + 1-D (if `axis` is None) or the same shape as the input array with unpacking + done along the axis specified. + + Parameters + ---------- + myarray : ndarray, uint8 type + Input array. + axis : int, optional + Unpacks along this axis. + + Returns + ------- + unpacked : ndarray, uint8 type + The elements are binary-valued (0 or 1). + + See Also + -------- + packbits : Packs the elements of a binary-valued array into bits in a uint8 + array. + + Examples + -------- + >>> a = np.array([[2], [7], [23]], dtype=np.uint8) + >>> a + array([[ 2], + [ 7], + [23]], dtype=uint8) + >>> b = np.unpackbits(a, axis=1) + >>> b + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) + + """) + + +############################################################################## +# +# Documentation for ufunc attributes and methods +# +############################################################################## + + +############################################################################## +# +# ufunc object +# +############################################################################## + +add_newdoc('numpy.core', 'ufunc', + """ + Functions that operate element by element on whole arrays. + + To see the documentation for a specific ufunc, use np.info(). For + example, np.info(np.sin). Because ufuncs are written in C + (for speed) and linked into Python with NumPy's ufunc facility, + Python's help() function finds this page whenever help() is called + on a ufunc. + + A detailed explanation of ufuncs can be found in the "ufuncs.rst" + file in the NumPy reference guide. + + Unary ufuncs: + ============= + + op(X, out=None) + Apply op to X elementwise + + Parameters + ---------- + X : array_like + Input array. + out : array_like + An array to store the output. Must be the same shape as `X`. + + Returns + ------- + r : array_like + `r` will have the same shape as `X`; if out is provided, `r` + will be equal to out. + + Binary ufuncs: + ============== + + op(X, Y, out=None) + Apply `op` to `X` and `Y` elementwise. May "broadcast" to make + the shapes of `X` and `Y` congruent. + + The broadcasting rules are: + + * Dimensions of length 1 may be prepended to either array. + * Arrays may be repeated along dimensions of length 1. + + Parameters + ---------- + X : array_like + First input array. + Y : array_like + Second input array. + out : array_like + An array to store the output. Must be the same shape as the + output would have. + + Returns + ------- + r : array_like + The return value; if out is provided, `r` will be equal to out. + + """) + + +############################################################################## +# +# ufunc attributes +# +############################################################################## + +add_newdoc('numpy.core', 'ufunc', ('identity', + """ + The identity value. + + Data attribute containing the identity element for the ufunc, if it has one. + If it does not, the attribute value is None. + + Examples + -------- + >>> np.add.identity + 0 + >>> np.multiply.identity + 1 + >>> np.power.identity + 1 + >>> print np.exp.identity + None + """)) + +add_newdoc('numpy.core', 'ufunc', ('nargs', + """ + The number of arguments. + + Data attribute containing the number of arguments the ufunc takes, including + optional ones. 
+ + Notes + ----- + Typically this value will be one more than what you might expect because all + ufuncs take the optional "out" argument. + + Examples + -------- + >>> np.add.nargs + 3 + >>> np.multiply.nargs + 3 + >>> np.power.nargs + 3 + >>> np.exp.nargs + 2 + """)) + +add_newdoc('numpy.core', 'ufunc', ('nin', + """ + The number of inputs. + + Data attribute containing the number of arguments the ufunc treats as input. + + Examples + -------- + >>> np.add.nin + 2 + >>> np.multiply.nin + 2 + >>> np.power.nin + 2 + >>> np.exp.nin + 1 + """)) + +add_newdoc('numpy.core', 'ufunc', ('nout', + """ + The number of outputs. + + Data attribute containing the number of arguments the ufunc treats as output. + + Notes + ----- + Since all ufuncs can take output arguments, this will always be (at least) 1. + + Examples + -------- + >>> np.add.nout + 1 + >>> np.multiply.nout + 1 + >>> np.power.nout + 1 + >>> np.exp.nout + 1 + + """)) + +add_newdoc('numpy.core', 'ufunc', ('ntypes', + """ + The number of types. + + The number of numerical NumPy types - of which there are 18 total - on which + the ufunc can operate. + + See Also + -------- + numpy.ufunc.types + + Examples + -------- + >>> np.add.ntypes + 18 + >>> np.multiply.ntypes + 18 + >>> np.power.ntypes + 17 + >>> np.exp.ntypes + 7 + >>> np.remainder.ntypes + 14 + + """)) + +add_newdoc('numpy.core', 'ufunc', ('types', + """ + Returns a list with types grouped input->output. + + Data attribute listing the data-type "Domain-Range" groupings the ufunc can + deliver. The data-types are given using the character codes. + + See Also + -------- + numpy.ufunc.ntypes + + Examples + -------- + >>> np.add.types + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', + 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', + 'GG->G', 'OO->O'] + + >>> np.multiply.types + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', + 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', + 'GG->G', 'OO->O'] + + >>> np.power.types + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', + 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', + 'OO->O'] + + >>> np.exp.types + ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] + + >>> np.remainder.types + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', + 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] + + """)) + + +############################################################################## +# +# ufunc methods +# +############################################################################## + +add_newdoc('numpy.core', 'ufunc', ('reduce', + """ + reduce(a, axis=0, dtype=None, out=None, keepdims=False) + + Reduces `a`'s dimension by one, by applying ufunc along one axis. + + Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then + :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = + the result of iterating `j` over :math:`range(N_i)`, cumulatively applying + ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. + For a one-dimensional array, reduce produces results equivalent to: + :: + + r = op.identity # op = ufunc + for i in range(len(A)): + r = op(r, A[i]) + return r + + For example, add.reduce() is equivalent to sum(). + + Parameters + ---------- + a : array_like + The array to act on. + axis : None or int or tuple of ints, optional + Axis or axes along which a reduction is performed. 
+ The default (`axis` = 0) is to perform a reduction over the first
+ dimension of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If this is `None`, a reduction is performed over all the axes.
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+
+ For operations which are either not commutative or not associative,
+ doing a reduction over multiple axes is not well-defined. The
+ ufuncs do not currently raise an exception in this case, but will
+ likely do so in the future.
+ dtype : data-type code, optional
+ The type used to represent the intermediate results. Defaults
+ to the data-type of the output array if this is provided, or
+ the data-type of the input array if no output array is provided.
+ out : ndarray, optional
+ A location into which the result is stored. If not provided, a
+ freshly-allocated array is returned.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `arr`.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ r : ndarray
+ The reduced array. If `out` was supplied, `r` is a reference to it.
+
+ Examples
+ --------
+ >>> np.multiply.reduce([2,3,5])
+ 30
+
+ A multi-dimensional array example:
+
+ >>> X = np.arange(8).reshape((2,2,2))
+ >>> X
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> np.add.reduce(X, 0)
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X) # confirm: default axis value is 0
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X, 1)
+ array([[ 2, 4],
+ [10, 12]])
+ >>> np.add.reduce(X, 2)
+ array([[ 1, 5],
+ [ 9, 13]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('accumulate',
+ """
+ accumulate(array, axis=0, dtype=None, out=None)
+
+ Accumulate the result of applying the operator to all elements.
+
+ For a one-dimensional array, accumulate produces results equivalent to::
+
+ r = np.empty(len(A))
+ t = op.identity # op = the ufunc being applied to A's elements
+ for i in range(len(A)):
+ t = op(t, A[i])
+ r[i] = t
+ return r
+
+ For example, add.accumulate() is equivalent to np.cumsum().
+
+ For a multi-dimensional array, accumulate is applied along only one
+ axis (axis zero by default; see Examples below) so repeated use is
+ necessary if one wants to accumulate over multiple axes.
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ axis : int, optional
+ The axis along which to apply the accumulation; default is zero.
+ dtype : data-type code, optional
+ The data-type used to represent the intermediate results. Defaults
+ to the data-type of the output array if such is provided, or the
+ data-type of the input array if no output array is provided.
+ out : ndarray, optional
+ A location into which the result is stored. If not provided, a
+ freshly-allocated array is returned.
+
+ Returns
+ -------
+ r : ndarray
+ The accumulated values. If `out` was supplied, `r` is a reference to
+ `out`.
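+
+ Notes
+ -----
+ On a 1-D array, ``np.add.accumulate`` matches ``np.cumsum``:
+ ``np.add.accumulate([2, 3, 5])`` and ``np.cumsum([2, 3, 5])`` both
+ give ``array([ 2, 5, 10])``.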
+ + Examples + -------- + 1-D array examples: + + >>> np.add.accumulate([2, 3, 5]) + array([ 2, 5, 10]) + >>> np.multiply.accumulate([2, 3, 5]) + array([ 2, 6, 30]) + + 2-D array examples: + + >>> I = np.eye(2) + >>> I + array([[ 1., 0.], + [ 0., 1.]]) + + Accumulate along axis 0 (rows), down columns: + + >>> np.add.accumulate(I, 0) + array([[ 1., 0.], + [ 1., 1.]]) + >>> np.add.accumulate(I) # no axis specified = axis zero + array([[ 1., 0.], + [ 1., 1.]]) + + Accumulate along axis 1 (columns), through rows: + + >>> np.add.accumulate(I, 1) + array([[ 1., 1.], + [ 0., 1.]]) + + """)) + +add_newdoc('numpy.core', 'ufunc', ('reduceat', + """ + reduceat(a, indices, axis=0, dtype=None, out=None) + + Performs a (local) reduce with specified slices over a single axis. + + For i in ``range(len(indices))``, `reduceat` computes + ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th + generalized "row" parallel to `axis` in the final result (i.e., in a + 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if + `axis = 1`, it becomes the i-th column). There are three exceptions to this: + + * when ``i = len(indices) - 1`` (so for the last index), + ``indices[i+1] = a.shape[axis]``. + * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is + simply ``a[indices[i]]``. + * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. + + The shape of the output depends on the size of `indices`, and may be + larger than `a` (this happens if ``len(indices) > a.shape[axis]``). + + Parameters + ---------- + a : array_like + The array to act on. + indices : array_like + Paired indices, comma separated (not colon), specifying slices to + reduce. + axis : int, optional + The axis along which to apply the reduceat. + dtype : data-type code, optional + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. + out : ndarray, optional + A location into which the result is stored. If not provided a + freshly-allocated array is returned. + + Returns + ------- + r : ndarray + The reduced values. If `out` was supplied, `r` is a reference to + `out`. + + Notes + ----- + A descriptive example: + + If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as + ``ufunc.reduceat(a, indices)[::2]`` where `indices` is + ``range(len(array) - 1)`` with a zero placed + in every other element: + ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. + + Don't be fooled by this attribute's name: `reduceat(a)` is not + necessarily smaller than `a`. 
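+
+ To see how the running sum in the first example below works, note
+ that with ``indices = [0, 4, 1, 5, 2, 6, 3, 7]`` the even slots hold
+ ``0+1+2+3``, ``1+2+3+4``, ``2+3+4+5`` and ``3+4+5+6``, which is what
+ the trailing ``[::2]`` extracts.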
+
+ Examples
+ --------
+ To take the running sum of four successive values:
+
+ >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
+ array([ 6, 10, 14, 18])
+
+ A 2-D example:
+
+ >>> x = np.linspace(0, 15, 16).reshape(4,4)
+ >>> x
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [ 12., 13., 14., 15.]])
+
+ ::
+
+ # reduce such that the result has the following five rows:
+ # [row1 + row2 + row3]
+ # [row4]
+ # [row2]
+ # [row3]
+ # [row1 + row2 + row3 + row4]
+
+ >>> np.add.reduceat(x, [0, 3, 1, 2, 0])
+ array([[ 12., 15., 18., 21.],
+ [ 12., 13., 14., 15.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [ 24., 28., 32., 36.]])
+
+ ::
+
+ # reduce such that result has the following two columns:
+ # [col1 * col2 * col3, col4]
+
+ >>> np.multiply.reduceat(x, [0, 3], 1)
+ array([[ 0., 3.],
+ [ 120., 7.],
+ [ 720., 11.],
+ [ 2184., 15.]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('outer',
+ """
+ outer(A, B)
+
+ Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
+
+ Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
+ ``op.outer(A, B)`` is an array of dimension M + N such that:
+
+ .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
+ op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
+
+ For `A` and `B` one-dimensional, this is equivalent to::
+
+ r = np.empty((len(A), len(B)))
+ for i in range(len(A)):
+ for j in range(len(B)):
+ r[i,j] = op(A[i], B[j]) # op = ufunc in question
+
+ Parameters
+ ----------
+ A : array_like
+ First array
+ B : array_like
+ Second array
+
+ Returns
+ -------
+ r : ndarray
+ Output array
+
+ See Also
+ --------
+ numpy.outer
+
+ Examples
+ --------
+ >>> np.multiply.outer([1, 2, 3], [4, 5, 6])
+ array([[ 4, 5, 6],
+ [ 8, 10, 12],
+ [12, 15, 18]])
+
+ A multi-dimensional example:
+
+ >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> A.shape
+ (2, 3)
+ >>> B = np.array([[1, 2, 3, 4]])
+ >>> B.shape
+ (1, 4)
+ >>> C = np.multiply.outer(A, B)
+ >>> C.shape; C
+ (2, 3, 1, 4)
+ array([[[[ 1, 2, 3, 4]],
+ [[ 2, 4, 6, 8]],
+ [[ 3, 6, 9, 12]]],
+ [[[ 4, 8, 12, 16]],
+ [[ 5, 10, 15, 20]],
+ [[ 6, 12, 18, 24]]]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('at',
+ """
+ at(a, indices, b=None)
+
+ Performs unbuffered in-place operation on operand 'a' for elements
+ specified by 'indices'. For addition ufunc, this method is equivalent to
+ `a[indices] += b`, except that results are accumulated for elements that
+ are indexed more than once. For example, `a[[0,0]] += 1` will only
+ increment the first element once because of buffering, whereas
+ `add.at(a, [0,0], 1)` will increment the first element twice.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ The array to perform in-place operation on.
+ indices : array_like or tuple
+ Array like index object or slice object for indexing into first
+ operand. If first operand has multiple dimensions, indices can be a
+ tuple of array like index objects or slice objects.
+ b : array_like
+ Second operand for ufuncs requiring two operands. Operand must be
+ broadcastable over first operand after indexing or slicing.
+
+ Examples
+ --------
+ Set items 0 and 1 to their negative values:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.negative.at(a, [0, 1])
+ >>> a
+ array([-1, -2, 3, 4])
+
+ Increment items 0 and 1, and increment item 2 twice:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.add.at(a, [0, 1, 2, 2], 1)
+ >>> a
+ array([2, 3, 5, 4])
+
+ Add items 0 and 1 in first array to second array,
+ and store results in first array:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> b = np.array([1, 2])
+ >>> np.add.at(a, [0, 1], b)
+ >>> a
+ array([2, 4, 3, 4])
+
+ """))
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype',
+ """
+ dtype(obj, align=False, copy=False)
+
+ Create a data type object.
+
+ A numpy array is homogeneous, and contains elements described by a
+ dtype object. A dtype object can be constructed from different
+ combinations of fundamental numeric types.
+
+ Parameters
+ ----------
+ obj
+ Object to be converted to a data type object.
+ align : bool, optional
+ Add padding to the fields to match what a C compiler would output
+ for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
+ or a comma-separated string. If a struct dtype is being created,
+ this also sets a sticky alignment flag ``isalignedstruct``.
+ copy : bool, optional
+ Make a new copy of the data-type object. If ``False``, the result
+ may just be a reference to a built-in data-type object.
+
+ See also
+ --------
+ result_type
+
+ Examples
+ --------
+ Using array-scalar type:
+
+ >>> np.dtype(np.int16)
+ dtype('int16')
+
+ Record, one field name 'f1', containing int16:
+
+ >>> np.dtype([('f1', np.int16)])
+ dtype([('f1', '<i2')])
+
+ Record, one field named 'f1', in itself containing a record with one field:
+
+ >>> np.dtype([('f1', [('f1', np.int16)])])
+ dtype([('f1', [('f1', '<i2')])])
+
+ Record, two fields: the first field contains an unsigned int, the
+ second an int32:
+
+ >>> np.dtype([('f1', np.uint), ('f2', np.int32)])
+ dtype([('f1', '<u4'), ('f2', '<i4')])
+
+ Using array-protocol type strings:
+
+ >>> np.dtype([('a','f8'),('b','S10')])
+ dtype([('a', '<f8'), ('b', '|S10')])
+
+ Using comma-separated field formats. The shape is (2,3):
+
+ >>> np.dtype("i4, (2,3)f8")
+ dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+ Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
+ is a flexible type, here of size 10:
+
+ >>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
+ dtype([('hello', '<i4', 3), ('world', '|V10')])
+
+ Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
+ the offsets in bytes:
+
+ >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+ dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
+
+ Using dictionaries. Two fields named 'gender' and 'age':
+
+ >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+ dtype([('gender', '|S1'), ('age', '|u1')])
+
+ Offsets in bytes, here 0 and 25:
+
+ >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+ dtype([('surname', '|S25'), ('age', '|u1')])
+
+ """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
+ """
+ The required alignment (bytes) of this data-type according to the compiler.
+
+ More information is available in the C-API section of the manual.
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
+ """
+ A character indicating the byte-order of this data-type object.
+
+ One of:
+
+ === ==============
+ '=' native
+ '<' little-endian
+ '>' big-endian
+ '|' not applicable
+ === ==============
+
+ All built-in data-type objects have byteorder either '=' or '|'.
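+ Note that even when a dtype is constructed with an explicit '<' or '>'
+ code that happens to match the machine, the stored byteorder is still
+ reported as '=' (see the Examples below).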
+ + Examples + -------- + + >>> dt = np.dtype('i2') + >>> dt.byteorder + '=' + >>> # endian is not relevant for 8 bit numbers + >>> np.dtype('i1').byteorder + '|' + >>> # or ASCII strings + >>> np.dtype('S2').byteorder + '|' + >>> # Even if specific code is given, and it is native + >>> # '=' is the byteorder + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = sys_is_le and '<' or '>' + >>> swapped_code = sys_is_le and '>' or '<' + >>> dt = np.dtype(native_code + 'i2') + >>> dt.byteorder + '=' + >>> # Swapped code shows up as itself + >>> dt = np.dtype(swapped_code + 'i2') + >>> dt.byteorder == swapped_code + True + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('char', + """A unique character code for each of the 21 different built-in types.""")) + +add_newdoc('numpy.core.multiarray', 'dtype', ('descr', + """ + Array-interface compliant full description of the data-type. + + The format is that required by the 'descr' key in the + `__array_interface__` attribute. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('fields', + """ + Dictionary of named fields defined for this data type, or ``None``. + + The dictionary is indexed by keys that are the names of the fields. + Each entry in the dictionary is a tuple fully describing the field:: + + (dtype, offset[, title]) + + If present, the optional title can be any object (if it is a string + or unicode then it will also be a key in the fields dictionary, + otherwise it's meta-data). Notice also that the first two elements + of the tuple can be passed directly as arguments to the ``ndarray.getfield`` + and ``ndarray.setfield`` methods. + + See Also + -------- + ndarray.getfield, ndarray.setfield + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> print dt.fields + {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('flags', + """ + Bit-flags describing how this data type is to be interpreted. + + Bit-masks are in `numpy.core.multiarray` as the constants + `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, + `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation + of these flags is in C-API documentation; they are largely useful + for user-defined data-types. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', + """ + Boolean indicating whether this dtype contains any reference-counted + objects in any fields or sub-dtypes. + + Recall that what is actually in the ndarray memory representing + the Python object is the memory address of that object (a pointer). + Special handling may be required, and this attribute is useful for + distinguishing data types that may contain arbitrary Python objects + and data-types that won't. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', + """ + Integer indicating how this dtype relates to the built-in dtypes. + + Read-only. + + = ======================================================================== + 0 if this is a structured array type, with fields + 1 if this is a dtype compiled into numpy (such as ints, floats etc) + 2 if the dtype is for a user-defined numpy type + A user-defined type uses the numpy C-API machinery to extend + numpy to handle a new array type. See + :ref:`user.user-defined-data-types` in the Numpy manual. 
+ = ======================================================================== + + Examples + -------- + >>> dt = np.dtype('i2') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype('f8') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.isbuiltin + 0 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', + """ + Boolean indicating whether the byte order of this dtype is native + to the platform. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', + """ + Boolean indicating whether the dtype is a struct which maintains + field alignment. This flag is sticky, so when combining multiple + structs together, it is preserved and produces new dtypes which + are also aligned. + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', + """ + The element size of this data-type object. + + For 18 of the 21 types this number is fixed by the data-type. + For the flexible data-types, this number can be anything. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('kind', + """ + A character code (one of 'biufcOSUV') identifying the general kind of data. + + = ====================== + b boolean + i signed integer + u unsigned integer + f floating-point + c complex floating-point + O object + S (byte-)string + U Unicode + V void + = ====================== + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('name', + """ + A bit-width name for this data-type. + + Un-sized flexible data-type objects do not have this attribute. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('names', + """ + Ordered list of field names, or ``None`` if there are no fields. + + The names are ordered according to increasing byte offset. This can be + used, for example, to walk through all of the named fields in offset order. + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.names + ('name', 'grades') + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('num', + """ + A unique number for each of the 21 different built-in types. + + These are roughly ordered from least-to-most precision. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('shape', + """ + Shape tuple of the sub-array if this data type describes a sub-array, + and ``()`` otherwise. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('str', + """The array-protocol typestring of this data-type object.""")) + +add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', + """ + Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and + None otherwise. + + The *shape* is the fixed shape of the sub-array described by this + data type, and *item_dtype* the data type of the array. + + If a field whose dtype object has this attribute is retrieved, + then the extra dimensions implied by *shape* are tacked on to + the end of the retrieved array. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('type', + """The type object used to instantiate a scalar of this data-type.""")) + +############################################################################## +# +# dtype methods +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', + """ + newbyteorder(new_order='S') + + Return a new dtype with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. 
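+
+ For example, ``np.dtype('<i4').newbyteorder() == np.dtype('>i4')``
+ holds on any platform: the default 'S' code swaps whatever byte order
+ the dtype currently has, independent of the machine's own endianness.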
+
+ Parameters
+ ----------
+ new_order : string, optional
+ Byte order to force; a value from the byte order
+ specifications below. The default value ('S') results in
+ swapping the current byte order.
+ `new_order` codes can be any of::
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'L'} - little endian
+ * {'>', 'B'} - big endian
+ * {'=', 'N'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ The code does a case-insensitive check on the first letter of
+ `new_order` for these alternatives. For example, any of '>'
+ or 'B' or 'b' or 'brian' are valid to specify big-endian.
+
+ Returns
+ -------
+ new_dtype : dtype
+ New dtype object with the given change to the byte order.
+
+ Notes
+ -----
+ Changes are also made in all fields and sub-arrays of the data type.
+
+ Examples
+ --------
+ >>> import sys
+ >>> sys_is_le = sys.byteorder == 'little'
+ >>> native_code = sys_is_le and '<' or '>'
+ >>> swapped_code = sys_is_le and '>' or '<'
+ >>> native_dt = np.dtype(native_code+'i2')
+ >>> swapped_dt = np.dtype(swapped_code+'i2')
+ >>> native_dt.newbyteorder('S') == swapped_dt
+ True
+ >>> native_dt.newbyteorder() == swapped_dt
+ True
+ >>> native_dt == swapped_dt.newbyteorder('S')
+ True
+ >>> native_dt == swapped_dt.newbyteorder('=')
+ True
+ >>> native_dt == swapped_dt.newbyteorder('N')
+ True
+ >>> native_dt == native_dt.newbyteorder('|')
+ True
+ >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+ True
+ >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+ True
+ >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+ True
+ >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+ True
+
+ """))
+
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar',
+ """
+ busdaycalendar(weekmask='1111100', holidays=None)
+
+ A business day calendar object that efficiently stores information
+ defining valid days for the busday family of functions.
+
+ The default valid days are Monday through Friday ("business days").
+ A busdaycalendar object can be specified with any set of weekly
+ valid days, plus an optional set of "holiday" dates that will
+ always be invalid.
+
+ Once a busdaycalendar object is created, the weekmask and holidays
+ cannot be modified.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates, no matter which
+ weekday they fall upon. Holiday dates may be specified in any
+ order, and NaT (not-a-time) dates are ignored. This list is
+ saved in a normalized form that is suited for fast calculations
+ of valid days.
+
+ Returns
+ -------
+ out : busdaycalendar
+ A business day calendar object containing the specified
+ weekmask and holidays values.
+
+ See Also
+ --------
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+ + Attributes + ---------- + Note: once a busdaycalendar object is created, you cannot modify the + weekmask or holidays. The attributes return copies of internal data. + weekmask : (copy) seven-element array of bool + holidays : (copy) sorted array of datetime64[D] + + Examples + -------- + >>> # Some important days in July + ... bdd = np.busdaycalendar( + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + >>> # Default is Monday to Friday weekdays + ... bdd.weekmask + array([ True, True, True, True, True, False, False], dtype='bool') + >>> # Any holidays already on the weekend are removed + ... bdd.holidays + array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') + """) + +add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask', + """A copy of the seven-element boolean mask indicating valid days.""")) + +add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays', + """A copy of the holiday array indicating additional invalid days.""")) + +add_newdoc('numpy.core.multiarray', 'is_busday', + """ + is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) + + Calculates which of the given dates are valid days, and which are not. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of bool, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of bool + An array with the same shape as ``dates``, containing True for + each valid day, and False for each invalid day. + + See Also + -------- + busdaycalendar: An object that specifies a custom set of valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> # The weekdays are Friday, Saturday, and Monday + ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + array([False, False, True], dtype='bool') + """) + +add_newdoc('numpy.core.multiarray', 'busday_offset', + """ + busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) + + First adjusts the date to fall on a valid day according to + the ``roll`` rule, then applies offsets to the given dates + counted in valid days. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + offsets : array_like of int + The array of offsets, which is broadcast with ``dates``. 
+ roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional + How to treat dates that do not fall on a valid day. The default + is 'raise'. + + * 'raise' means to raise an exception for an invalid day. + * 'nat' means to return a NaT (not-a-time) for an invalid day. + * 'forward' and 'following' mean to take the first valid day + later in time. + * 'backward' and 'preceding' mean to take the first valid day + earlier in time. + * 'modifiedfollowing' means to take the first valid day + later in time unless it is across a Month boundary, in which + case to take the first valid day earlier in time. + * 'modifiedpreceding' means to take the first valid day + earlier in time unless it is across a Month boundary, in which + case to take the first valid day later in time. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of datetime64[D], optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of datetime64[D] + An array with a shape from broadcasting ``dates`` and ``offsets`` + together, containing the dates with offsets applied. + + See Also + -------- + busdaycalendar: An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> # First business day in October 2011 (not accounting for holidays) + ... np.busday_offset('2011-10', 0, roll='forward') + numpy.datetime64('2011-10-03','D') + >>> # Last business day in February 2012 (not accounting for holidays) + ... np.busday_offset('2012-03', -1, roll='forward') + numpy.datetime64('2012-02-29','D') + >>> # Third Wednesday in January 2011 + ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') + numpy.datetime64('2011-01-19','D') + >>> # 2012 Mother's Day in Canada and the U.S. + ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') + numpy.datetime64('2012-05-13','D') + + >>> # First business day on or after a date + ... np.busday_offset('2011-03-20', 0, roll='forward') + numpy.datetime64('2011-03-21','D') + >>> np.busday_offset('2011-03-22', 0, roll='forward') + numpy.datetime64('2011-03-22','D') + >>> # First business day after a date + ... 
np.busday_offset('2011-03-20', 1, roll='backward')
+    numpy.datetime64('2011-03-21','D')
+    >>> np.busday_offset('2011-03-22', 1, roll='backward')
+    numpy.datetime64('2011-03-23','D')
+    """)
+
+add_newdoc('numpy.core.multiarray', 'busday_count',
+    """
+    busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
+
+    Counts the number of valid days between `begindates` and
+    `enddates`, not including the day of `enddates`.
+
+    If ``enddates`` specifies a date value that is earlier than the
+    corresponding ``begindates`` date value, the count will be negative.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    begindates : array_like of datetime64[D]
+        The array of the first dates for counting.
+    enddates : array_like of datetime64[D]
+        The array of the end dates for counting, which are excluded
+        from the count themselves.
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates. They may be
+        specified in any order, and NaT (not-a-time) dates are ignored.
+        This list is saved in a normalized form that is suited for
+        fast calculations of valid days.
+    busdaycal : busdaycalendar, optional
+        A `busdaycalendar` object which specifies the valid days. If this
+        parameter is provided, neither weekmask nor holidays may be
+        provided.
+    out : array of int, optional
+        If provided, this array is filled with the result.
+
+    Returns
+    -------
+    out : array of int
+        An array with a shape from broadcasting ``begindates`` and ``enddates``
+        together, containing the number of valid days between
+        the begin and end dates.
+
+    See Also
+    --------
+    busdaycalendar: An object that specifies a custom set of valid days.
+    is_busday : Returns a boolean array indicating valid days.
+    busday_offset : Applies an offset counted in valid days.
+
+    Examples
+    --------
+    >>> # Number of weekdays in January 2011
+    ... np.busday_count('2011-01', '2011-02')
+    21
+    >>> # Number of weekdays in 2011
+    ... np.busday_count('2011', '2012')
+    260
+    >>> # Number of Saturdays in 2011
+    ... np.busday_count('2011', '2012', weekmask='Sat')
+    53
+    """)
+
+##############################################################################
+#
+# nd_grid instances
+#
+##############################################################################
+
+add_newdoc('numpy.lib.index_tricks', 'mgrid',
+    """
+    `nd_grid` instance which returns a dense multi-dimensional "meshgrid".
+
+    An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
+    (or fleshed out) mesh-grid when indexed, so that each returned argument
+    has the same shape. The dimensions and number of the output arrays are
+    equal to the number of indexing dimensions. If the step length is not a
+    complex number, then the stop is not inclusive.
+
+    However, if the step length is a **complex number** (e.g. 5j), then
+    the integer part of its magnitude is interpreted as specifying the
+    number of points to create between the start and stop values, where
+    the stop value **is inclusive**.
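+
+    For example, contrasting the two stepping modes (illustrative doctest,
+    assuming the default numpy float repr):
+
+    >>> np.mgrid[0:3]        # real step: stop is excluded
+    array([0, 1, 2])
+    >>> np.mgrid[0:3:4j]     # complex step: 4 points, stop included
+    array([ 0.,  1.,  2.,  3.])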
+
+    Returns
+    -------
+    mesh-grid `ndarrays` all of the same dimensions
+
+    See Also
+    --------
+    numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+    ogrid : like mgrid but returns open (not fleshed out) mesh grids
+    r_ : array concatenator
+
+    Examples
+    --------
+    >>> np.mgrid[0:5,0:5]
+    array([[[0, 0, 0, 0, 0],
+            [1, 1, 1, 1, 1],
+            [2, 2, 2, 2, 2],
+            [3, 3, 3, 3, 3],
+            [4, 4, 4, 4, 4]],
+           [[0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4]]])
+    >>> np.mgrid[-1:1:5j]
+    array([-1. , -0.5,  0. ,  0.5,  1. ])
+
+    """)
+
+add_newdoc('numpy.lib.index_tricks', 'ogrid',
+    """
+    `nd_grid` instance which returns an open multi-dimensional "meshgrid".
+
+    An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
+    (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
+    of each returned array is greater than 1. The dimension and number of the
+    output arrays are equal to the number of indexing dimensions. If the step
+    length is not a complex number, then the stop is not inclusive.
+
+    However, if the step length is a **complex number** (e.g. 5j), then
+    the integer part of its magnitude is interpreted as specifying the
+    number of points to create between the start and stop values, where
+    the stop value **is inclusive**.
+
+    Returns
+    -------
+    mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
+
+    See Also
+    --------
+    np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+    mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
+    r_ : array concatenator
+
+    Examples
+    --------
+    >>> from numpy import ogrid
+    >>> ogrid[-1:1:5j]
+    array([-1. , -0.5,  0. ,  0.5,  1. ])
+    >>> ogrid[0:5,0:5]
+    [array([[0],
+            [1],
+            [2],
+            [3],
+            [4]]), array([[0, 1, 2, 3, 4]])]
+
+    """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+    """
+    Base class for numpy scalar types.
+
+    Class from which most (all?) numpy scalar types are derived. For
+    consistency, exposes the same API as `ndarray`, despite many
+    consequent attributes being either "get-only," or completely irrelevant.
+    This is the class from which it is strongly suggested users should derive
+    custom scalar types.
+
+    """)
+
+# Attributes
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('T',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class so as to
+    provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('base',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class so as to
+    provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('data', + """Pointer to start of data.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', + """Get array data-descriptor.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flags', + """The integer value of flags.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flat', + """A 1-D view of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('imag', + """The imaginary part of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', + """The length of one element in bytes.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', + """The length of the scalar in bytes.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', + """The number of array dimensions.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('real', + """The real part of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('shape', + """Tuple of array dimensions.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('size', + """The number of elements in the gentype.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('strides', + """Tuple of bytes steps in each dimension.""")) + +# Methods + +add_newdoc('numpy.core.numerictypes', 'generic', ('all', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('any', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('astype', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
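+
+    Examples
+    --------
+    Concrete derived types do implement this method; an illustrative
+    doctest:
+
+    >>> np.float64(1.9).astype(np.int32)  # truncates toward zero
+    1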
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class so as to + provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('choose', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('clip', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('compress', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('copy', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dump', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('fill', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('item', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('max', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('mean', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
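+
+    Examples
+    --------
+    An illustrative doctest against a concrete derived type, which does
+    implement the method:
+
+    >>> np.float64(3.0).mean()  # mean of a zero-dimensional value
+    3.0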
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('min', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', + """ + newbyteorder(new_order='S') + + Return a new `dtype` with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + The `new_order` code can be any from the following: + + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * 'S' - swap dtype from current to opposite endian + * {'|', 'I'} - ignore (no change to byte order) + + Parameters + ---------- + new_order : str, optional + Byte order to force; a value from the byte order specifications + above. The default value ('S') results in swapping the current + byte order. The code does a case-insensitive check on the first + letter of `new_order` for the alternatives above. For example, + any of 'B' or 'b' or 'biggish' are valid to specify big-endian. + + + Returns + ------- + new_dtype : dtype + New `dtype` object with the given change to the byte order. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('prod', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('put', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. 
+ + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('resize', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('round', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class so as to + provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sort', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('std', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
+ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sum', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('take', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('trace', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('var', + """ + Not implemented (virtual attribute) + + Class generic exists solely to derive numpy scalars from, and possesses, + albeit unimplemented, all the attributes of the ndarray class + so as to provide a uniform API. + + See Also + -------- + The corresponding attribute of the derived class of interest. 
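+
+    Examples
+    --------
+    Illustrative doctest; a single value has zero variance:
+
+    >>> np.float64(2.0).var()
+    0.0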
+
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('view',
+    """
+    Not implemented (virtual attribute)
+
+    Class generic exists solely to derive numpy scalars from, and possesses,
+    albeit unimplemented, all the attributes of the ndarray class
+    so as to provide a uniform API.
+
+    See Also
+    --------
+    The corresponding attribute of the derived class of interest.
+
+    """))
+
+
+##############################################################################
+#
+# Documentation for other scalar classes
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'bool_',
+    """Numpy's Boolean type. Character code: ``?``. Alias: bool8""")
+
+add_newdoc('numpy.core.numerictypes', 'complex64',
+    """
+    Complex number type composed of two 32 bit floats. Character code: 'F'.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'complex128',
+    """
+    Complex number type composed of two 64 bit floats. Character code: 'D'.
+    Python complex compatible.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'complex256',
+    """
+    Complex number type composed of two 128-bit floats. Character code: 'G'.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'float32',
+    """
+    32-bit floating-point number. Character code 'f'. C float compatible.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'float64',
+    """
+    64-bit floating-point number. Character code 'd'. Python float compatible.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'float96',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'float128',
+    """
+    128-bit floating-point number. Character code: 'g'. C long float
+    compatible.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'int8',
+    """8-bit integer. Character code ``b``. C char compatible.""")
+
+add_newdoc('numpy.core.numerictypes', 'int16',
+    """16-bit integer. Character code ``h``. C short compatible.""")
+
+add_newdoc('numpy.core.numerictypes', 'int32',
+    """32-bit integer. Character code 'i'. C int compatible.""")
+
+add_newdoc('numpy.core.numerictypes', 'int64',
+    """64-bit integer. Character code 'l'. Python int compatible.""")
+
+add_newdoc('numpy.core.numerictypes', 'object_',
+    """Any Python object. Character code: 'O'.""")
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/__init__.py
new file mode 100644
index 0000000000000..5b371f5c064ba
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/__init__.py
@@ -0,0 +1,20 @@
+"""
+Compatibility module.
+
+This module contains duplicated code from Python itself or 3rd party
+extensions, which may be included for the following reasons:
+
+  * compatibility
+  * we may only need a small subset of the copied library/module
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from . import _inspect
+from . import py3k
+from ._inspect import getargspec, formatargspec
+from .py3k import *
+
+__all__ = []
+__all__.extend(_inspect.__all__)
+__all__.extend(py3k.__all__)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/_inspect.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/_inspect.py
new file mode 100644
index 0000000000000..6a499e727b6ee
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/_inspect.py
@@ -0,0 +1,221 @@
+"""Subset of inspect module from upstream python
+
+We use this instead of upstream because upstream inspect is slow to import, and
+significantly contributes to numpy import times. Importing this copy has
+almost no overhead.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import types
+import dis  # needed by getargs() for anonymous (tuple) argument unpacking
+
+__all__ = ['getargspec', 'formatargspec']
+
+# ----------------------------------------------------------- type-checking
+def ismethod(object):
+    """Return true if the object is an instance method.
+
+    Instance method objects provide these attributes:
+        __doc__         documentation string
+        __name__        name with which this method was defined
+        im_class        class object in which this method belongs
+        im_func         function object containing implementation of method
+        im_self         instance to which this method is bound, or None"""
+    return isinstance(object, types.MethodType)
+
+def isfunction(object):
+    """Return true if the object is a user-defined function.
+
+    Function objects provide these attributes:
+        __doc__         documentation string
+        __name__        name with which this function was defined
+        func_code       code object containing compiled function bytecode
+        func_defaults   tuple of any default values for arguments
+        func_doc        (same as __doc__)
+        func_globals    global namespace in which this function was defined
+        func_name       (same as __name__)"""
+    return isinstance(object, types.FunctionType)

+def iscode(object):
+    """Return true if the object is a code object.
+
+    Code objects provide these attributes:
+        co_argcount     number of arguments (not including * or ** args)
+        co_code         string of raw compiled bytecode
+        co_consts       tuple of constants used in the bytecode
+        co_filename     name of file in which this code object was created
+        co_firstlineno  number of first line in Python source code
+        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+        co_lnotab       encoded mapping of line numbers to bytecode indices
+        co_name         name with which this code object was defined
+        co_names        tuple of names of local variables
+        co_nlocals      number of local variables
+        co_stacksize    virtual machine stack space required
+        co_varnames     tuple of names of arguments and local variables"""
+    return isinstance(object, types.CodeType)
+
+# ------------------------------------------------ argument list extraction
+# These constants are from Python's compile.h.
+CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
+
+def getargs(co):
+    """Get information about the arguments accepted by a code object.
+
+    Three things are returned: (args, varargs, varkw), where 'args' is
+    a list of argument names (possibly containing nested lists), and
+    'varargs' and 'varkw' are the names of the * and ** arguments or None."""
+
+    if not iscode(co):
+        raise TypeError('arg is not a code object')
+
+    code = co.co_code
+    nargs = co.co_argcount
+    names = co.co_varnames
+    args = list(names[:nargs])
+    step = 0
+
+    # The following acrobatics are for anonymous (tuple) arguments.
+    for i in range(nargs):
+        if args[i][:1] in ['', '.']:
+            stack, remain, count = [], [], []
+            while step < len(code):
+                op = ord(code[step])
+                step = step + 1
+                if op >= dis.HAVE_ARGUMENT:
+                    opname = dis.opname[op]
+                    value = ord(code[step]) + ord(code[step+1])*256
+                    step = step + 2
+                    if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']:
+                        remain.append(value)
+                        count.append(value)
+                    elif opname == 'STORE_FAST':
+                        stack.append(names[value])
+
+                        # Special case for sublists of length 1: def foo((bar))
+                        # doesn't generate the UNPACK_TUPLE bytecode, so if
+                        # `remain` is empty here, we have such a sublist.
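+                        # At this point `stack` holds the names collected via
+                        # STORE_FAST, while `remain` counts how many elements
+                        # each still-open UNPACK_TUPLE/UNPACK_SEQUENCE expects.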
+ if not remain: + stack[0] = [stack[0]] + break + else: + remain[-1] = remain[-1] - 1 + while remain[-1] == 0: + remain.pop() + size = count.pop() + stack[-size:] = [stack[-size:]] + if not remain: break + remain[-1] = remain[-1] - 1 + if not remain: break + args[i] = stack[0] + + varargs = None + if co.co_flags & CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + return args, varargs, varkw + +def getargspec(func): + """Get the names and default values of a function's arguments. + + A tuple of four things is returned: (args, varargs, varkw, defaults). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'defaults' is an n-tuple of the default values of the last n arguments. + """ + + if ismethod(func): + func = func.__func__ + if not isfunction(func): + raise TypeError('arg is not a Python function') + args, varargs, varkw = getargs(func.__code__) + return args, varargs, varkw, func.__defaults__ + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame.""" + args, varargs, varkw = getargs(frame.f_code) + return args, varargs, varkw, frame.f_locals + +def joinseq(seq): + if len(seq) == 1: + return '(' + seq[0] + ',)' + else: + return '(' + ', '.join(seq) + ')' + +def strseq(object, convert, join=joinseq): + """Recursively walk a sequence, stringifying each element.""" + if type(object) in [list, tuple]: + return join([strseq(_o, convert, join) for _o in object]) + else: + return convert(object) + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargspec. + + The first four arguments are (args, varargs, varkw, defaults). The + other four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments.""" + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i in range(len(args)): + spec = strseq(args[i], formatarg, join) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(varargs)) + if varkw is not None: + specs.append(formatvarkw(varkw)) + return '(' + ', '.join(specs) + ')' + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. 
The ninth
+    argument is an optional function to format the sequence of arguments."""
+    def convert(name, locals=locals,
+                formatarg=formatarg, formatvalue=formatvalue):
+        return formatarg(name) + formatvalue(locals[name])
+    specs = []
+    for i in range(len(args)):
+        specs.append(strseq(args[i], convert, join))
+    if varargs:
+        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+    if varkw:
+        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+    return '(' + ', '.join(specs) + ')'
+
+if __name__ == '__main__':
+    import inspect
+    def foo(x, y, z=None):
+        return None
+
+    print(inspect.getargs(foo.__code__))
+    print(getargs(foo.__code__))
+
+    print(inspect.getargspec(foo))
+    print(getargspec(foo))
+
+    print(inspect.formatargspec(*inspect.getargspec(foo)))
+    print(formatargspec(*getargspec(foo)))
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/py3k.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/py3k.py
new file mode 100644
index 0000000000000..4607d95023322
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/py3k.py
@@ -0,0 +1,89 @@
+"""
+Python 3 compatibility tools.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
+           'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
+           'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
+           'integer_types']
+
+import sys
+
+if sys.version_info[0] >= 3:
+    import io
+
+    long = int
+    integer_types = (int,)
+    basestring = str
+    unicode = str
+    bytes = bytes
+
+    def asunicode(s):
+        if isinstance(s, bytes):
+            return s.decode('latin1')
+        return str(s)
+
+    def asbytes(s):
+        if isinstance(s, bytes):
+            return s
+        return str(s).encode('latin1')
+
+    def asstr(s):
+        if isinstance(s, bytes):
+            return s.decode('latin1')
+        return str(s)
+
+    def isfileobj(f):
+        return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
+
+    def open_latin1(filename, mode='r'):
+        return open(filename, mode=mode, encoding='iso-8859-1')
+
+    def sixu(s):
+        return s
+
+    strchar = 'U'
+
+
+else:
+    bytes = str
+    long = long
+    basestring = basestring
+    unicode = unicode
+    integer_types = (int, long)
+    asbytes = str
+    asstr = str
+    strchar = 'S'
+
+
+    def isfileobj(f):
+        return isinstance(f, file)
+
+    def asunicode(s):
+        if isinstance(s, unicode):
+            return s
+        return str(s).decode('ascii')
+
+    def open_latin1(filename, mode='r'):
+        return open(filename, mode=mode)
+
+    def sixu(s):
+        return unicode(s, 'unicode_escape')
+
+
+def getexception():
+    return sys.exc_info()[1]
+
+def asbytes_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asbytes_nested(y) for y in x]
+    else:
+        return asbytes(x)
+
+def asunicode_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asunicode_nested(y) for y in x]
+    else:
+        return asunicode(x)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/setup.py
new file mode 100644
index 0000000000000..c163bcaf973c3
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/setup.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+from __future__ import division, print_function
+
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('compat', parent_package, top_path)
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
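+    # Standalone entry point: numpy.distutils builds this subpackage from
+    # the Configuration returned by configuration() above.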
+    setup(configuration=configuration)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/__init__.py
new file mode 100644
index 0000000000000..0b8d5bb17786a
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/__init__.py
@@ -0,0 +1,78 @@
+from __future__ import division, absolute_import, print_function
+
+from .info import __doc__
+from numpy.version import version as __version__
+
+from . import multiarray
+from . import umath
+from . import _internal # for freeze programs
+from . import numerictypes as nt
+multiarray.set_typeDict(nt.sctypeDict)
+from . import numeric
+from .numeric import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import defchararray as char
+from . import records as rec
+from .records import *
+from .memmap import *
+from .defchararray import chararray
+from . import scalarmath
+from . import function_base
+from .function_base import *
+from . import machar
+from .machar import *
+from . import getlimits
+from .getlimits import *
+from . import shape_base
+from .shape_base import *
+del nt
+
+from .fromnumeric import amax as max, amin as min, \
+     round_ as round
+from .numeric import absolute as abs
+
+__all__ = ['char', 'rec', 'memmap']
+__all__ += numeric.__all__
+__all__ += fromnumeric.__all__
+__all__ += rec.__all__
+__all__ += ['chararray']
+__all__ += function_base.__all__
+__all__ += machar.__all__
+__all__ += getlimits.__all__
+__all__ += shape_base.__all__
+
+
+from numpy.testing import Tester
+test = Tester().test
+bench = Tester().bench
+
+# Make it possible for ufuncs to be pickled.
+# Here are the loading and unloading functions.
+# The name numpy.core._ufunc_reconstruct must be
+# available for unpickling to work.
+def _ufunc_reconstruct(module, name):
+    # The `fromlist` kwarg is required to ensure that `mod` points to the
+    # inner-most module rather than the parent package when module name is
+    # nested. This makes it possible to pickle non-toplevel ufuncs such as
+    # scipy.special.expit for instance.
+    mod = __import__(module, fromlist=[name])
+    return getattr(mod, name)
+
+def _ufunc_reduce(func):
+    from pickle import whichmodule
+    name = func.__name__
+    return _ufunc_reconstruct, (whichmodule(func, name), name)
+
+
+import sys
+if sys.version_info[0] >= 3:
+    import copyreg
+else:
+    import copy_reg as copyreg
+
+copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
+# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
+del copyreg
+del sys
+del _ufunc_reduce
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_dummy.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_dummy.py
new file mode 100644
index 0000000000000..aaa56df579d40
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_dummy.py
@@ -0,0 +1,7 @@
+def __bootstrap__():
+    global __bootstrap__, __loader__, __file__
+    import sys, pkg_resources, imp
+    __file__ = pkg_resources.resource_filename(__name__, '_dummy.cpython-34m.so')
+    __loader__ = None; del __bootstrap__, __loader__
+    imp.load_dynamic(__name__,__file__)
+__bootstrap__()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_internal.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_internal.py
new file mode 100644
index 0000000000000..d32f593904ae7
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_internal.py
@@ -0,0 +1,570 @@
+"""
+A place for code to be called from core C-code.
+
+Some things are more easily handled in Python.
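+
+For example, the private helper ``_commastring`` defined below parses
+comma-separated dtype strings (illustrative Python 3 doctest, not a
+public API):
+
+    >>> from numpy.core._internal import _commastring
+    >>> _commastring(b'f8, 2i4')
+    [b'f8', (b'i4', 2)]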
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import re
+import sys
+import warnings
+
+from numpy.compat import asbytes, bytes
+
+if (sys.byteorder == 'little'):
+    _nbo = asbytes('<')
+else:
+    _nbo = asbytes('>')
+
+def _makenames_list(adict, align):
+    from .multiarray import dtype
+    allfields = []
+    fnames = list(adict.keys())
+    for fname in fnames:
+        obj = adict[fname]
+        n = len(obj)
+        if not isinstance(obj, tuple) or n not in [2, 3]:
+            raise ValueError("entry not a 2- or 3- tuple")
+        if (n > 2) and (obj[2] == fname):
+            continue
+        num = int(obj[1])
+        if (num < 0):
+            raise ValueError("invalid offset.")
+        format = dtype(obj[0], align=align)
+        if (format.itemsize == 0):
+            raise ValueError("all itemsizes must be fixed.")
+        if (n > 2):
+            title = obj[2]
+        else:
+            title = None
+        allfields.append((fname, format, num, title))
+    # sort by offsets
+    allfields.sort(key=lambda x: x[2])
+    names = [x[0] for x in allfields]
+    formats = [x[1] for x in allfields]
+    offsets = [x[2] for x in allfields]
+    titles = [x[3] for x in allfields]
+
+    return names, formats, offsets, titles
+
+# Called in PyArray_DescrConverter function when
+# a dictionary without "names" and "formats"
+# fields is used as a data-type descriptor.
+def _usefields(adict, align):
+    from .multiarray import dtype
+    try:
+        names = adict[-1]
+    except KeyError:
+        names = None
+    if names is None:
+        names, formats, offsets, titles = _makenames_list(adict, align)
+    else:
+        formats = []
+        offsets = []
+        titles = []
+        for name in names:
+            res = adict[name]
+            formats.append(res[0])
+            offsets.append(res[1])
+            if (len(res) > 2):
+                titles.append(res[2])
+            else:
+                titles.append(None)
+
+    return dtype({"names" : names,
+                  "formats" : formats,
+                  "offsets" : offsets,
+                  "titles" : titles}, align)
+
+
+# construct an array_protocol descriptor list
+# from the fields attribute of a descriptor
+# This calls itself recursively but should eventually hit
+# a descriptor that has no fields and then return
+# a simple typestring
+
+def _array_descr(descriptor):
+    fields = descriptor.fields
+    if fields is None:
+        subdtype = descriptor.subdtype
+        if subdtype is None:
+            if descriptor.metadata is None:
+                return descriptor.str
+            else:
+                new = descriptor.metadata.copy()
+                if new:
+                    return (descriptor.str, new)
+                else:
+                    return descriptor.str
+        else:
+            return (_array_descr(subdtype[0]), subdtype[1])
+
+
+    names = descriptor.names
+    ordered_fields = [fields[x] + (x,) for x in names]
+    result = []
+    offset = 0
+    for field in ordered_fields:
+        if field[1] > offset:
+            num = field[1] - offset
+            result.append(('', '|V%d' % num))
+            offset += num
+        if len(field) > 3:
+            name = (field[2], field[3])
+        else:
+            name = field[2]
+        if field[0].subdtype:
+            tup = (name, _array_descr(field[0].subdtype[0]),
+                   field[0].subdtype[1])
+        else:
+            tup = (name, _array_descr(field[0]))
+        offset += field[0].itemsize
+        result.append(tup)
+
+    return result
+
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibilty.
+def _reconstruct(subtype, shape, dtype):
+    from .multiarray import ndarray
+    return ndarray.__new__(subtype, shape, dtype)
+
+
+# format_re was originally from numarray by J. Todd Miller
+
+format_re = re.compile(asbytes(
+    r'(?P<order1>[<>|=]?)'
+    r'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
+    r'(?P<order2>[<>|=]?)'
+    r'(?P<dtype>[A-Za-z0-9.]*(?:\[[a-zA-Z0-9,.]+\])?)'))
+sep_re = re.compile(asbytes(r'\s*,\s*'))
+space_re = re.compile(asbytes(r'\s+$'))
+
+# astr is a string (perhaps comma separated)
+
+_convorder = {asbytes('='): _nbo}
+
+def _commastring(astr):
+    startindex = 0
+    result = []
+    while startindex < len(astr):
+        mo = format_re.match(astr, pos=startindex)
+        try:
+            (order1, repeats, order2, dtype) = mo.groups()
+        except (TypeError, AttributeError):
+            raise ValueError('format number %d of "%s" is not recognized' %
+                             (len(result)+1, astr))
+        startindex = mo.end()
+        # Separator or ending padding
+        if startindex < len(astr):
+            if space_re.match(astr, pos=startindex):
+                startindex = len(astr)
+            else:
+                mo = sep_re.match(astr, pos=startindex)
+                if not mo:
+                    raise ValueError(
+                        'format number %d of "%s" is not recognized' %
+                        (len(result)+1, astr))
+                startindex = mo.end()
+
+        if order2 == asbytes(''):
+            order = order1
+        elif order1 == asbytes(''):
+            order = order2
+        else:
+            order1 = _convorder.get(order1, order1)
+            order2 = _convorder.get(order2, order2)
+            if (order1 != order2):
+                raise ValueError('inconsistent byte-order specification %s and %s' % (order1, order2))
+            order = order1
+
+        if order in [asbytes('|'), asbytes('='), _nbo]:
+            order = asbytes('')
+        dtype = order + dtype
+        if (repeats == asbytes('')):
+            newitem = dtype
+        else:
+            newitem = (dtype, eval(repeats))
+        result.append(newitem)
+
+    return result
+
+def _getintp_ctype():
+    from .multiarray import dtype
+    val = _getintp_ctype.cache
+    if val is not None:
+        return val
+    char = dtype('p').char
+    import ctypes
+    if (char == 'i'):
+        val = ctypes.c_int
+    elif char == 'l':
+        val = ctypes.c_long
+    elif char == 'q':
+        val = ctypes.c_longlong
+    else:
+        val = ctypes.c_long
+    _getintp_ctype.cache = val
+    return val
+_getintp_ctype.cache = None
+
+# Used for .ctypes attribute of ndarray
+
+class _missing_ctypes(object):
+    def cast(self, num, obj):
+        return num
+
+    def c_void_p(self, num):
+        return num
+
+class _ctypes(object):
+    def __init__(self, array, ptr=None):
+        try:
+            import ctypes
+            self._ctypes = ctypes
+        except ImportError:
+            self._ctypes = _missing_ctypes()
+        self._arr = array
+        self._data = ptr
+        if self._arr.ndim == 0:
+            self._zerod = True
+        else:
+            self._zerod = False
+
+    def data_as(self, obj):
+        return self._ctypes.cast(self._data, obj)
+
+    def shape_as(self, obj):
+        if self._zerod:
+            return None
+        return (obj*self._arr.ndim)(*self._arr.shape)
+
+    def strides_as(self, obj):
+        if self._zerod:
+            return None
+        return (obj*self._arr.ndim)(*self._arr.strides)
+
+    def get_data(self):
+        return self._data
+
+    def get_shape(self):
+        if self._zerod:
+            return None
+        return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
+
+    def get_strides(self):
+        if self._zerod:
+            return None
+        return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
+
+    def get_as_parameter(self):
+        return self._ctypes.c_void_p(self._data)
+
+    data = property(get_data, None, doc="c-types data")
+    shape = property(get_shape, None, doc="c-types shape")
+    strides = property(get_strides, None, doc="c-types strides")
+    _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
+
+
+# Given a datatype and an order object
+# return a new names tuple
+# with the order indicated
+def _newnames(datatype, order):
+    oldnames = datatype.names
+    nameslist = list(oldnames)
+    if isinstance(order, str):
+        order = [order]
+    if isinstance(order, (list, tuple)):
+        for name in order:
+            try:
+                nameslist.remove(name)
+            except ValueError:
+                raise
ValueError("unknown field name: %s" % (name,)) + return tuple(list(order) + nameslist) + raise ValueError("unsupported order value: %s" % (order,)) + +# Given an array with fields and a sequence of field names +# construct a new array with just those fields copied over +def _index_fields(ary, fields): + from .multiarray import empty, dtype, array + dt = ary.dtype + + names = [name for name in fields if name in dt.names] + formats = [dt.fields[name][0] for name in fields if name in dt.names] + offsets = [dt.fields[name][1] for name in fields if name in dt.names] + + view_dtype = {'names':names, 'formats':formats, 'offsets':offsets, 'itemsize':dt.itemsize} + view = ary.view(dtype=view_dtype) + + # Return a copy for now until behavior is fully deprecated + # in favor of returning view + copy_dtype = {'names':view_dtype['names'], 'formats':view_dtype['formats']} + return array(view, dtype=copy_dtype, copy=True) + +# Given a string containing a PEP 3118 format specifier, +# construct a Numpy dtype + +_pep3118_native_map = { + '?': '?', + 'b': 'b', + 'B': 'B', + 'h': 'h', + 'H': 'H', + 'i': 'i', + 'I': 'I', + 'l': 'l', + 'L': 'L', + 'q': 'q', + 'Q': 'Q', + 'e': 'e', + 'f': 'f', + 'd': 'd', + 'g': 'g', + 'Zf': 'F', + 'Zd': 'D', + 'Zg': 'G', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) + +_pep3118_standard_map = { + '?': '?', + 'b': 'b', + 'B': 'B', + 'h': 'i2', + 'H': 'u2', + 'i': 'i4', + 'I': 'u4', + 'l': 'i4', + 'L': 'u4', + 'q': 'i8', + 'Q': 'u8', + 'e': 'f2', + 'f': 'f', + 'd': 'd', + 'Zf': 'F', + 'Zd': 'D', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) + +def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False): + from numpy.core.multiarray import dtype + + fields = {} + offset = 0 + explicit_name = False + this_explicit_name = False + common_alignment = 1 + is_padding = False + last_offset = 0 + + dummy_name_index = [0] + def next_dummy_name(): + dummy_name_index[0] += 1 + def get_dummy_name(): + while True: + name = 'f%d' % dummy_name_index[0] + if name not in fields: + return name + next_dummy_name() + + # Parse spec + while spec: + value = None + + # End of structure, bail out to upper level + if spec[0] == '}': + spec = spec[1:] + break + + # Sub-arrays (1) + shape = None + if spec[0] == '(': + j = spec.index(')') + shape = tuple(map(int, spec[1:j].split(','))) + spec = spec[j+1:] + + # Byte order + if spec[0] in ('@', '=', '<', '>', '^', '!'): + byteorder = spec[0] + if byteorder == '!': + byteorder = '>' + spec = spec[1:] + + # Byte order characters also control native vs. 
standard type sizes + if byteorder in ('@', '^'): + type_map = _pep3118_native_map + type_map_chars = _pep3118_native_typechars + else: + type_map = _pep3118_standard_map + type_map_chars = _pep3118_standard_typechars + + # Item sizes + itemsize = 1 + if spec[0].isdigit(): + j = 1 + for j in range(1, len(spec)): + if not spec[j].isdigit(): + break + itemsize = int(spec[:j]) + spec = spec[j:] + + # Data types + is_padding = False + + if spec[:2] == 'T{': + value, spec, align, next_byteorder = _dtype_from_pep3118( + spec[2:], byteorder=byteorder, is_subdtype=True) + elif spec[0] in type_map_chars: + next_byteorder = byteorder + if spec[0] == 'Z': + j = 2 + else: + j = 1 + typechar = spec[:j] + spec = spec[j:] + is_padding = (typechar == 'x') + dtypechar = type_map[typechar] + if dtypechar in 'USV': + dtypechar += '%d' % itemsize + itemsize = 1 + numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder) + value = dtype(numpy_byteorder + dtypechar) + align = value.alignment + else: + raise ValueError("Unknown PEP 3118 data type specifier %r" % spec) + + # + # Native alignment may require padding + # + # Here we assume that the presence of a '@' character implicitly implies + # that the start of the array is *already* aligned. + # + extra_offset = 0 + if byteorder == '@': + start_padding = (-offset) % align + intra_padding = (-value.itemsize) % align + + offset += start_padding + + if intra_padding != 0: + if itemsize > 1 or (shape is not None and _prod(shape) > 1): + # Inject internal padding to the end of the sub-item + value = _add_trailing_padding(value, intra_padding) + else: + # We can postpone the injection of internal padding, + # as the item appears at most once + extra_offset += intra_padding + + # Update common alignment + common_alignment = (align*common_alignment + / _gcd(align, common_alignment)) + + # Convert itemsize to sub-array + if itemsize != 1: + value = dtype((value, (itemsize,))) + + # Sub-arrays (2) + if shape is not None: + value = dtype((value, shape)) + + # Field name + this_explicit_name = False + if spec and spec.startswith(':'): + i = spec[1:].index(':') + 1 + name = spec[1:i] + spec = spec[i+1:] + explicit_name = True + this_explicit_name = True + else: + name = get_dummy_name() + + if not is_padding or this_explicit_name: + if name in fields: + raise RuntimeError("Duplicate field name '%s' in PEP3118 format" + % name) + fields[name] = (value, offset) + last_offset = offset + if not this_explicit_name: + next_dummy_name() + + byteorder = next_byteorder + + offset += value.itemsize + offset += extra_offset + + # Check if this was a simple 1-item type + if len(fields) == 1 and not explicit_name and fields['f0'][1] == 0 \ + and not is_subdtype: + ret = fields['f0'][0] + else: + ret = dtype(fields) + + # Trailing padding must be explicitly added + padding = offset - ret.itemsize + if byteorder == '@': + padding += (-offset) % common_alignment + if is_padding and not this_explicit_name: + ret = _add_trailing_padding(ret, padding) + + # Finished + if is_subdtype: + return ret, spec, common_alignment, byteorder + else: + return ret + +def _add_trailing_padding(value, padding): + """Inject the specified number of padding bytes at the end of a dtype""" + from numpy.core.multiarray import dtype + + if value.fields is None: + vfields = {'f0': (value, 0)} + else: + vfields = dict(value.fields) + + if value.names and value.names[-1] == '' and \ + value[''].char == 'V': + # A trailing padding field is already present + vfields[''] = ('V%d' % (vfields[''][0].itemsize + 
padding), + vfields[''][1]) + value = dtype(vfields) + else: + # Get a free name for the padding field + j = 0 + while True: + name = 'pad%d' % j + if name not in vfields: + vfields[name] = ('V%d' % padding, value.itemsize) + break + j += 1 + + value = dtype(vfields) + if '' not in vfields: + # Strip out the name of the padding field + names = list(value.names) + names[-1] = '' + value.names = tuple(names) + return value + +def _prod(a): + p = 1 + for x in a: + p *= x + return p + +def _gcd(a, b): + """Calculate the greatest common divisor of a and b""" + while b: + a, b = b, a%b + return a diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_methods.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_methods.py new file mode 100644 index 0000000000000..00716e1b4e095 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_methods.py @@ -0,0 +1,134 @@ +""" +Array methods which are called by both the C-code for the method +and the Python code for the NumPy-namespace function + +""" +from __future__ import division, absolute_import, print_function + +import warnings + +from numpy.core import multiarray as mu +from numpy.core import umath as um +from numpy.core.numeric import asanyarray +from numpy.core import numerictypes as nt + +# save those O(100) nanoseconds! +umr_maximum = um.maximum.reduce +umr_minimum = um.minimum.reduce +umr_sum = um.add.reduce +umr_prod = um.multiply.reduce +umr_any = um.logical_or.reduce +umr_all = um.logical_and.reduce + +# avoid keyword arguments to speed up parsing, saves about 15%-20% for very +# small reductions +def _amax(a, axis=None, out=None, keepdims=False): + return umr_maximum(a, axis, None, out, keepdims) + +def _amin(a, axis=None, out=None, keepdims=False): + return umr_minimum(a, axis, None, out, keepdims) + +def _sum(a, axis=None, dtype=None, out=None, keepdims=False): + return umr_sum(a, axis, dtype, out, keepdims) + +def _prod(a, axis=None, dtype=None, out=None, keepdims=False): + return umr_prod(a, axis, dtype, out, keepdims) + +def _any(a, axis=None, dtype=None, out=None, keepdims=False): + return umr_any(a, axis, dtype, out, keepdims) + +def _all(a, axis=None, dtype=None, out=None, keepdims=False): + return umr_all(a, axis, dtype, out, keepdims) + +def _count_reduce_items(arr, axis): + if axis is None: + axis = tuple(range(arr.ndim)) + if not isinstance(axis, tuple): + axis = (axis,) + items = 1 + for ax in axis: + items *= arr.shape[ax] + return items + +def _mean(a, axis=None, dtype=None, out=None, keepdims=False): + arr = asanyarray(a) + + rcount = _count_reduce_items(arr, axis) + # Make this warning show up first + if rcount == 0: + warnings.warn("Mean of empty slice.", RuntimeWarning) + + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): + dtype = mu.dtype('f8') + + ret = umr_sum(arr, axis, dtype, out, keepdims) + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + arr = asanyarray(a) + + rcount = _count_reduce_items(arr, axis) + # Make this warning show up on top. 
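+    # Editor's note: rcount is the number of elements being reduced over,
+    # so ddof >= rcount leaves no degrees of freedom; e.g. a length-5 slice
+    # with ddof=5 ends up dividing by max(5 - 5, 0) == 0 further down,
+    # which is why the warning below fires first.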
+ if ddof >= rcount: + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): + dtype = mu.dtype('f8') + + # Compute the mean. + # Note that if dtype is not of inexact type then arraymean will + # not be either. + arrmean = umr_sum(arr, axis, dtype, keepdims=True) + if isinstance(arrmean, mu.ndarray): + arrmean = um.true_divide( + arrmean, rcount, out=arrmean, casting='unsafe', subok=False) + else: + arrmean = arrmean.dtype.type(arrmean / rcount) + + # Compute sum of squared deviations from mean + # Note that x may not be inexact and that we need it to be an array, + # not a scalar. + x = asanyarray(arr - arrmean) + if issubclass(arr.dtype.type, nt.complexfloating): + x = um.multiply(x, um.conjugate(x), out=x).real + else: + x = um.multiply(x, x, out=x) + ret = umr_sum(x, axis, dtype, out, keepdims) + + # Compute degrees of freedom and make sure it is not negative. + rcount = max([rcount - ddof, 0]) + + # divide by degrees of freedom + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims) + + if isinstance(ret, mu.ndarray): + ret = um.sqrt(ret, out=ret) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(um.sqrt(ret)) + else: + ret = um.sqrt(ret) + + return ret diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/arrayprint.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/arrayprint.py new file mode 100644 index 0000000000000..db491e6f5d74c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/arrayprint.py @@ -0,0 +1,752 @@ +"""Array printing function + +$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ["array2string", "set_printoptions", "get_printoptions"] +__docformat__ = 'restructuredtext' + +# +# Written by Konrad Hinsen +# last revision: 1996-3-13 +# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) +# and by Perry Greenfield 2000-4-1 for numarray +# and by Travis Oliphant 2005-8-22 for numpy + +import sys +from functools import reduce +from . import numerictypes as _nt +from .umath import maximum, minimum, absolute, not_equal, isnan, isinf +from .multiarray import format_longfloat, datetime_as_string, datetime_data +from .fromnumeric import ravel + +if sys.version_info[0] >= 3: + _MAXINT = sys.maxsize + _MININT = -sys.maxsize - 1 +else: + _MAXINT = sys.maxint + _MININT = -sys.maxint - 1 + +def product(x, y): return x*y + +_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension +_summaryThreshold = 1000 # total items > triggers array summarization + +_float_output_precision = 8 +_float_output_suppress_small = False +_line_width = 75 +_nan_str = 'nan' +_inf_str = 'inf' +_formatter = None # formatting function for array elements + + +def set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, + nanstr=None, infstr=None, + formatter=None): + """ + Set printing options. + + These options determine the way floating point numbers, arrays and + other NumPy objects are displayed. 
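+
+    As a quick orientation (an editor's illustration of standard numpy
+    behaviour, not part of the original docstring), the values set here are
+    what `get_printoptions` reports back:
+
+    >>> np.get_printoptions()['precision']
+    8
+    >>> np.set_printoptions(precision=3)
+    >>> np.get_printoptions()['precision']
+    3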
+
+    Parameters
+    ----------
+    precision : int, optional
+        Number of digits of precision for floating point output (default 8).
+    threshold : int, optional
+        Total number of array elements which trigger summarization
+        rather than full repr (default 1000).
+    edgeitems : int, optional
+        Number of array items in summary at beginning and end of
+        each dimension (default 3).
+    linewidth : int, optional
+        The number of characters per line for the purpose of inserting
+        line breaks (default 75).
+    suppress : bool, optional
+        Whether or not to suppress printing of small floating point values
+        using scientific notation (default False).
+    nanstr : str, optional
+        String representation of floating point not-a-number (default nan).
+    infstr : str, optional
+        String representation of floating point infinity (default inf).
+    formatter : dict of callables, optional
+        If not None, the keys should indicate the type(s) that the respective
+        formatting function applies to. Callables should return a string.
+        Types that are not specified (by their corresponding keys) are handled
+        by the default formatters. Individual types for which a formatter
+        can be set are::
+
+            - 'bool'
+            - 'int'
+            - 'timedelta' : a `numpy.timedelta64`
+            - 'datetime' : a `numpy.datetime64`
+            - 'float'
+            - 'longfloat' : 128-bit floats
+            - 'complexfloat'
+            - 'longcomplexfloat' : composed of two 128-bit floats
+            - 'numpy_str' : types `numpy.string_` and `numpy.unicode_`
+            - 'str' : all other strings
+
+        Other keys that can be used to set a group of types at once are::
+
+            - 'all' : sets all types
+            - 'int_kind' : sets 'int'
+            - 'float_kind' : sets 'float' and 'longfloat'
+            - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+            - 'str_kind' : sets 'str' and 'numpystr'
+
+    See Also
+    --------
+    get_printoptions, set_string_function, array2string
+
+    Notes
+    -----
+    `formatter` is always reset with a call to `set_printoptions`.
+
+    Examples
+    --------
+    Floating point precision can be set:
+
+    >>> np.set_printoptions(precision=4)
+    >>> print np.array([1.123456789])
+    [ 1.1235]
+
+    Long arrays can be summarised:
+
+    >>> np.set_printoptions(threshold=5)
+    >>> print np.arange(10)
+    [0 1 2 ..., 7 8 9]
+
+    Small results can be suppressed:
+
+    >>> eps = np.finfo(float).eps
+    >>> x = np.arange(4.)
+    >>> x**2 - (x + eps)**2
+    array([ -4.9304e-32,  -4.4409e-16,   0.0000e+00,   0.0000e+00])
+    >>> np.set_printoptions(suppress=True)
+    >>> x**2 - (x + eps)**2
+    array([-0., -0.,  0.,  0.])
+
+    A custom formatter can be used to display array elements as desired:
+
+    >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
+    >>> x = np.arange(3)
+    >>> x
+    array([int: 0, int: -1, int: -2])
+    >>> np.set_printoptions()  # formatter gets reset
+    >>> x
+    array([0, 1, 2])
+
+    To put back the default options, you can use:
+
+    >>> np.set_printoptions(edgeitems=3,infstr='inf',
+    ...     linewidth=75, nanstr='nan', precision=8,
+    ...
suppress=False, threshold=1000, formatter=None) + """ + + global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ + _line_width, _float_output_suppress_small, _nan_str, _inf_str, \ + _formatter + if linewidth is not None: + _line_width = linewidth + if threshold is not None: + _summaryThreshold = threshold + if edgeitems is not None: + _summaryEdgeItems = edgeitems + if precision is not None: + _float_output_precision = precision + if suppress is not None: + _float_output_suppress_small = not not suppress + if nanstr is not None: + _nan_str = nanstr + if infstr is not None: + _inf_str = infstr + _formatter = formatter + +def get_printoptions(): + """ + Return the current print options. + + Returns + ------- + print_opts : dict + Dictionary of current print options with keys + + - precision : int + - threshold : int + - edgeitems : int + - linewidth : int + - suppress : bool + - nanstr : str + - infstr : str + - formatter : dict of callables + + For a full description of these options, see `set_printoptions`. + + See Also + -------- + set_printoptions, set_string_function + + """ + d = dict(precision=_float_output_precision, + threshold=_summaryThreshold, + edgeitems=_summaryEdgeItems, + linewidth=_line_width, + suppress=_float_output_suppress_small, + nanstr=_nan_str, + infstr=_inf_str, + formatter=_formatter) + return d + +def _leading_trailing(a): + from . import numeric as _nc + if a.ndim == 1: + if len(a) > 2*_summaryEdgeItems: + b = _nc.concatenate((a[:_summaryEdgeItems], + a[-_summaryEdgeItems:])) + else: + b = a + else: + if len(a) > 2*_summaryEdgeItems: + l = [_leading_trailing(a[i]) for i in range( + min(len(a), _summaryEdgeItems))] + l.extend([_leading_trailing(a[-i]) for i in range( + min(len(a), _summaryEdgeItems), 0, -1)]) + else: + l = [_leading_trailing(a[i]) for i in range(0, len(a))] + b = _nc.concatenate(tuple(l)) + return b + +def _boolFormatter(x): + if x: + return ' True' + else: + return 'False' + + +def repr_format(x): + return repr(x) + +def _array2string(a, max_line_width, precision, suppress_small, separator=' ', + prefix="", formatter=None): + + if max_line_width is None: + max_line_width = _line_width + + if precision is None: + precision = _float_output_precision + + if suppress_small is None: + suppress_small = _float_output_suppress_small + + if formatter is None: + formatter = _formatter + + if a.size > _summaryThreshold: + summary_insert = "..., " + data = _leading_trailing(a) + else: + summary_insert = "" + data = ravel(a) + + formatdict = {'bool' : _boolFormatter, + 'int' : IntegerFormat(data), + 'float' : FloatFormat(data, precision, suppress_small), + 'longfloat' : LongFloatFormat(precision), + 'complexfloat' : ComplexFormat(data, precision, + suppress_small), + 'longcomplexfloat' : LongComplexFormat(precision), + 'datetime' : DatetimeFormat(data), + 'timedelta' : TimedeltaFormat(data), + 'numpystr' : repr_format, + 'str' : str} + + if formatter is not None: + fkeys = [k for k in formatter.keys() if formatter[k] is not None] + if 'all' in fkeys: + for key in formatdict.keys(): + formatdict[key] = formatter['all'] + if 'int_kind' in fkeys: + for key in ['int']: + formatdict[key] = formatter['int_kind'] + if 'float_kind' in fkeys: + for key in ['float', 'longfloat']: + formatdict[key] = formatter['float_kind'] + if 'complex_kind' in fkeys: + for key in ['complexfloat', 'longcomplexfloat']: + formatdict[key] = formatter['complex_kind'] + if 'str_kind' in fkeys: + for key in ['numpystr', 'str']: + formatdict[key] = formatter['str_kind'] + 
+        for key in formatdict.keys():
+            if key in fkeys:
+                formatdict[key] = formatter[key]
+
+    try:
+        format_function = a._format
+        msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \
+              "will be removed in 2.1. Use the `formatter` kw instead."
+        import warnings
+        warnings.warn(msg, DeprecationWarning)
+    except AttributeError:
+        # find the right formatting function for the array
+        dtypeobj = a.dtype.type
+        if issubclass(dtypeobj, _nt.bool_):
+            format_function = formatdict['bool']
+        elif issubclass(dtypeobj, _nt.integer):
+            if issubclass(dtypeobj, _nt.timedelta64):
+                format_function = formatdict['timedelta']
+            else:
+                format_function = formatdict['int']
+        elif issubclass(dtypeobj, _nt.floating):
+            if issubclass(dtypeobj, _nt.longfloat):
+                format_function = formatdict['longfloat']
+            else:
+                format_function = formatdict['float']
+        elif issubclass(dtypeobj, _nt.complexfloating):
+            if issubclass(dtypeobj, _nt.clongfloat):
+                format_function = formatdict['longcomplexfloat']
+            else:
+                format_function = formatdict['complexfloat']
+        elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
+            format_function = formatdict['numpystr']
+        elif issubclass(dtypeobj, _nt.datetime64):
+            format_function = formatdict['datetime']
+        else:
+            format_function = formatdict['numpystr']
+
+    # skip over "["
+    next_line_prefix = " "
+    # skip over array(
+    next_line_prefix += " "*len(prefix)
+
+    lst = _formatArray(a, format_function, len(a.shape), max_line_width,
+                       next_line_prefix, separator,
+                       _summaryEdgeItems, summary_insert)[:-1]
+    return lst
+
+def _convert_arrays(obj):
+    from . import numeric as _nc
+    newtup = []
+    for k in obj:
+        if isinstance(k, _nc.ndarray):
+            k = k.tolist()
+        elif isinstance(k, tuple):
+            k = _convert_arrays(k)
+        newtup.append(k)
+    return tuple(newtup)
+
+
+def array2string(a, max_line_width=None, precision=None,
+                 suppress_small=None, separator=' ', prefix="",
+                 style=repr, formatter=None):
+    """
+    Return a string representation of an array.
+
+    Parameters
+    ----------
+    a : ndarray
+        Input array.
+    max_line_width : int, optional
+        The maximum number of columns the string should span. Newline
+        characters split the string appropriately after array elements.
+    precision : int, optional
+        Floating point precision. Default is the current printing
+        precision (usually 8), which can be altered using `set_printoptions`.
+    suppress_small : bool, optional
+        Represent very small numbers as zero. A number is "very small" if it
+        is smaller than the current printing precision.
+    separator : str, optional
+        Inserted between elements.
+    prefix : str, optional
+        An array is typically printed as::
+
+            'prefix(' + array2string(a) + ')'
+
+        The length of the prefix string is used to align the
+        output correctly.
+    style : function, optional
+        A function that accepts an ndarray and returns a string. Used only
+        when the shape of `a` is equal to ``()``, i.e. for 0-D arrays.
+    formatter : dict of callables, optional
+        If not None, the keys should indicate the type(s) that the respective
+        formatting function applies to. Callables should return a string.
+        Types that are not specified (by their corresponding keys) are handled
+        by the default formatters.
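+
+        Editor's aside: in `_array2string` above, group keys such as
+        'float_kind' are applied before specific keys, so a specific key
+        wins when both are given; a minimal sketch, assuming ``np`` is
+        imported::
+
+            >>> np.array2string(np.arange(3.),
+            ...     formatter={'float_kind': lambda x: 'k',
+            ...                'float': lambda x: 'f'})
+            '[f f f]'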
Individual types for which a formatter + can be set are:: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` + - 'str' : all other strings + + Other keys that can be used to set a group of types at once are:: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'str' and 'numpystr' + + Returns + ------- + array_str : str + String representation of the array. + + Raises + ------ + TypeError + if a callable in `formatter` does not return a string. + + See Also + -------- + array_str, array_repr, set_printoptions, get_printoptions + + Notes + ----- + If a formatter is specified for a certain type, the `precision` keyword is + ignored for that type. + + Examples + -------- + >>> x = np.array([1e-16,1,2,3]) + >>> print np.array2string(x, precision=2, separator=',', + ... suppress_small=True) + [ 0., 1., 2., 3.] + + >>> x = np.arange(3.) + >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) + '[0.00 1.00 2.00]' + + >>> x = np.arange(3) + >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) + '[0x0L 0x1L 0x2L]' + + """ + + if a.shape == (): + x = a.item() + try: + lst = a._format(x) + msg = "The `_format` attribute is deprecated in Numpy " \ + "2.0 and will be removed in 2.1. Use the " \ + "`formatter` kw instead." + import warnings + warnings.warn(msg, DeprecationWarning) + except AttributeError: + if isinstance(x, tuple): + x = _convert_arrays(x) + lst = style(x) + elif reduce(product, a.shape) == 0: + # treat as a null array if any of shape elements == 0 + lst = "[]" + else: + lst = _array2string(a, max_line_width, precision, suppress_small, + separator, prefix, formatter=formatter) + return lst + +def _extendLine(s, line, word, max_line_len, next_line_prefix): + if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: + s += line.rstrip() + "\n" + line = next_line_prefix + line += word + return s, line + + +def _formatArray(a, format_function, rank, max_line_len, + next_line_prefix, separator, edge_items, summary_insert): + """formatArray is designed for two modes of operation: + + 1. Full output + + 2. 
Summarized output + + """ + if rank == 0: + obj = a.item() + if isinstance(obj, tuple): + obj = _convert_arrays(obj) + return str(obj) + + if summary_insert and 2*edge_items < len(a): + leading_items, trailing_items, summary_insert1 = \ + edge_items, edge_items, summary_insert + else: + leading_items, trailing_items, summary_insert1 = 0, len(a), "" + + if rank == 1: + s = "" + line = next_line_prefix + for i in range(leading_items): + word = format_function(a[i]) + separator + s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) + + if summary_insert1: + s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) + + for i in range(trailing_items, 1, -1): + word = format_function(a[-i]) + separator + s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) + + word = format_function(a[-1]) + s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) + s += line + "]\n" + s = '[' + s[len(next_line_prefix):] + else: + s = '[' + sep = separator.rstrip() + for i in range(leading_items): + if i > 0: + s += next_line_prefix + s += _formatArray(a[i], format_function, rank-1, max_line_len, + " " + next_line_prefix, separator, edge_items, + summary_insert) + s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) + + if summary_insert1: + s += next_line_prefix + summary_insert1 + "\n" + + for i in range(trailing_items, 1, -1): + if leading_items or i != trailing_items: + s += next_line_prefix + s += _formatArray(a[-i], format_function, rank-1, max_line_len, + " " + next_line_prefix, separator, edge_items, + summary_insert) + s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) + if leading_items or trailing_items > 1: + s += next_line_prefix + s += _formatArray(a[-1], format_function, rank-1, max_line_len, + " " + next_line_prefix, separator, edge_items, + summary_insert).rstrip()+']\n' + return s + +class FloatFormat(object): + def __init__(self, data, precision, suppress_small, sign=False): + self.precision = precision + self.suppress_small = suppress_small + self.sign = sign + self.exp_format = False + self.large_exponent = False + self.max_str_len = 0 + try: + self.fillFormat(data) + except (TypeError, NotImplementedError): + # if reduce(data) fails, this instance will not be called, just + # instantiated in formatdict. + pass + + def fillFormat(self, data): + from . import numeric as _nc + + with _nc.errstate(all='ignore'): + special = isnan(data) | isinf(data) + valid = not_equal(data, 0) & ~special + non_zero = absolute(data.compress(valid)) + if len(non_zero) == 0: + max_val = 0. + min_val = 0. 
+ else: + max_val = maximum.reduce(non_zero) + min_val = minimum.reduce(non_zero) + if max_val >= 1.e8: + self.exp_format = True + if not self.suppress_small and (min_val < 0.0001 + or max_val/min_val > 1000.): + self.exp_format = True + + if self.exp_format: + self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100 + self.max_str_len = 8 + self.precision + if self.large_exponent: + self.max_str_len += 1 + if self.sign: + format = '%+' + else: + format = '%' + format = format + '%d.%de' % (self.max_str_len, self.precision) + else: + format = '%%.%df' % (self.precision,) + if len(non_zero): + precision = max([_digits(x, self.precision, format) + for x in non_zero]) + else: + precision = 0 + precision = min(self.precision, precision) + self.max_str_len = len(str(int(max_val))) + precision + 2 + if _nc.any(special): + self.max_str_len = max(self.max_str_len, + len(_nan_str), + len(_inf_str)+1) + if self.sign: + format = '%#+' + else: + format = '%#' + format = format + '%d.%df' % (self.max_str_len, precision) + + self.special_fmt = '%%%ds' % (self.max_str_len,) + self.format = format + + def __call__(self, x, strip_zeros=True): + from . import numeric as _nc + + with _nc.errstate(invalid='ignore'): + if isnan(x): + if self.sign: + return self.special_fmt % ('+' + _nan_str,) + else: + return self.special_fmt % (_nan_str,) + elif isinf(x): + if x > 0: + if self.sign: + return self.special_fmt % ('+' + _inf_str,) + else: + return self.special_fmt % (_inf_str,) + else: + return self.special_fmt % ('-' + _inf_str,) + + s = self.format % x + if self.large_exponent: + # 3-digit exponent + expsign = s[-3] + if expsign == '+' or expsign == '-': + s = s[1:-2] + '0' + s[-2:] + elif self.exp_format: + # 2-digit exponent + if s[-3] == '0': + s = ' ' + s[:-3] + s[-2:] + elif strip_zeros: + z = s.rstrip('0') + s = z + ' '*(len(s)-len(z)) + return s + + +def _digits(x, precision, format): + s = format % x + z = s.rstrip('0') + return precision - len(s) + len(z) + + +class IntegerFormat(object): + def __init__(self, data): + try: + max_str_len = max(len(str(maximum.reduce(data))), + len(str(minimum.reduce(data)))) + self.format = '%' + str(max_str_len) + 'd' + except (TypeError, NotImplementedError): + # if reduce(data) fails, this instance will not be called, just + # instantiated in formatdict. 
+            pass
+        except ValueError:
+            # this occurs when everything is NA
+            pass
+
+    def __call__(self, x):
+        if _MININT < x < _MAXINT:
+            return self.format % x
+        else:
+            return "%s" % x
+
+class LongFloatFormat(object):
+    # XXX Have to add something to determine the width to use a la FloatFormat
+    # Right now, things won't line up properly
+    def __init__(self, precision, sign=False):
+        self.precision = precision
+        self.sign = sign
+
+    def __call__(self, x):
+        if isnan(x):
+            if self.sign:
+                return '+' + _nan_str
+            else:
+                return ' ' + _nan_str
+        elif isinf(x):
+            if x > 0:
+                if self.sign:
+                    return '+' + _inf_str
+                else:
+                    return ' ' + _inf_str
+            else:
+                return '-' + _inf_str
+        elif x >= 0:
+            if self.sign:
+                return '+' + format_longfloat(x, self.precision)
+            else:
+                return ' ' + format_longfloat(x, self.precision)
+        else:
+            return format_longfloat(x, self.precision)
+
+
+class LongComplexFormat(object):
+    def __init__(self, precision):
+        self.real_format = LongFloatFormat(precision)
+        self.imag_format = LongFloatFormat(precision, sign=True)
+
+    def __call__(self, x):
+        r = self.real_format(x.real)
+        i = self.imag_format(x.imag)
+        return r + i + 'j'
+
+
+class ComplexFormat(object):
+    def __init__(self, x, precision, suppress_small):
+        self.real_format = FloatFormat(x.real, precision, suppress_small)
+        self.imag_format = FloatFormat(x.imag, precision, suppress_small,
+                                       sign=True)
+
+    def __call__(self, x):
+        r = self.real_format(x.real, strip_zeros=False)
+        i = self.imag_format(x.imag, strip_zeros=False)
+        if not self.imag_format.exp_format:
+            z = i.rstrip('0')
+            i = z + 'j' + ' '*(len(i)-len(z))
+        else:
+            i = i + 'j'
+        return r + i
+
+class DatetimeFormat(object):
+    def __init__(self, x, unit=None,
+                 timezone=None, casting='same_kind'):
+        # Get the unit from the dtype
+        if unit is None:
+            if x.dtype.kind == 'M':
+                unit = datetime_data(x.dtype)[0]
+            else:
+                unit = 's'
+
+        # If timezone is default, make it 'local' or 'UTC' based on the unit
+        if timezone is None:
+            # Date units -> UTC, time units -> local
+            if unit in ('Y', 'M', 'W', 'D'):
+                self.timezone = 'UTC'
+            else:
+                self.timezone = 'local'
+        else:
+            self.timezone = timezone
+        self.unit = unit
+        self.casting = casting
+
+    def __call__(self, x):
+        return "'%s'" % datetime_as_string(x,
+                                           unit=self.unit,
+                                           timezone=self.timezone,
+                                           casting=self.casting)
+
+class TimedeltaFormat(object):
+    def __init__(self, data):
+        if data.dtype.kind == 'm':
+            v = data.view('i8')
+            max_str_len = max(len(str(maximum.reduce(v))),
+                              len(str(minimum.reduce(v))))
+            self.format = '%' + str(max_str_len) + 'd'
+
+    def __call__(self, x):
+        return self.format % x.astype('i8')
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/cversions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/cversions.py
new file mode 100644
index 0000000000000..7995dd9931e7e
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/cversions.py
@@ -0,0 +1,15 @@
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+ +""" +from __future__ import division, absolute_import, print_function + +from os.path import dirname + +from code_generators.genapi import fullapi_hash +from code_generators.numpy_api import full_api + +if __name__ == '__main__': + curdir = dirname(__file__) + print(fullapi_hash(full_api)) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/defchararray.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/defchararray.py new file mode 100644 index 0000000000000..121e323147bd4 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/defchararray.py @@ -0,0 +1,2687 @@ +""" +This module contains a set of functions for vectorized string +operations and methods. + +.. note:: + The `chararray` class exists for backwards compatibility with + Numarray, it is not recommended for new development. Starting from numpy + 1.4, if one needs arrays of strings, it is recommended to use arrays of + `dtype` `object_`, `string_` or `unicode_`, and use the free functions + in the `numpy.char` module for fast vectorized string operations. + +Some methods will only be available if the corresponding string method is +available in your version of Python. + +The preferred alias for `defchararray` is `numpy.char`. + +""" +from __future__ import division, absolute_import, print_function + +import sys +from .numerictypes import string_, unicode_, integer, object_, bool_, character +from .numeric import ndarray, compare_chararrays +from .numeric import array as narray +from numpy.core.multiarray import _vec_string +from numpy.compat import asbytes, long +import numpy + +__all__ = ['chararray', + 'equal', 'not_equal', 'greater_equal', 'less_equal', 'greater', 'less', + 'str_len', 'add', 'multiply', 'mod', 'capitalize', 'center', 'count', + 'decode', 'encode', 'endswith', 'expandtabs', 'find', 'format', + 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', + 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', + 'partition', 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', + 'rsplit', 'rstrip', 'split', 'splitlines', 'startswith', 'strip', + 'swapcase', 'title', 'translate', 'upper', 'zfill', + 'isnumeric', 'isdecimal', + 'array', 'asarray'] + +_globalvar = 0 +if sys.version_info[0] >= 3: + _unicode = str + _bytes = bytes +else: + _unicode = unicode + _bytes = str +_len = len + +def _use_unicode(*args): + """ + Helper function for determining the output type of some string + operations. + + For an operation on two ndarrays, if at least one is unicode, the + result should be unicode. + """ + for x in args: + if (isinstance(x, _unicode) + or issubclass(numpy.asarray(x).dtype.type, unicode_)): + return unicode_ + return string_ + +def _to_string_or_unicode_array(result): + """ + Helper function to cast a result back into a string or unicode array + if an object array must be used as an intermediary. + """ + return numpy.asarray(result.tolist()) + +def _clean_args(*args): + """ + Helper function for delegating arguments to Python string + functions. + + Many of the Python string operations that have optional arguments + do not use 'None' to indicate a default value. In these cases, + we need to remove all `None` arguments, and those following them. + """ + newargs = [] + for chk in args: + if chk is None: + break + newargs.append(chk) + return newargs + +def _get_num_chars(a): + """ + Helper function that returns the number of characters per field in + a string or unicode array. This is to abstract out the fact that + for a unicode array this is itemsize / 4. 
+ """ + if issubclass(a.dtype.type, unicode_): + return a.itemsize // 4 + return a.itemsize + + +def equal(x1, x2): + """ + Return (x1 == x2) element-wise. + + Unlike `numpy.equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : {ndarray, bool} + Output array of bools, or a single bool if x1 and x2 are scalars. + + See Also + -------- + not_equal, greater_equal, less_equal, greater, less + """ + return compare_chararrays(x1, x2, '==', True) + +def not_equal(x1, x2): + """ + Return (x1 != x2) element-wise. + + Unlike `numpy.not_equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : {ndarray, bool} + Output array of bools, or a single bool if x1 and x2 are scalars. + + See Also + -------- + equal, greater_equal, less_equal, greater, less + """ + return compare_chararrays(x1, x2, '!=', True) + +def greater_equal(x1, x2): + """ + Return (x1 >= x2) element-wise. + + Unlike `numpy.greater_equal`, this comparison is performed by + first stripping whitespace characters from the end of the string. + This behavior is provided for backward-compatibility with + numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : {ndarray, bool} + Output array of bools, or a single bool if x1 and x2 are scalars. + + See Also + -------- + equal, not_equal, less_equal, greater, less + """ + return compare_chararrays(x1, x2, '>=', True) + +def less_equal(x1, x2): + """ + Return (x1 <= x2) element-wise. + + Unlike `numpy.less_equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : {ndarray, bool} + Output array of bools, or a single bool if x1 and x2 are scalars. + + See Also + -------- + equal, not_equal, greater_equal, greater, less + """ + return compare_chararrays(x1, x2, '<=', True) + +def greater(x1, x2): + """ + Return (x1 > x2) element-wise. + + Unlike `numpy.greater`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : {ndarray, bool} + Output array of bools, or a single bool if x1 and x2 are scalars. + + See Also + -------- + equal, not_equal, greater_equal, less_equal, less + """ + return compare_chararrays(x1, x2, '>', True) + +def less(x1, x2): + """ + Return (x1 < x2) element-wise. + + Unlike `numpy.greater`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. 
+
+    Returns
+    -------
+    out : {ndarray, bool}
+        Output array of bools, or a single bool if x1 and x2 are scalars.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, greater
+    """
+    return compare_chararrays(x1, x2, '<', True)
+
+def str_len(a):
+    """
+    Return len(a) element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of integers
+
+    See also
+    --------
+    __builtin__.len
+    """
+    return _vec_string(a, integer, '__len__')
+
+def add(x1, x2):
+    """
+    Return element-wise string concatenation for two arrays of str or unicode.
+
+    Arrays `x1` and `x2` must have the same shape.
+
+    Parameters
+    ----------
+    x1 : array_like of str or unicode
+        Input array.
+    x2 : array_like of str or unicode
+        Input array.
+
+    Returns
+    -------
+    add : ndarray
+        Output array of `string_` or `unicode_`, depending on input types
+        of the same shape as `x1` and `x2`.
+
+    """
+    arr1 = numpy.asarray(x1)
+    arr2 = numpy.asarray(x2)
+    out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
+    dtype = _use_unicode(arr1, arr2)
+    return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
+
+def multiply(a, i):
+    """
+    Return (a * i), that is string multiple concatenation,
+    element-wise.
+
+    Values in `i` of less than 0 are treated as 0 (which yields an
+    empty string).
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    i : array_like of ints
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input types
+
+    """
+    a_arr = numpy.asarray(a)
+    i_arr = numpy.asarray(i)
+    if not issubclass(i_arr.dtype.type, integer):
+        raise ValueError("Can only multiply by integers")
+    out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0)
+    return _vec_string(
+        a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
+
+def mod(a, values):
+    """
+    Return (a % values), that is pre-Python 2.6 string formatting
+    (interpolation), element-wise for a pair of array_likes of str
+    or unicode.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    values : array_like of values
+        These values will be element-wise interpolated into the string.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input types
+
+    See also
+    --------
+    str.__mod__
+
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(a, object_, '__mod__', (values,)))
+
+def capitalize(a):
+    """
+    Return a copy of `a` with only the first character of each element
+    capitalized.
+
+    Calls `str.capitalize` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+        Input array of strings to capitalize.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input
+        types
+
+    See also
+    --------
+    str.capitalize
+
+    Examples
+    --------
+    >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
+    array(['a1b2', '1b2a', 'b2a1', '2a1b'],
+          dtype='|S4')
+    >>> np.char.capitalize(c)
+    array(['A1b2', '1b2a', 'B2a1', '2a1b'],
+          dtype='|S4')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'capitalize')
+
+
+def center(a, width, fillchar=' '):
+    """
+    Return a copy of `a` with its elements centered in a string of
+    length `width`.
+
+    Calls `str.center` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    width : int
+        The length of the resulting strings
+    fillchar : str or unicode, optional
+        The padding character to use (default is space).
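+
+        For example (editor's illustration)::
+
+            >>> np.char.center(np.array([b'ab']), 6, fillchar=b'-')[0] == b'--ab--'
+            True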
+ + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input + types + + See also + -------- + str.center + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = long(numpy.max(width_arr.flat)) + if numpy.issubdtype(a_arr.dtype, numpy.string_): + fillchar = asbytes(fillchar) + return _vec_string( + a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar)) + + +def count(a, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + Calls `str.count` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + sub : str or unicode + The substring to search for. + + start, end : int, optional + Optional arguments `start` and `end` are interpreted as slice + notation to specify the range in which to count. + + Returns + ------- + out : ndarray + Output array of ints. + + See also + -------- + str.count + + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], + dtype='|S7') + >>> np.char.count(c, 'A') + array([3, 1, 1]) + >>> np.char.count(c, 'aA') + array([3, 1, 0]) + >>> np.char.count(c, 'A', start=1, end=4) + array([2, 1, 1]) + >>> np.char.count(c, 'A', start=1, end=3) + array([1, 0, 0]) + + """ + return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end)) + + +def decode(a, encoding=None, errors=None): + """ + Calls `str.decode` element-wise. + + The set of available codecs comes from the Python standard library, + and may be extended at runtime. For more information, see the + :mod:`codecs` module. + + Parameters + ---------- + a : array_like of str or unicode + + encoding : str, optional + The name of an encoding + + errors : str, optional + Specifies how to handle encoding errors + + Returns + ------- + out : ndarray + + See also + -------- + str.decode + + Notes + ----- + The type of the result will depend on the encoding specified. + + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], + dtype='|S7') + >>> np.char.encode(c, encoding='cp037') + array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@', + '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'], + dtype='|S7') + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'decode', _clean_args(encoding, errors))) + + +def encode(a, encoding=None, errors=None): + """ + Calls `str.encode` element-wise. + + The set of available codecs comes from the Python standard library, + and may be extended at runtime. For more information, see the codecs + module. + + Parameters + ---------- + a : array_like of str or unicode + + encoding : str, optional + The name of an encoding + + errors : str, optional + Specifies how to handle encoding errors + + Returns + ------- + out : ndarray + + See also + -------- + str.encode + + Notes + ----- + The type of the result will depend on the encoding specified. + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'encode', _clean_args(encoding, errors))) + + +def endswith(a, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `a` ends with `suffix`, otherwise `False`. + + Calls `str.endswith` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + suffix : str + + start, end : int, optional + With optional `start`, test beginning at that position. 
With
+        optional `end`, stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Outputs an array of bools.
+
+    See also
+    --------
+    str.endswith
+
+    Examples
+    --------
+    >>> s = np.array(['foo', 'bar'])
+    >>> s[0] = 'foo'
+    >>> s[1] = 'bar'
+    >>> s
+    array(['foo', 'bar'],
+          dtype='|S3')
+    >>> np.char.endswith(s, 'ar')
+    array([False,  True], dtype=bool)
+    >>> np.char.endswith(s, 'a', start=1, end=2)
+    array([False,  True], dtype=bool)
+
+    """
+    return _vec_string(
+        a, bool_, 'endswith', [suffix, start] + _clean_args(end))
+
+
+def expandtabs(a, tabsize=8):
+    """
+    Return a copy of each string element where all tab characters are
+    replaced by one or more spaces.
+
+    Calls `str.expandtabs` element-wise.
+
+    Return a copy of each string element where all tab characters are
+    replaced by one or more spaces, depending on the current column
+    and the given `tabsize`. The column number is reset to zero after
+    each newline occurring in the string. This doesn't understand other
+    non-printing characters or escape sequences.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+        Input array
+    tabsize : int, optional
+        Replace tabs with `tabsize` number of spaces. If not given defaults
+        to 8 spaces.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.expandtabs
+
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(a, object_, 'expandtabs', (tabsize,)))
+
+
+def find(a, sub, start=0, end=None):
+    """
+    For each element, return the lowest index in the string where
+    substring `sub` is found.
+
+    Calls `str.find` element-wise.
+
+    For each element, return the lowest index in the string where
+    substring `sub` is found, such that `sub` is contained in the
+    range [`start`, `end`].
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sub : str or unicode
+
+    start, end : int, optional
+        Optional arguments `start` and `end` are interpreted as in
+        slice notation.
+
+    Returns
+    -------
+    out : ndarray or int
+        Output array of ints. Returns -1 if `sub` is not found.
+
+    See also
+    --------
+    str.find
+
+    """
+    return _vec_string(
+        a, integer, 'find', [sub, start] + _clean_args(end))
+
+
+def index(a, sub, start=0, end=None):
+    """
+    Like `find`, but raises `ValueError` when the substring is not found.
+
+    Calls `str.index` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sub : str or unicode
+
+    start, end : int, optional
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints.
+
+    See also
+    --------
+    find, str.find
+
+    """
+    return _vec_string(
+        a, integer, 'index', [sub, start] + _clean_args(end))
+
+def isalnum(a):
+    """
+    Returns true for each element if all characters in the string are
+    alphanumeric and there is at least one character, false otherwise.
+
+    Calls `str.isalnum` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See also
+    --------
+    str.isalnum
+    """
+    return _vec_string(a, bool_, 'isalnum')
+
+def isalpha(a):
+    """
+    Returns true for each element if all characters in the string are
+    alphabetic and there is at least one character, false otherwise.
+
+    Calls `str.isalpha` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
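+
+    Editor's note: "at least one character" means empty strings map to
+    False; for example ``np.char.isalpha(np.array(['abc', '']))`` gives
+    ``array([ True, False], dtype=bool)``.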
+ + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.isalpha + """ + return _vec_string(a, bool_, 'isalpha') + +def isdigit(a): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + Calls `str.isdigit` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.isdigit + """ + return _vec_string(a, bool_, 'isdigit') + +def islower(a): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. + + Calls `str.islower` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.islower + """ + return _vec_string(a, bool_, 'islower') + +def isspace(a): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. + + Calls `str.isspace` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.isspace + """ + return _vec_string(a, bool_, 'isspace') + +def istitle(a): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + Call `str.istitle` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.istitle + """ + return _vec_string(a, bool_, 'istitle') + +def isupper(a): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + Call `str.isupper` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See also + -------- + str.isupper + """ + return _vec_string(a, bool_, 'isupper') + +def join(sep, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + Calls `str.join` element-wise. + + Parameters + ---------- + sep : array_like of str or unicode + seq : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input types + + See also + -------- + str.join + """ + return _to_string_or_unicode_array( + _vec_string(sep, object_, 'join', (seq,))) + + +def ljust(a, width, fillchar=' '): + """ + Return an array with the elements of `a` left-justified in a + string of length `width`. + + Calls `str.ljust` element-wise. 
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    width : int
+        The length of the resulting strings
+    fillchar : str or unicode, optional
+        The character to use for padding
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.ljust
+
+    """
+    a_arr = numpy.asarray(a)
+    width_arr = numpy.asarray(width)
+    size = long(numpy.max(width_arr.flat))
+    if numpy.issubdtype(a_arr.dtype, numpy.string_):
+        fillchar = asbytes(fillchar)
+    return _vec_string(
+        a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
+
+
+def lower(a):
+    """
+    Return an array with the elements converted to lowercase.
+
+    Call `str.lower` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.lower
+
+    Examples
+    --------
+    >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
+    array(['A1B C', '1BCA', 'BCA1'],
+          dtype='|S5')
+    >>> np.char.lower(c)
+    array(['a1b c', '1bca', 'bca1'],
+          dtype='|S5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'lower')
+
+
+def lstrip(a, chars=None):
+    """
+    For each element in `a`, return a copy with the leading characters
+    removed.
+
+    Calls `str.lstrip` element-wise.
+
+    Parameters
+    ----------
+    a : array-like, {str, unicode}
+        Input array.
+
+    chars : {str, unicode}, optional
+        The `chars` argument is a string specifying the set of
+        characters to be removed. If omitted or None, the `chars`
+        argument defaults to removing whitespace. The `chars` argument
+        is not a prefix; rather, all combinations of its values are
+        stripped.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.lstrip
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'],
+          dtype='|S7')
+
+    The 'a' variable is unstripped from c[1] because of the leading
+    whitespace.
+
+    >>> np.char.lstrip(c, 'a')
+    array(['AaAaA', '  aA  ', 'bBABba'],
+          dtype='|S7')
+
+
+    >>> np.char.lstrip(c, 'A') # leaves c unchanged
+    array(['aAaAaA', '  aA  ', 'abBABba'],
+          dtype='|S7')
+    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
+    ... # XXX: is this a regression? this line now returns False
+    ... # np.char.lstrip(c,'') does not modify c at all.
+    True
+    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
+    True
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
+
+
+def partition(a, sep):
+    """
+    Partition each element in `a` around `sep`.
+
+    Calls `str.partition` element-wise.
+
+    For each element in `a`, split the element at the first
+    occurrence of `sep`, and return 3 strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, return 3 strings
+    containing the string itself, followed by two empty strings.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array
+    sep : {str, unicode}
+        Separator to split each string element in `a`.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type.
+        The output array will have an extra dimension with 3
+        elements per input element.
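+
+        For example (editor's illustration)::
+
+            >>> np.char.partition(np.array(['a-b', 'c-d']), '-').shape
+            (2, 3)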
+
+    See also
+    --------
+    str.partition
+
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(a, object_, 'partition', (sep,)))
+
+
+def replace(a, old, new, count=None):
+    """
+    For each element in `a`, return a copy of the string with all
+    occurrences of substring `old` replaced by `new`.
+
+    Calls `str.replace` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    old, new : str or unicode
+
+    count : int, optional
+        If the optional argument `count` is given, only the first
+        `count` occurrences are replaced.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.replace
+
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(
+            a, object_, 'replace', [old, new] + _clean_args(count)))
+
+
+def rfind(a, sub, start=0, end=None):
+    """
+    For each element in `a`, return the highest index in the string
+    where substring `sub` is found, such that `sub` is contained
+    within [`start`, `end`].
+
+    Calls `str.rfind` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    sub : str or unicode
+
+    start, end : int, optional
+        Optional arguments `start` and `end` are interpreted as in
+        slice notation.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints. Return -1 on failure.
+
+    See also
+    --------
+    str.rfind
+
+    """
+    return _vec_string(
+        a, integer, 'rfind', [sub, start] + _clean_args(end))
+
+
+def rindex(a, sub, start=0, end=None):
+    """
+    Like `rfind`, but raises `ValueError` when the substring `sub` is
+    not found.
+
+    Calls `str.rindex` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    sub : str or unicode
+
+    start, end : int, optional
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints.
+
+    See also
+    --------
+    rfind, str.rindex
+
+    """
+    return _vec_string(
+        a, integer, 'rindex', [sub, start] + _clean_args(end))
+
+
+def rjust(a, width, fillchar=' '):
+    """
+    Return an array with the elements of `a` right-justified in a
+    string of length `width`.
+
+    Calls `str.rjust` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    width : int
+        The length of the resulting strings
+    fillchar : str or unicode, optional
+        The character to use for padding
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.rjust
+
+    """
+    a_arr = numpy.asarray(a)
+    width_arr = numpy.asarray(width)
+    size = long(numpy.max(width_arr.flat))
+    if numpy.issubdtype(a_arr.dtype, numpy.string_):
+        fillchar = asbytes(fillchar)
+    return _vec_string(
+        a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
+
+
+def rpartition(a, sep):
+    """
+    Partition (split) each element around the right-most separator.
+
+    Calls `str.rpartition` element-wise.
+
+    For each element in `a`, split the element at the last
+    occurrence of `sep`, and return 3 strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, return 3 strings
+    containing the string itself, followed by two empty strings.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+        Input array
+    sep : str or unicode
+        Right-most separator to split each element in array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of string or unicode, depending on input
+        type. The output array will have an extra dimension with
+        3 elements per input element.
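+
+        For example (editor's illustration; the split happens around the
+        right-most separator)::
+
+            >>> np.char.rpartition(np.array(['a-b-c']), '-')[0].tolist()
+            ['a-b', '-', 'c']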
+
+    See also
+    --------
+    str.rpartition
+
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(a, object_, 'rpartition', (sep,)))
+
+
+def rsplit(a, sep=None, maxsplit=None):
+    """
+    For each element in `a`, return a list of the words in the
+    string, using `sep` as the delimiter string.
+
+    Calls `str.rsplit` element-wise.
+
+    Except for splitting from the right, `rsplit`
+    behaves like `split`.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sep : str or unicode, optional
+        If `sep` is not specified or `None`, any whitespace string
+        is a separator.
+    maxsplit : int, optional
+        If `maxsplit` is given, at most `maxsplit` splits are done,
+        the rightmost ones.
+
+    Returns
+    -------
+    out : ndarray
+        Array of list objects
+
+    See also
+    --------
+    str.rsplit, split
+
+    """
+    # This will return an array of lists of different sizes, so we
+    # leave it as an object array
+    return _vec_string(
+        a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
+
+
+def rstrip(a, chars=None):
+    """
+    For each element in `a`, return a copy with the trailing
+    characters removed.
+
+    Calls `str.rstrip` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    chars : str or unicode, optional
+        The `chars` argument is a string specifying the set of
+        characters to be removed. If omitted or None, the `chars`
+        argument defaults to removing whitespace. The `chars` argument
+        is not a suffix; rather, all combinations of its values are
+        stripped.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.rstrip
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
+    array(['aAaAaA', 'abBABba'],
+          dtype='|S7')
+    >>> np.char.rstrip(c, 'a')
+    array(['aAaAaA', 'abBABb'],
+          dtype='|S7')
+    >>> np.char.rstrip(c, 'A')
+    array(['aAaAa', 'abBABba'],
+          dtype='|S7')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
+
+
+def split(a, sep=None, maxsplit=None):
+    """
+    For each element in `a`, return a list of the words in the
+    string, using `sep` as the delimiter string.
+
+    Calls `str.split` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sep : str or unicode, optional
+        If `sep` is not specified or `None`, any whitespace string is a
+        separator.
+
+    maxsplit : int, optional
+        If `maxsplit` is given, at most `maxsplit` splits are done.
+
+    Returns
+    -------
+    out : ndarray
+        Array of list objects
+
+    See also
+    --------
+    str.split, rsplit
+
+    """
+    # This will return an array of lists of different sizes, so we
+    # leave it as an object array
+    return _vec_string(
+        a, object_, 'split', [sep] + _clean_args(maxsplit))
+
+
+def splitlines(a, keepends=None):
+    """
+    For each element in `a`, return a list of the lines in the
+    element, breaking at line boundaries.
+
+    Calls `str.splitlines` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    keepends : bool, optional
+        Line breaks are not included in the resulting list unless
+        keepends is given and true.
+
+    Returns
+    -------
+    out : ndarray
+        Array of list objects
+
+    See also
+    --------
+    str.splitlines
+
+    """
+    return _vec_string(
+        a, object_, 'splitlines', _clean_args(keepends))
+
+
+def startswith(a, prefix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in `a` starts with `prefix`, otherwise `False`.
+
+    Calls `str.startswith` element-wise.
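+
+    For example (editor's illustration)::
+
+        >>> np.char.startswith(np.array(['foo', 'bar']), 'fo')
+        array([ True, False], dtype=bool)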
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    prefix : str
+
+    start, end : int, optional
+        With optional `start`, test beginning at that position. With
+        optional `end`, stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Array of booleans
+
+    See also
+    --------
+    str.startswith
+
+    """
+    return _vec_string(
+        a, bool_, 'startswith', [prefix, start] + _clean_args(end))
+
+
+def strip(a, chars=None):
+    """
+    For each element in `a`, return a copy with the leading and
+    trailing characters removed.
+
+    Calls `str.strip` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    chars : str or unicode, optional
+        The `chars` argument is a string specifying the set of
+        characters to be removed. If omitted or None, the `chars`
+        argument defaults to removing whitespace. The `chars` argument
+        is not a prefix or suffix; rather, all combinations of its
+        values are stripped.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.strip
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'],
+        dtype='|S7')
+    >>> np.char.strip(c)
+    array(['aAaAaA', 'aA', 'abBABba'],
+        dtype='|S7')
+    >>> np.char.strip(c, 'a')   # 'a' unstripped from c[1] because whitespace leads
+    array(['AaAaA', '  aA  ', 'bBABb'],
+        dtype='|S7')
+    >>> np.char.strip(c, 'A')   # 'A' unstripped from c[1] because (unprinted) ws trails
+    array(['aAaAa', '  aA  ', 'abBABba'],
+        dtype='|S7')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
+
+
+def swapcase(a):
+    """
+    Return element-wise a copy of the string with
+    uppercase characters converted to lowercase and vice versa.
+
+    Calls `str.swapcase` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.swapcase
+
+    Examples
+    --------
+    >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
+    array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
+        dtype='|S5')
+    >>> np.char.swapcase(c)
+    array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
+        dtype='|S5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'swapcase')
+
+
+def title(a):
+    """
+    Return element-wise title cased version of string or unicode.
+
+    Title case words start with uppercase characters; all remaining
+    cased characters are lowercase.
+
+    Calls `str.title` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.title
+
+    Examples
+    --------
+    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
+    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
+        dtype='|S5')
+    >>> np.char.title(c)
+    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
+        dtype='|S5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'title')
+
+
+def translate(a, table, deletechars=None):
+    """
+    For each element in `a`, return a copy of the string where all
+    characters occurring in the optional argument `deletechars` are
+    removed, and the remaining characters have been mapped through the
+    given translation table.
+
+    Calls `str.translate` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    table : str of length 256
+
+    deletechars : str
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.translate
+
+    """
+    a_arr = numpy.asarray(a)
+    if issubclass(a_arr.dtype.type, unicode_):
+        return _vec_string(
+            a_arr, a_arr.dtype, 'translate', (table,))
+    else:
+        return _vec_string(
+            a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
+
+
+def upper(a):
+    """
+    Return an array with the elements converted to uppercase.
+
+    Calls `str.upper` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.upper
+
+    Examples
+    --------
+    >>> c = np.array(['a1b c', '1bca', 'bca1']); c
+    array(['a1b c', '1bca', 'bca1'],
+        dtype='|S5')
+    >>> np.char.upper(c)
+    array(['A1B C', '1BCA', 'BCA1'],
+        dtype='|S5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'upper')
+
+
+def zfill(a, width):
+    """
+    Return the numeric string left-filled with zeros.
+
+    Calls `str.zfill` element-wise.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+    width : int
+        Width of string to left-fill elements in `a`.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See also
+    --------
+    str.zfill
+
+    """
+    a_arr = numpy.asarray(a)
+    width_arr = numpy.asarray(width)
+    size = long(numpy.max(width_arr.flat))
+    return _vec_string(
+        a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
+
+
+def isnumeric(a):
+    """
+    For each element, return True if there are only numeric
+    characters in the element.
+
+    Calls `unicode.isnumeric` element-wise.
+
+    Numeric characters include digit characters, and all characters
+    that have the Unicode numeric value property, e.g. ``U+2155,
+    VULGAR FRACTION ONE FIFTH``.
+
+    Parameters
+    ----------
+    a : array_like, unicode
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Array of booleans of same shape as `a`.
+
+    See also
+    --------
+    unicode.isnumeric
+
+    """
+    if _use_unicode(a) != unicode_:
+        raise TypeError("isnumeric is only available for Unicode strings and arrays")
+    return _vec_string(a, bool_, 'isnumeric')
+
+
+def isdecimal(a):
+    """
+    For each element, return True if there are only decimal
+    characters in the element.
+
+    Calls `unicode.isdecimal` element-wise.
+
+    Decimal characters include digit characters, and all characters
+    that can be used to form decimal-radix numbers,
+    e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
+
+    Parameters
+    ----------
+    a : array_like, unicode
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Array of booleans identical in shape to `a`.
+
+    See also
+    --------
+    unicode.isdecimal
+
+    """
+    if _use_unicode(a) != unicode_:
+        raise TypeError("isdecimal is only available for Unicode strings and arrays")
+    return _vec_string(a, bool_, 'isdecimal')
+
+
+class chararray(ndarray):
+    """
+    chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
+              strides=None, order=None)
+
+    Provides a convenient view on arrays of string and unicode values.
+
+    .. note::
+       The `chararray` class exists for backwards compatibility with
+       Numarray; it is not recommended for new development.
Starting from numpy + 1.4, if one needs arrays of strings, it is recommended to use arrays of + `dtype` `object_`, `string_` or `unicode_`, and use the free functions + in the `numpy.char` module for fast vectorized string operations. + + Versus a regular Numpy array of type `str` or `unicode`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``) + + chararrays should be created using `numpy.char.array` or + `numpy.char.asarray`, rather than this constructor directly. + + This constructor creates the array, using `buffer` (with `offset` + and `strides`) if it is not ``None``. If `buffer` is ``None``, then + constructs a new array with `strides` in "C order", unless both + ``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides` + is in "Fortran order". + + Methods + ------- + astype + argsort + copy + count + decode + dump + dumps + encode + endswith + expandtabs + fill + find + flatten + getfield + index + isalnum + isalpha + isdecimal + isdigit + islower + isnumeric + isspace + istitle + isupper + item + join + ljust + lower + lstrip + nonzero + put + ravel + repeat + replace + reshape + resize + rfind + rindex + rjust + rsplit + rstrip + searchsorted + setfield + setflags + sort + split + splitlines + squeeze + startswith + strip + swapaxes + swapcase + take + title + tofile + tolist + tostring + translate + transpose + upper + view + zfill + + Parameters + ---------- + shape : tuple + Shape of the array. + itemsize : int, optional + Length of each array element, in number of characters. Default is 1. + unicode : bool, optional + Are the array elements of type unicode (True) or string (False). + Default is False. + buffer : int, optional + Memory address of the start of the array data. Default is None, + in which case a new array is created. + offset : int, optional + Fixed stride displacement from the beginning of an axis? + Default is 0. Needs to be >=0. + strides : array_like of ints, optional + Strides for the array (see `ndarray.strides` for full description). + Default is None. + order : {'C', 'F'}, optional + The order in which the array data is stored in memory: 'C' -> + "row major" order (the default), 'F' -> "column major" + (Fortran) order. + + Examples + -------- + >>> charar = np.chararray((3, 3)) + >>> charar[:] = 'a' + >>> charar + chararray([['a', 'a', 'a'], + ['a', 'a', 'a'], + ['a', 'a', 'a']], + dtype='|S1') + + >>> charar = np.chararray(charar.shape, itemsize=5) + >>> charar[:] = 'abc' + >>> charar + chararray([['abc', 'abc', 'abc'], + ['abc', 'abc', 'abc'], + ['abc', 'abc', 'abc']], + dtype='|S5') + + """ + def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, + offset=0, strides=None, order='C'): + global _globalvar + + if unicode: + dtype = unicode_ + else: + dtype = string_ + + # force itemsize to be a Python long, since using Numpy integer + # types results in itemsize.itemsize being used as the size of + # strings in the new array. 
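+        # (Editor's illustration: long(np.int64(5)) -> 5; without this
+        # cast, the dtype tuple below would pick up np.int64(5).itemsize,
+        # i.e. 8, and silently build 8-character strings.)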
+ itemsize = long(itemsize) + + if sys.version_info[0] >= 3 and isinstance(buffer, _unicode): + # On Py3, unicode objects do not have the buffer interface + filler = buffer + buffer = None + else: + filler = None + + _globalvar = 1 + if buffer is None: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + order=order) + else: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + buffer=buffer, + offset=offset, strides=strides, + order=order) + if filler is not None: + self[...] = filler + _globalvar = 0 + return self + + def __array_finalize__(self, obj): + # The b is a special case because it is used for reconstructing. + if not _globalvar and self.dtype.char not in 'SUbc': + raise ValueError("Can only create a chararray from string data.") + + def __getitem__(self, obj): + val = ndarray.__getitem__(self, obj) + if issubclass(val.dtype.type, character) and not _len(val) == 0: + temp = val.rstrip() + if _len(temp) == 0: + val = '' + else: + val = temp + return val + + # IMPLEMENTATION NOTE: Most of the methods of this class are + # direct delegations to the free functions in this module. + # However, those that return an array of strings should instead + # return a chararray, so some extra wrapping is required. + + def __eq__(self, other): + """ + Return (self == other) element-wise. + + See also + -------- + equal + """ + return equal(self, other) + + def __ne__(self, other): + """ + Return (self != other) element-wise. + + See also + -------- + not_equal + """ + return not_equal(self, other) + + def __ge__(self, other): + """ + Return (self >= other) element-wise. + + See also + -------- + greater_equal + """ + return greater_equal(self, other) + + def __le__(self, other): + """ + Return (self <= other) element-wise. + + See also + -------- + less_equal + """ + return less_equal(self, other) + + def __gt__(self, other): + """ + Return (self > other) element-wise. + + See also + -------- + greater + """ + return greater(self, other) + + def __lt__(self, other): + """ + Return (self < other) element-wise. + + See also + -------- + less + """ + return less(self, other) + + def __add__(self, other): + """ + Return (self + other), that is string concatenation, + element-wise for a pair of array_likes of str or unicode. + + See also + -------- + add + """ + return asarray(add(self, other)) + + def __radd__(self, other): + """ + Return (other + self), that is string concatenation, + element-wise for a pair of array_likes of `string_` or `unicode_`. + + See also + -------- + add + """ + return asarray(add(numpy.asarray(other), self)) + + def __mul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __rmul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __mod__(self, i): + """ + Return (self % i), that is pre-Python 2.6 string formatting + (iterpolation), element-wise for a pair of array_likes of `string_` + or `unicode_`. + + See also + -------- + mod + """ + return asarray(mod(self, i)) + + def __rmod__(self, other): + return NotImplemented + + def argsort(self, axis=-1, kind='quicksort', order=None): + """ + Return the indices that sort the array lexicographically. + + For full documentation see `numpy.argsort`, for which this method is + in fact merely a "thin wrapper." 
+ + Examples + -------- + >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') + >>> c = c.view(np.chararray); c + chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], + dtype='|S5') + >>> c[c.argsort()] + chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], + dtype='|S5') + + """ + return self.__array__().argsort(axis, kind, order) + argsort.__doc__ = ndarray.argsort.__doc__ + + def capitalize(self): + """ + Return a copy of `self` with only the first character of each element + capitalized. + + See also + -------- + char.capitalize + + """ + return asarray(capitalize(self)) + + def center(self, width, fillchar=' '): + """ + Return a copy of `self` with its elements centered in a + string of length `width`. + + See also + -------- + center + """ + return asarray(center(self, width, fillchar)) + + def count(self, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + See also + -------- + char.count + + """ + return count(self, sub, start, end) + + + def decode(self, encoding=None, errors=None): + """ + Calls `str.decode` element-wise. + + See also + -------- + char.decode + + """ + return decode(self, encoding, errors) + + def encode(self, encoding=None, errors=None): + """ + Calls `str.encode` element-wise. + + See also + -------- + char.encode + + """ + return encode(self, encoding, errors) + + def endswith(self, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` ends with `suffix`, otherwise `False`. + + See also + -------- + char.endswith + + """ + return endswith(self, suffix, start, end) + + def expandtabs(self, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + See also + -------- + char.expandtabs + + """ + return asarray(expandtabs(self, tabsize)) + + def find(self, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + See also + -------- + char.find + + """ + return find(self, sub, start, end) + + def index(self, sub, start=0, end=None): + """ + Like `find`, but raises `ValueError` when the substring is not found. + + See also + -------- + char.index + + """ + return index(self, sub, start, end) + + def isalnum(self): + """ + Returns true for each element if all characters in the string + are alphanumeric and there is at least one character, false + otherwise. + + See also + -------- + char.isalnum + + """ + return isalnum(self) + + def isalpha(self): + """ + Returns true for each element if all characters in the string + are alphabetic and there is at least one character, false + otherwise. + + See also + -------- + char.isalpha + + """ + return isalpha(self) + + def isdigit(self): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + See also + -------- + char.isdigit + + """ + return isdigit(self) + + def islower(self): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. + + See also + -------- + char.islower + + """ + return islower(self) + + def isspace(self): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. 
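+
+        For example (an illustrative doctest, hand-checked against
+        `str.isspace`):
+
+        >>> np.char.asarray([' ', 'a b', 'ab']).isspace()
+        array([ True, False, False], dtype=bool)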
+ + See also + -------- + char.isspace + + """ + return isspace(self) + + def istitle(self): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + See also + -------- + char.istitle + + """ + return istitle(self) + + def isupper(self): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + See also + -------- + char.isupper + + """ + return isupper(self) + + def join(self, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + See also + -------- + char.join + + """ + return join(self, seq) + + def ljust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` left-justified in a + string of length `width`. + + See also + -------- + char.ljust + + """ + return asarray(ljust(self, width, fillchar)) + + def lower(self): + """ + Return an array with the elements of `self` converted to + lowercase. + + See also + -------- + char.lower + + """ + return asarray(lower(self)) + + def lstrip(self, chars=None): + """ + For each element in `self`, return a copy with the leading characters + removed. + + See also + -------- + char.lstrip + + """ + return asarray(lstrip(self, chars)) + + def partition(self, sep): + """ + Partition each element in `self` around `sep`. + + See also + -------- + partition + """ + return asarray(partition(self, sep)) + + def replace(self, old, new, count=None): + """ + For each element in `self`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + See also + -------- + char.replace + + """ + return asarray(replace(self, old, new, count)) + + def rfind(self, sub, start=0, end=None): + """ + For each element in `self`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + See also + -------- + char.rfind + + """ + return rfind(self, sub, start, end) + + def rindex(self, sub, start=0, end=None): + """ + Like `rfind`, but raises `ValueError` when the substring `sub` is + not found. + + See also + -------- + char.rindex + + """ + return rindex(self, sub, start, end) + + def rjust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` + right-justified in a string of length `width`. + + See also + -------- + char.rjust + + """ + return asarray(rjust(self, width, fillchar)) + + def rpartition(self, sep): + """ + Partition each element in `self` around `sep`. + + See also + -------- + rpartition + """ + return asarray(rpartition(self, sep)) + + def rsplit(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in + the string, using `sep` as the delimiter string. + + See also + -------- + char.rsplit + + """ + return rsplit(self, sep, maxsplit) + + def rstrip(self, chars=None): + """ + For each element in `self`, return a copy with the trailing + characters removed. + + See also + -------- + char.rstrip + + """ + return asarray(rstrip(self, chars)) + + def split(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in the + string, using `sep` as the delimiter string. + + See also + -------- + char.split + + """ + return split(self, sep, maxsplit) + + def splitlines(self, keepends=None): + """ + For each element in `self`, return a list of the lines in the + element, breaking at line boundaries. 
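+
+        For example (an illustrative doctest; object-array reprs can
+        vary slightly between versions):
+
+        >>> np.char.asarray(['ab\ncd']).splitlines()
+        array([['ab', 'cd']], dtype=object)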
+ + See also + -------- + char.splitlines + + """ + return splitlines(self, keepends) + + def startswith(self, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` starts with `prefix`, otherwise `False`. + + See also + -------- + char.startswith + + """ + return startswith(self, prefix, start, end) + + def strip(self, chars=None): + """ + For each element in `self`, return a copy with the leading and + trailing characters removed. + + See also + -------- + char.strip + + """ + return asarray(strip(self, chars)) + + def swapcase(self): + """ + For each element in `self`, return a copy of the string with + uppercase characters converted to lowercase and vice versa. + + See also + -------- + char.swapcase + + """ + return asarray(swapcase(self)) + + def title(self): + """ + For each element in `self`, return a titlecased version of the + string: words start with uppercase characters, all remaining cased + characters are lowercase. + + See also + -------- + char.title + + """ + return asarray(title(self)) + + def translate(self, table, deletechars=None): + """ + For each element in `self`, return a copy of the string where + all characters occurring in the optional argument + `deletechars` are removed, and the remaining characters have + been mapped through the given translation table. + + See also + -------- + char.translate + + """ + return asarray(translate(self, table, deletechars)) + + def upper(self): + """ + Return an array with the elements of `self` converted to + uppercase. + + See also + -------- + char.upper + + """ + return asarray(upper(self)) + + def zfill(self, width): + """ + Return the numeric string left-filled with zeros in a string of + length `width`. + + See also + -------- + char.zfill + + """ + return asarray(zfill(self, width)) + + def isnumeric(self): + """ + For each element in `self`, return True if there are only + numeric characters in the element. + + See also + -------- + char.isnumeric + + """ + return isnumeric(self) + + def isdecimal(self): + """ + For each element in `self`, return True if there are only + decimal characters in the element. + + See also + -------- + char.isdecimal + + """ + return isdecimal(self) + + +def array(obj, itemsize=None, copy=True, unicode=None, order=None): + """ + Create a `chararray`. + + .. note:: + This class is provided for numarray backward-compatibility. + New code (not concerned with numarray compatibility) should use + arrays of type `string_` or `unicode_` and use the free functions + in :mod:`numpy.char ` for fast + vectorized string operations instead. + + Versus a regular Numpy array of type `str` or `unicode`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + copy : bool, optional + If true (default), then the object is copied. 
Otherwise, a copy + will only be made if __array__ returns a copy, if obj is a + nested sequence, or if a copy is needed to satisfy any of the other + requirements (`itemsize`, unicode, `order`, etc.). + + unicode : bool, optional + When true, the resulting `chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + `None` and `obj` is one of the following: + + - a `chararray`, + - an ndarray of type `str` or `unicode` + - a Python str or unicode object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). If order is 'A', then the returned array may + be in any order (either C-, Fortran-contiguous, or even + discontiguous). + """ + if isinstance(obj, (_bytes, _unicode)): + if unicode is None: + if isinstance(obj, _unicode): + unicode = True + else: + unicode = False + + if itemsize is None: + itemsize = _len(obj) + shape = _len(obj) // itemsize + + if unicode: + if sys.maxunicode == 0xffff: + # On a narrow Python build, the buffer for Unicode + # strings is UCS2, which doesn't match the buffer for + # Numpy Unicode types, which is ALWAYS UCS4. + # Therefore, we need to convert the buffer. On Python + # 2.6 and later, we can use the utf_32 codec. Earlier + # versions don't have that codec, so we convert to a + # numerical array that matches the input buffer, and + # then use Numpy to convert it to UCS4. All of this + # should happen in native endianness. + if sys.hexversion >= 0x2060000: + obj = obj.encode('utf_32') + else: + if isinstance(obj, str): + ascii = numpy.frombuffer(obj, 'u1') + ucs4 = numpy.array(ascii, 'u4') + obj = ucs4.data + else: + ucs2 = numpy.frombuffer(obj, 'u2') + ucs4 = numpy.array(ucs2, 'u4') + obj = ucs4.data + else: + obj = _unicode(obj) + else: + # Let the default Unicode -> string encoding (if any) take + # precedence. + obj = _bytes(obj) + + return chararray(shape, itemsize=itemsize, unicode=unicode, + buffer=obj, order=order) + + if isinstance(obj, (list, tuple)): + obj = numpy.asarray(obj) + + if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character): + # If we just have a vanilla chararray, create a chararray + # view around it. + if not isinstance(obj, chararray): + obj = obj.view(chararray) + + if itemsize is None: + itemsize = obj.itemsize + # itemsize is in 8-bit chars, so for Unicode, we need + # to divide by the size of a single Unicode character, + # which for Numpy is always 4 + if issubclass(obj.dtype.type, unicode_): + itemsize //= 4 + + if unicode is None: + if issubclass(obj.dtype.type, unicode_): + unicode = True + else: + unicode = False + + if unicode: + dtype = unicode_ + else: + dtype = string_ + + if order is not None: + obj = numpy.asarray(obj, order=order) + if (copy + or (itemsize != obj.itemsize) + or (not unicode and isinstance(obj, unicode_)) + or (unicode and isinstance(obj, string_))): + obj = obj.astype((dtype, long(itemsize))) + return obj + + if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object): + if itemsize is None: + # Since no itemsize was specified, convert the input array to + # a list so the ndarray constructor will automatically + # determine the itemsize for us. 
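+            # (Editor's illustration: np.array(['ab', 'abcd'], dtype=object)
+            # becomes ['ab', 'abcd'] here, and narray below then infers
+            # itemsize=4 from the longest element.)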
+ obj = obj.tolist() + # Fall through to the default case + + if unicode: + dtype = unicode_ + else: + dtype = string_ + + if itemsize is None: + val = narray(obj, dtype=dtype, order=order, subok=True) + else: + val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True) + return val.view(chararray) + + +def asarray(obj, itemsize=None, unicode=None, order=None): + """ + Convert the input to a `chararray`, copying the data only if + necessary. + + Versus a regular Numpy array of type `str` or `unicode`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `str.endswith`) and infix operators (e.g. +, *, %) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + unicode : bool, optional + When true, the resulting `chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + `None` and `obj` is one of the following: + + - a `chararray`, + - an ndarray of type `str` or 'unicode` + - a Python str or unicode object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). + """ + return array(obj, itemsize, copy=False, + unicode=unicode, order=order) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/fromnumeric.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/fromnumeric.py new file mode 100644 index 0000000000000..49fd57e29c34a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/fromnumeric.py @@ -0,0 +1,2930 @@ +"""Module containing non-deprecated functions borrowed from Numeric. + +""" +from __future__ import division, absolute_import, print_function + +import types +import warnings + +from .. import VisibleDeprecationWarning +from . import multiarray as mu +from . import umath as um +from . import numerictypes as nt +from .numeric import asarray, array, asanyarray, concatenate +from . 
import _methods + +_dt_ = nt.sctype2char + + +# functions that are methods +__all__ = [ + 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', + 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', + 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', + 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', + 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_', + 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', + 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', + ] + + +try: + _gentype = types.GeneratorType +except AttributeError: + _gentype = type(None) + +# save away Python sum +_sum_ = sum + +# functions that are now methods +def _wrapit(obj, method, *args, **kwds): + try: + wrap = obj.__array_wrap__ + except AttributeError: + wrap = None + result = getattr(asarray(obj), method)(*args, **kwds) + if wrap: + if not isinstance(result, mu.ndarray): + result = asarray(result) + result = wrap(result) + return result + + +def take(a, indices, axis=None, out=None, mode='raise'): + """ + Take elements from an array along an axis. + + This function does the same thing as "fancy" indexing (indexing arrays + using arrays); however, it can be easier to use if you need elements + along a given axis. + + Parameters + ---------- + a : array_like + The source array. + indices : array_like + The indices of the values to extract. + + .. versionadded:: 1.8.0 + + Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : ndarray, optional + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + subarray : ndarray + The returned array has the same type as `a`. + + See Also + -------- + compress : Take elements using a boolean mask + ndarray.take : equivalent method + + Examples + -------- + >>> a = [4, 3, 5, 7, 6, 8] + >>> indices = [0, 1, 4] + >>> np.take(a, indices) + array([4, 3, 6]) + + In this example if `a` is an ndarray, "fancy" indexing can be used. + + >>> a = np.array(a) + >>> a[indices] + array([4, 3, 6]) + + If `indices` is not one dimensional, the output also has these dimensions. + + >>> np.take(a, [[0, 1], [2, 3]]) + array([[4, 3], + [5, 7]]) + """ + try: + take = a.take + except AttributeError: + return _wrapit(a, 'take', indices, axis, out, mode) + return take(indices, axis, out, mode) + + +# not deprecated --- copy if necessary, view otherwise +def reshape(a, newshape, order='C'): + """ + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred + from the length of the array and remaining dimensions. + order : {'C', 'F', 'A'}, optional + Read the elements of `a` using this index order, and place the elements + into the reshaped array using this index order. 
'C' means to + read / write the elements using C-like index order, with the last axis index + changing fastest, back to the first axis index changing slowest. 'F' + means to read / write the elements using Fortran-like index order, with + the first index changing fastest, and the last index changing slowest. + Note that the 'C' and 'F' options take no account of the memory layout + of the underlying array, and only refer to the order of indexing. 'A' + means to read / write the elements in Fortran-like index order if `a` is + Fortran *contiguous* in memory, C-like order otherwise. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. Note there is no guarantee of the *memory layout* (C- or + Fortran- contiguous) of the returned array. + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raise if the data is copied, + you should assign the new shape to the shape attribute of the array:: + + >>> a = np.zeros((10, 2)) + # A transpose make the array non-contiguous + >>> b = a.T + # Taking a view makes it possible to modify the shape without modifying the + # initial object. + >>> c = b.view() + >>> c.shape = (20) + AttributeError: incompatible shape for a non-contiguous array + + The `order` keyword gives the index ordering both for *fetching* the values + from `a`, and then *placing* the values into the output array. For example, + let's say you have an array: + + >>> a = np.arange(6).reshape((3, 2)) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + + You can think of reshaping as first raveling the array (using the given + index order), then inserting the elements from the raveled array into the + new array using the same kind of index ordering as was used for the + raveling. + + >>> np.reshape(a, (2, 3)) # C-like index ordering + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering + array([[0, 4, 3], + [2, 1, 5]]) + >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') + array([[0, 4, 3], + [2, 1, 5]]) + + Examples + -------- + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> np.reshape(a, 6) + array([1, 2, 3, 4, 5, 6]) + >>> np.reshape(a, 6, order='F') + array([1, 4, 2, 5, 3, 6]) + + >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 + array([[1, 2], + [3, 4], + [5, 6]]) + """ + try: + reshape = a.reshape + except AttributeError: + return _wrapit(a, 'reshape', newshape, order=order) + return reshape(newshape, order=order) + + +def choose(a, choices, out=None, mode='raise'): + """ + Construct an array from an index array and a set of arrays to choose from. + + First of all, if confused or uncertain, definitely look at the Examples - + in its full generality, this function is less simple than it might + seem from the following code description (below ndi = + `numpy.lib.index_tricks`): + + ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. + + But this omits some subtleties. 
Here is a fully general summary: + + Given an "index" array (`a`) of integers and a sequence of `n` arrays + (`choices`), `a` and each choice array are first broadcast, as necessary, + to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = + 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` + for each `i`. Then, a new array with shape ``Ba.shape`` is created as + follows: + + * if ``mode=raise`` (the default), then, first of all, each element of + `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that + `i` (in that range) is the value at the `(j0, j1, ..., jm)` position + in `Ba` - then the value at the same position in the new array is the + value in `Bchoices[i]` at that same position; + + * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) + integer; modular arithmetic is used to map integers outside the range + `[0, n-1]` back into that range; and then the new array is constructed + as above; + + * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) + integer; negative integers are mapped to 0; values greater than `n-1` + are mapped to `n-1`; and then the new array is constructed as above. + + Parameters + ---------- + a : int array + This array must contain integers in `[0, n-1]`, where `n` is the number + of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any + integers are permissible. + choices : sequence of arrays + Choice arrays. `a` and all of the choices must be broadcastable to the + same shape. If `choices` is itself an array (not recommended), then + its outermost dimension (i.e., the one corresponding to + ``choices.shape[0]``) is taken as defining the "sequence". + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + mode : {'raise' (default), 'wrap', 'clip'}, optional + Specifies how indices outside `[0, n-1]` will be treated: + + * 'raise' : an exception is raised + * 'wrap' : value becomes value mod `n` + * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 + + Returns + ------- + merged_array : array + The merged result. + + Raises + ------ + ValueError: shape mismatch + If `a` and each choice array are not all broadcastable to the same + shape. + + See Also + -------- + ndarray.choose : equivalent method + + Notes + ----- + To reduce the chance of misinterpretation, even though the following + "abuse" is nominally supported, `choices` should neither be, nor be + thought of as, a single array, i.e., the outermost sequence-like container + should be either a list or a tuple. + + Examples + -------- + + >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], + ... [20, 21, 22, 23], [30, 31, 32, 33]] + >>> np.choose([2, 3, 1, 0], choices + ... # the first element of the result will be the first element of the + ... # third (2+1) "array" in choices, namely, 20; the second element + ... # will be the second element of the fourth (3+1) choice array, i.e., + ... # 31, etc. + ... 
) + array([20, 31, 12, 3]) + >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) + array([20, 31, 12, 3]) + >>> # because there are 4 choice arrays + >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) + array([20, 1, 12, 3]) + >>> # i.e., 0 + + A couple examples illustrating how choose broadcasts: + + >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] + >>> choices = [-10, 10] + >>> np.choose(a, choices) + array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]]) + + >>> # With thanks to Anne Archibald + >>> a = np.array([0, 1]).reshape((2,1,1)) + >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) + >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) + >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 + array([[[ 1, 1, 1, 1, 1], + [ 2, 2, 2, 2, 2], + [ 3, 3, 3, 3, 3]], + [[-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5]]]) + + """ + try: + choose = a.choose + except AttributeError: + return _wrapit(a, 'choose', choices, out=out, mode=mode) + return choose(choices, out=out, mode=mode) + + +def repeat(a, repeats, axis=None): + """ + Repeat elements of an array. + + Parameters + ---------- + a : array_like + Input array. + repeats : {int, array of ints} + The number of repetitions for each element. `repeats` is broadcasted + to fit the shape of the given axis. + axis : int, optional + The axis along which to repeat values. By default, use the + flattened input array, and return a flat output array. + + Returns + ------- + repeated_array : ndarray + Output array which has the same shape as `a`, except along + the given axis. + + See Also + -------- + tile : Tile an array. + + Examples + -------- + >>> x = np.array([[1,2],[3,4]]) + >>> np.repeat(x, 2) + array([1, 1, 2, 2, 3, 3, 4, 4]) + >>> np.repeat(x, 3, axis=1) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> np.repeat(x, [1, 2], axis=0) + array([[1, 2], + [3, 4], + [3, 4]]) + + """ + try: + repeat = a.repeat + except AttributeError: + return _wrapit(a, 'repeat', repeats, axis) + return repeat(repeats, axis) + + +def put(a, ind, v, mode='raise'): + """ + Replaces specified elements of an array with given values. + + The indexing works on the flattened target array. `put` is roughly + equivalent to: + + :: + + a.flat[ind] = v + + Parameters + ---------- + a : ndarray + Target array. + ind : array_like + Target indices, interpreted as integers. + v : array_like + Values to place in `a` at target indices. If `v` is shorter than + `ind` it will be repeated as necessary. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + See Also + -------- + putmask, place + + Examples + -------- + >>> a = np.arange(5) + >>> np.put(a, [0, 2], [-44, -55]) + >>> a + array([-44, 1, -55, 3, 4]) + + >>> a = np.arange(5) + >>> np.put(a, 22, -5, mode='clip') + >>> a + array([ 0, 1, 2, 3, -5]) + + """ + return a.put(ind, v, mode) + + +def swapaxes(a, axis1, axis2): + """ + Interchange two axes of an array. + + Parameters + ---------- + a : array_like + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. 
+ + Returns + ------- + a_swapped : ndarray + If `a` is an ndarray, then a view of `a` is returned; otherwise + a new array is created. + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> np.swapaxes(x,0,1) + array([[1], + [2], + [3]]) + + >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + >>> np.swapaxes(x,0,2) + array([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]]) + + """ + try: + swapaxes = a.swapaxes + except AttributeError: + return _wrapit(a, 'swapaxes', axis1, axis2) + return swapaxes(axis1, axis2) + + +def transpose(a, axes=None): + """ + Permute the dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. + axes : list of ints, optional + By default, reverse the dimensions, otherwise permute the axes + according to the values given. + + Returns + ------- + p : ndarray + `a` with its axes permuted. A view is returned whenever + possible. + + See Also + -------- + rollaxis + + Examples + -------- + >>> x = np.arange(4).reshape((2,2)) + >>> x + array([[0, 1], + [2, 3]]) + + >>> np.transpose(x) + array([[0, 2], + [1, 3]]) + + >>> x = np.ones((1, 2, 3)) + >>> np.transpose(x, (1, 0, 2)).shape + (2, 1, 3) + + """ + try: + transpose = a.transpose + except AttributeError: + return _wrapit(a, 'transpose', axes) + return transpose(axes) + + +def partition(a, kth, axis=-1, kind='introselect', order=None): + """ + Return a partitioned copy of an array. + + Creates a copy of the array with its elements rearranged in such a way that + the value of the element in kth position is in the position it would be in + a sorted array. All elements smaller than the kth element are moved before + this element and all equal or greater are moved behind it. The ordering of + the elements in the two partitions is undefined. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array to be sorted. + kth : int or sequence of ints + Element index to partition by. The kth value of the element will be in + its final sorted position and all smaller elements will be moved before + it and all equal or greater elements behind it. + The order all elements in the partitions is undefined. + If provided with a sequence of kth it will partition all elements + indexed by kth of them into their sorted position at once. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. This list does not need to + include all of the fields. + + Returns + ------- + partitioned_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.partition : Method to sort an array in-place. + argpartition : Indirect partition. + sort : Full sorting + + Notes + ----- + The various selection algorithms are characterized by their average speed, + worst case performance, work space size, and whether they are stable. A + stable sort keeps items with the same key in the same relative order. 
The + available algorithms have the following properties: + + ================= ======= ============= ============ ======= + kind speed worst case work space stable + ================= ======= ============= ============ ======= + 'introselect' 1 O(n) 0 no + ================= ======= ============= ============ ======= + + All the partition algorithms make temporary copies of the data when + partitioning along any but the last axis. Consequently, partitioning + along the last axis is faster and uses less space than partitioning + along any other axis. + + The sort order for complex numbers is lexicographic. If both the real + and imaginary parts are non-nan then the order is determined by the + real parts except when they are equal, in which case the order is + determined by the imaginary parts. + + Examples + -------- + >>> a = np.array([3, 4, 2, 1]) + >>> np.partition(a, 3) + array([2, 1, 3, 4]) + + >>> np.partition(a, (1, 3)) + array([1, 2, 3, 4]) + + """ + if axis is None: + a = asanyarray(a).flatten() + axis = 0 + else: + a = asanyarray(a).copy(order="K") + a.partition(kth, axis=axis, kind=kind, order=order) + return a + + +def argpartition(a, kth, axis=-1, kind='introselect', order=None): + """ + Perform an indirect partition along the given axis using the algorithm + specified by the `kind` keyword. It returns an array of indices of the + same shape as `a` that index data along the given axis in partitioned + order. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array to sort. + kth : int or sequence of ints + Element index to partition by. The kth element will be in its final + sorted position and all smaller elements will be moved before it and + all larger elements behind it. + The order all elements in the partitions is undefined. + If provided with a sequence of kth it will partition all of them into + their sorted position at once. + axis : int or None, optional + Axis along which to sort. The default is -1 (the last axis). If None, + the flattened array is used. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect' + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + + Returns + ------- + index_array : ndarray, int + Array of indices that partition `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + partition : Describes partition algorithms used. + ndarray.partition : Inplace partition. + argsort : Full indirect sort + + Notes + ----- + See `partition` for notes on the different selection algorithms. + + Examples + -------- + One dimensional array: + + >>> x = np.array([3, 4, 2, 1]) + >>> x[np.argpartition(x, 3)] + array([2, 1, 3, 4]) + >>> x[np.argpartition(x, (1, 3))] + array([1, 2, 3, 4]) + + """ + return a.argpartition(kth, axis, kind=kind, order=order) + + +def sort(a, axis=-1, kind='quicksort', order=None): + """ + Return a sorted copy of an array. + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. Default is 'quicksort'. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. 
This list does not need to + include all of the fields. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + partition : Partial sort. + + Notes + ----- + The various sorting algorithms are characterized by their average speed, + worst case performance, work space size, and whether they are stable. A + stable sort keeps items with the same key in the same relative + order. The three available algorithms have the following + properties: + + =========== ======= ============= ============ ======= + kind speed worst case work space stable + =========== ======= ============= ============ ======= + 'quicksort' 1 O(n^2) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'heapsort' 3 O(n*log(n)) 0 no + =========== ======= ============= ============ ======= + + All the sort algorithms make temporary copies of the data when + sorting along any but the last axis. Consequently, sorting along + the last axis is faster and uses less space than sorting along + any other axis. + + The sort order for complex numbers is lexicographic. If both the real + and imaginary parts are non-nan then the order is determined by the + real parts except when they are equal, in which case the order is + determined by the imaginary parts. + + Previous to numpy 1.4.0 sorting real and complex arrays containing nan + values led to undefined behaviour. In numpy versions >= 1.4.0 nan + values are sorted to the end. The extended sort order is: + + * Real: [R, nan] + * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] + + where R is a non-nan real value. Complex values with the same nan + placements are sorted according to the non-nan part if it exists. + Non-nan values are sorted as before. + + Examples + -------- + >>> a = np.array([[1,4],[3,1]]) + >>> np.sort(a) # sort along the last axis + array([[1, 4], + [1, 3]]) + >>> np.sort(a, axis=None) # sort the flattened array + array([1, 1, 3, 4]) + >>> np.sort(a, axis=0) # sort along the first axis + array([[1, 1], + [3, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] + >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), + ... 
('Galahad', 1.7, 38)]
+    >>> a = np.array(values, dtype=dtype)       # create a structured array
+    >>> np.sort(a, order='height')                        # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+           ('Lancelot', 1.8999999999999999, 38)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    Sort by age, then height if ages are equal:
+
+    >>> np.sort(a, order=['age', 'height'])               # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+           ('Arthur', 1.8, 41)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    """
+    if axis is None:
+        a = asanyarray(a).flatten()
+        axis = 0
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.sort(axis, kind, order)
+    return a
+
+
+def argsort(a, axis=-1, kind='quicksort', order=None):
+    """
+    Returns the indices that would sort an array.
+
+    Perform an indirect sort along the given axis using the algorithm
+    specified by the `kind` keyword. It returns an array of indices of
+    the same shape as `a` that index data along the given axis in
+    sorted order.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis).
+        If None, the flattened array is used.
+    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+        Sorting algorithm.
+    order : list, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. Not all fields need
+        be specified.
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that sort `a` along the specified axis.
+        In other words, ``a[index_array]`` yields a sorted `a`.
+
+    See Also
+    --------
+    sort : Describes sorting algorithms used.
+    lexsort : Indirect stable sort with multiple keys.
+    ndarray.sort : Inplace sort.
+    argpartition : Indirect partial sort.
+
+    Notes
+    -----
+    See `sort` for notes on the different sorting algorithms.
+
+    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> x = np.array([3, 1, 2])
+    >>> np.argsort(x)
+    array([1, 2, 0])
+
+    Two-dimensional array:
+
+    >>> x = np.array([[0, 3], [2, 2]])
+    >>> x
+    array([[0, 3],
+           [2, 2]])
+
+    >>> np.argsort(x, axis=0)
+    array([[0, 1],
+           [1, 0]])
+
+    >>> np.argsort(x, axis=1)
+    array([[0, 1],
+           [0, 1]])
+
+    Sorting with keys:
+
+    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+    >>> x
+    array([(1, 0), (0, 1)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    >>> np.argsort(x, order=('x','y'))
+    array([1, 0])
+
+    >>> np.argsort(x, order=('y','x'))
+    array([0, 1])
+
+    """
+    try:
+        argsort = a.argsort
+    except AttributeError:
+        return _wrapit(a, 'argsort', axis, kind, order)
+    return argsort(axis, kind, order)
+
+
+def argmax(a, axis=None):
+    """
+    Indices of the maximum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a.shape`
+        with the dimension along `axis` removed.
+
+    See Also
+    --------
+    ndarray.argmax, argmin
+    amax : The maximum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+
+    Notes
+    -----
+    In case of multiple occurrences of the maximum values, the indices
+    corresponding to the first occurrence are returned.
+
+    Examples
+    --------
+    >>> a = np.arange(6).reshape(2,3)
+    >>> a
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.argmax(a)
+    5
+    >>> np.argmax(a, axis=0)
+    array([1, 1, 1])
+    >>> np.argmax(a, axis=1)
+    array([2, 2])
+
+    >>> b = np.arange(6)
+    >>> b[1] = 5
+    >>> b
+    array([0, 5, 2, 3, 4, 5])
+    >>> np.argmax(b) # Only the first occurrence is returned.
+    1
+
+    """
+    try:
+        argmax = a.argmax
+    except AttributeError:
+        return _wrapit(a, 'argmax', axis)
+    return argmax(axis)
+
+
+def argmin(a, axis=None):
+    """
+    Return the indices of the minimum values along an axis.
+
+    See Also
+    --------
+    argmax : Similar function. Please refer to `numpy.argmax` for detailed
+        documentation.
+
+    """
+    try:
+        argmin = a.argmin
+    except AttributeError:
+        return _wrapit(a, 'argmin', axis)
+    return argmin(axis)
+
+
+def searchsorted(a, v, side='left', sorter=None):
+    """
+    Find indices where elements should be inserted to maintain order.
+
+    Find the indices into a sorted array `a` such that, if the
+    corresponding elements in `v` were inserted before the indices, the
+    order of `a` would be preserved.
+
+    Parameters
+    ----------
+    a : 1-D array_like
+        Input array. If `sorter` is None, then it must be sorted in
+        ascending order, otherwise `sorter` must be an array of indices
+        that sort it.
+    v : array_like
+        Values to insert into `a`.
+    side : {'left', 'right'}, optional
+        If 'left', the index of the first suitable location found is given.
+        If 'right', return the last such index. If there is no suitable
+        index, return either 0 or N (where N is the length of `a`).
+    sorter : 1-D array_like, optional
+        .. versionadded:: 1.7.0
+        Optional array of integer indices that sort array a into ascending
+        order.
They are typically the result of argsort. + + Returns + ------- + indices : array of ints + Array of insertion points with the same shape as `v`. + + See Also + -------- + sort : Return a sorted copy of an array. + histogram : Produce histogram from 1-D data. + + Notes + ----- + Binary search is used to find the required insertion points. + + As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing + `nan` values. The enhanced sort order is documented in `sort`. + + Examples + -------- + >>> np.searchsorted([1,2,3,4,5], 3) + 2 + >>> np.searchsorted([1,2,3,4,5], 3, side='right') + 3 + >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) + array([0, 5, 1, 2]) + + """ + try: + searchsorted = a.searchsorted + except AttributeError: + return _wrapit(a, 'searchsorted', v, side, sorter) + return searchsorted(v, side, sorter) + + +def resize(a, new_shape): + """ + Return a new array with the specified shape. + + If the new array is larger than the original array, then the new + array is filled with repeated copies of `a`. Note that this behavior + is different from a.resize(new_shape) which fills with zeros instead + of repeated copies of `a`. + + Parameters + ---------- + a : array_like + Array to be resized. + + new_shape : int or tuple of int + Shape of resized array. + + Returns + ------- + reshaped_array : ndarray + The new array is formed from the data in the old array, repeated + if necessary to fill out the required number of elements. The + data are repeated in the order that they are stored in memory. + + See Also + -------- + ndarray.resize : resize an array in-place. + + Examples + -------- + >>> a=np.array([[0,1],[2,3]]) + >>> np.resize(a,(1,4)) + array([[0, 1, 2, 3]]) + >>> np.resize(a,(2,4)) + array([[0, 1, 2, 3], + [0, 1, 2, 3]]) + + """ + if isinstance(new_shape, (int, nt.integer)): + new_shape = (new_shape,) + a = ravel(a) + Na = len(a) + if not Na: return mu.zeros(new_shape, a.dtype.char) + total_size = um.multiply.reduce(new_shape) + n_copies = int(total_size / Na) + extra = total_size % Na + + if total_size == 0: + return a[:0] + + if extra != 0: + n_copies = n_copies+1 + extra = Na-extra + + a = concatenate( (a,)*n_copies) + if extra > 0: + a = a[:-extra] + + return reshape(a, new_shape) + + +def squeeze(a, axis=None): + """ + Remove single-dimensional entries from the shape of an array. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + .. versionadded:: 1.7.0 + + Selects a subset of the single-dimensional entries in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + Returns + ------- + squeezed : ndarray + The input array, but with all or a subset of the + dimensions of length 1 removed. This is always `a` itself + or a view into `a`. + + Examples + -------- + >>> x = np.array([[[0], [1], [2]]]) + >>> x.shape + (1, 3, 1) + >>> np.squeeze(x).shape + (3,) + >>> np.squeeze(x, axis=(2,)).shape + (1, 3) + + """ + try: + squeeze = a.squeeze + except AttributeError: + return _wrapit(a, 'squeeze') + try: + # First try to use the new axis= parameter + return squeeze(axis=axis) + except TypeError: + # For backwards compatibility + return squeeze() + + +def diagonal(a, offset=0, axis1=0, axis2=1): + """ + Return specified diagonals. + + If `a` is 2-D, returns the diagonal of `a` with the given offset, + i.e., the collection of elements of the form ``a[i, i+offset]``. 
If + `a` has more than two dimensions, then the axes specified by `axis1` + and `axis2` are used to determine the 2-D sub-array whose diagonal is + returned. The shape of the resulting array can be determined by + removing `axis1` and `axis2` and appending an index to the right equal + to the size of the resulting diagonals. + + In versions of NumPy prior to 1.7, this function always returned a new, + independent array containing a copy of the values in the diagonal. + + In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, + but depending on this fact is deprecated. Writing to the resulting + array continues to work as it used to, but a FutureWarning is issued. + + In NumPy 1.9 it returns a read-only view on the original array. + Attempting to write to the resulting array will produce an error. + + In NumPy 1.10, it will return a read/write view, Writing to the returned + array will alter your original array. + + If you don't write to the array returned by this function, then you can + just ignore all of the above. + + If you depend on the current behavior, then we suggest copying the + returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead of + just ``np.diagonal(a)``. This will work with both past and future versions + of NumPy. + + Parameters + ---------- + a : array_like + Array from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be positive or + negative. Defaults to main diagonal (0). + axis1 : int, optional + Axis to be used as the first axis of the 2-D sub-arrays from which + the diagonals should be taken. Defaults to first axis (0). + axis2 : int, optional + Axis to be used as the second axis of the 2-D sub-arrays from + which the diagonals should be taken. Defaults to second axis (1). + + Returns + ------- + array_of_diagonals : ndarray + If `a` is 2-D, a 1-D array containing the diagonal is returned. + If the dimension of `a` is larger, then an array of diagonals is + returned, "packed" from left-most dimension to right-most (e.g., + if `a` is 3-D, then the diagonals are "packed" along rows). + + Raises + ------ + ValueError + If the dimension of `a` is less than 2. + + See Also + -------- + diag : MATLAB work-a-like for 1-D and 2-D arrays. + diagflat : Create diagonal arrays. + trace : Sum along diagonals. + + Examples + -------- + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> a.diagonal() + array([0, 3]) + >>> a.diagonal(1) + array([1]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2,2,2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> a.diagonal(0, # Main diagonals of two arrays created by skipping + ... 0, # across the outer(left)-most axis last and + ... 1) # the "middle" (row) axis first. + array([[0, 6], + [1, 7]]) + + The sub-arrays whose main diagonals we just obtained; note that each + corresponds to fixing the right-most (column) axis, and that the + diagonals are "packed" in rows. + + >>> a[:,:,0] # main diagonal is [0 6] + array([[0, 2], + [4, 6]]) + >>> a[:,:,1] # main diagonal is [1 7] + array([[1, 3], + [5, 7]]) + + """ + return asarray(a).diagonal(offset, axis1, axis2) + + +def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + Return the sum along diagonals of the array. + + If `a` is 2-D, the sum along its diagonal with the given offset + is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. 
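Given the version-dependent view semantics documented for `diagonal` above, a defensive caller copies before writing. A short sketch that stays safe across the NumPy 1.7-1.10 transition described there (not part of the patched file):

    import numpy as np

    a = np.arange(4).reshape(2, 2)
    d = np.diagonal(a).copy()  # explicit copy: writable under any NumPy version
    d[0] = 99                  # modifies the copy only, never `a`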
+ + If `a` has more than two dimensions, then the axes specified by axis1 and + axis2 are used to determine the 2-D sub-arrays whose traces are returned. + The shape of the resulting array is the same as that of `a` with `axis1` + and `axis2` removed. + + Parameters + ---------- + a : array_like + Input array, from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be both positive + and negative. Defaults to 0. + axis1, axis2 : int, optional + Axes to be used as the first and second axis of the 2-D sub-arrays + from which the diagonals should be taken. Defaults are the first two + axes of `a`. + dtype : dtype, optional + Determines the data-type of the returned array and of the accumulator + where the elements are summed. If dtype has the value None and `a` is + of integer type of precision less than the default integer + precision, then the default integer precision is used. Otherwise, + the precision is the same as that of `a`. + out : ndarray, optional + Array into which the output is placed. Its type is preserved and + it must be of the right shape to hold the output. + + Returns + ------- + sum_along_diagonals : ndarray + If `a` is 2-D, the sum along the diagonal is returned. If `a` has + larger dimensions, then an array of sums along diagonals is returned. + + See Also + -------- + diag, diagonal, diagflat + + Examples + -------- + >>> np.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2,2,2)) + >>> np.trace(a) + array([6, 8]) + + >>> a = np.arange(24).reshape((2,2,2,3)) + >>> np.trace(a).shape + (2, 3) + + """ + return asarray(a).trace(offset, axis1, axis2, dtype, out) + +def ravel(a, order='C'): + """ + Return a flattened array. + + A 1-D array, containing the elements of the input, is returned. A copy is + made only if needed. + + Parameters + ---------- + a : array_like + Input array. The elements in `a` are read in the order specified by + `order`, and packed as a 1-D array. + order : {'C','F', 'A', 'K'}, optional + The elements of `a` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index changing + fastest, back to the first axis index changing slowest. 'F' means to + index the elements in Fortran-like index order, with the first index + changing fastest, and the last index changing slowest. Note that the 'C' + and 'F' options take no account of the memory layout of the underlying + array, and only refer to the order of axis indexing. 'A' means to read + the elements in Fortran-like index order if `a` is Fortran *contiguous* + in memory, C-like order otherwise. 'K' means to read the elements in + the order they occur in memory, except for reversing the data when + strides are negative. By default, 'C' index order is used. + + Returns + ------- + 1d_array : ndarray + Output of the same dtype as `a`, and of shape ``(a.size,)``. + + See Also + -------- + ndarray.flat : 1-D iterator over an array. + ndarray.flatten : 1-D array copy of the elements of an array + in row-major order. + + Notes + ----- + In C-like (row-major) order, in two dimensions, the row index varies the + slowest, and the column index the quickest. This can be generalized to + multiple dimensions, where row-major order implies that the index along the + first axis varies slowest, and the index along the last quickest. The + opposite holds for Fortran-like, or column-major, index ordering. + + Examples + -------- + It is equivalent to ``reshape(-1, order=order)``. 
+ + >>> x = np.array([[1, 2, 3], [4, 5, 6]]) + >>> print np.ravel(x) + [1 2 3 4 5 6] + + >>> print x.reshape(-1) + [1 2 3 4 5 6] + + >>> print np.ravel(x, order='F') + [1 4 2 5 3 6] + + When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: + + >>> print np.ravel(x.T) + [1 4 2 5 3 6] + >>> print np.ravel(x.T, order='A') + [1 2 3 4 5 6] + + When ``order`` is 'K', it will preserve orderings that are neither 'C' + nor 'F', but won't reverse axes: + + >>> a = np.arange(3)[::-1]; a + array([2, 1, 0]) + >>> a.ravel(order='C') + array([2, 1, 0]) + >>> a.ravel(order='K') + array([2, 1, 0]) + + >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a + array([[[ 0, 2, 4], + [ 1, 3, 5]], + [[ 6, 8, 10], + [ 7, 9, 11]]]) + >>> a.ravel(order='C') + array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) + >>> a.ravel(order='K') + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + return asarray(a).ravel(order) + + +def nonzero(a): + """ + Return the indices of the elements that are non-zero. + + Returns a tuple of arrays, one for each dimension of `a`, containing + the indices of the non-zero elements in that dimension. The + corresponding non-zero values can be obtained with:: + + a[nonzero(a)] + + To group the indices by element, rather than dimension, use:: + + transpose(nonzero(a)) + + The result of this is always a 2-D array, with a row for + each non-zero element. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> x = np.eye(3) + >>> x + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> np.nonzero(x) + (array([0, 1, 2]), array([0, 1, 2])) + + >>> x[np.nonzero(x)] + array([ 1., 1., 1.]) + >>> np.transpose(np.nonzero(x)) + array([[0, 0], + [1, 1], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, np.nonzero(a > 3) + yields the indices of the `a` where the condition is true. + + >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + array([[False, False, False], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> np.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the boolean array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + try: + nonzero = a.nonzero + except AttributeError: + res = _wrapit(a, 'nonzero') + else: + res = nonzero() + return res + + +def shape(a): + """ + Return the shape of an array. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + shape : tuple of ints + The elements of the shape tuple give the lengths of the + corresponding array dimensions. + + See Also + -------- + alen + ndarray.shape : Equivalent array method. 
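Whether `ravel` actually had to copy can be checked with `np.may_share_memory`; a small illustration of the "copy is made only if needed" claim above, assuming the usual C-contiguous layout (not part of the patched file):

    import numpy as np

    x = np.arange(6).reshape(2, 3)    # C-contiguous
    r = np.ravel(x)
    print(np.may_share_memory(x, r))  # True: no copy was needed
    t = np.ravel(x.T)                 # transpose is not C-contiguous
    print(np.may_share_memory(x, t))  # False: ravel made a copy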
+ + Examples + -------- + >>> np.shape(np.eye(3)) + (3, 3) + >>> np.shape([[1, 2]]) + (1, 2) + >>> np.shape([0]) + (1,) + >>> np.shape(0) + () + + >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + >>> np.shape(a) + (2,) + >>> a.shape + (2,) + + """ + try: + result = a.shape + except AttributeError: + result = asarray(a).shape + return result + + +def compress(condition, a, axis=None, out=None): + """ + Return selected slices of an array along given axis. + + When working along a given axis, a slice along that axis is returned in + `output` for each index where `condition` evaluates to True. When + working on a 1-D array, `compress` is equivalent to `extract`. + + Parameters + ---------- + condition : 1-D array of bools + Array that selects which entries to return. If len(condition) + is less than the size of `a` along the given axis, then output is + truncated to the length of the condition array. + a : array_like + Array from which to extract a part. + axis : int, optional + Axis along which to take slices. If None (default), work on the + flattened array. + out : ndarray, optional + Output array. Its type is preserved and it must be of the right + shape to hold the output. + + Returns + ------- + compressed_array : ndarray + A copy of `a` without the slices along axis for which `condition` + is false. + + See Also + -------- + take, choose, diag, diagonal, select + ndarray.compress : Equivalent method in ndarray + np.extract: Equivalent method when working on 1-D arrays + numpy.doc.ufuncs : Section "Output arguments" + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4], [5, 6]]) + >>> a + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.compress([0, 1], a, axis=0) + array([[3, 4]]) + >>> np.compress([False, True, True], a, axis=0) + array([[3, 4], + [5, 6]]) + >>> np.compress([False, True], a, axis=1) + array([[2], + [4], + [6]]) + + Working on the flattened array does not return slices along an axis but + selects elements. + + >>> np.compress([False, True], a) + array([2]) + + """ + try: + compress = a.compress + except AttributeError: + return _wrapit(a, 'compress', condition, axis, out) + return compress(condition, axis, out) + + +def clip(a, a_min, a_max, out=None): + """ + Clip (limit) the values in an array. + + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. + + Parameters + ---------- + a : array_like + Array containing elements to clip. + a_min : scalar or array_like + Minimum value. + a_max : scalar or array_like + Maximum value. If `a_min` or `a_max` are array_like, then they will + be broadcasted to the shape of `a`. + out : ndarray, optional + The results will be placed in this array. It may be the input + array for in-place clipping. `out` must be of the right shape + to hold the output. Its type is preserved. + + Returns + ------- + clipped_array : ndarray + An array with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. 
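For the common axis-selection case, the `compress` function documented above matches plain boolean indexing; a quick equivalence check (illustrative only, not part of the patched file):

    import numpy as np

    a = np.array([[1, 2], [3, 4], [5, 6]])
    mask = np.array([False, True, True])
    assert np.array_equal(np.compress(mask, a, axis=0), a[mask])  # same rows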
+ + See Also + -------- + numpy.doc.ufuncs : Section "Output arguments" + + Examples + -------- + >>> a = np.arange(10) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8) + array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) + + """ + try: + clip = a.clip + except AttributeError: + return _wrapit(a, 'clip', a_min, a_max, out) + return clip(a_min, a_max, out) + + +def sum(a, axis=None, dtype=None, out=None, keepdims=False): + """ + Sum of array elements over a given axis. + + Parameters + ---------- + a : array_like + Elements to sum. + axis : None or int or tuple of ints, optional + Axis or axes along which a sum is performed. + The default (`axis` = `None`) is perform a sum over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a sum is performed on multiple + axes, instead of a single axis or all the axes as before. + dtype : dtype, optional + The type of the returned array and of the accumulator in which + the elements are summed. By default, the dtype of `a` is used. + An exception is when `a` has an integer type with less precision + than the default platform integer. In that case, the default + platform integer is used instead. + out : ndarray, optional + Array into which the output is placed. By default, a new array is + created. If `out` is given, it must be of the appropriate shape + (the shape of `a` with `axis` removed, i.e., + ``numpy.delete(a.shape, axis)``). Its type is preserved. See + `doc.ufuncs` (Section "Output arguments") for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + sum_along_axis : ndarray + An array with the same shape as `a`, with the specified + axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + See Also + -------- + ndarray.sum : Equivalent method. + + cumsum : Cumulative sum of array elements. + + trapz : Integration of array values using the composite trapezoidal rule. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> np.sum([0.5, 1.5]) + 2.0 + >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) + 1 + >>> np.sum([[0, 1], [0, 5]]) + 6 + >>> np.sum([[0, 1], [0, 5]], axis=0) + array([0, 6]) + >>> np.sum([[0, 1], [0, 5]], axis=1) + array([1, 5]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + -128 + + """ + if isinstance(a, _gentype): + res = _sum_(a) + if out is not None: + out[...] = res + return out + return res + elif type(a) is not mu.ndarray: + try: + sum = a.sum + except AttributeError: + return _methods._sum(a, axis=axis, dtype=dtype, + out=out, keepdims=keepdims) + # NOTE: Dropping the keepdims parameters here... 
+ return sum(axis=axis, dtype=dtype, out=out) + else: + return _methods._sum(a, axis=axis, dtype=dtype, + out=out, keepdims=keepdims) + +def product (a, axis=None, dtype=None, out=None, keepdims=False): + """ + Return the product of array elements over a given axis. + + See Also + -------- + prod : equivalent function; see for details. + + """ + return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + + +def sometrue(a, axis=None, out=None, keepdims=False): + """ + Check whether some values are true. + + Refer to `any` for full documentation. + + See Also + -------- + any : equivalent function + + """ + arr = asanyarray(a) + + try: + return arr.any(axis=axis, out=out, keepdims=keepdims) + except TypeError: + return arr.any(axis=axis, out=out) + +def alltrue (a, axis=None, out=None, keepdims=False): + """ + Check if all elements of input array are true. + + See Also + -------- + numpy.all : Equivalent function; see for details. + + """ + arr = asanyarray(a) + + try: + return arr.all(axis=axis, out=out, keepdims=keepdims) + except TypeError: + return arr.all(axis=axis, out=out) + +def any(a, axis=None, out=None, keepdims=False): + """ + Test whether any array element along a given axis evaluates to True. + + Returns single boolean unless `axis` is not ``None`` + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical OR reduction is performed. + The default (`axis` = `None`) is to perform a logical OR over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output and its type is preserved + (e.g., if it is of type float, then it will remain so, returning + 1.0 for True and 0.0 for False, regardless of the type of `a`). + See `doc.ufuncs` (Section "Output arguments") for details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + any : bool or ndarray + A new boolean or `ndarray` is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.any : equivalent method + + all : Test whether all elements along a given axis evaluate to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity evaluate + to `True` because these are not equal to zero. 
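The `try`/`except TypeError` pattern used by `sometrue` and `alltrue` above exists because an ndarray subclass may implement `.any()`/`.all()` without accepting `keepdims`. A stripped-down sketch of the same dispatch, with the hypothetical name `any_compat` (not part of the patched file):

    import numpy as np

    def any_compat(a, axis=None, out=None, keepdims=False):
        arr = np.asanyarray(a)
        try:
            # preferred path: the subclass accepts keepdims
            return arr.any(axis=axis, out=out, keepdims=keepdims)
        except TypeError:
            # fallback for subclasses whose .any() predates keepdims
            return arr.any(axis=axis, out=out)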
+ + Examples + -------- + >>> np.any([[True, False], [True, True]]) + True + + >>> np.any([[True, False], [False, False]], axis=0) + array([ True, False], dtype=bool) + + >>> np.any([-1, 0, 5]) + True + + >>> np.any(np.nan) + True + + >>> o=np.array([False]) + >>> z=np.any([-1, 4, 5], out=o) + >>> z, o + (array([ True], dtype=bool), array([ True], dtype=bool)) + >>> # Check now that z is a reference to o + >>> z is o + True + >>> id(z), id(o) # identity of z and o # doctest: +SKIP + (191614240, 191614240) + + """ + arr = asanyarray(a) + + try: + return arr.any(axis=axis, out=out, keepdims=keepdims) + except TypeError: + return arr.any(axis=axis, out=out) + +def all(a, axis=None, out=None, keepdims=False): + """ + Test whether all array elements along a given axis evaluate to True. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical AND reduction is performed. + The default (`axis` = `None`) is to perform a logical AND over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. + It must have the same shape as the expected output and its + type is preserved (e.g., if ``dtype(out)`` is float, the result + will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section + "Output arguments") for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + all : ndarray, bool + A new boolean or array is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.all : equivalent method + + any : Test whether any element along a given axis evaluates to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to `True` because these are not equal to zero. + + Examples + -------- + >>> np.all([[True,False],[True,True]]) + False + + >>> np.all([[True,False],[True,True]], axis=0) + array([ True, False], dtype=bool) + + >>> np.all([-1, 4, 5]) + True + + >>> np.all([1.0, np.nan]) + True + + >>> o=np.array([False]) + >>> z=np.all([-1, 4, 5], out=o) + >>> id(z), id(o), z # doctest: +SKIP + (28293632, 28293632, array([ True], dtype=bool)) + + """ + arr = asanyarray(a) + + try: + return arr.all(axis=axis, out=out, keepdims=keepdims) + except TypeError: + return arr.all(axis=axis, out=out) + +def cumsum (a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. 
+ out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See `doc.ufuncs` + (Section "Output arguments") for more details. + + Returns + ------- + cumsum_along_axis : ndarray. + A new array holding the result is returned unless `out` is + specified, in which case a reference to `out` is returned. The + result has the same size as `a`, and the same shape as `a` if + `axis` is not None or `a` is a 1-d array. + + + See Also + -------- + sum : Sum array elements. + + trapz : Integration of array values using the composite trapezoidal rule. + + diff : Calculate the n-th order discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.cumsum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + """ + try: + cumsum = a.cumsum + except AttributeError: + return _wrapit(a, 'cumsum', axis, dtype, out) + return cumsum(axis, dtype, out) + + +def cumproduct(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product over the given axis. + + + See Also + -------- + cumprod : equivalent function; see for details. + + """ + try: + cumprod = a.cumprod + except AttributeError: + return _wrapit(a, 'cumprod', axis, dtype, out) + return cumprod(axis, dtype, out) + + +def ptp(a, axis=None, out=None): + """ + Range of values (maximum - minimum) along an axis. + + The name of the function comes from the acronym for 'peak to peak'. + + Parameters + ---------- + a : array_like + Input values. + axis : int, optional + Axis along which to find the peaks. By default, flatten the + array. + out : array_like + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type of the output values will be cast if necessary. + + Returns + ------- + ptp : ndarray + A new array holding the result, unless `out` was + specified, in which case a reference to `out` is returned. + + Examples + -------- + >>> x = np.arange(4).reshape((2,2)) + >>> x + array([[0, 1], + [2, 3]]) + + >>> np.ptp(x, axis=0) + array([2, 2]) + + >>> np.ptp(x, axis=1) + array([1, 1]) + + """ + try: + ptp = a.ptp + except AttributeError: + return _wrapit(a, 'ptp', axis, out) + return ptp(axis, out) + + +def amax(a, axis=None, out=None, keepdims=False): + """ + Return the maximum of an array or maximum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default, flattened input is used. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See `doc.ufuncs` (Section "Output arguments") for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. 
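The `ptp` function documented above is just the elementwise difference of `amax` and `amin` along the axis; a one-line cross-check (illustrative only, not part of the patched file):

    import numpy as np

    x = np.arange(4).reshape(2, 2)
    assert np.array_equal(np.ptp(x, axis=0),
                          np.amax(x, axis=0) - np.amin(x, axis=0))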
+ + Returns + ------- + amax : ndarray or scalar + Maximum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is given, the result is an array of dimension + ``a.ndim - 1``. + + See Also + -------- + amin : + The minimum value of an array along a given axis, propagating any NaNs. + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + argmax : + Return the indices of the maximum values. + + nanmin, minimum, fmin + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding max value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmax. + + Don't use `amax` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than + ``amax(a, axis=0)``. + + Examples + -------- + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.amax(a) # Maximum of the flattened array + 3 + >>> np.amax(a, axis=0) # Maxima along the first axis + array([2, 3]) + >>> np.amax(a, axis=1) # Maxima along the second axis + array([1, 3]) + + >>> b = np.arange(5, dtype=np.float) + >>> b[2] = np.NaN + >>> np.amax(b) + nan + >>> np.nanmax(b) + 4.0 + + """ + if type(a) is not mu.ndarray: + try: + amax = a.max + except AttributeError: + return _methods._amax(a, axis=axis, + out=out, keepdims=keepdims) + # NOTE: Dropping the keepdims parameter + return amax(axis=axis, out=out) + else: + return _methods._amax(a, axis=axis, + out=out, keepdims=keepdims) + +def amin(a, axis=None, out=None, keepdims=False): + """ + Return the minimum of an array or minimum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default, flattened input is used. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See `doc.ufuncs` (Section "Output arguments") for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + amin : ndarray or scalar + Minimum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is given, the result is an array of dimension + ``a.ndim - 1``. + + See Also + -------- + amax : + The maximum value of an array along a given axis, propagating any NaNs. + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. + minimum : + Element-wise minimum of two arrays, propagating any NaNs. + fmin : + Element-wise minimum of two arrays, ignoring any NaNs. + argmin : + Return the indices of the minimum values. + + nanmax, maximum, fmax + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding min value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmin. + + Don't use `amin` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than + ``amin(a, axis=0)``. 
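The Notes above recommend `minimum(a[0], a[1])` over `amin(a, axis=0)` when there are exactly two rows; the results agree, only the speed differs. A minimal sketch (not part of the patched file):

    import numpy as np

    a = np.array([[3.0, 1.0], [2.0, 4.0]])
    assert np.array_equal(np.amin(a, axis=0), np.minimum(a[0], a[1]))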
+ + Examples + -------- + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.amin(a) # Minimum of the flattened array + 0 + >>> np.amin(a, axis=0) # Minima along the first axis + array([0, 1]) + >>> np.amin(a, axis=1) # Minima along the second axis + array([0, 2]) + + >>> b = np.arange(5, dtype=np.float) + >>> b[2] = np.NaN + >>> np.amin(b) + nan + >>> np.nanmin(b) + 0.0 + + """ + if type(a) is not mu.ndarray: + try: + amin = a.min + except AttributeError: + return _methods._amin(a, axis=axis, + out=out, keepdims=keepdims) + # NOTE: Dropping the keepdims parameter + return amin(axis=axis, out=out) + else: + return _methods._amin(a, axis=axis, + out=out, keepdims=keepdims) + +def alen(a): + """ + Return the length of the first dimension of the input array. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + alen : int + Length of the first dimension of `a`. + + See Also + -------- + shape, size + + Examples + -------- + >>> a = np.zeros((7,4,5)) + >>> a.shape[0] + 7 + >>> np.alen(a) + 7 + + """ + try: + return len(a) + except TypeError: + return len(array(a, ndmin=1)) + + +def prod(a, axis=None, dtype=None, out=None, keepdims=False): + """ + Return the product of array elements over a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which a product is performed. + The default (`axis` = `None`) is perform a product over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a product is performed on multiple + axes, instead of a single axis or all the axes as before. + dtype : data-type, optional + The data-type of the returned array, as well as of the accumulator + in which the elements are multiplied. By default, if `a` is of + integer type, `dtype` is the default platform integer. (Note: if + the type of `a` is unsigned, then so is `dtype`.) Otherwise, + the dtype is the same as that of `a`. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the + output values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + product_along_axis : ndarray, see `dtype` parameter above. + An array shaped as `a` but with the specified axis removed. + Returns a reference to `out` if specified. + + See Also + -------- + ndarray.prod : equivalent method + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. 
That means that, on a 32-bit platform: + + >>> x = np.array([536870910, 536870910, 536870910, 536870910]) + >>> np.prod(x) #random + 16 + + Examples + -------- + By default, calculate the product of all elements: + + >>> np.prod([1.,2.]) + 2.0 + + Even when the input array is two-dimensional: + + >>> np.prod([[1.,2.],[3.,4.]]) + 24.0 + + But we can also specify the axis over which to multiply: + + >>> np.prod([[1.,2.],[3.,4.]], axis=1) + array([ 2., 12.]) + + If the type of `x` is unsigned, then the output type is + the unsigned platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.uint8) + >>> np.prod(x).dtype == np.uint + True + + If `x` is of a signed integer type, then the output type + is the default platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.int8) + >>> np.prod(x).dtype == np.int + True + + """ + if type(a) is not mu.ndarray: + try: + prod = a.prod + except AttributeError: + return _methods._prod(a, axis=axis, dtype=dtype, + out=out, keepdims=keepdims) + return prod(axis=axis, dtype=dtype, out=out) + else: + return _methods._prod(a, axis=axis, dtype=dtype, + out=out, keepdims=keepdims) + +def cumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + cumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case a reference to out is returned. + + See Also + -------- + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([1,2,3]) + >>> np.cumprod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumprod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of `a`: + + >>> np.cumprod(a, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of `a`: + + >>> np.cumprod(a,axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + try: + cumprod = a.cumprod + except AttributeError: + return _wrapit(a, 'cumprod', axis, dtype, out) + return cumprod(axis, dtype, out) + + +def ndim(a): + """ + Return the number of dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. If it is not already an ndarray, a conversion is + attempted. + + Returns + ------- + number_of_dimensions : int + The number of dimensions in `a`. Scalars are zero-dimensional. 
+ + See Also + -------- + ndarray.ndim : equivalent method + shape : dimensions of array + ndarray.shape : dimensions of array + + Examples + -------- + >>> np.ndim([[1,2,3],[4,5,6]]) + 2 + >>> np.ndim(np.array([[1,2,3],[4,5,6]])) + 2 + >>> np.ndim(1) + 0 + + """ + try: + return a.ndim + except AttributeError: + return asarray(a).ndim + + +def rank(a): + """ + Return the number of dimensions of an array. + + If `a` is not already an array, a conversion is attempted. + Scalars are zero dimensional. + + .. note:: + This function is deprecated in NumPy 1.9 to avoid confusion with + `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function + should be used instead. + + Parameters + ---------- + a : array_like + Array whose number of dimensions is desired. If `a` is not an array, + a conversion is attempted. + + Returns + ------- + number_of_dimensions : int + The number of dimensions in the array. + + See Also + -------- + ndim : equivalent function + ndarray.ndim : equivalent property + shape : dimensions of array + ndarray.shape : dimensions of array + + Notes + ----- + In the old Numeric package, `rank` was the term used for the number of + dimensions, but in Numpy `ndim` is used instead. + + Examples + -------- + >>> np.rank([1,2,3]) + 1 + >>> np.rank(np.array([[1,2,3],[4,5,6]])) + 2 + >>> np.rank(1) + 0 + + """ + warnings.warn( + "`rank` is deprecated; use the `ndim` attribute or function instead. " + "To find the rank of a matrix see `numpy.linalg.matrix_rank`.", + VisibleDeprecationWarning) + try: + return a.ndim + except AttributeError: + return asarray(a).ndim + + +def size(a, axis=None): + """ + Return the number of elements along a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which the elements are counted. By default, give + the total number of elements. + + Returns + ------- + element_count : int + Number of elements along the specified axis. + + See Also + -------- + shape : dimensions of array + ndarray.shape : dimensions of array + ndarray.size : number of elements in array + + Examples + -------- + >>> a = np.array([[1,2,3],[4,5,6]]) + >>> np.size(a) + 6 + >>> np.size(a,1) + 3 + >>> np.size(a,0) + 2 + + """ + if axis is None: + try: + return a.size + except AttributeError: + return asarray(a).size + else: + try: + return a.shape[axis] + except AttributeError: + return asarray(a).shape[axis] + + +def around(a, decimals=0, out=None): + """ + Evenly round to the given number of decimals. + + Parameters + ---------- + a : array_like + Input data. + decimals : int, optional + Number of decimal places to round to (default: 0). If + decimals is negative, it specifies the number of positions to + the left of the decimal point. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. See `doc.ufuncs` (Section + "Output arguments") for details. + + Returns + ------- + rounded_array : ndarray + An array of the same type as `a`, containing the rounded values. + Unless `out` was specified, a new array is created. A reference to + the result is returned. + + The real and imaginary parts of complex numbers are rounded + separately. The result of rounding a float is a float. 
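Following the deprecation note on `rank` above, the migration is mechanical; note that `numpy.linalg.matrix_rank` computes a different, linear-algebra quantity. A small sketch (not part of the patched file):

    import numpy as np

    a = np.array([[1, 2, 3], [4, 5, 6]])
    print(np.ndim(a))                # 2: number of dimensions (replaces np.rank)
    print(np.linalg.matrix_rank(a))  # 2: rank of the matrix, a different concept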
+ + See Also + -------- + ndarray.round : equivalent method + + ceil, fix, floor, rint, trunc + + + Notes + ----- + For values exactly halfway between rounded decimal values, Numpy + rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, + -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due + to the inexact representation of decimal fractions in the IEEE + floating point standard [1]_ and errors introduced when scaling + by powers of ten. + + References + ---------- + .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, + http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF + .. [2] "How Futile are Mindless Assessments of + Roundoff in Floating-Point Computation?", William Kahan, + http://www.cs.berkeley.edu/~wkahan/Mindless.pdf + + Examples + -------- + >>> np.around([0.37, 1.64]) + array([ 0., 2.]) + >>> np.around([0.37, 1.64], decimals=1) + array([ 0.4, 1.6]) + >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value + array([ 0., 2., 2., 4., 4.]) + >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned + array([ 1, 2, 3, 11]) + >>> np.around([1,2,3,11], decimals=-1) + array([ 0, 0, 0, 10]) + + """ + try: + round = a.round + except AttributeError: + return _wrapit(a, 'round', decimals, out) + return round(decimals, out) + + +def round_(a, decimals=0, out=None): + """ + Round an array to the given number of decimals. + + Refer to `around` for full documentation. + + See Also + -------- + around : equivalent function + + """ + try: + round = a.round + except AttributeError: + return _wrapit(a, 'round', decimals, out) + return round(decimals, out) + + +def mean(a, axis=None, dtype=None, out=None, keepdims=False): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : int, optional + Axis along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for floating point inputs, it is the same as the + input dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See `doc.ufuncs` for details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. + + See Also + -------- + average : Weighted average + std, var, nanmean, nanstd, nanvar + + Notes + ----- + The arithmetic mean is the sum of the elements along the axis divided + by the number of elements. + + Note that for floating-point input, the mean is computed using the + same precision the input has. Depending on the input data, this can + cause the results to be inaccurate, especially for `float32` (see + example below). 
Specifying a higher-precision accumulator using the + `dtype` keyword can alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.mean(a) + 2.5 + >>> np.mean(a, axis=0) + array([ 2., 3.]) + >>> np.mean(a, axis=1) + array([ 1.5, 3.5]) + + In single precision, `mean` can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.mean(a) + 0.546875 + + Computing the mean in float64 is more accurate: + + >>> np.mean(a, dtype=np.float64) + 0.55000000074505806 + + """ + if type(a) is not mu.ndarray: + try: + mean = a.mean + return mean(axis=axis, dtype=dtype, out=out) + except AttributeError: + pass + + return _methods._mean(a, axis=axis, dtype=dtype, + out=out, keepdims=keepdims) + +def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + """ + Compute the standard deviation along the specified axis. + + Returns the standard deviation, a measure of the spread of a distribution, + of the array elements. The standard deviation is computed for the + flattened array by default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of these values. + axis : int, optional + Axis along which the standard deviation is computed. The default is + to compute the standard deviation of the flattened array. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it is + the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : int, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard deviation, + otherwise return a reference to the output array. + + See Also + -------- + var, mean, nanmean, nanstd, nanvar + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, + the divisor ``N - ddof`` is used instead. In standard statistical + practice, ``ddof=1`` provides an unbiased estimator of the variance + of the infinite population. ``ddof=0`` provides a maximum likelihood + estimate of the variance for normally distributed variables. The + standard deviation computed in this function is the square root of + the estimated variance, so even with ``ddof=1``, it will not be an + unbiased estimate of the standard deviation per se. + + Note that, for complex numbers, `std` takes the absolute + value before squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. 
Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example below). + Specifying a higher-accuracy accumulator using the `dtype` keyword can + alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.std(a) + 1.1180339887498949 + >>> np.std(a, axis=0) + array([ 1., 1.]) + >>> np.std(a, axis=1) + array([ 0.5, 0.5]) + + In single precision, std() can be inaccurate: + + >>> a = np.zeros((2,512*512), dtype=np.float32) + >>> a[0,:] = 1.0 + >>> a[1,:] = 0.1 + >>> np.std(a) + 0.45172946707416706 + + Computing the standard deviation in float64 is more accurate: + + >>> np.std(a, dtype=np.float64) + 0.44999999925552653 + + """ + if type(a) is not mu.ndarray: + try: + std = a.std + return std(axis=axis, dtype=dtype, out=out, ddof=ddof) + except AttributeError: + pass + + return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims) + +def var(a, axis=None, dtype=None, out=None, ddof=0, + keepdims=False): + """ + Compute the variance along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : int, optional + Axis along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float32`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : int, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + variance : ndarray, see dtype parameter above + If ``out=None``, returns a new array containing the variance; + otherwise, a reference to the output array is returned. + + See Also + -------- + std , mean, nanmean, nanstd, nanvar + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). 
Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + Examples + -------- + >>> a = np.array([[1,2],[3,4]]) + >>> np.var(a) + 1.25 + >>> np.var(a, axis=0) + array([ 1., 1.]) + >>> np.var(a, axis=1) + array([ 0.25, 0.25]) + + In single precision, var() can be inaccurate: + + >>> a = np.zeros((2,512*512), dtype=np.float32) + >>> a[0,:] = 1.0 + >>> a[1,:] = 0.1 + >>> np.var(a) + 0.20405951142311096 + + Computing the variance in float64 is more accurate: + + >>> np.var(a, dtype=np.float64) + 0.20249999932997387 + >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 + 0.20250000000000001 + + """ + if type(a) is not mu.ndarray: + try: + var = a.var + return var(axis=axis, dtype=dtype, out=out, ddof=ddof) + except AttributeError: + pass + + return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/function_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/function_base.py new file mode 100644 index 0000000000000..0bf93390e062a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/function_base.py @@ -0,0 +1,188 @@ +from __future__ import division, absolute_import, print_function + +__all__ = ['logspace', 'linspace'] + +from . import numeric as _nx +from .numeric import array, result_type + + +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None): + """ + Return evenly spaced numbers over a specified interval. + + Returns `num` evenly spaced samples, calculated over the + interval [`start`, `stop` ]. + + The endpoint of the interval can optionally be excluded. + + Parameters + ---------- + start : scalar + The starting value of the sequence. + stop : scalar + The end value of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced samples, so that `stop` is excluded. Note that the step + size changes when `endpoint` is False. + num : int, optional + Number of samples to generate. Default is 50. + endpoint : bool, optional + If True, `stop` is the last sample. Otherwise, it is not included. + Default is True. + retstep : bool, optional + If True, return (`samples`, `step`), where `step` is the spacing + between samples. + dtype : dtype + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + + .. versionadded:: 1.9.0 + + Returns + ------- + samples : ndarray + There are `num` equally spaced samples in the closed interval + ``[start, stop]`` or the half-open interval ``[start, stop)`` + (depending on whether `endpoint` is True or False). + step : float (only if `retstep` is True) + Size of spacing between samples. + + + See Also + -------- + arange : Similar to `linspace`, but uses a step size (instead of the + number of samples). + logspace : Samples uniformly distributed in log space. + + Examples + -------- + >>> np.linspace(2.0, 3.0, num=5) + array([ 2. , 2.25, 2.5 , 2.75, 3. ]) + >>> np.linspace(2.0, 3.0, num=5, endpoint=False) + array([ 2. , 2.2, 2.4, 2.6, 2.8]) + >>> np.linspace(2.0, 3.0, num=5, retstep=True) + (array([ 2. , 2.25, 2.5 , 2.75, 3. 
]), 0.25)
+
+    Graphical illustration:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 8
+    >>> y = np.zeros(N)
+    >>> x1 = np.linspace(0, 10, N, endpoint=True)
+    >>> x2 = np.linspace(0, 10, N, endpoint=False)
+    >>> plt.plot(x1, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(x2, y + 0.5, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.ylim([-0.5, 1])
+    (-0.5, 1)
+    >>> plt.show()
+
+    """
+    num = int(num)
+
+    # Convert float/complex array scalars to float, gh-3504
+    start = start * 1.
+    stop = stop * 1.
+
+    if dtype is None:
+        dtype = result_type(start, stop, float(num))
+
+    if num <= 0:
+        return array([], dtype)
+    if endpoint:
+        if num == 1:
+            return array([start], dtype=dtype)
+        step = (stop-start)/float((num-1))
+        y = _nx.arange(0, num, dtype=dtype) * step + start
+        y[-1] = stop
+    else:
+        step = (stop-start)/float(num)
+        y = _nx.arange(0, num, dtype=dtype) * step + start
+    if retstep:
+        return y.astype(dtype), step
+    else:
+        return y.astype(dtype)
+
+
+def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
+    """
+    Return numbers spaced evenly on a log scale.
+
+    In linear space, the sequence starts at ``base ** start``
+    (`base` to the power of `start`) and ends with ``base ** stop``
+    (see `endpoint` below).
+
+    Parameters
+    ----------
+    start : float
+        ``base ** start`` is the starting value of the sequence.
+    stop : float
+        ``base ** stop`` is the final value of the sequence, unless `endpoint`
+        is False.  In that case, ``num + 1`` values are spaced over the
+        interval in log-space, of which all but the last (a sequence of
+        length ``num``) are returned.
+    num : integer, optional
+        Number of samples to generate.  Default is 50.
+    endpoint : boolean, optional
+        If true, `stop` is the last sample. Otherwise, it is not included.
+        Default is True.
+    base : float, optional
+        The base of the log space. The step size between the elements in
+        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
+        Default is 10.0.
+    dtype : dtype
+        The type of the output array.  If `dtype` is not given, infer the data
+        type from the other input arguments.
+
+    Returns
+    -------
+    samples : ndarray
+        `num` samples, equally spaced on a log scale.
+
+    See Also
+    --------
+    arange : Similar to linspace, with the step size specified instead of the
+             number of samples. Note that, when used with a float endpoint, the
+             endpoint may or may not be included.
+    linspace : Similar to logspace, but with the samples uniformly distributed
+               in linear space, instead of log space.
+
+    Notes
+    -----
+    Logspace is equivalent to the code
+
+    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
+    ... # doctest: +SKIP
+    >>> power(base, y).astype(dtype)
+    ... # doctest: +SKIP
+
+    Examples
+    --------
+    >>> np.logspace(2.0, 3.0, num=4)
+    array([  100.        ,   215.443469  ,   464.15888336,  1000.        ])
+    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
+    array([ 100.        ,  177.827941  ,  316.22776602,  562.34132519])
+    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
+    array([ 4.        ,  5.0396842 ,  6.34960421,  8.
])
+
+    Graphical illustration:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 10
+    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
+    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
+    >>> y = np.zeros(N)
+    >>> plt.plot(x1, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(x2, y + 0.5, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.ylim([-0.5, 1])
+    (-0.5, 1)
+    >>> plt.show()
+
+    """
+    y = linspace(start, stop, num=num, endpoint=endpoint)
+    if dtype is None:
+        return _nx.power(base, y)
+    return _nx.power(base, y).astype(dtype)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/generate_numpy_api.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/generate_numpy_api.py
new file mode 100644
index 0000000000000..415cbf7fcd00c
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/generate_numpy_api.py
@@ -0,0 +1,259 @@
+from __future__ import division, print_function
+
+import os
+import genapi
+
+from genapi import \
+        TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
+
+import numpy_api
+
+# use annotated api when running under cpychecker
+h_template = r"""
+#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
+
+typedef struct {
+        PyObject_HEAD
+        npy_bool obval;
+} PyBoolScalarObject;
+
+#ifdef NPY_ENABLE_SEPARATE_COMPILATION
+extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
+extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+#else
+NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
+NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
+NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+#endif
+
+%s
+
+#else
+
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+extern void **PyArray_API;
+#else
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+void **PyArray_API;
+#else
+static void **PyArray_API=NULL;
+#endif
+#endif
+
+%s
+
+#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
+static int
+_import_array(void)
+{
+  int st;
+  PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
+  PyObject *c_api = NULL;
+
+  if (numpy == NULL) {
+      PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
+      return -1;
+  }
+  c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
+  Py_DECREF(numpy);
+  if (c_api == NULL) {
+      PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
+      return -1;
+  }
+
+#if PY_VERSION_HEX >= 0x03000000
+  if (!PyCapsule_CheckExact(c_api)) {
+      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
+      Py_DECREF(c_api);
+      return -1;
+  }
+  PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+#else
+  if (!PyCObject_Check(c_api)) {
+      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
+      Py_DECREF(c_api);
+      return -1;
+  }
+  PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
+#endif
+  Py_DECREF(c_api);
+  if (PyArray_API == NULL) {
+      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
+      return -1;
+  }
+
+  /* Perform runtime check of C API version */
+  if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
+      PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+             "ABI version %%x but this version of numpy is %%x", \
+             (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
+      return -1;
+  }
+  if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
+      PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+             "API version %%x but this version of numpy is %%x", \
+             (int) NPY_FEATURE_VERSION,
(int) PyArray_GetNDArrayCFeatureVersion()); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "big endian, but detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "little endian, but detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#if PY_VERSION_HEX >= 0x03000000 +#define NUMPY_IMPORT_ARRAY_RETVAL NULL +#else +#define NUMPY_IMPORT_ARRAY_RETVAL +#endif + +#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } + +#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } + +#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } + +#endif + +#endif +""" + + +c_template = r""" +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyArray_API[] = { +%s +}; +""" + +c_api_header = """ +=========== +Numpy C-API +=========== +""" + +def generate_api(output_dir, force=False): + basename = 'multiarray_api' + + h_file = os.path.join(output_dir, '__%s.h' % basename) + c_file = os.path.join(output_dir, '__%s.c' % basename) + d_file = os.path.join(output_dir, '%s.txt' % basename) + targets = (h_file, c_file, d_file) + + sources = numpy_api.multiarray_api + + if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])): + return targets + else: + do_generate_api(targets, sources) + + return targets + +def do_generate_api(targets, sources): + header_file = targets[0] + c_file = targets[1] + doc_file = targets[2] + + global_vars = sources[0] + scalar_bool_values = sources[1] + types_api = sources[2] + multiarray_funcs = sources[3] + + multiarray_api = sources[:] + + module_list = [] + extension_list = [] + init_list = [] + + # Check multiarray api indexes + multiarray_api_index = genapi.merge_api_dicts(multiarray_api) + genapi.check_api_dict(multiarray_api_index) + + numpyapi_list = genapi.get_api_functions('NUMPY_API', + multiarray_funcs) + ordered_funcs_api = genapi.order_dict(multiarray_funcs) + + # Create dict name -> *Api instance + api_name = 'PyArray_API' + multiarray_api_dict = {} + for f in numpyapi_list: + name = f.name + index = multiarray_funcs[name][0] + annotations = multiarray_funcs[name][1:] + multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations, + f.return_type, + f.args, api_name) + + for name, val in global_vars.items(): + index, type = val + multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name) + + for name, val in scalar_bool_values.items(): + index = val[0] + multiarray_api_dict[name] = BoolValuesApi(name, index, api_name) + + for name, val in types_api.items(): + index = val[0] + multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) + + if len(multiarray_api_dict) != 
len(multiarray_api_index): + raise AssertionError("Multiarray API size mismatch %d %d" % + (len(multiarray_api_dict), len(multiarray_api_index))) + + extension_list = [] + for name, index in genapi.order_dict(multiarray_api_index): + api_item = multiarray_api_dict[name] + extension_list.append(api_item.define_from_array_api_string()) + init_list.append(api_item.array_api_define()) + module_list.append(api_item.internal_define()) + + # Write to header + fid = open(header_file, 'w') + s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) + fid.write(s) + fid.close() + + # Write to c-code + fid = open(c_file, 'w') + s = c_template % ',\n'.join(init_list) + fid.write(s) + fid.close() + + # write to documentation + fid = open(doc_file, 'w') + fid.write(c_api_header) + for func in numpyapi_list: + fid.write(func.to_ReST()) + fid.write('\n\n') + fid.close() + + return targets diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/getlimits.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/getlimits.py new file mode 100644 index 0000000000000..165ea68604933 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/getlimits.py @@ -0,0 +1,306 @@ +"""Machine limits for Float32 and Float64 and (long double) if available... + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['finfo', 'iinfo'] + +from .machar import MachAr +from . import numeric +from . import numerictypes as ntypes +from .numeric import array + +def _frz(a): + """fix rank-0 --> rank-1""" + if a.ndim == 0: a.shape = (1,) + return a + +_convert_to_float = { + ntypes.csingle: ntypes.single, + ntypes.complex_: ntypes.float_, + ntypes.clongfloat: ntypes.longfloat + } + +class finfo(object): + """ + finfo(dtype) + + Machine limits for floating point types. + + Attributes + ---------- + eps : float + The smallest representable positive number such that + ``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating + point type. + epsneg : floating point number of the appropriate type + The smallest representable positive number such that + ``1.0 - epsneg != 1.0``. + iexp : int + The number of bits in the exponent portion of the floating point + representation. + machar : MachAr + The object which calculated these parameters and holds more + detailed information. + machep : int + The exponent that yields `eps`. + max : floating point number of the appropriate type + The largest representable number. + maxexp : int + The smallest positive power of the base (2) that causes overflow. + min : floating point number of the appropriate type + The smallest representable number, typically ``-max``. + minexp : int + The most negative power of the base (2) consistent with there + being no leading 0's in the mantissa. + negep : int + The exponent that yields `epsneg`. + nexp : int + The number of bits in the exponent including its sign and bias. + nmant : int + The number of bits in the mantissa. + precision : int + The approximate number of decimal digits to which this kind of + float is precise. + resolution : floating point number of the appropriate type + The approximate decimal resolution of this type, i.e., + ``10**-precision``. + tiny : float + The smallest positive usable number. Type of `tiny` is an + appropriate floating point type. + + Parameters + ---------- + dtype : float, dtype, or instance + Kind of floating point data-type about which to get information. + + See Also + -------- + MachAr : The implementation of the tests that produce this information. 
+ iinfo : The equivalent for integer data types. + + Notes + ----- + For developers of NumPy: do not instantiate this at the module level. + The initial calculation of these parameters is expensive and negatively + impacts import times. These objects are cached, so calling ``finfo()`` + repeatedly inside your functions is not a problem. + + """ + + _finfo_cache = {} + + def __new__(cls, dtype): + try: + dtype = numeric.dtype(dtype) + except TypeError: + # In case a float instance was given + dtype = numeric.dtype(type(dtype)) + + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + dtypes = [dtype] + newdtype = numeric.obj2sctype(dtype) + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + if not issubclass(dtype, numeric.inexact): + raise ValueError("data type %r not inexact" % (dtype)) + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + if not issubclass(dtype, numeric.floating): + newdtype = _convert_to_float[dtype] + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + obj = object.__new__(cls)._init(dtype) + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + + def _init(self, dtype): + self.dtype = numeric.dtype(dtype) + if dtype is ntypes.double: + itype = ntypes.int64 + fmt = '%24.16e' + precname = 'double' + elif dtype is ntypes.single: + itype = ntypes.int32 + fmt = '%15.7e' + precname = 'single' + elif dtype is ntypes.longdouble: + itype = ntypes.longlong + fmt = '%s' + precname = 'long double' + elif dtype is ntypes.half: + itype = ntypes.int16 + fmt = '%12.5e' + precname = 'half' + else: + raise ValueError(repr(dtype)) + + machar = MachAr(lambda v:array([v], dtype), + lambda v:_frz(v.astype(itype))[0], + lambda v:array(_frz(v)[0], dtype), + lambda v: fmt % array(_frz(v)[0], dtype), + 'numpy %s precision floating point number' % precname) + + for word in ['precision', 'iexp', + 'maxexp', 'minexp', 'negep', + 'machep']: + setattr(self, word, getattr(machar, word)) + for word in ['tiny', 'resolution', 'epsneg']: + setattr(self, word, getattr(machar, word).flat[0]) + self.max = machar.huge.flat[0] + self.min = -self.max + self.eps = machar.eps.flat[0] + self.nexp = machar.iexp + self.nmant = machar.it + self.machar = machar + self._str_tiny = machar._str_xmin.strip() + self._str_max = machar._str_xmax.strip() + self._str_epsneg = machar._str_epsneg.strip() + self._str_eps = machar._str_eps.strip() + self._str_resolution = machar._str_resolution.strip() + return self + + def __str__(self): + return '''\ +Machine parameters for %(dtype)s +--------------------------------------------------------------------- +precision=%(precision)3s resolution= %(_str_resolution)s +machep=%(machep)6s eps= %(_str_eps)s +negep =%(negep)6s epsneg= %(_str_epsneg)s +minexp=%(minexp)6s tiny= %(_str_tiny)s +maxexp=%(maxexp)6s max= %(_str_max)s +nexp =%(nexp)6s min= -max +--------------------------------------------------------------------- +''' % self.__dict__ + + def __repr__(self): + c = self.__class__.__name__ + d = self.__dict__.copy() + d['klass'] = c + return ("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," \ + + " max=%(_str_max)s, dtype=%(dtype)s)") \ + % d + + +class iinfo(object): + """ + iinfo(type) + + Machine limits for integer types. + + Attributes + ---------- + min : int + The smallest integer expressible by the type. + max : int + The largest integer expressible by the type. 
+ + Parameters + ---------- + type : integer type, dtype, or instance + The kind of integer data type to get information about. + + See Also + -------- + finfo : The equivalent for floating point data types. + + Examples + -------- + With types: + + >>> ii16 = np.iinfo(np.int16) + >>> ii16.min + -32768 + >>> ii16.max + 32767 + >>> ii32 = np.iinfo(np.int32) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + With instances: + + >>> ii32 = np.iinfo(np.int32(10)) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + """ + + _min_vals = {} + _max_vals = {} + + def __init__(self, int_type): + try: + self.dtype = numeric.dtype(int_type) + except TypeError: + self.dtype = numeric.dtype(type(int_type)) + self.kind = self.dtype.kind + self.bits = self.dtype.itemsize * 8 + self.key = "%s%d" % (self.kind, self.bits) + if not self.kind in 'iu': + raise ValueError("Invalid integer data type.") + + def min(self): + """Minimum value of given dtype.""" + if self.kind == 'u': + return 0 + else: + try: + val = iinfo._min_vals[self.key] + except KeyError: + val = int(-(1 << (self.bits-1))) + iinfo._min_vals[self.key] = val + return val + + min = property(min) + + def max(self): + """Maximum value of given dtype.""" + try: + val = iinfo._max_vals[self.key] + except KeyError: + if self.kind == 'u': + val = int((1 << self.bits) - 1) + else: + val = int((1 << (self.bits-1)) - 1) + iinfo._max_vals[self.key] = val + return val + + max = property(max) + + def __str__(self): + """String representation.""" + return '''\ +Machine parameters for %(dtype)s +--------------------------------------------------------------------- +min = %(min)s +max = %(max)s +--------------------------------------------------------------------- +''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + def __repr__(self): + return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, + self.min, self.max, self.dtype) + +if __name__ == '__main__': + f = finfo(ntypes.single) + print('single epsilon:', f.eps) + print('single tiny:', f.tiny) + f = finfo(ntypes.float) + print('float epsilon:', f.eps) + print('float tiny:', f.tiny) + f = finfo(ntypes.longfloat) + print('longfloat epsilon:', f.eps) + print('longfloat tiny:', f.tiny) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__multiarray_api.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__multiarray_api.h new file mode 100644 index 0000000000000..b95762c4a43cb --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__multiarray_api.h @@ -0,0 +1,1721 @@ + +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION +extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; +#else +NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; +NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; +#endif + +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ + (void); +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type; +#else + NPY_NO_EXPORT PyTypeObject PyBigArray_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyArray_Type; +#else + NPY_NO_EXPORT PyTypeObject PyArray_Type; 
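+/*
+ * Editor's note, not part of the generated header: every block in this
+ * section repeats the same pattern.  With NPY_ENABLE_SEPARATE_COMPILATION
+ * the multiarray module is built from several translation units, so each
+ * symbol is defined once in the module and only declared `extern` here;
+ * in the single-translation-unit build the plain declaration doubles as
+ * the definition.  NPY_NO_EXPORT keeps the symbols out of the shared
+ * library's public symbol table either way.
+ */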
+#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type; +#else + NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; +#else + NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; +#else + NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; +#else + NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; +#else + NPY_NO_EXPORT int NPY_NUMUSERTYPES; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; +#else +NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; +#endif + +#ifdef 
NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; +#endif + +NPY_NO_EXPORT int PyArray_SetNumericOps \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \ + (void); +NPY_NO_EXPORT int PyArray_INCREF \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_XDECREF \ + (PyArrayObject *); +NPY_NO_EXPORT void PyArray_SetStringFunction \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ + (int); +NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ + (int); +NPY_NO_EXPORT char * PyArray_Zero \ + (PyArrayObject *); +NPY_NO_EXPORT char * PyArray_One \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_CastToType \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_CastTo \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CastAnyTo \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CanCastSafely \ + (int, int); 
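+/*
+ * Editor's sketch, an assumption rather than part of the generated header:
+ * third-party extensions never compile this branch of the file; they get
+ * the PyArray_API[] macros from the #else branch further down, and must
+ * call import_array() in their module init before touching any of these
+ * names.  A hypothetical minimal consumer ("demo" is an invented name):
+ *
+ *     #include <numpy/arrayobject.h>
+ *
+ *     static struct PyModuleDef moddef = {
+ *         PyModuleDef_HEAD_INIT, "demo", NULL, -1, NULL
+ *     };
+ *
+ *     PyMODINIT_FUNC PyInit_demo(void)
+ *     {
+ *         import_array();   // fills PyArray_API; returns NULL on failure
+ *         // safe to use the table now, e.g. the cast query just above:
+ *         if (PyArray_CanCastSafely(NPY_INT, NPY_DOUBLE)) {
+ *             // int -> double never loses information
+ *         }
+ *         return PyModule_Create(&moddef);
+ *     }
+ */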
+NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_ObjectType \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ + (PyObject *, int *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ + (PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_Size \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Scalar \ + (void *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_ScalarAsCtype \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_CastScalarToCtype \ + (PyObject *, void *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_CastScalarDirect \ + (PyObject *, PyArray_Descr *, void *, int); +NPY_NO_EXPORT PyObject * PyArray_ScalarFromObject \ + (PyObject *); +NPY_NO_EXPORT PyArray_VectorUnaryFunc * PyArray_GetCastFunc \ + (PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromDims \ + (int, int *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \ + (int, int *, PyArray_Descr *, char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ + (PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromFile \ + (FILE *, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromString \ + (char *, npy_intp, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ + (PyObject *, PyArray_Descr *, npy_intp, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ + (PyObject *, PyArray_Descr *, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_GetField \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) int PyArray_SetField \ + (PyArrayObject *, PyArray_Descr *, int, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Byteswap \ + (PyArrayObject *, npy_bool); +NPY_NO_EXPORT PyObject * PyArray_Resize \ + (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER); +NPY_NO_EXPORT int PyArray_MoveInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyAnyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_NewCopy \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_ToList \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ToString \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT int PyArray_ToFile \ + (PyArrayObject *, FILE *, char *, char *); +NPY_NO_EXPORT int PyArray_Dump \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Dumps \ + (PyObject *, int); +NPY_NO_EXPORT int PyArray_ValidType \ + (int); +NPY_NO_EXPORT void PyArray_UpdateFlags \ + (PyArrayObject *, int); +NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_New \ + (PyTypeObject *, int, npy_intp *, int, npy_intp *, void *, int, 
int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(1) NPY_GCC_NONNULL(2) PyObject * PyArray_NewFromDescr \ + (PyTypeObject *, PyArray_Descr *, int, npy_intp *, npy_intp *, void *, int, PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ + (PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ + (int); +NPY_NO_EXPORT double PyArray_GetPriority \ + (PyObject *, double); +NPY_NO_EXPORT PyObject * PyArray_IterNew \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MultiIterNew \ + (int, ...); +NPY_NO_EXPORT int PyArray_PyIntAsInt \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ + (PyObject *); +NPY_NO_EXPORT int PyArray_Broadcast \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT void PyArray_FillObjectArray \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT int PyArray_FillWithScalar \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ + (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ + (PyArray_Descr *, char); +NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ + (PyObject *, int *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ + (PyObject *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ + (int, PyArrayObject **); +NPY_NO_EXPORT int PyArray_CanCoerceScalar \ + (int, int, NPY_SCALARKIND); +NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \ + (PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ + (PyTypeObject *, PyTypeObject *); +NPY_NO_EXPORT int PyArray_CompareUCS4 \ + (npy_ucs4 *, npy_ucs4 *, size_t); +NPY_NO_EXPORT int PyArray_RemoveSmallest \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_ElementStrides \ + (PyObject *); +NPY_NO_EXPORT void PyArray_Item_INCREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_Item_XDECREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT PyObject * PyArray_FieldNames \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Transpose \ + (PyArrayObject *, PyArray_Dims *); +NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ + (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutTo \ + (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutMask \ + (PyArrayObject *, PyObject*, PyObject*); +NPY_NO_EXPORT PyObject * PyArray_Repeat \ + (PyArrayObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Choose \ + (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT int PyArray_Sort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgSort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ + (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMax \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMin \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Reshape \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Newshape \ + (PyArrayObject *, PyArray_Dims *, NPY_ORDER); 
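+/*
+ * Editor's note, not part of the generated header: the
+ * NPY_STEALS_REF_TO_ARG(n) markers above annotate, for cpychecker, the
+ * functions that take ownership of (steal) their n-th argument.
+ * PyArray_FromAny, for instance, steals the descriptor, so a caller that
+ * wants to keep using its descr must take an extra reference first
+ * (sketch under that assumption):
+ *
+ *     Py_INCREF(descr);  // PyArray_FromAny consumes one reference to descr
+ *     PyObject *arr = PyArray_FromAny(obj, descr, 0, 0, NPY_ARRAY_CARRAY, NULL);
+ *     // ... descr is still a valid reference here ...
+ */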
+NPY_NO_EXPORT PyObject * PyArray_Squeeze \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ + (PyArrayObject *, PyArray_Descr *, PyTypeObject *); +NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ + (PyArrayObject *, int, int); +NPY_NO_EXPORT PyObject * PyArray_Max \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Min \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Ptp \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Mean \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Trace \ + (PyArrayObject *, int, int, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Diagonal \ + (PyArrayObject *, int, int, int); +NPY_NO_EXPORT PyObject * PyArray_Clip \ + (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Conjugate \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Nonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Std \ + (PyArrayObject *, int, int, PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Sum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumSum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Prod \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumProd \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_All \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Any \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Compress \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Flatten \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Ravel \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ + (npy_intp *, int); +NPY_NO_EXPORT int PyArray_MultiplyIntList \ + (int *, int); +NPY_NO_EXPORT void * PyArray_GetPtr \ + (PyArrayObject *, npy_intp*); +NPY_NO_EXPORT int PyArray_CompareLists \ + (npy_intp *, npy_intp *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ + (PyObject **, void *, npy_intp *, int, PyArray_Descr*); +NPY_NO_EXPORT int PyArray_As1D \ + (PyObject **, char **, int *, int); +NPY_NO_EXPORT int PyArray_As2D \ + (PyObject **, char ***, int *, int *, int); +NPY_NO_EXPORT int PyArray_Free \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_Converter \ + (PyObject *, PyObject **); +NPY_NO_EXPORT int PyArray_IntpFromSequence \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT PyObject * PyArray_Concatenate \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Correlate \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT int PyArray_TypestrConvert \ + (int, int); +NPY_NO_EXPORT int PyArray_DescrConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_IntpConverter \ + (PyObject *, PyArray_Dims *); +NPY_NO_EXPORT int PyArray_BufferConverter \ + (PyObject *, PyArray_Chunk *); +NPY_NO_EXPORT int PyArray_AxisConverter \ + (PyObject *, int *); +NPY_NO_EXPORT int PyArray_BoolConverter \ + (PyObject *, npy_bool *); +NPY_NO_EXPORT int 
PyArray_ByteorderConverter \ + (PyObject *, char *); +NPY_NO_EXPORT int PyArray_OrderConverter \ + (PyObject *, NPY_ORDER *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ + (int, npy_intp *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ + (int, npy_intp *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_Where \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Arange \ + (double, double, double, int); +NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ + (PyObject *, PyObject *, PyObject *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_SortkindConverter \ + (PyObject *, NPY_SORTKIND *); +NPY_NO_EXPORT PyObject * PyArray_LexSort \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Round \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ + (int, int); +NPY_NO_EXPORT int PyArray_RegisterDataType \ + (PyArray_Descr *); +NPY_NO_EXPORT int PyArray_RegisterCastFunc \ + (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); +NPY_NO_EXPORT int PyArray_RegisterCanCast \ + (PyArray_Descr *, int, NPY_SCALARKIND); +NPY_NO_EXPORT void PyArray_InitArrFuncs \ + (PyArray_ArrFuncs *); +NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ + (int, npy_intp *); +NPY_NO_EXPORT int PyArray_TypeNumFromName \ + (char *); +NPY_NO_EXPORT int PyArray_ClipmodeConverter \ + (PyObject *, NPY_CLIPMODE *); +NPY_NO_EXPORT int PyArray_OutputConverter \ + (PyObject *, PyArrayObject **); +NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT void _PyArray_SigintHandler \ + (int); +NPY_NO_EXPORT void* _PyArray_GetSigintBuf \ + (void); +NPY_NO_EXPORT int PyArray_DescrAlignConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_SearchsideConverter \ + (PyObject *, void *); +NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ + (PyArrayObject *, int *, int); +NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ + (npy_intp *, int); +NPY_NO_EXPORT int PyArray_CompareString \ + (char *, char *, size_t); +NPY_NO_EXPORT PyObject * PyArray_MultiIterFromObjects \ + (PyObject **, int, int, ...); +NPY_NO_EXPORT int PyArray_GetEndianness \ + (void); +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ + (void); +NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ + (PyArrayIterObject *, npy_intp *, int, PyArrayObject*); +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; +#else + NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; +#else + NPY_NO_EXPORT PyTypeObject NpyIter_Type; +#endif + +NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \ 
+ (PyObject *); +NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \ + (npy_datetime, NPY_DATETIMEUNIT, npy_datetimestruct *); +NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \ + (npy_timedelta, NPY_DATETIMEUNIT, npy_timedeltastruct *); +NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \ + (NPY_DATETIMEUNIT, npy_datetimestruct *); +NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \ + (NPY_DATETIMEUNIT, npy_timedeltastruct *); +NPY_NO_EXPORT NpyIter * NpyIter_New \ + (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); +NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); +NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); +NPY_NO_EXPORT NpyIter * NpyIter_Copy \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Deallocate \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Reset \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_ResetBasePointers \ + (NpyIter *, char **, char **); +NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ + (NpyIter *, npy_intp, npy_intp, char **); +NPY_NO_EXPORT int NpyIter_GetNDim \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetNOp \ + (NpyIter *); +NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ + (NpyIter *, char **); +NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ + (NpyIter *, npy_intp *, npy_intp *); +NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIterIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetShape \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ + (NpyIter *); +NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT void NpyIter_GetReadFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_GetWriteFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_DebugPrint \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveAxis \ + (NpyIter *, int); +NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ + (NpyIter *, int); +NPY_NO_EXPORT npy_bool 
NpyIter_RequiresBuffering \ + (NpyIter *); +NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ + (NpyIter *, npy_intp, npy_intp *); +NPY_NO_EXPORT int PyArray_CastingConverter \ + (PyObject *, NPY_CASTING *); +NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ + (npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **); +NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ + (PyArrayObject *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ + (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ + (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) NPY_GCC_NONNULL(1) PyObject * PyArray_NewLikeArray \ + (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \ + (PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *); +NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ + (PyObject *, NPY_CLIPMODE *, int); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ + (PyObject *, PyObject *, PyArrayObject*); +NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ + (NpyIter *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ + (int, npy_intp *, npy_stride_sort_item *); +NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ + (PyArrayObject *, npy_bool *); +NPY_NO_EXPORT void PyArray_DebugPrint \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ + (PyArrayObject *, const char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT void * PyDataMem_NEW \ + (size_t); +NPY_NO_EXPORT void PyDataMem_FREE \ + (void *); +NPY_NO_EXPORT void * PyDataMem_RENEW \ + (void *, size_t); +NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \ + (PyDataMem_EventHookFunc *, void *, void **); +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; +#else + NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; +#endif + +NPY_NO_EXPORT void PyArray_MapIterSwapAxes \ + (PyArrayMapIterObject *, PyArrayObject **, int); +NPY_NO_EXPORT PyObject * PyArray_MapIterArray \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_MapIterNext \ + (PyArrayMapIterObject *); +NPY_NO_EXPORT int PyArray_Partition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT int PyArray_SelectkindConverter \ + (PyObject *, NPY_SELECTKIND *); +NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ + (size_t, size_t); + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern void **PyArray_API; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +void **PyArray_API; +#else +static void **PyArray_API=NULL; +#endif +#endif + +#define PyArray_GetNDArrayCVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[0]) +#define PyBigArray_Type 
(*(PyTypeObject *)PyArray_API[1]) +#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) +#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) +#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) +#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) +#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) +#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) +#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) +#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) +#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) +#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) +#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) +#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) +#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) +#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) +#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) +#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) +#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) +#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) +#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) +#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) +#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) +#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) +#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) +#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) +#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) +#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) +#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) +#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) +#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) +#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) +#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) +#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) +#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) +#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) +#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) +#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) +#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) +#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) +#define PyArray_SetNumericOps \ + (*(int (*)(PyObject *)) \ + PyArray_API[40]) +#define PyArray_GetNumericOps \ + (*(PyObject * (*)(void)) \ + PyArray_API[41]) +#define PyArray_INCREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[42]) +#define PyArray_XDECREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[43]) +#define PyArray_SetStringFunction \ + (*(void (*)(PyObject *, int)) \ + PyArray_API[44]) +#define PyArray_DescrFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[45]) +#define PyArray_TypeObjectFromType \ + (*(PyObject * (*)(int)) \ + PyArray_API[46]) +#define PyArray_Zero \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[47]) +#define PyArray_One \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[48]) +#define PyArray_CastToType \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[49]) +#define PyArray_CastTo \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[50]) +#define PyArray_CastAnyTo \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[51]) 
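+/*
+ * Editor's note, not part of the generated header: in this branch each API
+ * name is a macro that casts one slot of the imported function-pointer
+ * table and calls through it.  Given the define that follows, a call like
+ *
+ *     PyArray_CanCastSafely(NPY_INT, NPY_DOUBLE)
+ *
+ * expands to
+ *
+ *     (*(int (*)(int, int)) PyArray_API[52])(NPY_INT, NPY_DOUBLE)
+ *
+ * so the slot numbers must stay in lockstep with the order in which
+ * generate_numpy_api.py emits the table initializer in __multiarray_api.c.
+ */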
+#define PyArray_CanCastSafely \ + (*(int (*)(int, int)) \ + PyArray_API[52]) +#define PyArray_CanCastTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[53]) +#define PyArray_ObjectType \ + (*(int (*)(PyObject *, int)) \ + PyArray_API[54]) +#define PyArray_DescrFromObject \ + (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[55]) +#define PyArray_ConvertToCommonType \ + (*(PyArrayObject ** (*)(PyObject *, int *)) \ + PyArray_API[56]) +#define PyArray_DescrFromScalar \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[57]) +#define PyArray_DescrFromTypeObject \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[58]) +#define PyArray_Size \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[59]) +#define PyArray_Scalar \ + (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ + PyArray_API[60]) +#define PyArray_FromScalar \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[61]) +#define PyArray_ScalarAsCtype \ + (*(void (*)(PyObject *, void *)) \ + PyArray_API[62]) +#define PyArray_CastScalarToCtype \ + (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ + PyArray_API[63]) +#define PyArray_CastScalarDirect \ + (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ + PyArray_API[64]) +#define PyArray_ScalarFromObject \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[65]) +#define PyArray_GetCastFunc \ + (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \ + PyArray_API[66]) +#define PyArray_FromDims \ + (*(PyObject * (*)(int, int *, int)) \ + PyArray_API[67]) +#define PyArray_FromDimsAndDataAndDescr \ + (*(PyObject * (*)(int, int *, PyArray_Descr *, char *)) \ + PyArray_API[68]) +#define PyArray_FromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[69]) +#define PyArray_EnsureArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[70]) +#define PyArray_EnsureAnyArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[71]) +#define PyArray_FromFile \ + (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[72]) +#define PyArray_FromString \ + (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[73]) +#define PyArray_FromBuffer \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ + PyArray_API[74]) +#define PyArray_FromIter \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ + PyArray_API[75]) +#define PyArray_Return \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[76]) +#define PyArray_GetField \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[77]) +#define PyArray_SetField \ + (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ + PyArray_API[78]) +#define PyArray_Byteswap \ + (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ + PyArray_API[79]) +#define PyArray_Resize \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER)) \ + PyArray_API[80]) +#define PyArray_MoveInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[81]) +#define PyArray_CopyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[82]) +#define PyArray_CopyAnyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[83]) +#define PyArray_CopyObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[84]) +#define PyArray_NewCopy \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[85]) +#define PyArray_ToList \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[86]) +#define 
PyArray_ToString \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[87]) +#define PyArray_ToFile \ + (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ + PyArray_API[88]) +#define PyArray_Dump \ + (*(int (*)(PyObject *, PyObject *, int)) \ + PyArray_API[89]) +#define PyArray_Dumps \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[90]) +#define PyArray_ValidType \ + (*(int (*)(int)) \ + PyArray_API[91]) +#define PyArray_UpdateFlags \ + (*(void (*)(PyArrayObject *, int)) \ + PyArray_API[92]) +#define PyArray_New \ + (*(PyObject * (*)(PyTypeObject *, int, npy_intp *, int, npy_intp *, void *, int, int, PyObject *)) \ + PyArray_API[93]) +#define PyArray_NewFromDescr \ + (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp *, npy_intp *, void *, int, PyObject *)) \ + PyArray_API[94]) +#define PyArray_DescrNew \ + (*(PyArray_Descr * (*)(PyArray_Descr *)) \ + PyArray_API[95]) +#define PyArray_DescrNewFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[96]) +#define PyArray_GetPriority \ + (*(double (*)(PyObject *, double)) \ + PyArray_API[97]) +#define PyArray_IterNew \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[98]) +#define PyArray_MultiIterNew \ + (*(PyObject * (*)(int, ...)) \ + PyArray_API[99]) +#define PyArray_PyIntAsInt \ + (*(int (*)(PyObject *)) \ + PyArray_API[100]) +#define PyArray_PyIntAsIntp \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[101]) +#define PyArray_Broadcast \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[102]) +#define PyArray_FillObjectArray \ + (*(void (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[103]) +#define PyArray_FillWithScalar \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[104]) +#define PyArray_CheckStrides \ + (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)) \ + PyArray_API[105]) +#define PyArray_DescrNewByteorder \ + (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ + PyArray_API[106]) +#define PyArray_IterAllButAxis \ + (*(PyObject * (*)(PyObject *, int *)) \ + PyArray_API[107]) +#define PyArray_CheckFromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[108]) +#define PyArray_FromArray \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[109]) +#define PyArray_FromInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[110]) +#define PyArray_FromStructInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[111]) +#define PyArray_FromArrayAttr \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ + PyArray_API[112]) +#define PyArray_ScalarKind \ + (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ + PyArray_API[113]) +#define PyArray_CanCoerceScalar \ + (*(int (*)(int, int, NPY_SCALARKIND)) \ + PyArray_API[114]) +#define PyArray_NewFlagsObject \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[115]) +#define PyArray_CanCastScalar \ + (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ + PyArray_API[116]) +#define PyArray_CompareUCS4 \ + (*(int (*)(npy_ucs4 *, npy_ucs4 *, size_t)) \ + PyArray_API[117]) +#define PyArray_RemoveSmallest \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[118]) +#define PyArray_ElementStrides \ + (*(int (*)(PyObject *)) \ + PyArray_API[119]) +#define PyArray_Item_INCREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[120]) +#define PyArray_Item_XDECREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[121]) +#define PyArray_FieldNames \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[122]) +#define PyArray_Transpose 
\ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ + PyArray_API[123]) +#define PyArray_TakeFrom \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[124]) +#define PyArray_PutTo \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ + PyArray_API[125]) +#define PyArray_PutMask \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ + PyArray_API[126]) +#define PyArray_Repeat \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ + PyArray_API[127]) +#define PyArray_Choose \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[128]) +#define PyArray_Sort \ + (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[129]) +#define PyArray_ArgSort \ + (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[130]) +#define PyArray_SearchSorted \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ + PyArray_API[131]) +#define PyArray_ArgMax \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[132]) +#define PyArray_ArgMin \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[133]) +#define PyArray_Reshape \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[134]) +#define PyArray_Newshape \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ + PyArray_API[135]) +#define PyArray_Squeeze \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[136]) +#define PyArray_View \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ + PyArray_API[137]) +#define PyArray_SwapAxes \ + (*(PyObject * (*)(PyArrayObject *, int, int)) \ + PyArray_API[138]) +#define PyArray_Max \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[139]) +#define PyArray_Min \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[140]) +#define PyArray_Ptp \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[141]) +#define PyArray_Mean \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[142]) +#define PyArray_Trace \ + (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ + PyArray_API[143]) +#define PyArray_Diagonal \ + (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ + PyArray_API[144]) +#define PyArray_Clip \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ + PyArray_API[145]) +#define PyArray_Conjugate \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[146]) +#define PyArray_Nonzero \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[147]) +#define PyArray_Std \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ + PyArray_API[148]) +#define PyArray_Sum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[149]) +#define PyArray_CumSum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[150]) +#define PyArray_Prod \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[151]) +#define PyArray_CumProd \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[152]) +#define PyArray_All \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[153]) +#define PyArray_Any \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[154]) +#define PyArray_Compress \ + (*(PyObject * (*)(PyArrayObject *, 
PyObject *, int, PyArrayObject *)) \ + PyArray_API[155]) +#define PyArray_Flatten \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[156]) +#define PyArray_Ravel \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[157]) +#define PyArray_MultiplyList \ + (*(npy_intp (*)(npy_intp *, int)) \ + PyArray_API[158]) +#define PyArray_MultiplyIntList \ + (*(int (*)(int *, int)) \ + PyArray_API[159]) +#define PyArray_GetPtr \ + (*(void * (*)(PyArrayObject *, npy_intp*)) \ + PyArray_API[160]) +#define PyArray_CompareLists \ + (*(int (*)(npy_intp *, npy_intp *, int)) \ + PyArray_API[161]) +#define PyArray_AsCArray \ + (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ + PyArray_API[162]) +#define PyArray_As1D \ + (*(int (*)(PyObject **, char **, int *, int)) \ + PyArray_API[163]) +#define PyArray_As2D \ + (*(int (*)(PyObject **, char ***, int *, int *, int)) \ + PyArray_API[164]) +#define PyArray_Free \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[165]) +#define PyArray_Converter \ + (*(int (*)(PyObject *, PyObject **)) \ + PyArray_API[166]) +#define PyArray_IntpFromSequence \ + (*(int (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[167]) +#define PyArray_Concatenate \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[168]) +#define PyArray_InnerProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[169]) +#define PyArray_MatrixProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[170]) +#define PyArray_CopyAndTranspose \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[171]) +#define PyArray_Correlate \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[172]) +#define PyArray_TypestrConvert \ + (*(int (*)(int, int)) \ + PyArray_API[173]) +#define PyArray_DescrConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[174]) +#define PyArray_DescrConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[175]) +#define PyArray_IntpConverter \ + (*(int (*)(PyObject *, PyArray_Dims *)) \ + PyArray_API[176]) +#define PyArray_BufferConverter \ + (*(int (*)(PyObject *, PyArray_Chunk *)) \ + PyArray_API[177]) +#define PyArray_AxisConverter \ + (*(int (*)(PyObject *, int *)) \ + PyArray_API[178]) +#define PyArray_BoolConverter \ + (*(int (*)(PyObject *, npy_bool *)) \ + PyArray_API[179]) +#define PyArray_ByteorderConverter \ + (*(int (*)(PyObject *, char *)) \ + PyArray_API[180]) +#define PyArray_OrderConverter \ + (*(int (*)(PyObject *, NPY_ORDER *)) \ + PyArray_API[181]) +#define PyArray_EquivTypes \ + (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[182]) +#define PyArray_Zeros \ + (*(PyObject * (*)(int, npy_intp *, PyArray_Descr *, int)) \ + PyArray_API[183]) +#define PyArray_Empty \ + (*(PyObject * (*)(int, npy_intp *, PyArray_Descr *, int)) \ + PyArray_API[184]) +#define PyArray_Where \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ + PyArray_API[185]) +#define PyArray_Arange \ + (*(PyObject * (*)(double, double, double, int)) \ + PyArray_API[186]) +#define PyArray_ArangeObj \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ + PyArray_API[187]) +#define PyArray_SortkindConverter \ + (*(int (*)(PyObject *, NPY_SORTKIND *)) \ + PyArray_API[188]) +#define PyArray_LexSort \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[189]) +#define PyArray_Round \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[190]) +#define PyArray_EquivTypenums \ + (*(unsigned char (*)(int, int)) \ + 
PyArray_API[191]) +#define PyArray_RegisterDataType \ + (*(int (*)(PyArray_Descr *)) \ + PyArray_API[192]) +#define PyArray_RegisterCastFunc \ + (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ + PyArray_API[193]) +#define PyArray_RegisterCanCast \ + (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ + PyArray_API[194]) +#define PyArray_InitArrFuncs \ + (*(void (*)(PyArray_ArrFuncs *)) \ + PyArray_API[195]) +#define PyArray_IntTupleFromIntp \ + (*(PyObject * (*)(int, npy_intp *)) \ + PyArray_API[196]) +#define PyArray_TypeNumFromName \ + (*(int (*)(char *)) \ + PyArray_API[197]) +#define PyArray_ClipmodeConverter \ + (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ + PyArray_API[198]) +#define PyArray_OutputConverter \ + (*(int (*)(PyObject *, PyArrayObject **)) \ + PyArray_API[199]) +#define PyArray_BroadcastToShape \ + (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[200]) +#define _PyArray_SigintHandler \ + (*(void (*)(int)) \ + PyArray_API[201]) +#define _PyArray_GetSigintBuf \ + (*(void* (*)(void)) \ + PyArray_API[202]) +#define PyArray_DescrAlignConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[203]) +#define PyArray_DescrAlignConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[204]) +#define PyArray_SearchsideConverter \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[205]) +#define PyArray_CheckAxis \ + (*(PyObject * (*)(PyArrayObject *, int *, int)) \ + PyArray_API[206]) +#define PyArray_OverflowMultiplyList \ + (*(npy_intp (*)(npy_intp *, int)) \ + PyArray_API[207]) +#define PyArray_CompareString \ + (*(int (*)(char *, char *, size_t)) \ + PyArray_API[208]) +#define PyArray_MultiIterFromObjects \ + (*(PyObject * (*)(PyObject **, int, int, ...)) \ + PyArray_API[209]) +#define PyArray_GetEndianness \ + (*(int (*)(void)) \ + PyArray_API[210]) +#define PyArray_GetNDArrayCFeatureVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[211]) +#define PyArray_Correlate2 \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[212]) +#define PyArray_NeighborhoodIterNew \ + (*(PyObject* (*)(PyArrayIterObject *, npy_intp *, int, PyArrayObject*)) \ + PyArray_API[213]) +#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) +#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) +#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) +#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) +#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) +#define PyArray_SetDatetimeParseFunction \ + (*(void (*)(PyObject *)) \ + PyArray_API[219]) +#define PyArray_DatetimeToDatetimeStruct \ + (*(void (*)(npy_datetime, NPY_DATETIMEUNIT, npy_datetimestruct *)) \ + PyArray_API[220]) +#define PyArray_TimedeltaToTimedeltaStruct \ + (*(void (*)(npy_timedelta, NPY_DATETIMEUNIT, npy_timedeltastruct *)) \ + PyArray_API[221]) +#define PyArray_DatetimeStructToDatetime \ + (*(npy_datetime (*)(NPY_DATETIMEUNIT, npy_datetimestruct *)) \ + PyArray_API[222]) +#define PyArray_TimedeltaStructToTimedelta \ + (*(npy_datetime (*)(NPY_DATETIMEUNIT, npy_timedeltastruct *)) \ + PyArray_API[223]) +#define NpyIter_New \ + (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ + PyArray_API[224]) +#define NpyIter_MultiNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ + PyArray_API[225]) +#define NpyIter_AdvancedNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 
*, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ + PyArray_API[226]) +#define NpyIter_Copy \ + (*(NpyIter * (*)(NpyIter *)) \ + PyArray_API[227]) +#define NpyIter_Deallocate \ + (*(int (*)(NpyIter *)) \ + PyArray_API[228]) +#define NpyIter_HasDelayedBufAlloc \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[229]) +#define NpyIter_HasExternalLoop \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[230]) +#define NpyIter_EnableExternalLoop \ + (*(int (*)(NpyIter *)) \ + PyArray_API[231]) +#define NpyIter_GetInnerStrideArray \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[232]) +#define NpyIter_GetInnerLoopSizePtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[233]) +#define NpyIter_Reset \ + (*(int (*)(NpyIter *, char **)) \ + PyArray_API[234]) +#define NpyIter_ResetBasePointers \ + (*(int (*)(NpyIter *, char **, char **)) \ + PyArray_API[235]) +#define NpyIter_ResetToIterIndexRange \ + (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ + PyArray_API[236]) +#define NpyIter_GetNDim \ + (*(int (*)(NpyIter *)) \ + PyArray_API[237]) +#define NpyIter_GetNOp \ + (*(int (*)(NpyIter *)) \ + PyArray_API[238]) +#define NpyIter_GetIterNext \ + (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ + PyArray_API[239]) +#define NpyIter_GetIterSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[240]) +#define NpyIter_GetIterIndexRange \ + (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ + PyArray_API[241]) +#define NpyIter_GetIterIndex \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[242]) +#define NpyIter_GotoIterIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[243]) +#define NpyIter_HasMultiIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[244]) +#define NpyIter_GetShape \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[245]) +#define NpyIter_GetGetMultiIndex \ + (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ + PyArray_API[246]) +#define NpyIter_GotoMultiIndex \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[247]) +#define NpyIter_RemoveMultiIndex \ + (*(int (*)(NpyIter *)) \ + PyArray_API[248]) +#define NpyIter_HasIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[249]) +#define NpyIter_IsBuffered \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[250]) +#define NpyIter_IsGrowInner \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[251]) +#define NpyIter_GetBufferSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[252]) +#define NpyIter_GetIndexPtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[253]) +#define NpyIter_GotoIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[254]) +#define NpyIter_GetDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[255]) +#define NpyIter_GetDescrArray \ + (*(PyArray_Descr ** (*)(NpyIter *)) \ + PyArray_API[256]) +#define NpyIter_GetOperandArray \ + (*(PyArrayObject ** (*)(NpyIter *)) \ + PyArray_API[257]) +#define NpyIter_GetIterView \ + (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ + PyArray_API[258]) +#define NpyIter_GetReadFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[259]) +#define NpyIter_GetWriteFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[260]) +#define NpyIter_DebugPrint \ + (*(void (*)(NpyIter *)) \ + PyArray_API[261]) +#define NpyIter_IterationNeedsAPI \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[262]) +#define NpyIter_GetInnerFixedStrideArray \ + (*(void (*)(NpyIter *, npy_intp *)) \ + PyArray_API[263]) +#define NpyIter_RemoveAxis \ + (*(int (*)(NpyIter *, int)) \ + PyArray_API[264]) +#define NpyIter_GetAxisStrideArray \ + (*(npy_intp * (*)(NpyIter *, int)) \ + 
PyArray_API[265]) +#define NpyIter_RequiresBuffering \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[266]) +#define NpyIter_GetInitialDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[267]) +#define NpyIter_CreateCompatibleStrides \ + (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ + PyArray_API[268]) +#define PyArray_CastingConverter \ + (*(int (*)(PyObject *, NPY_CASTING *)) \ + PyArray_API[269]) +#define PyArray_CountNonzero \ + (*(npy_intp (*)(PyArrayObject *)) \ + PyArray_API[270]) +#define PyArray_PromoteTypes \ + (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[271]) +#define PyArray_MinScalarType \ + (*(PyArray_Descr * (*)(PyArrayObject *)) \ + PyArray_API[272]) +#define PyArray_ResultType \ + (*(PyArray_Descr * (*)(npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **)) \ + PyArray_API[273]) +#define PyArray_CanCastArrayTo \ + (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[274]) +#define PyArray_CanCastTypeTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[275]) +#define PyArray_EinsteinSum \ + (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ + PyArray_API[276]) +#define PyArray_NewLikeArray \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ + PyArray_API[277]) +#define PyArray_GetArrayParamsFromObject \ + (*(int (*)(PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *)) \ + PyArray_API[278]) +#define PyArray_ConvertClipmodeSequence \ + (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ + PyArray_API[279]) +#define PyArray_MatrixProduct2 \ + (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ + PyArray_API[280]) +#define NpyIter_IsFirstVisit \ + (*(npy_bool (*)(NpyIter *, int)) \ + PyArray_API[281]) +#define PyArray_SetBaseObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[282]) +#define PyArray_CreateSortedStridePerm \ + (*(void (*)(int, npy_intp *, npy_stride_sort_item *)) \ + PyArray_API[283]) +#define PyArray_RemoveAxesInPlace \ + (*(void (*)(PyArrayObject *, npy_bool *)) \ + PyArray_API[284]) +#define PyArray_DebugPrint \ + (*(void (*)(PyArrayObject *)) \ + PyArray_API[285]) +#define PyArray_FailUnlessWriteable \ + (*(int (*)(PyArrayObject *, const char *)) \ + PyArray_API[286]) +#define PyArray_SetUpdateIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[287]) +#define PyDataMem_NEW \ + (*(void * (*)(size_t)) \ + PyArray_API[288]) +#define PyDataMem_FREE \ + (*(void (*)(void *)) \ + PyArray_API[289]) +#define PyDataMem_RENEW \ + (*(void * (*)(void *, size_t)) \ + PyArray_API[290]) +#define PyDataMem_SetEventHook \ + (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \ + PyArray_API[291]) +#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) +#define PyArray_MapIterSwapAxes \ + (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \ + PyArray_API[293]) +#define PyArray_MapIterArray \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[294]) +#define PyArray_MapIterNext \ + (*(void (*)(PyArrayMapIterObject *)) \ + PyArray_API[295]) +#define PyArray_Partition \ + (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[296]) +#define PyArray_ArgPartition \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[297]) +#define PyArray_SelectkindConverter \ + 
(*(int (*)(PyObject *, NPY_SELECTKIND *)) \ + PyArray_API[298]) +#define PyDataMem_NEW_ZEROED \ + (*(void * (*)(size_t, size_t)) \ + PyArray_API[299]) + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); + return -1; + } + +#if PY_VERSION_HEX >= 0x03000000 + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); +#else + if (!PyCObject_Check(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); +#endif + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* Perform runtime check of C API version */ + if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version %x but this version of numpy is %x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "API version %x but this version of numpy is %x", \ + (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "big endian, but detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "little endian, but detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#if PY_VERSION_HEX >= 0x03000000 +#define NUMPY_IMPORT_ARRAY_RETVAL NULL +#else +#define NUMPY_IMPORT_ARRAY_RETVAL +#endif + +#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } + +#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } + +#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } + +#endif + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__ufunc_api.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__ufunc_api.h new file mode 100644 index 0000000000000..e1fd1cda05ad5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__ufunc_api.h @@ -0,0 +1,328 @@ + +#ifdef 
_UMATHMODULE + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; +#else +NPY_NO_EXPORT PyTypeObject PyUFunc_Type; +#endif + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; +#else + NPY_NO_EXPORT PyTypeObject PyUFunc_Type; +#endif + +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ + (PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *); +NPY_NO_EXPORT int PyUFunc_GenericFunction \ + (PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **); +NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT int PyUFunc_GetPyValues \ + (char *, int *, int *, PyObject **); +NPY_NO_EXPORT int PyUFunc_checkfperr \ + (int, PyObject *, int *); +NPY_NO_EXPORT void PyUFunc_clearfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_getfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_handlefperr \ + (int, PyObject *, int, int *); +NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ + (PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *); +NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \ + (void **, size_t); +NPY_NO_EXPORT void PyUFunc_e_e \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT void 
PyUFunc_ee_e_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *); +NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_ValidateCasting \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ + (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); + +#else + +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) +extern void **PyUFunc_API; +#else +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +void **PyUFunc_API; +#else +static void **PyUFunc_API=NULL; +#endif +#endif + +#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) +#define PyUFunc_FromFuncAndData \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \ + PyUFunc_API[1]) +#define PyUFunc_RegisterLoopForType \ + (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *)) \ + PyUFunc_API[2]) +#define PyUFunc_GenericFunction \ + (*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \ + PyUFunc_API[3]) +#define PyUFunc_f_f_As_d_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[4]) +#define PyUFunc_d_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[5]) +#define PyUFunc_f_f \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[6]) +#define PyUFunc_g_g \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[7]) +#define PyUFunc_F_F_As_D_D \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[8]) +#define PyUFunc_F_F \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[9]) +#define PyUFunc_D_D \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[10]) +#define PyUFunc_G_G \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[11]) +#define PyUFunc_O_O \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[12]) +#define PyUFunc_ff_f_As_dd_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[13]) +#define PyUFunc_ff_f \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[14]) +#define PyUFunc_dd_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[15]) +#define PyUFunc_gg_g \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[16]) +#define PyUFunc_FF_F_As_DD_D \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[17]) +#define PyUFunc_DD_D \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[18]) +#define PyUFunc_FF_F \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[19]) +#define PyUFunc_GG_G \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[20]) +#define PyUFunc_OO_O \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[21]) +#define PyUFunc_O_O_method \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[22]) +#define PyUFunc_OO_O_method \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[23]) +#define PyUFunc_On_Om \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[24]) +#define PyUFunc_GetPyValues \ + (*(int (*)(char *, int *, int *, PyObject **)) \ + PyUFunc_API[25]) +#define PyUFunc_checkfperr \ + (*(int (*)(int, PyObject 
*, int *)) \ + PyUFunc_API[26]) +#define PyUFunc_clearfperr \ + (*(void (*)(void)) \ + PyUFunc_API[27]) +#define PyUFunc_getfperr \ + (*(int (*)(void)) \ + PyUFunc_API[28]) +#define PyUFunc_handlefperr \ + (*(int (*)(int, PyObject *, int, int *)) \ + PyUFunc_API[29]) +#define PyUFunc_ReplaceLoopBySignature \ + (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)) \ + PyUFunc_API[30]) +#define PyUFunc_FromFuncAndDataAndSignature \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \ + PyUFunc_API[31]) +#define PyUFunc_SetUsesArraysAsData \ + (*(int (*)(void **, size_t)) \ + PyUFunc_API[32]) +#define PyUFunc_e_e \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[33]) +#define PyUFunc_e_e_As_f_f \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[34]) +#define PyUFunc_e_e_As_d_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[35]) +#define PyUFunc_ee_e \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[36]) +#define PyUFunc_ee_e_As_ff_f \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[37]) +#define PyUFunc_ee_e_As_dd_d \ + (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ + PyUFunc_API[38]) +#define PyUFunc_DefaultTypeResolver \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ + PyUFunc_API[39]) +#define PyUFunc_ValidateCasting \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \ + PyUFunc_API[40]) +#define PyUFunc_RegisterLoopForDescr \ + (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ + PyUFunc_API[41]) + +static int +_import_umath(void) +{ + PyObject *numpy = PyImport_ImportModule("numpy.core.umath"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); + return -1; + } + +#if PY_VERSION_HEX >= 0x03000000 + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); +#else + if (!PyCObject_Check(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object"); + Py_DECREF(c_api); + return -1; + } + PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); +#endif + Py_DECREF(c_api); + if (PyUFunc_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); + return -1; + } + return 0; +} + +#if PY_VERSION_HEX >= 0x03000000 +#define NUMPY_IMPORT_UMATH_RETVAL NULL +#else +#define NUMPY_IMPORT_UMATH_RETVAL +#endif + +#define import_umath() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + return NUMPY_IMPORT_UMATH_RETVAL;\ + }\ + } while(0) + +#define import_umath1(ret) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + return ret;\ + }\ + } while(0) + +#define import_umath2(ret, msg) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError, msg);\ + 
return ret;\ + }\ + } while(0) + +#define import_ufunc() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + }\ + } while(0) + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_neighborhood_iterator_imp.h new file mode 100644 index 0000000000000..e8860cbc73bba --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_neighborhood_iterator_imp.h @@ -0,0 +1,90 @@ +#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP +#error You should not include this header directly +#endif +/* + * Private API (here for inline) + */ +static NPY_INLINE int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); + +/* + * Update to next item of the iterator + * + * Note: this simply increment the coordinates vector, last dimension + * incremented first , i.e, for dimension 3 + * ... + * -1, -1, -1 + * -1, -1, 0 + * -1, -1, 1 + * .... + * -1, 0, -1 + * -1, 0, 0 + * .... + * 0, -1, -1 + * 0, -1, 0 + * .... + */ +#define _UPDATE_COORD_ITER(c) \ + wb = iter->coordinates[c] < iter->bounds[c][1]; \ + if (wb) { \ + iter->coordinates[c] += 1; \ + return 0; \ + } \ + else { \ + iter->coordinates[c] = iter->bounds[c][0]; \ + } + +static NPY_INLINE int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i, wb; + + for (i = iter->nd - 1; i >= 0; --i) { + _UPDATE_COORD_ITER(i) + } + + return 0; +} + +/* + * Version optimized for 2d arrays, manual loop unrolling + */ +static NPY_INLINE int +_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp wb; + + _UPDATE_COORD_ITER(1) + _UPDATE_COORD_ITER(0) + + return 0; +} +#undef _UPDATE_COORD_ITER + +/* + * Advance to the next neighbour + */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) +{ + _PyArrayNeighborhoodIter_IncrCoord (iter); + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} + +/* + * Reset functions + */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i; + + for (i = 0; i < iter->nd; ++i) { + iter->coordinates[i] = iter->bounds[i][0]; + } + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_numpyconfig.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_numpyconfig.h new file mode 100644 index 0000000000000..79ccc290418ff --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_numpyconfig.h @@ -0,0 +1,32 @@ +#define NPY_HAVE_ENDIAN_H 1 +#define NPY_SIZEOF_SHORT SIZEOF_SHORT +#define NPY_SIZEOF_INT SIZEOF_INT +#define NPY_SIZEOF_LONG SIZEOF_LONG +#define NPY_SIZEOF_FLOAT 4 +#define NPY_SIZEOF_COMPLEX_FLOAT 8 +#define NPY_SIZEOF_DOUBLE 8 +#define NPY_SIZEOF_COMPLEX_DOUBLE 16 +#define NPY_SIZEOF_LONGDOUBLE 16 +#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 +#define NPY_SIZEOF_PY_INTPTR_T 8 +#define NPY_SIZEOF_OFF_T 8 +#define NPY_SIZEOF_PY_LONG_LONG 8 +#define NPY_SIZEOF_LONGLONG 8 +#define NPY_NO_SMP 0 +#define NPY_HAVE_DECL_ISNAN +#define NPY_HAVE_DECL_ISINF +#define NPY_HAVE_DECL_ISFINITE +#define NPY_HAVE_DECL_SIGNBIT +#define NPY_USE_C99_COMPLEX 1 +#define NPY_HAVE_COMPLEX_DOUBLE 1 +#define NPY_HAVE_COMPLEX_FLOAT 1 +#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1 +#define 
NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_USE_C99_FORMATS 1 +#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#define NPY_ABI_VERSION 0x01000009 +#define NPY_API_VERSION 0x00000009 + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayobject.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayobject.h new file mode 100644 index 0000000000000..4f46d6b1ac91d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayobject.h @@ -0,0 +1,11 @@ +#ifndef Py_ARRAYOBJECT_H +#define Py_ARRAYOBJECT_H + +#include "ndarrayobject.h" +#include "npy_interrupt.h" + +#ifdef NPY_NO_PREFIX +#include "noprefix.h" +#endif + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayscalars.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayscalars.h new file mode 100644 index 0000000000000..64450e7132132 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayscalars.h @@ -0,0 +1,175 @@ +#ifndef _NPY_ARRAYSCALARS_H_ +#define _NPY_ARRAYSCALARS_H_ + +#ifndef _MULTIARRAYMODULE +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; +#endif + + +typedef struct { + PyObject_HEAD + signed char obval; +} PyByteScalarObject; + + +typedef struct { + PyObject_HEAD + short obval; +} PyShortScalarObject; + + +typedef struct { + PyObject_HEAD + int obval; +} PyIntScalarObject; + + +typedef struct { + PyObject_HEAD + long obval; +} PyLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longlong obval; +} PyLongLongScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned char obval; +} PyUByteScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned short obval; +} PyUShortScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned int obval; +} PyUIntScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned long obval; +} PyULongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_ulonglong obval; +} PyULongLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_half obval; +} PyHalfScalarObject; + + +typedef struct { + PyObject_HEAD + float obval; +} PyFloatScalarObject; + + +typedef struct { + PyObject_HEAD + double obval; +} PyDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longdouble obval; +} PyLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cfloat obval; +} PyCFloatScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cdouble obval; +} PyCDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_clongdouble obval; +} PyCLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + PyObject * obval; +} PyObjectScalarObject; + +typedef struct { + PyObject_HEAD + npy_datetime obval; + PyArray_DatetimeMetaData obmeta; +} PyDatetimeScalarObject; + +typedef struct { + PyObject_HEAD + npy_timedelta obval; + PyArray_DatetimeMetaData obmeta; +} PyTimedeltaScalarObject; + + +typedef struct { + PyObject_HEAD + char obval; +} PyScalarObject; + +#define PyStringScalarObject PyStringObject +#define PyUnicodeScalarObject PyUnicodeObject + +typedef struct { + PyObject_VAR_HEAD + char *obval; + PyArray_Descr *descr; + int flags; + PyObject *base; +} PyVoidScalarObject; + +/* Macros + PyScalarObject + PyArrType_Type + are defined in ndarrayobject.h +*/ + +#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) +#define PyArrayScalar_True ((PyObject 
*)(&(_PyArrayScalar_BoolValues[1]))) +#define PyArrayScalar_FromLong(i) \ + ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ + return Py_INCREF(PyArrayScalar_FromLong(i)), \ + PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_FALSE \ + return Py_INCREF(PyArrayScalar_False), \ + PyArrayScalar_False +#define PyArrayScalar_RETURN_TRUE \ + return Py_INCREF(PyArrayScalar_True), \ + PyArrayScalar_True + +#define PyArrayScalar_New(cls) \ + Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) +#define PyArrayScalar_VAL(obj, cls) \ + ((Py##cls##ScalarObject *)obj)->obval +#define PyArrayScalar_ASSIGN(obj, cls, val) \ + PyArrayScalar_VAL(obj, cls) = val + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/halffloat.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/halffloat.h new file mode 100644 index 0000000000000..944f0ea34b482 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/halffloat.h @@ -0,0 +1,69 @@ +#ifndef __NPY_HALFFLOAT_H__ +#define __NPY_HALFFLOAT_H__ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Half-precision routines + */ + +/* Conversions */ +float npy_half_to_float(npy_half h); +double npy_half_to_double(npy_half h); +npy_half npy_float_to_half(float f); +npy_half npy_double_to_half(double d); +/* Comparisons */ +int npy_half_eq(npy_half h1, npy_half h2); +int npy_half_ne(npy_half h1, npy_half h2); +int npy_half_le(npy_half h1, npy_half h2); +int npy_half_lt(npy_half h1, npy_half h2); +int npy_half_ge(npy_half h1, npy_half h2); +int npy_half_gt(npy_half h1, npy_half h2); +/* faster *_nonan variants for when you know h1 and h2 are not NaN */ +int npy_half_eq_nonan(npy_half h1, npy_half h2); +int npy_half_lt_nonan(npy_half h1, npy_half h2); +int npy_half_le_nonan(npy_half h1, npy_half h2); +/* Miscellaneous functions */ +int npy_half_iszero(npy_half h); +int npy_half_isnan(npy_half h); +int npy_half_isinf(npy_half h); +int npy_half_isfinite(npy_half h); +int npy_half_signbit(npy_half h); +npy_half npy_half_copysign(npy_half x, npy_half y); +npy_half npy_half_spacing(npy_half h); +npy_half npy_half_nextafter(npy_half x, npy_half y); + +/* + * Half-precision constants + */ + +#define NPY_HALF_ZERO (0x0000u) +#define NPY_HALF_PZERO (0x0000u) +#define NPY_HALF_NZERO (0x8000u) +#define NPY_HALF_ONE (0x3c00u) +#define NPY_HALF_NEGONE (0xbc00u) +#define NPY_HALF_PINF (0x7c00u) +#define NPY_HALF_NINF (0xfc00u) +#define NPY_HALF_NAN (0x7e00u) + +#define NPY_MAX_HALF (0x7bffu) + +/* + * Bit-level conversions + */ + +npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); +npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); +npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); +npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/multiarray_api.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/multiarray_api.txt new file mode 100644 index 0000000000000..599ac5cb19221 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/multiarray_api.txt @@ -0,0 +1,2442 @@ + +=========== +Numpy C-API +=========== +:: + + unsigned int + PyArray_GetNDArrayCVersion(void ) + + +Included at the very first so not auto-grabbed and thus not labeled. 
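+This is the call that the ``import_array()`` machinery in
+``__multiarray_api.h`` uses to verify ABI compatibility at import time. A
+minimal sketch of an extension module init that triggers that check (the
+module name ``example`` is illustrative)::
+
+    #include <Python.h>
+    #include <numpy/arrayobject.h>
+
+    static struct PyModuleDef examplemodule = {
+        PyModuleDef_HEAD_INIT, "example", NULL, -1, NULL
+    };
+
+    PyMODINIT_FUNC
+    PyInit_example(void)
+    {
+        /* fills in PyArray_API; on failure returns NULL from this function */
+        import_array();
+        return PyModule_Create(&examplemodule);
+    }
+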
+ +:: + + int + PyArray_SetNumericOps(PyObject *dict) + +Set internal structure with number functions that all arrays will use + +:: + + PyObject * + PyArray_GetNumericOps(void ) + +Get dictionary showing number functions that all arrays will use + +:: + + int + PyArray_INCREF(PyArrayObject *mp) + +For object arrays, increment all internal references. + +:: + + int + PyArray_XDECREF(PyArrayObject *mp) + +Decrement all internal references for object arrays. +(or arrays with object fields) + +:: + + void + PyArray_SetStringFunction(PyObject *op, int repr) + +Set the array print function to be a Python function. + +:: + + PyArray_Descr * + PyArray_DescrFromType(int type) + +Get the PyArray_Descr structure for a type. + +:: + + PyObject * + PyArray_TypeObjectFromType(int type) + +Get a typeobject from a type-number -- can return NULL. + +New reference + +:: + + char * + PyArray_Zero(PyArrayObject *arr) + +Get pointer to zero of correct type for array. + +:: + + char * + PyArray_One(PyArrayObject *arr) + +Get pointer to one of correct type for array + +:: + + PyObject * + PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int + is_f_order) + +For backward compatibility + +Cast an array using typecode structure. +steals reference to dtype --- cannot be NULL + +This function always makes a copy of arr, even if the dtype +doesn't change. + +:: + + int + PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp) + +Cast to an already created array. + +:: + + int + PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) + +Cast to an already created array. Arrays don't have to be "broadcastable" +Only requirement is they have the same number of elements. + +:: + + int + PyArray_CanCastSafely(int fromtype, int totype) + +Check the type coercion rules. + +:: + + npy_bool + PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) + +leaves reference count alone --- cannot be NULL + +PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting' +parameter. + +:: + + int + PyArray_ObjectType(PyObject *op, int minimum_type) + +Return the typecode of the array a Python object would be converted to + +Returns the type number the result should have, or NPY_NOTYPE on error. + +:: + + PyArray_Descr * + PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) + +new reference -- accepts NULL for mintype + +:: + + PyArrayObject ** + PyArray_ConvertToCommonType(PyObject *op, int *retn) + + +:: + + PyArray_Descr * + PyArray_DescrFromScalar(PyObject *sc) + +Return descr object from array scalar. + +New reference + +:: + + PyArray_Descr * + PyArray_DescrFromTypeObject(PyObject *type) + + +:: + + npy_intp + PyArray_Size(PyObject *op) + +Compute the size of an array (in number of items) + +:: + + PyObject * + PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) + +Get scalar-equivalent to a region of memory described by a descriptor. + +:: + + PyObject * + PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) + +Get 0-dim array from scalar + +0-dim array from array-scalar object +always contains a copy of the data +unless outcode is NULL, it is of void type and the referrer does +not own it either. 
+ +steals reference to outcode + +:: + + void + PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) + +Convert to c-type + +no error checking is performed -- ctypeptr must be same type as scalar +in case of flexible type, the data is not copied +into ctypeptr which is expected to be a pointer to pointer + +:: + + int + PyArray_CastScalarToCtype(PyObject *scalar, void + *ctypeptr, PyArray_Descr *outcode) + +Cast Scalar to c-type + +The output buffer must be large-enough to receive the value +Even for flexible types which is different from ScalarAsCtype +where only a reference for flexible types is returned + +This may not work right on narrow builds for NumPy unicode scalars. + +:: + + int + PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr + *indescr, void *ctypeptr, int outtype) + +Cast Scalar to c-type + +:: + + PyObject * + PyArray_ScalarFromObject(PyObject *object) + +Get an Array Scalar From a Python Object + +Returns NULL if unsuccessful but error is only set if another error occurred. +Currently only Numeric-like object supported. + +:: + + PyArray_VectorUnaryFunc * + PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) + +Get a cast function to cast from the input descriptor to the +output type_number (must be a registered data-type). +Returns NULL if un-successful. + +:: + + PyObject * + PyArray_FromDims(int nd, int *d, int type) + +Construct an empty array from dimensions and typenum + +:: + + PyObject * + PyArray_FromDimsAndDataAndDescr(int nd, int *d, PyArray_Descr + *descr, char *data) + +Like FromDimsAndData but uses the Descr structure instead of typecode +as input. + +:: + + PyObject * + PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int + min_depth, int max_depth, int flags, PyObject + *context) + +Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags +Steals a reference to newtype --- which can be NULL + +:: + + PyObject * + PyArray_EnsureArray(PyObject *op) + +This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, ENSUREARRAY) +that special cases Arrays and PyArray_Scalars up front +It *steals a reference* to the object +It also guarantees that the result is PyArray_Type +Because it decrefs op if any conversion needs to take place +so it can be used like PyArray_EnsureArray(some_function(...)) + +:: + + PyObject * + PyArray_EnsureAnyArray(PyObject *op) + + +:: + + PyObject * + PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char + *sep) + + +Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an +array corresponding to the data encoded in that file. + +If the dtype is NULL, the default array type is used (double). +If non-null, the reference is stolen. + +The number of elements to read is given as ``num``; if it is < 0, then +then as many as possible are read. + +If ``sep`` is NULL or empty, then binary data is assumed, else +text data, with ``sep`` as the separator between elements. Whitespace in +the separator matches any length of whitespace in the text, and a match +for whitespace around the separator is added. + +For memory-mapped files, use the buffer interface. No more data than +necessary is read by this routine. + +:: + + PyObject * + PyArray_FromString(char *data, npy_intp slen, PyArray_Descr + *dtype, npy_intp num, char *sep) + + +Given a pointer to a string ``data``, a string length ``slen``, and +a ``PyArray_Descr``, return an array corresponding to the data +encoded in that string. + +If the dtype is NULL, the default array type is used (double). 
+If non-null, the reference is stolen. + +If ``slen`` is < 0, then the end of string is used for text data. +It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs +would be the norm). + +The number of elements to read is given as ``num``; if it is < 0, then +then as many as possible are read. + +If ``sep`` is NULL or empty, then binary data is assumed, else +text data, with ``sep`` as the separator between elements. Whitespace in +the separator matches any length of whitespace in the text, and a match +for whitespace around the separator is added. + +:: + + PyObject * + PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, npy_intp + count, npy_intp offset) + + +:: + + PyObject * + PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) + + +steals a reference to dtype (which cannot be NULL) + +:: + + PyObject * + PyArray_Return(PyArrayObject *mp) + + +Return either an array or the appropriate Python object if the array +is 0d and matches a Python type. +steals reference to mp + +:: + + PyObject * + PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int + offset) + +Get a subset of bytes from each element of the array +steals reference to typed, must not be NULL + +:: + + int + PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int + offset, PyObject *val) + +Set a subset of bytes from each element of the array +steals reference to dtype, must not be NULL + +:: + + PyObject * + PyArray_Byteswap(PyArrayObject *self, npy_bool inplace) + + +:: + + PyObject * + PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int + refcheck, NPY_ORDER order) + +Resize (reallocate data). Only works if nothing else is referencing this +array and it is contiguous. If refcheck is 0, then the reference count is +not checked and assumed to be 1. You still must own this data and have no +weak-references and no base object. + +:: + + int + PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src) + +Move the memory of one array into another, allowing for overlapping data. + +Returns 0 on success, negative on failure. + +:: + + int + PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src) + +Copy an Array into another array. +Broadcast to the destination shape if necessary. + +Returns 0 on success, -1 on failure. + +:: + + int + PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src) + +Copy an Array into another array -- memory must not overlap +Does not require src and dest to have "broadcastable" shapes +(only the same number of elements). + +TODO: For NumPy 2.0, this could accept an order parameter which +only allows NPY_CORDER and NPY_FORDER. Could also rename +this to CopyAsFlat to make the name more intuitive. + +Returns 0 on success, -1 on error. + +:: + + int + PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) + + +:: + + PyObject * + PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order) + +Copy an array. + +:: + + PyObject * + PyArray_ToList(PyArrayObject *self) + +To List + +:: + + PyObject * + PyArray_ToString(PyArrayObject *self, NPY_ORDER order) + + +:: + + int + PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) + +To File + +:: + + int + PyArray_Dump(PyObject *self, PyObject *file, int protocol) + + +:: + + PyObject * + PyArray_Dumps(PyObject *self, int protocol) + + +:: + + int + PyArray_ValidType(int type) + +Is the typenum valid? + +:: + + void + PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) + +Update Several Flags at once. 
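+Both ``PyArray_MoveInto`` and ``PyArray_CopyInto`` follow the usual C-API
+error convention (0 on success, -1 with an exception set), so a call site is
+typically just this (``dst`` and ``src`` assumed to be valid arrays)::
+
+    /* broadcast src into dst; propagate any error to the caller */
+    if (PyArray_CopyInto(dst, src) < 0) {
+        return NULL;
+    }
+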
+ +:: + + PyObject * + PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int + type_num, npy_intp *strides, void *data, int itemsize, int + flags, PyObject *obj) + +Generic new array creation routine. + +:: + + PyObject * + PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int + nd, npy_intp *dims, npy_intp *strides, void + *data, int flags, PyObject *obj) + +Generic new array creation routine. + +steals a reference to descr (even on failure) + +:: + + PyArray_Descr * + PyArray_DescrNew(PyArray_Descr *base) + +base cannot be NULL + +:: + + PyArray_Descr * + PyArray_DescrNewFromType(int type_num) + + +:: + + double + PyArray_GetPriority(PyObject *obj, double default_) + +Get Priority from object + +:: + + PyObject * + PyArray_IterNew(PyObject *obj) + +Get Iterator. + +:: + + PyObject * + PyArray_MultiIterNew(int n, ... ) + +Get MultiIterator, + +:: + + int + PyArray_PyIntAsInt(PyObject *o) + + +:: + + npy_intp + PyArray_PyIntAsIntp(PyObject *o) + + +:: + + int + PyArray_Broadcast(PyArrayMultiIterObject *mit) + + +:: + + void + PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) + +Assumes contiguous + +:: + + int + PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) + + +:: + + npy_bool + PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp + offset, npy_intp *dims, npy_intp *newstrides) + + +:: + + PyArray_Descr * + PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) + + +returns a copy of the PyArray_Descr structure with the byteorder +altered: +no arguments: The byteorder is swapped (in all subfields as well) +single argument: The byteorder is forced to the given state +(in all subfields as well) + +Valid states: ('big', '>') or ('little' or '<') +('native', or '=') + +If a descr structure with | is encountered it's own +byte-order is not changed but any fields are: + + +Deep bytorder change of a data-type descriptor +Leaves reference count of self unchanged --- does not DECREF self *** + +:: + + PyObject * + PyArray_IterAllButAxis(PyObject *obj, int *inaxis) + +Get Iterator that iterates over all but one axis (don't use this with +PyArray_ITER_GOTO1D). The axis will be over-written if negative +with the axis having the smallest stride. + +:: + + PyObject * + PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int + min_depth, int max_depth, int requires, PyObject + *context) + +steals a reference to descr -- accepts NULL + +:: + + PyObject * + PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int + flags) + +steals reference to newtype --- acc. NULL + +:: + + PyObject * + PyArray_FromInterface(PyObject *origin) + + +:: + + PyObject * + PyArray_FromStructInterface(PyObject *input) + + +:: + + PyObject * + PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject + *context) + + +:: + + NPY_SCALARKIND + PyArray_ScalarKind(int typenum, PyArrayObject **arr) + +ScalarKind + +Returns the scalar kind of a type number, with an +optional tweak based on the scalar value itself. +If no scalar is provided, it returns INTPOS_SCALAR +for both signed and unsigned integers, otherwise +it checks the sign of any signed integer to choose +INTNEG_SCALAR when appropriate. + +:: + + int + PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND + scalar) + + +Determines whether the data type 'thistype', with +scalar kind 'scalar', can be coerced into 'neededtype'. 
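+A common pattern with the iterator constructors above, using the
+``PyArray_ITER_*`` helpers declared in ``ndarraytypes.h`` (``arr`` is
+assumed here to be an array of ``double``)::
+
+    PyObject *it = PyArray_IterNew((PyObject *)arr);
+    if (it == NULL) {
+        return NULL;
+    }
+    while (PyArray_ITER_NOTDONE(it)) {
+        double val = *(double *)PyArray_ITER_DATA(it);
+        /* ... consume val ... */
+        PyArray_ITER_NEXT(it);
+    }
+    Py_DECREF(it);
+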
+ +:: + + PyObject * + PyArray_NewFlagsObject(PyObject *obj) + + +Get New ArrayFlagsObject + +:: + + npy_bool + PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) + +See if array scalars can be cast. + +TODO: For NumPy 2.0, add a NPY_CASTING parameter. + +:: + + int + PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) + + +:: + + int + PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) + +Adjusts previously broadcasted iterators so that the axis with +the smallest sum of iterator strides is not iterated over. +Returns dimension which is smallest in the range [0,multi->nd). +A -1 is returned if multi->nd == 0. + +don't use with PyArray_ITER_GOTO1D because factors are not adjusted + +:: + + int + PyArray_ElementStrides(PyObject *obj) + + +:: + + void + PyArray_Item_INCREF(char *data, PyArray_Descr *descr) + + +:: + + void + PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) + + +:: + + PyObject * + PyArray_FieldNames(PyObject *fields) + +Return the tuple of ordered field names from a dictionary. + +:: + + PyObject * + PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute) + +Return Transpose. + +:: + + PyObject * + PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int + axis, PyArrayObject *out, NPY_CLIPMODE clipmode) + +Take + +:: + + PyObject * + PyArray_PutTo(PyArrayObject *self, PyObject*values0, PyObject + *indices0, NPY_CLIPMODE clipmode) + +Put values into an array + +:: + + PyObject * + PyArray_PutMask(PyArrayObject *self, PyObject*values0, PyObject*mask0) + +Put values into an array according to a mask. + +:: + + PyObject * + PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) + +Repeat the array. + +:: + + PyObject * + PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject + *out, NPY_CLIPMODE clipmode) + + +:: + + int + PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) + +Sort an array in-place + +:: + + PyObject * + PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) + +ArgSort an array + +:: + + PyObject * + PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE + side, PyObject *perm) + + +Search the sorted array op1 for the location of the items in op2. The +result is an array of indexes, one for each element in op2, such that if +the item were to be inserted in op1 just before that index the array +would still be in sorted order. + +Parameters +---------- +op1 : PyArrayObject * +Array to be searched, must be 1-D. +op2 : PyObject * +Array of items whose insertion indexes in op1 are wanted +side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT} +If NPY_SEARCHLEFT, return first valid insertion indexes +If NPY_SEARCHRIGHT, return last valid insertion indexes +perm : PyObject * +Permutation array that sorts op1 (optional) + +Returns +------- +ret : PyObject * +New reference to npy_intp array containing indexes where items in op2 +could be validly inserted into op1. NULL on error. + +Notes +----- +Binary search is used to find the indexes. 
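+For example, to find the left insertion points of ``needles`` in an already
+sorted 1-D array ``haystack`` (no sorting permutation, so ``perm`` is NULL)::
+
+    PyObject *idx = PyArray_SearchSorted(haystack, needles,
+                                         NPY_SEARCHLEFT, NULL);
+    if (idx == NULL) {
+        return NULL;
+    }
+    /* idx is a new reference to an array of npy_intp indexes */
+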
+ +:: + + PyObject * + PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) + +ArgMax + +:: + + PyObject * + PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out) + +ArgMin + +:: + + PyObject * + PyArray_Reshape(PyArrayObject *self, PyObject *shape) + +Reshape + +:: + + PyObject * + PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER + order) + +New shape for an array + +:: + + PyObject * + PyArray_Squeeze(PyArrayObject *self) + + +return a new view of the array object with all of its unit-length +dimensions squeezed out if needed, otherwise +return the same array. + +:: + + PyObject * + PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject + *pytype) + +View +steals a reference to type -- accepts NULL + +:: + + PyObject * + PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) + +SwapAxes + +:: + + PyObject * + PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) + +Max + +:: + + PyObject * + PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) + +Min + +:: + + PyObject * + PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) + +Ptp + +:: + + PyObject * + PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Mean + +:: + + PyObject * + PyArray_Trace(PyArrayObject *self, int offset, int axis1, int + axis2, int rtype, PyArrayObject *out) + +Trace + +:: + + PyObject * + PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int + axis2) + +Diagonal + +In NumPy versions prior to 1.7, this function always returned a copy of +the diagonal array. In 1.7, the code has been updated to compute a view +onto 'self', but it still copies this array before returning, as well as +setting the internal WARN_ON_WRITE flag. In a future version, it will +simply return a view onto self. + +:: + + PyObject * + PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject + *max, PyArrayObject *out) + +Clip + +:: + + PyObject * + PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) + +Conjugate + +:: + + PyObject * + PyArray_Nonzero(PyArrayObject *self) + +Nonzero + +TODO: In NumPy 2.0, should make the iteration order a parameter. 
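+These reductions mirror the Python-level array methods. As a sketch, the
+overall maximum of ``arr`` (passing ``NPY_MAXDIMS`` as the axis is the
+C-level equivalent of ``axis=None``, and ``out`` may be NULL)::
+
+    PyObject *mx = PyArray_Max(arr, NPY_MAXDIMS, NULL);
+    if (mx == NULL) {
+        return NULL;
+    }
+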
+ +:: + + PyObject * + PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out, int variance) + +Set variance to 1 to by-pass square-root calculation and return variance +Std + +:: + + PyObject * + PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Sum + +:: + + PyObject * + PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +CumSum + +:: + + PyObject * + PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Prod + +:: + + PyObject * + PyArray_CumProd(PyArrayObject *self, int axis, int + rtype, PyArrayObject *out) + +CumProd + +:: + + PyObject * + PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) + +All + +:: + + PyObject * + PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) + +Any + +:: + + PyObject * + PyArray_Compress(PyArrayObject *self, PyObject *condition, int + axis, PyArrayObject *out) + +Compress + +:: + + PyObject * + PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) + +Flatten + +:: + + PyObject * + PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order) + +Ravel +Returns a contiguous array + +:: + + npy_intp + PyArray_MultiplyList(npy_intp *l1, int n) + +Multiply a List + +:: + + int + PyArray_MultiplyIntList(int *l1, int n) + +Multiply a List of ints + +:: + + void * + PyArray_GetPtr(PyArrayObject *obj, npy_intp*ind) + +Produce a pointer into array + +:: + + int + PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n) + +Compare Lists + +:: + + int + PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int + nd, PyArray_Descr*typedescr) + +Simulate a C-array +steals a reference to typedescr -- can be NULL + +:: + + int + PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) + +Convert to a 1D C-array + +:: + + int + PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int + typecode) + +Convert to a 2D C-array + +:: + + int + PyArray_Free(PyObject *op, void *ptr) + +Free pointers created if As2D is called + +:: + + int + PyArray_Converter(PyObject *object, PyObject **address) + + +Useful to pass as converter function for O& processing in PyArgs_ParseTuple. + +This conversion function can be used with the "O&" argument for +PyArg_ParseTuple. It will immediately return an object of array type +or will convert to a NPY_ARRAY_CARRAY any other object. + +If you use PyArray_Converter, you must DECREF the array when finished +as you get a new reference to it. + +:: + + int + PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) + +PyArray_IntpFromSequence +Returns the number of integers converted or -1 if an error occurred. +vals must be large enough to hold maxvals + +:: + + PyObject * + PyArray_Concatenate(PyObject *op, int axis) + +Concatenate + +Concatenate an arbitrary Python sequence into an array. +op is a python object supporting the sequence interface. +Its elements will be concatenated together to form a single +multidimensional array. If axis is NPY_MAXDIMS or bigger, then +each sequence object will be flattened before concatenation + +:: + + PyObject * + PyArray_InnerProduct(PyObject *op1, PyObject *op2) + +Numeric.innerproduct(a,v) + +:: + + PyObject * + PyArray_MatrixProduct(PyObject *op1, PyObject *op2) + +Numeric.matrixproduct(a,v) +just like inner product but does the swapaxes stuff on the fly + +:: + + PyObject * + PyArray_CopyAndTranspose(PyObject *op) + +Copy and Transpose + +Could deprecate this function, as there isn't a speed benefit over +calling Transpose and then Copy. 
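+As the ``PyArray_Converter`` entry above notes, it is designed for ``O&``
+argument processing; a typical wrapper looks like this (the function and
+argument names are illustrative)::
+
+    static PyObject *
+    my_func(PyObject *self, PyObject *args)
+    {
+        PyArrayObject *arr = NULL;
+
+        if (!PyArg_ParseTuple(args, "O&", PyArray_Converter, &arr)) {
+            return NULL;
+        }
+        /* ... work with arr ... */
+        Py_DECREF(arr);  /* the converter handed back a new reference */
+        Py_RETURN_NONE;
+    }
+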
+
+::
+
+  PyObject *
+  PyArray_Correlate(PyObject *op1, PyObject *op2, int mode)
+
+Numeric.correlate(a1,a2,mode)
+
+::
+
+  int
+  PyArray_TypestrConvert(int itemsize, int gentype)
+
+Typestr converter
+
+::
+
+  int
+  PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
+
+Get typenum from an object -- None goes to NPY_DEFAULT_TYPE.
+This function takes a Python object representing a type and converts it
+to the correct PyArray_Descr * structure to describe the type.
+
+Many kinds of objects can be used to represent a data-type, which in
+NumPy is quite a flexible concept.
+
+This is the central code that converts Python objects to
+type-descriptor objects that are used throughout numpy.
+
+Returns a new reference in *at, but the returned descriptor should not
+be modified as it may be one of the canonical immutable objects or
+a reference to the input obj.
+
+::
+
+  int
+  PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at)
+
+Get typenum from an object -- None goes to NULL.
+
+::
+
+  int
+  PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
+
+Get intp chunk from sequence
+
+This function takes a Python sequence object and allocates and
+fills in an intp array with the converted values.
+
+Remember to free the pointer seq.ptr when done, using
+PyDimMem_FREE(seq.ptr).
+
+::
+
+  int
+  PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
+
+Get buffer chunk from object
+
+This function takes a Python object which exposes the (single-segment)
+buffer interface and returns a pointer to the data segment.
+
+You should increment the reference count of buf->base by one if you
+will hang on to a reference.
+
+You only get a borrowed reference to the object. Do not free the
+memory.
+
+::
+
+  int
+  PyArray_AxisConverter(PyObject *obj, int *axis)
+
+Get axis from an object (possibly None) -- a converter function.
+
+See also PyArray_ConvertMultiAxis, which also handles a tuple of axes.
+
+::
+
+  int
+  PyArray_BoolConverter(PyObject *object, npy_bool *val)
+
+Convert an object to true / false
+
+::
+
+  int
+  PyArray_ByteorderConverter(PyObject *obj, char *endian)
+
+Convert object to endian
+
+::
+
+  int
+  PyArray_OrderConverter(PyObject *object, NPY_ORDER *val)
+
+Convert an object to FORTRAN / C / ANY / KEEP
+
+::
+
+  unsigned char
+  PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
+
+This function returns true if the two typecodes are
+equivalent (same basic kind and same itemsize).
+
+::
+
+  PyObject *
+  PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int
+                is_f_order)
+
+Zeros
+
+Steals a reference to type; accepts a NULL type.
+
+::
+
+  PyObject *
+  PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int
+                is_f_order)
+
+Empty
+
+Steals a reference to type; accepts a NULL type.
+
+::
+
+  PyObject *
+  PyArray_Where(PyObject *condition, PyObject *x, PyObject *y)
+
+Where
+
+::
+
+  PyObject *
+  PyArray_Arange(double start, double stop, double step, int type_num)
+
+Arange
+
+::
+
+  PyObject *
+  PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject
+                    *step, PyArray_Descr *dtype)
+
+ArangeObj
+
+This doesn't change the references.
+
+::
+
+  int
+  PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
+
+Convert object to sort kind
+
+::
+
+  PyObject *
+  PyArray_LexSort(PyObject *sort_keys, int axis)
+
+LexSort an array providing indices that will sort a collection of arrays
+lexicographically.
+The first key is sorted on first, followed by the second key; this
+requires that a "mergesort" argsort be available for each sort_key.
+
+Returns an index array that shows the indexes for the lexicographic sort along
+the given axis.
+
+::
+
+  PyObject *
+  PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
+
+Round
+
+::
+
+  unsigned char
+  PyArray_EquivTypenums(int typenum1, int typenum2)
+
+::
+
+  int
+  PyArray_RegisterDataType(PyArray_Descr *descr)
+
+Register Data type
+
+Does not change the reference count of descr.
+
+::
+
+  int
+  PyArray_RegisterCastFunc(PyArray_Descr *descr, int
+                           totype, PyArray_VectorUnaryFunc *castfunc)
+
+Register Casting Function
+
+Replaces any function currently stored.
+
+::
+
+  int
+  PyArray_RegisterCanCast(PyArray_Descr *descr, int
+                          totype, NPY_SCALARKIND scalar)
+
+Register a type number indicating that a descriptor can be cast
+to it safely.
+
+::
+
+  void
+  PyArray_InitArrFuncs(PyArray_ArrFuncs *f)
+
+Initialize arrfuncs to NULL
+
+::
+
+  PyObject *
+  PyArray_IntTupleFromIntp(int len, npy_intp *vals)
+
+PyArray_IntTupleFromIntp
+
+::
+
+  int
+  PyArray_TypeNumFromName(char *str)
+
+::
+
+  int
+  PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val)
+
+Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP
+
+::
+
+  int
+  PyArray_OutputConverter(PyObject *object, PyArrayObject **address)
+
+Useful to pass as a converter function for O& processing in
+PyArg_ParseTuple for output arrays.
+
+::
+
+  PyObject *
+  PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd)
+
+Get Iterator broadcast to a particular shape
+
+::
+
+  void
+  _PyArray_SigintHandler(int signum)
+
+::
+
+  void*
+  _PyArray_GetSigintBuf(void)
+
+::
+
+  int
+  PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at)
+
+Get a type-descriptor from an object, forcing alignment if possible.
+None goes to the DEFAULT type.
+
+Accepts any object with a .fields attribute and/or an .itemsize
+attribute (the .itemsize attribute is used when the .fields attribute
+does not give the total size -- i.e. a partial record naming). If
+itemsize is given it must be >= the size computed from the fields.
+
+The .fields attribute must return a convertible dictionary if present.
+The result inherits from NPY_VOID.
+
+::
+
+  int
+  PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at)
+
+Get a type-descriptor from an object, forcing alignment if possible.
+None goes to NULL.
+
+::
+
+  int
+  PyArray_SearchsideConverter(PyObject *obj, void *addr)
+
+Convert object to searchsorted side
+
+::
+
+  PyObject *
+  PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags)
+
+PyArray_CheckAxis
+
+Check that the axis is valid and convert 0-d arrays to 1-d arrays.
+
+::
+
+  npy_intp
+  PyArray_OverflowMultiplyList(npy_intp *l1, int n)
+
+Multiply a list of non-negative numbers with overflow detection.
+
+::
+
+  int
+  PyArray_CompareString(char *s1, char *s2, size_t len)
+
+::
+
+  PyObject *
+  PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ... )
+
+Get a MultiIterator from an array of Python objects and any additional
+arrays.
+
+PyObject **mps -- array of PyObjects
+int n - number of PyObjects in the array
+int nadd - number of additional arrays to include in the iterator.
+
+Returns a multi-iterator object.
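+
+Editorial sketch of PyArray_CheckAxis, assuming 'arr' is an existing
+PyArrayObject* and a Python-level axis of -1 was received::
+
+  int axis = -1;
+  PyArrayObject *checked =
+      (PyArrayObject *)PyArray_CheckAxis(arr, &axis, 0);
+  if (checked == NULL) {
+      return NULL;    /* invalid axis; exception already set */
+  }
+  /* 'axis' has been validated (and, for valid negative values,
+     normalized); 'checked' is at least 1-d. */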
+
+::
+
+  int
+  PyArray_GetEndianness(void)
+
+::
+
+  unsigned int
+  PyArray_GetNDArrayCFeatureVersion(void)
+
+Returns the built-in (at compilation time) C API version
+
+::
+
+  PyObject *
+  PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode)
+
+correlate(a1,a2,mode)
+
+This function computes the usual correlation (so correlate(a1, a2) !=
+correlate(a2, a1)), and conjugates the second argument for complex
+inputs.
+
+::
+
+  PyObject*
+  PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp
+                              *bounds, int mode, PyArrayObject *fill)
+
+A Neighborhood Iterator object.
+
+::
+
+  void
+  PyArray_SetDatetimeParseFunction(PyObject *op)
+
+This function is scheduled to be removed.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  void
+  PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT
+                                   fr, npy_datetimestruct *result)
+
+Fill the datetime struct from the value and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  void
+  PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT
+                                     fr, npy_timedeltastruct *result)
+
+Fill the timedelta struct from the timedelta value and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  npy_datetime
+  PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT
+                                   fr, npy_datetimestruct *d)
+
+Create a datetime value from a filled datetime struct and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  npy_datetime
+  PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT
+                                     fr, npy_timedeltastruct *d)
+
+Create a timedelta value from a filled timedelta struct and resolution
+unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+  NpyIter *
+  NpyIter_New(PyArrayObject *op, npy_uint32 flags, NPY_ORDER
+              order, NPY_CASTING casting, PyArray_Descr *dtype)
+
+Allocate a new iterator for one array object.
+
+::
+
+  NpyIter *
+  NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32
+                   flags, NPY_ORDER order, NPY_CASTING
+                   casting, npy_uint32 *op_flags, PyArray_Descr
+                   **op_request_dtypes)
+
+Allocate a new iterator for more than one array object, using
+standard NumPy broadcasting rules and the default buffer size.
+
+::
+
+  NpyIter *
+  NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32
+                      flags, NPY_ORDER order, NPY_CASTING
+                      casting, npy_uint32 *op_flags, PyArray_Descr
+                      **op_request_dtypes, int oa_ndim, int
+                      **op_axes, npy_intp *itershape, npy_intp
+                      buffersize)
+
+Allocate a new iterator for multiple array objects, with advanced
+options for controlling the broadcasting, shape, and buffer size.
+
+::
+
+  NpyIter *
+  NpyIter_Copy(NpyIter *iter)
+
+Makes a copy of the iterator
+
+::
+
+  int
+  NpyIter_Deallocate(NpyIter *iter)
+
+Deallocate an iterator
+
+::
+
+  npy_bool
+  NpyIter_HasDelayedBufAlloc(NpyIter *iter)
+
+Whether the buffer allocation is being delayed
+
+::
+
+  npy_bool
+  NpyIter_HasExternalLoop(NpyIter *iter)
+
+Whether the iterator handles the inner loop
+
+::
+
+  int
+  NpyIter_EnableExternalLoop(NpyIter *iter)
+
+Removes the inner loop handling (so HasExternalLoop returns true)
+
+::
+
+  npy_intp *
+  NpyIter_GetInnerStrideArray(NpyIter *iter)
+
+Get the array of strides for the inner loop (when HasExternalLoop is true)
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+  npy_intp *
+  NpyIter_GetInnerLoopSizePtr(NpyIter *iter)
+
+Get a pointer to the size of the inner loop (when HasExternalLoop is true)
+
+This function may be safely called without holding the Python GIL.
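+
+Editorial sketch (not part of the generated listing): the two
+inner-loop accessors above are typically used together with
+NpyIter_GetIterNext and NpyIter_GetDataPtrArray (documented further
+on). A minimal read-only traversal, assuming 'arr' is an existing
+array and that the zero-sized case has been checked beforehand with
+NpyIter_GetIterSize::
+
+  NpyIter *iter = NpyIter_New(arr,
+                              NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP,
+                              NPY_KEEPORDER, NPY_NO_CASTING, NULL);
+  if (iter == NULL) {
+      return -1;
+  }
+  NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+  if (iternext == NULL) {
+      NpyIter_Deallocate(iter);
+      return -1;
+  }
+  char **dataptr = NpyIter_GetDataPtrArray(iter);
+  npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter);
+  npy_intp *innersizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+  do {
+      char *data = *dataptr;
+      npy_intp stride = *strideptr;
+      npy_intp count = *innersizeptr;
+      while (count--) {
+          /* process the element at 'data' */
+          data += stride;
+      }
+  } while (iternext(iter));
+  NpyIter_Deallocate(iter);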
+ +:: + + int + NpyIter_Reset(NpyIter *iter, char **errmsg) + +Resets the iterator to its initial state + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char + **errmsg) + +Resets the iterator to its initial state, with new base data pointers. +This function requires great caution. + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_ResetToIterIndexRange(NpyIter *iter, npy_intp istart, npy_intp + iend, char **errmsg) + +Resets the iterator to a new iterator index range + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_GetNDim(NpyIter *iter) + +Gets the number of dimensions being iterated + +:: + + int + NpyIter_GetNOp(NpyIter *iter) + +Gets the number of operands being iterated + +:: + + NpyIter_IterNextFunc * + NpyIter_GetIterNext(NpyIter *iter, char **errmsg) + +Compute the specialized iteration function for an iterator + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + npy_intp + NpyIter_GetIterSize(NpyIter *iter) + +Gets the number of elements being iterated + +:: + + void + NpyIter_GetIterIndexRange(NpyIter *iter, npy_intp *istart, npy_intp + *iend) + +Gets the range of iteration indices being iterated + +:: + + npy_intp + NpyIter_GetIterIndex(NpyIter *iter) + +Gets the current iteration index + +:: + + int + NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex) + +Sets the iterator position to the specified iterindex, +which matches the iteration order of the iterator. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. + +:: + + npy_bool + NpyIter_HasMultiIndex(NpyIter *iter) + +Whether the iterator is tracking a multi-index + +:: + + int + NpyIter_GetShape(NpyIter *iter, npy_intp *outshape) + +Gets the broadcast shape if a multi-index is being tracked by the iterator, +otherwise gets the shape of the iteration as Fortran-order +(fastest-changing index first). + +The reason Fortran-order is returned when a multi-index +is not enabled is that this is providing a direct view into how +the iterator traverses the n-dimensional space. The iterator organizes +its memory from fastest index to slowest index, and when +a multi-index is enabled, it uses a permutation to recover the original +order. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + NpyIter_GetMultiIndexFunc * + NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg) + +Compute a specialized get_multi_index function for the iterator + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp *multi_index) + +Sets the iterator to the specified multi-index, which must have the +correct number of entries for 'ndim'. 
It is only valid +when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation +fails if the multi-index is out of bounds. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. + +:: + + int + NpyIter_RemoveMultiIndex(NpyIter *iter) + +Removes multi-index support from an iterator. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + npy_bool + NpyIter_HasIndex(NpyIter *iter) + +Whether the iterator is tracking an index + +:: + + npy_bool + NpyIter_IsBuffered(NpyIter *iter) + +Whether the iterator is buffered + +:: + + npy_bool + NpyIter_IsGrowInner(NpyIter *iter) + +Whether the inner loop can grow if buffering is unneeded + +:: + + npy_intp + NpyIter_GetBufferSize(NpyIter *iter) + +Gets the size of the buffer, or 0 if buffering is not enabled + +:: + + npy_intp * + NpyIter_GetIndexPtr(NpyIter *iter) + +Get a pointer to the index, if it is being tracked + +:: + + int + NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index) + +If the iterator is tracking an index, sets the iterator +to the specified index. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. + +:: + + char ** + NpyIter_GetDataPtrArray(NpyIter *iter) + +Get the array of data pointers (1 per object being iterated) + +This function may be safely called without holding the Python GIL. + +:: + + PyArray_Descr ** + NpyIter_GetDescrArray(NpyIter *iter) + +Get the array of data type pointers (1 per object being iterated) + +:: + + PyArrayObject ** + NpyIter_GetOperandArray(NpyIter *iter) + +Get the array of objects being iterated + +:: + + PyArrayObject * + NpyIter_GetIterView(NpyIter *iter, npy_intp i) + +Returns a view to the i-th object with the iterator's internal axes + +:: + + void + NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags) + +Gets an array of read flags (1 per object being iterated) + +:: + + void + NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags) + +Gets an array of write flags (1 per object being iterated) + +:: + + void + NpyIter_DebugPrint(NpyIter *iter) + +For debugging + +:: + + npy_bool + NpyIter_IterationNeedsAPI(NpyIter *iter) + +Whether the iteration loop, and in particular the iternext() +function, needs API access. If this is true, the GIL must +be retained while iterating. + +:: + + void + NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides) + +Get an array of strides which are fixed. Any strides which may +change during iteration receive the value NPY_MAX_INTP. Once +the iterator is ready to iterate, call this to get the strides +which will always be fixed in the inner loop, then choose optimized +inner loop functions which take advantage of those fixed strides. + +This function may be safely called without holding the Python GIL. + +:: + + int + NpyIter_RemoveAxis(NpyIter *iter, int axis) + +Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX +was set for iterator creation, and does not work if buffering is +enabled. This function also resets the iterator to its initial state. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + npy_intp * + NpyIter_GetAxisStrideArray(NpyIter *iter, int axis) + +Gets the array of strides for the specified axis. +If the iterator is tracking a multi-index, gets the strides +for the axis specified, otherwise gets the strides for +the iteration axis as Fortran order (fastest-changing axis first). + +Returns NULL if an error occurs. + +:: + + npy_bool + NpyIter_RequiresBuffering(NpyIter *iter) + +Whether the iteration could be done with no buffering. 
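+
+Editorial sketch of the multi-index facilities above, assuming 'iter'
+was constructed with NPY_ITER_MULTI_INDEX (memset comes from
+<string.h>)::
+
+  npy_intp multi_index[NPY_MAXDIMS];
+  NpyIter_GetMultiIndexFunc *get_multi_index =
+      NpyIter_GetGetMultiIndex(iter, NULL);
+  if (get_multi_index == NULL) {
+      return -1;
+  }
+  /* Read the current position, one entry per iteration dimension. */
+  get_multi_index(iter, multi_index);
+
+  /* Jump to the origin of the iteration space. */
+  memset(multi_index, 0, NpyIter_GetNDim(iter) * sizeof(npy_intp));
+  if (NpyIter_GotoMultiIndex(iter, multi_index) != NPY_SUCCEED) {
+      return -1;
+  }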
+ +:: + + char ** + NpyIter_GetInitialDataPtrArray(NpyIter *iter) + +Get the array of data pointers (1 per object being iterated), +directly into the arrays (never pointing to a buffer), for starting +unbuffered iteration. This always returns the addresses for the +iterator position as reset to iterator index 0. + +These pointers are different from the pointers accepted by +NpyIter_ResetBasePointers, because the direction along some +axes may have been reversed, requiring base offsets. + +This function may be safely called without holding the Python GIL. + +:: + + int + NpyIter_CreateCompatibleStrides(NpyIter *iter, npy_intp + itemsize, npy_intp *outstrides) + +Builds a set of strides which are the same as the strides of an +output array created using the NPY_ITER_ALLOCATE flag, where NULL +was passed for op_axes. This is for data packed contiguously, +but not necessarily in C or Fortran order. This should be used +together with NpyIter_GetShape and NpyIter_GetNDim. + +A use case for this function is to match the shape and layout of +the iterator and tack on one or more dimensions. For example, +in order to generate a vector per input value for a numerical gradient, +you pass in ndim*itemsize for itemsize, then add another dimension to +the end with size ndim and stride itemsize. To do the Hessian matrix, +you do the same thing but add two dimensions, or take advantage of +the symmetry and pack it into 1 dimension with a particular encoding. + +This function may only be called if the iterator is tracking a multi-index +and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from +being iterated in reverse order. + +If an array is created with this method, simply adding 'itemsize' +for each iteration will traverse the new array matching the +iterator. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + int + PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) + +Convert any Python object, *obj*, to an NPY_CASTING enum. + +:: + + npy_intp + PyArray_CountNonzero(PyArrayObject *self) + +Counts the number of non-zero elements in the array. + +Returns -1 on error. + +:: + + PyArray_Descr * + PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) + +Produces the smallest size and lowest kind type to which both +input types can be cast. + +:: + + PyArray_Descr * + PyArray_MinScalarType(PyArrayObject *arr) + +If arr is a scalar (has 0 dimensions) with a built-in number data type, +finds the smallest type size/kind which can still represent its data. +Otherwise, returns the array's data type. + + +:: + + PyArray_Descr * + PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, npy_intp + ndtypes, PyArray_Descr **dtypes) + +Produces the result type of a bunch of inputs, using the UFunc +type promotion rules. Use this function when you have a set of +input arrays, and need to determine an output array dtype. + +If all the inputs are scalars (have 0 dimensions) or the maximum "kind" +of the scalars is greater than the maximum "kind" of the arrays, does +a regular type promotion. + +Otherwise, does a type promotion on the MinScalarType +of all the inputs. Data types passed directly are treated as array +types. + + +:: + + npy_bool + PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr + *to, NPY_CASTING casting) + +Returns 1 if the array object may be cast to the given data type using +the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in +that it handles scalar arrays (0 dimensions) specially, by checking +their value. 
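+
+Editorial sketch of the promotion helpers above, assuming 'a' and 'b'
+are existing PyArrayObject* operands::
+
+  /* Smallest common type of the two operands, per the promotion rules. */
+  PyArray_Descr *common = PyArray_PromoteTypes(PyArray_DESCR(a),
+                                               PyArray_DESCR(b));
+  if (common == NULL) {
+      return NULL;
+  }
+  /* ... allocate an output array using 'common' ... */
+  Py_DECREF(common);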
+ +:: + + npy_bool + PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr + *to, NPY_CASTING casting) + +Returns true if data of type 'from' may be cast to data of type +'to' according to the rule 'casting'. + +:: + + PyArrayObject * + PyArray_EinsteinSum(char *subscripts, npy_intp nop, PyArrayObject + **op_in, PyArray_Descr *dtype, NPY_ORDER + order, NPY_CASTING casting, PyArrayObject *out) + +This function provides summation of array elements according to +the Einstein summation convention. For example: +- trace(a) -> einsum("ii", a) +- transpose(a) -> einsum("ji", a) +- multiply(a,b) -> einsum(",", a, b) +- inner(a,b) -> einsum("i,i", a, b) +- outer(a,b) -> einsum("i,j", a, b) +- matvec(a,b) -> einsum("ij,j", a, b) +- matmat(a,b) -> einsum("ij,jk", a, b) + +subscripts: The string of subscripts for einstein summation. +nop: The number of operands +op_in: The array of operands +dtype: Either NULL, or the data type to force the calculation as. +order: The order for the calculation/the output axes. +casting: What kind of casts should be permitted. +out: Either NULL, or an array into which the output should be placed. + +By default, the labels get placed in alphabetical order +at the end of the output. So, if c = einsum("i,j", a, b) +then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b) +then c[i,j] = a[j]*b[i]. + +Alternatively, you can control the output order or prevent +an axis from being summed/force an axis to be summed by providing +indices for the output. This allows us to turn 'trace' into +'diag', for example. +- diag(a) -> einsum("ii->i", a) +- sum(a, axis=0) -> einsum("i...->", a) + +Subscripts at the beginning and end may be specified by +putting an ellipsis "..." in the middle. For example, +the function einsum("i...i", a) takes the diagonal of +the first and last dimensions of the operand, and +einsum("ij...,jk...->ik...") takes the matrix product using +the first two indices of each operand instead of the last two. + +When there is only one operand, no axes being summed, and +no output parameter, this function returns a view +into the operand instead of making a copy. + +:: + + PyObject * + PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER + order, PyArray_Descr *dtype, int subok) + +Creates a new array with the same shape as the provided one, +with possible memory layout order and data type changes. + +prototype - The array the new one should be like. +order - NPY_CORDER - C-contiguous result. +NPY_FORTRANORDER - Fortran-contiguous result. +NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise. +NPY_KEEPORDER - Keeps the axis ordering of prototype. +dtype - If not NULL, overrides the data type of the result. +subok - If 1, use the prototype's array subtype, otherwise +always create a base-class array. + +NOTE: If dtype is not NULL, steals the dtype reference. + +:: + + int + PyArray_GetArrayParamsFromObject(PyObject *op, PyArray_Descr + *requested_dtype, npy_bool + writeable, PyArray_Descr + **out_dtype, int *out_ndim, npy_intp + *out_dims, PyArrayObject + **out_arr, PyObject *context) + +Retrieves the array parameters for viewing/converting an arbitrary +PyObject* to a NumPy array. This allows the "innate type and shape" +of Python list-of-lists to be discovered without +actually converting to an array. + +In some cases, such as structured arrays and the __array__ interface, +a data type needs to be used to make sense of the object. When +this is needed, provide a Descr for 'requested_dtype', otherwise +provide NULL. This reference is not stolen. 
+Also, if the requested
+dtype doesn't modify the interpretation of the input, out_dtype will
+still get the "innate" dtype of the object, not the dtype passed
+in 'requested_dtype'.
+
+If writing to the value in 'op' is desired, set the boolean
+'writeable' to 1. This raises an error when 'op' is a scalar, list
+of lists, or other non-writeable 'op'.
+
+Result: on success (a return value of 0), either out_arr is filled
+with a non-NULL PyArrayObject and the rest of the parameters are
+untouched, or out_arr is filled with NULL and the rest of the
+parameters are filled.
+
+Typical usage::
+
+  PyArrayObject *arr = NULL;
+  PyArray_Descr *dtype = NULL;
+  int ndim = 0;
+  npy_intp dims[NPY_MAXDIMS];
+
+  if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype,
+                                       &ndim, dims, &arr, NULL) < 0) {
+      return NULL;
+  }
+  if (arr == NULL) {
+      ... validate/change dtype, validate flags, ndim, etc ...
+      // Could make custom strides here too
+      arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim,
+                                 dims, NULL,
+                                 is_f_order ? NPY_ARRAY_F_CONTIGUOUS : 0,
+                                 NULL);
+      if (arr == NULL) {
+          return NULL;
+      }
+      if (PyArray_CopyObject(arr, op) < 0) {
+          Py_DECREF(arr);
+          return NULL;
+      }
+  }
+  else {
+      ... in this case the other parameters weren't filled, just
+      validate and possibly copy arr itself ...
+  }
+  ... use arr ...
+
+::
+
+  int
+  PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE
+                                  *modes, int n)
+
+Convert an object to an array of n NPY_CLIPMODE values.
+This is intended to be used in functions where a different mode
+could be applied to each axis, like in ravel_multi_index.
+
+::
+
+  PyObject *
+  PyArray_MatrixProduct2(PyObject *op1, PyObject
+                         *op2, PyArrayObject *out)
+
+Numeric.matrixproduct(a,v,out)
+
+Just like the inner product, but performs the required swapaxes on the
+fly.
+
+::
+
+  npy_bool
+  NpyIter_IsFirstVisit(NpyIter *iter, int iop)
+
+Checks whether the elements of the specified reduction operand that
+the iterator currently points at are being seen for the first time.
+The function returns a reasonable answer for reduction operands and
+when buffering is disabled. The answer may be incorrect for buffered
+non-reduction operands.
+
+This function is intended to be used in EXTERNAL_LOOP mode only,
+and will produce some wrong answers when that mode is not enabled.
+
+If this function returns true, the caller should also
+check the inner loop stride of the operand, because if
+that stride is 0, then only the first element of the innermost
+external loop is being visited for the first time.
+
+WARNING: For performance reasons, 'iop' is not bounds-checked,
+it is not confirmed that 'iop' is actually a reduction
+operand, and it is not confirmed that EXTERNAL_LOOP
+mode is enabled. These checks are the responsibility of
+the caller, and should be done outside of any inner loops.
+
+::
+
+  int
+  PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)
+
+Sets the 'base' attribute of the array. This steals a reference
+to 'obj'.
+
+Returns 0 on success, -1 on failure.
+
+::
+
+  void
+  PyArray_CreateSortedStridePerm(int ndim, npy_intp
+                                 *strides, npy_stride_sort_item
+                                 *out_strideperm)
+
+This function populates the first ndim elements of out_strideperm
+with the strides sorted in descending order by their absolute values.
+For example, the stride array (4, -2, 12) becomes
+[(2, 12), (0, 4), (1, -2)].
+
+::
+
+  void
+  PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags)
+
+Removes the axes flagged as True from the array,
+modifying it in place.
+If an axis flagged for removal
+has a shape entry bigger than one, this effectively selects
+index zero for that axis.
+
+WARNING: If an axis flagged for removal has a shape equal to zero,
+the array will point to invalid memory. The caller must
+validate this!
+If an axis flagged for removal has a shape larger than one,
+the aligned flag (and, in the future, the contiguous flags)
+may need an explicit update.
+(Check also NPY_RELAXED_STRIDES_CHECKING.)
+
+For example, this can be used to remove the reduction axes
+from a reduction result once its computation is complete.
+
+::
+
+  void
+  PyArray_DebugPrint(PyArrayObject *obj)
+
+Prints the raw data of the ndarray in a form useful for debugging
+low-level C issues.
+
+::
+
+  int
+  PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)
+
+This function does nothing if obj is writeable, and raises an exception
+(and returns -1) if obj is not writeable. It may also do other
+house-keeping, such as issuing warnings on arrays which are transitioning
+to become views. Always call this function at some point before writing to
+an array.
+
+'name' is a name for the array, used to give better error
+messages. Something like "assignment destination", "output array", or even
+just "array".
+
+::
+
+  int
+  PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
+
+Precondition: 'arr' is a copy of 'base' (though possibly with different
+strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the
+->base pointer on 'arr', so that when 'arr' is destructed, it will copy any
+changes back to 'base'.
+
+Steals a reference to 'base'.
+
+Returns 0 on success, -1 on failure.
+
+::
+
+  void *
+  PyDataMem_NEW(size_t size)
+
+Allocates memory for array data.
+
+::
+
+  void
+  PyDataMem_FREE(void *ptr)
+
+Free memory for array data.
+
+::
+
+  void *
+  PyDataMem_RENEW(void *ptr, size_t size)
+
+Reallocate/resize memory for array data.
+
+::
+
+  PyDataMem_EventHookFunc *
+  PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void
+                         *user_data, void **old_data)
+
+Sets the allocation event hook for numpy array data.
+
+Takes a PyDataMem_EventHookFunc *, which has the signature::
+
+  void hook(void *old, void *new, size_t size, void *user_data)
+
+Also takes a void *user_data, and void **old_data.
+
+Returns a pointer to the previous hook or NULL. If old_data is
+non-NULL, the previous user_data pointer will be copied to it.
+
+If not NULL, the hook will be called at the end of each
+PyDataMem_NEW/FREE/RENEW::
+
+  result = PyDataMem_NEW(size)        -> (*hook)(NULL, result, size, user_data)
+  PyDataMem_FREE(ptr)                 -> (*hook)(ptr, NULL, 0, user_data)
+  result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data)
+
+When the hook is called, the GIL will be held by the calling
+thread. The hook should be written to be reentrant if it performs
+operations that might cause new allocation events (such as the
+creation/destruction of numpy objects, or creating/destroying Python
+objects which might cause a gc).
+
+::
+
+  void
+  PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject
+                          **ret, int getmap)
+
+::
+
+  PyObject *
+  PyArray_MapIterArray(PyArrayObject *a, PyObject *index)
+
+Use advanced indexing to iterate an array. Please note
+that most of this public API is currently not guaranteed
+to stay the same between versions. If you plan on using
+it, please consider adding more utility functions here
+to accommodate new features.
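+
+Editorial sketch of installing the event hook described above (the
+hook name 'trace_hook' is hypothetical)::
+
+  static void
+  trace_hook(void *old_ptr, void *new_ptr, size_t size, void *user_data)
+  {
+      /* old_ptr == NULL: NEW; new_ptr == NULL: FREE; both set: RENEW */
+  }
+
+  /* e.g. during module initialization: */
+  void *prev_user_data = NULL;
+  PyDataMem_EventHookFunc *prev_hook =
+      PyDataMem_SetEventHook(trace_hook, NULL, &prev_user_data);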
+ +:: + + void + PyArray_MapIterNext(PyArrayMapIterObject *mit) + +This function needs to update the state of the map iterator +and point mit->dataptr to the memory-location of the next object + +Note that this function never handles an extra operand but provides +compatibility for an old (exposed) API. + +:: + + int + PyArray_Partition(PyArrayObject *op, PyArrayObject *ktharray, int + axis, NPY_SELECTKIND which) + +Partition an array in-place + +:: + + PyObject * + PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int + axis, NPY_SELECTKIND which) + +ArgPartition an array + +:: + + int + PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind) + +Convert object to select kind + +:: + + void * + PyDataMem_NEW_ZEROED(size_t size, size_t elsize) + +Allocates zeroed memory for array data. + diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarrayobject.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarrayobject.h new file mode 100644 index 0000000000000..b8c7c3a2d38e6 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarrayobject.h @@ -0,0 +1,237 @@ +/* + * DON'T INCLUDE THIS DIRECTLY. + */ + +#ifndef NPY_NDARRAYOBJECT_H +#define NPY_NDARRAYOBJECT_H +#ifdef __cplusplus +#define CONFUSE_EMACS { +#define CONFUSE_EMACS2 } +extern "C" CONFUSE_EMACS +#undef CONFUSE_EMACS +#undef CONFUSE_EMACS2 +/* ... otherwise a semi-smart identer (like emacs) tries to indent + everything when you're typing */ +#endif + +#include "ndarraytypes.h" + +/* Includes the "function" C-API -- these are all stored in a + list of pointers --- one for each file + The two lists are concatenated into one in multiarray. + + They are available as import_array() +*/ + +#include "__multiarray_api.h" + + +/* C-API that requries previous API to be defined */ + +#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type) + +#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) +#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) + +#define PyArray_HasArrayInterfaceType(op, type, context, out) \ + ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromArrayAttr(op, type, context)) != \ + Py_NotImplemented)) + +#define PyArray_HasArrayInterface(op, out) \ + PyArray_HasArrayInterfaceType(op, NULL, NULL, out) + +#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ + (PyArray_NDIM((PyArrayObject *)op) == 0)) + +#define PyArray_IsScalar(obj, cls) \ + (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) + +#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ + PyArray_IsZeroDim(m)) + +#define PyArray_IsPythonNumber(obj) \ + (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \ + PyLong_Check(obj) || PyBool_Check(obj)) + +#define PyArray_IsPythonScalar(obj) \ + (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \ + PyUnicode_Check(obj)) + +#define PyArray_IsAnyScalar(obj) \ + (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) + +#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ + PyArray_CheckScalar(obj)) + +#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \ + || PyLong_Check(obj) \ + || PyArray_IsScalar((obj), Integer)) + + +#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? 
\ + Py_INCREF(m), (m) : \ + (PyArrayObject *)(PyArray_Copy(m))) + +#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ + PyArray_CompareLists(PyArray_DIMS(a1), \ + PyArray_DIMS(a2), \ + PyArray_NDIM(a1))) + +#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) +#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) +#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) + +#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ + NULL) + +#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ + PyArray_DescrFromType(type), 0, 0, 0, NULL); + +#define PyArray_FROM_OTF(m, type, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) + +#define PyArray_FROMANY(m, type, min, max, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) + +#define PyArray_ZEROS(m, dims, type, is_f_order) \ + PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_EMPTY(m, dims, type, is_f_order) \ + PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ + PyArray_NBYTES(obj)) + +#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) +#define NPY_REFCOUNT PyArray_REFCOUNT +#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) + +#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT, NULL) + +#define PyArray_EquivArrTypes(a1, a2) \ + PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) + +#define PyArray_EquivByteorders(b1, b2) \ + (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) + +#define PyArray_SimpleNew(nd, dims, typenum) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) + +#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ + data, 0, NPY_ARRAY_CARRAY, NULL) + +#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ + PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ + NULL, NULL, 0, NULL) + +#define PyArray_ToScalar(data, arr) \ + PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) + + +/* These might be faster without the dereferencing of obj + going on inside -- of course an optimizing compiler should + inline the constants inside a for loop making it a moot point +*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0])) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1])) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2])) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2] + \ + (l)*PyArray_STRIDES(obj)[3])) + +static NPY_INLINE void +PyArray_XDECREF_ERR(PyArrayObject *arr) +{ + if (arr != NULL) { + if (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY) { + PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr); + PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE); + PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); + } + 
Py_DECREF(arr); + } +} + +#define PyArray_DESCR_REPLACE(descr) do { \ + PyArray_Descr *_new_; \ + _new_ = PyArray_DescrNew(descr); \ + Py_XDECREF(descr); \ + descr = _new_; \ + } while(0) + +/* Copy should always return contiguous array */ +#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) + +#define PyArray_FromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_BEHAVED | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_ENSURECOPY | \ + NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_Cast(mp, type_num) \ + PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) + +#define PyArray_Take(ap, items, axis) \ + PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) + +#define PyArray_Put(ap, items, values) \ + PyArray_PutTo(ap, items, values, NPY_RAISE) + +/* Compatibility with old Numeric stuff -- don't use in new code */ + +#define PyArray_FromDimsAndData(nd, d, type, data) \ + PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ + data) + + +/* + Check to see if this key in the dictionary is the "title" + entry of the tuple (i.e. a duplicate dictionary entry in the fields + dict. +*/ + +#define NPY_TITLE_KEY(key, value) ((PyTuple_GET_SIZE((value))==3) && \ + (PyTuple_GET_ITEM((value), 2) == (key))) + + +#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) +#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) + + +#ifdef __cplusplus +} +#endif + + +#endif /* NPY_NDARRAYOBJECT_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarraytypes.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarraytypes.h new file mode 100644 index 0000000000000..21ff8cd1ae894 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarraytypes.h @@ -0,0 +1,1820 @@ +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H + +#include "npy_common.h" +#include "npy_endian.h" +#include "npy_cpu.h" +#include "utils.h" + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. 
This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. + */ +#define NPY_FEATURE_VERSION NPY_API_VERSION + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. + */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. 
+ */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. 
+ */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. + */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. 
+ * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. */ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). + */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). 
+ */
+/* This struct will be moved to a private header in a future release */
+typedef struct tagPyArrayObject_fields {
+    PyObject_HEAD
+    /* Pointer to the raw data buffer */
+    char *data;
+    /* The number of dimensions, also called 'ndim' */
+    int nd;
+    /* The size in each dimension, also called 'shape' */
+    npy_intp *dimensions;
+    /*
+     * Number of bytes to jump to get to the
+     * next element in each dimension
+     */
+    npy_intp *strides;
+    /*
+     * This object is decref'd upon
+     * deletion of array. Except in the
+     * case of UPDATEIFCOPY which has
+     * special handling.
+     *
+     * For views it points to the original
+     * array, collapsed so no chains of
+     * views occur.
+     *
+     * For creation from buffer object it
+     * points to an object that should be
+     * decref'd on deletion
+     *
+     * For UPDATEIFCOPY flag this is an
+     * array to-be-updated upon deletion
+     * of this one
+     */
+    PyObject *base;
+    /* Pointer to type structure */
+    PyArray_Descr *descr;
+    /* Flags describing array -- see below */
+    int flags;
+    /* For weak references */
+    PyObject *weakreflist;
+} PyArrayObject_fields;
+
+/*
+ * To hide the implementation details, we only expose
+ * the Python struct HEAD.
+ */
+#if !defined(NPY_NO_DEPRECATED_API) || \
+    (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+/*
+ * Can't put this in npy_deprecated_api.h like the others.
+ * PyArrayObject field access is deprecated as of NumPy 1.7.
+ */
+typedef PyArrayObject_fields PyArrayObject;
+#else
+typedef struct tagPyArrayObject {
+    PyObject_HEAD
+} PyArrayObject;
+#endif
+
+#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields))
+
+/* Array Flags Object */
+typedef struct PyArrayFlagsObject {
+    PyObject_HEAD
+    PyObject *arr;
+    int flags;
+} PyArrayFlagsObject;
+
+/* Mirrors buffer object to ptr */
+
+typedef struct {
+    PyObject_HEAD
+    PyObject *base;
+    void *ptr;
+    npy_intp len;
+    int flags;
+} PyArray_Chunk;
+
+typedef struct {
+    NPY_DATETIMEUNIT base;
+    int num;
+} PyArray_DatetimeMetaData;
+
+typedef struct {
+    NpyAuxData base;
+    PyArray_DatetimeMetaData meta;
+} PyArray_DatetimeDTypeMetaData;
+
+/*
+ * This structure contains an exploded view of a date-time value.
+ * NaT is represented by year == NPY_DATETIME_NAT.
+ */
+typedef struct {
+    npy_int64 year;
+    npy_int32 month, day, hour, min, sec, us, ps, as;
+} npy_datetimestruct;
+
+/* This is not used internally. */
+typedef struct {
+    npy_int64 day;
+    npy_int32 sec, us, ps, as;
+} npy_timedeltastruct;
+
+typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
+
+/*
+ * Means c-style contiguous (last index varies the fastest). The data
+ * elements are right after each other.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_C_CONTIGUOUS 0x0001
+
+/*
+ * Set if array is a contiguous Fortran array: the first index varies
+ * the fastest in memory (strides array is reverse of C-contiguous
+ * array)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_F_CONTIGUOUS 0x0002
+
+/*
+ * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a
+ * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with
+ * more than one dimension can be C_CONTIGUOUS and F_CONTIGUOUS
+ * at the same time if they have either zero or one element.
+ * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional
+ * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements
+ * and the array is contiguous if ndarray.squeeze() is contiguous.
+ * I.e. dimensions for which `ndarray.shape[dimension] == 1` are
+ * ignored.
+ */
+
+/*
+ * If set, the array owns the data: it will be free'd when the array
+ * is deleted.
+ *
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_OWNDATA 0x0004
+
+/*
+ * An array never has the next four set; they're only used as parameter
+ * flags to the various FromAny functions
+ *
+ * This flag may be requested in constructor functions.
+ */
+
+/* Cause a cast to occur regardless of whether or not it is safe. */
+#define NPY_ARRAY_FORCECAST 0x0010
+
+/*
+ * Always copy the array. Returned arrays are always CONTIGUOUS,
+ * ALIGNED, and WRITEABLE.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSURECOPY 0x0020
+
+/*
+ * Make sure the returned array is a base-class ndarray
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSUREARRAY 0x0040
+
+/*
+ * Make sure that the strides are in units of the element size. Needed
+ * for some operations with record-arrays.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ELEMENTSTRIDES 0x0080
+
+/*
+ * Array data is aligned on the appropriate memory address for the type
+ * stored according to how the compiler would align things (e.g., an
+ * array of integers (4 bytes each) starts on a memory address that's
+ * a multiple of 4)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_ALIGNED 0x0100
+
+/*
+ * Array data has the native endianness
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_NOTSWAPPED 0x0200
+
+/*
+ * Array data is writeable
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_WRITEABLE 0x0400
+
+/*
+ * If this flag is set, then base contains a pointer to an array of
+ * the same size that should be updated with the current contents of
+ * this array when this array is deallocated
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_UPDATEIFCOPY 0x1000
+
+/*
+ * NOTE: there are also internal flags defined in multiarray/arrayobject.h,
+ * which start at bit 31 and work down.
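+ *
+ * (A hedged usage sketch, not part of this header: the public flags
+ * above are typically requested through the FromAny family, e.g.
+ *
+ *     PyArrayObject *op = (PyArrayObject *)PyArray_FROM_OF(
+ *         obj, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_ALIGNED |
+ *              NPY_ARRAY_WRITEABLE);
+ *
+ * which returns a new reference, or NULL on error.)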
+ */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. 
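+ *
+ * One very common pattern with the thread macros defined below is the
+ * following sketch (illustrative only):
+ *
+ *     NPY_BEGIN_THREADS_DEF;
+ *     NPY_BEGIN_THREADS;
+ *     ... long-running C loop that makes no Python C-API calls ...
+ *     NPY_END_THREADS;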
+ */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +/* the variable is used in some places, so always define it */ +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do { if (_save) \ + { PyEval_RestoreThread(_save); _save = NULL;} } while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 + +/*** Per-operand flags that may be 
passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define _PyArray_ITER_NEXT3(it) do { \ + if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ + (it)->coordinates[2]++; \ + (it)->dataptr += (it)->strides[2]; \ + } \ + else { \ + (it)->coordinates[2] = 0; \ + (it)->dataptr -= 
(it)->backstrides[2]; \
+        if ((it)->coordinates[1] < (it)->dims_m1[1]) { \
+            (it)->coordinates[1]++; \
+            (it)->dataptr += (it)->strides[1]; \
+        } \
+        else { \
+            (it)->coordinates[1] = 0; \
+            (it)->coordinates[0]++; \
+            (it)->dataptr += (it)->strides[0] - \
+                             (it)->backstrides[1]; \
+        } \
+    } \
+} while (0)
+
+#define PyArray_ITER_NEXT(it) do { \
+    _PyAIT(it)->index++; \
+    if (_PyAIT(it)->nd_m1 == 0) { \
+        _PyArray_ITER_NEXT1(_PyAIT(it)); \
+    } \
+    else if (_PyAIT(it)->contiguous) \
+        _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \
+    else if (_PyAIT(it)->nd_m1 == 1) { \
+        _PyArray_ITER_NEXT2(_PyAIT(it)); \
+    } \
+    else { \
+        int __npy_i; \
+        for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \
+            if (_PyAIT(it)->coordinates[__npy_i] < \
+                _PyAIT(it)->dims_m1[__npy_i]) { \
+                _PyAIT(it)->coordinates[__npy_i]++; \
+                _PyAIT(it)->dataptr += \
+                    _PyAIT(it)->strides[__npy_i]; \
+                break; \
+            } \
+            else { \
+                _PyAIT(it)->coordinates[__npy_i] = 0; \
+                _PyAIT(it)->dataptr -= \
+                    _PyAIT(it)->backstrides[__npy_i]; \
+            } \
+        } \
+    } \
+} while (0)
+
+#define PyArray_ITER_GOTO(it, destination) do { \
+    int __npy_i; \
+    _PyAIT(it)->index = 0; \
+    _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+    for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \
+        if (destination[__npy_i] < 0) { \
+            destination[__npy_i] += \
+                _PyAIT(it)->dims_m1[__npy_i]+1; \
+        } \
+        _PyAIT(it)->dataptr += destination[__npy_i] * \
+            _PyAIT(it)->strides[__npy_i]; \
+        _PyAIT(it)->coordinates[__npy_i] = \
+            destination[__npy_i]; \
+        _PyAIT(it)->index += destination[__npy_i] * \
+            ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \
+              _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \
+    } \
+} while (0)
+
+#define PyArray_ITER_GOTO1D(it, ind) do { \
+    int __npy_i; \
+    npy_intp __npy_ind = (npy_intp) (ind); \
+    if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \
+    _PyAIT(it)->index = __npy_ind; \
+    if (_PyAIT(it)->nd_m1 == 0) { \
+        _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+            __npy_ind * _PyAIT(it)->strides[0]; \
+    } \
+    else if (_PyAIT(it)->contiguous) \
+        _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+            __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \
+    else { \
+        _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+        for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \
+             __npy_i++) { \
+            _PyAIT(it)->dataptr += \
+                (__npy_ind / _PyAIT(it)->factors[__npy_i]) \
+                * _PyAIT(it)->strides[__npy_i]; \
+            __npy_ind %= _PyAIT(it)->factors[__npy_i]; \
+        } \
+    } \
+} while (0)
+
+#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr))
+
+#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size)
+
+
+/*
+ * Any object passed to PyArray_Broadcast must be binary compatible
+ * with this structure.
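+ *
+ * A broadcast loop over two arrays might look like the following
+ * sketch (illustrative only; the macros used are defined below):
+ *
+ *     PyArrayMultiIterObject *mit =
+ *         (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
+ *     if (mit == NULL) return NULL;
+ *     while (PyArray_MultiIter_NOTDONE(mit)) {
+ *         void *p0 = PyArray_MultiIter_DATA(mit, 0);
+ *         void *p1 = PyArray_MultiIter_DATA(mit, 1);
+ *         ... combine the elements at p0 and p1 ...
+ *         PyArray_MultiIter_NEXT(mit);
+ *     }
+ *     Py_DECREF(mit);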
+ */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + + +/* + * Store the information needed for fancy-indexing over an array. The + * fields are slightly unordered to keep consec, dataptr and subspace + * where they were originally. + */ +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; /* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + NpyIter *outer; /* index objects + iterator */ + void *unused[NPY_MAXDIMS - 2]; + PyArrayObject *array; + /* Flat iterator for the indexed array. For compatibility solely. */ + PyArrayIterObject *ait; + + /* + * Subspace array. For binary compatibility (was an iterator, + * but only the check for NULL should be used). + */ + PyArrayObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + npy_intp fancy_strides[NPY_MAXDIMS]; + + /* pointer when all fancy indices are 0 */ + char *baseoffset; + + /* + * after binding consec denotes at which axis the fancy axes + * are inserted. + */ + int consec; + char *dataptr; + + int nd_fancy; + npy_intp fancy_dims[NPY_MAXDIMS]; + + /* Whether the iterator (any of the iterators) requires API */ + int needs_api; + + /* + * Extra op information. + */ + PyArrayObject *extra_op; + PyArray_Descr *extra_op_dtype; /* desired dtype */ + npy_uint32 *extra_op_flags; /* Iterator flags */ + + NpyIter *extra_op_iter; + NpyIter_IterNextFunc *extra_op_next; + char **extra_op_ptrs; + + /* + * Information about the iteration state. 
+ */
+    NpyIter_IterNextFunc *outer_next;
+    char **outer_ptrs;
+    npy_intp *outer_strides;
+
+    /*
+     * Information about the subspace iterator.
+     */
+    NpyIter *subspace_iter;
+    NpyIter_IterNextFunc *subspace_next;
+    char **subspace_ptrs;
+    npy_intp *subspace_strides;
+
+    /* Count for the external loop (whichever it is) for API iteration */
+    npy_intp iter_count;
+
+} PyArrayMapIterObject;
+
+enum {
+    NPY_NEIGHBORHOOD_ITER_ZERO_PADDING,
+    NPY_NEIGHBORHOOD_ITER_ONE_PADDING,
+    NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING,
+    NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING,
+    NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING
+};
+
+typedef struct {
+    PyObject_HEAD
+
+    /*
+     * PyArrayIterObject part: keep this in this exact order
+     */
+    int nd_m1;            /* number of dimensions - 1 */
+    npy_intp index, size;
+    npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */
+    npy_intp dims_m1[NPY_MAXDIMS];    /* ao->dimensions - 1 */
+    npy_intp strides[NPY_MAXDIMS];    /* ao->strides or fake */
+    npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */
+    npy_intp factors[NPY_MAXDIMS];    /* shape factors */
+    PyArrayObject *ao;
+    char *dataptr;                    /* pointer to current item*/
+    npy_bool contiguous;
+
+    npy_intp bounds[NPY_MAXDIMS][2];
+    npy_intp limits[NPY_MAXDIMS][2];
+    npy_intp limits_sizes[NPY_MAXDIMS];
+    npy_iter_get_dataptr_t translate;
+
+    /*
+     * New members
+     */
+    npy_intp nd;
+
+    /* Dimensions is the dimension of the array */
+    npy_intp dimensions[NPY_MAXDIMS];
+
+    /*
+     * Neighborhood points coordinates are computed relative to the
+     * point pointed to by _internal_iter
+     */
+    PyArrayIterObject* _internal_iter;
+    /*
+     * To keep a reference to the representation of the constant value
+     * for constant padding
+     */
+    char* constant;
+
+    int mode;
+} PyArrayNeighborhoodIterObject;
+
+/*
+ * Neighborhood iterator API
+ */
+
+/* General: those work for any mode */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter);
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter);
+#if 0
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
+#endif
+
+/*
+ * Include inline implementations - functions defined there are not
+ * considered public API
+ */
+#define _NPY_INCLUDE_NEIGHBORHOOD_IMP
+#include "_neighborhood_iterator_imp.h"
+#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP
+
+/* The default array type */
+#define NPY_DEFAULT_TYPE NPY_DOUBLE
+
+/*
+ * All sorts of useful ways to look into a PyArrayObject. It is recommended
+ * to use PyArrayObject * objects instead of always casting from PyObject *,
+ * for improved type checking.
+ *
+ * In many cases here the macro versions of the accessors are deprecated,
+ * but can't be immediately changed to inline functions because the
+ * preexisting macros accept PyObject * and do automatic casts. Inline
+ * functions accepting PyArrayObject * provide for some compile-time
+ * checking of correctness when working with these objects in C.
+ */
+
+#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \
+                             PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \
+                             PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS))
+
+#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \
+                             (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)))
+
+#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ?
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static NPY_INLINE int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static NPY_INLINE void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static NPY_INLINE npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static NPY_INLINE npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static NPY_INLINE npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static NPY_INLINE npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static NPY_INLINE int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static NPY_INLINE int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & flags) == flags; +} + +static NPY_INLINE PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +static NPY_INLINE int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem( + v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. 
*/ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static NPY_INLINE PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. 
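+ *
+ * Sketch of a typical use (illustrative only): dropping write access
+ * on an array about to be handed out as a read-only view:
+ *
+ *     PyArray_CLEARFLAGS(view, NPY_ARRAY_WRITEABLE);
+ *
+ * Symmetrically, PyArray_ENABLEFLAGS(arr, NPY_ARRAY_OWNDATA) is the
+ * usual way to make an array created around a malloc'd buffer free it
+ * on deallocation.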
+ */ +static NPY_INLINE void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define 
PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj))
+#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj))
+#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj))
+#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj))
+#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj))
+#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj))
+#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj))
+
+    /*
+     * FIXME: This should check for a flag on the data-type that
+     * states whether or not it is variable length, because the
+     * ISFLEXIBLE check is hard-coded to the built-in data-types.
+     */
+#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+
+#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj))
+
+
+#define NPY_LITTLE '<'
+#define NPY_BIG '>'
+#define NPY_NATIVE '='
+#define NPY_SWAP 's'
+#define NPY_IGNORE '|'
+
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+#define NPY_NATBYTE NPY_BIG
+#define NPY_OPPBYTE NPY_LITTLE
+#else
+#define NPY_NATBYTE NPY_LITTLE
+#define NPY_OPPBYTE NPY_BIG
+#endif
+
+#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE)
+#define PyArray_IsNativeByteOrder PyArray_ISNBO
+#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder)
+#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \
+                                    PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY)
+#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO)
+#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY)
+#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO)
+#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED)
+#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED)
+
+
+#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder)
+#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d))
+
+/************************************************************
+ * A struct used by PyArray_CreateSortedStridePerm, new in 1.7.
+ ************************************************************/
+
+typedef struct {
+    npy_intp perm, stride;
+} npy_stride_sort_item;
+
+/************************************************************
+ * This is the form of the struct pointed to by the PyCObject
+ * attribute of an array's __array_struct__. See
+ * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
+ * documentation.
+ ************************************************************/
+typedef struct {
+    int two;              /*
+                           * contains the integer 2 as a sanity
+                           * check
+                           */
+
+    int nd;               /* number of dimensions */
+
+    char typekind;        /*
+                           * kind in array --- character code of
+                           * typestr
+                           */
+
+    int itemsize;         /* size of each element */
+
+    int flags;            /*
+                           * how the data should be interpreted. Valid
+                           * flags are CONTIGUOUS (1), F_CONTIGUOUS (2),
+                           * ALIGNED (0x100), NOTSWAPPED (0x200), and
+                           * WRITEABLE (0x400).
ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + +/* + * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. + * See the documentation for PyDataMem_SetEventHook. + */ +typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, + void *user_data); + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." +#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + */ +#undef NPY_DEPRECATED_INCLUDES + +#endif /* NPY_ARRAYTYPES_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/noprefix.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/noprefix.h new file mode 100644 index 0000000000000..8306170876ba4 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/noprefix.h @@ -0,0 +1,209 @@ +#ifndef NPY_NOPREFIX_H +#define NPY_NOPREFIX_H + +/* + * You can directly include noprefix.h as a backward + * compatibility measure + */ +#ifndef NPY_NO_PREFIX +#include "ndarrayobject.h" +#include "npy_interrupt.h" +#endif + +#define SIGSETJMP NPY_SIGSETJMP +#define SIGLONGJMP NPY_SIGLONGJMP +#define SIGJMP_BUF NPY_SIGJMP_BUF + +#define MAX_DIMS NPY_MAXDIMS + +#define longlong npy_longlong +#define ulonglong npy_ulonglong +#define Bool npy_bool +#define longdouble npy_longdouble +#define byte npy_byte + +#ifndef _BSD_SOURCE +#define ushort npy_ushort +#define uint npy_uint +#define ulong npy_ulong +#endif + +#define ubyte npy_ubyte +#define ushort npy_ushort +#define uint npy_uint +#define ulong npy_ulong +#define cfloat npy_cfloat +#define cdouble npy_cdouble +#define clongdouble npy_clongdouble +#define Int8 npy_int8 +#define UInt8 npy_uint8 +#define Int16 npy_int16 +#define UInt16 npy_uint16 +#define Int32 npy_int32 +#define UInt32 npy_uint32 +#define Int64 npy_int64 +#define UInt64 npy_uint64 +#define Int128 npy_int128 +#define UInt128 npy_uint128 +#define Int256 npy_int256 +#define UInt256 npy_uint256 +#define Float16 npy_float16 +#define Complex32 npy_complex32 +#define Float32 npy_float32 +#define Complex64 npy_complex64 +#define Float64 npy_float64 +#define Complex128 npy_complex128 +#define Float80 npy_float80 +#define Complex160 npy_complex160 +#define Float96 npy_float96 +#define Complex192 npy_complex192 +#define Float128 npy_float128 +#define Complex256 npy_complex256 +#define intp npy_intp +#define uintp npy_uintp +#define datetime npy_datetime +#define timedelta npy_timedelta + +#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG +#define 
SIZEOF_INTP NPY_SIZEOF_INTP +#define SIZEOF_UINTP NPY_SIZEOF_UINTP +#define SIZEOF_HALF NPY_SIZEOF_HALF +#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE +#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME +#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA + +#define LONGLONG_FMT NPY_LONGLONG_FMT +#define ULONGLONG_FMT NPY_ULONGLONG_FMT +#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX +#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX + +#define MAX_INT8 127 +#define MIN_INT8 -128 +#define MAX_UINT8 255 +#define MAX_INT16 32767 +#define MIN_INT16 -32768 +#define MAX_UINT16 65535 +#define MAX_INT32 2147483647 +#define MIN_INT32 (-MAX_INT32 - 1) +#define MAX_UINT32 4294967295U +#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) +#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) +#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) +#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) +#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) +#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) +#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) + +#define MAX_BYTE NPY_MAX_BYTE +#define MIN_BYTE NPY_MIN_BYTE +#define MAX_UBYTE NPY_MAX_UBYTE +#define MAX_SHORT NPY_MAX_SHORT +#define MIN_SHORT NPY_MIN_SHORT +#define MAX_USHORT NPY_MAX_USHORT +#define MAX_INT NPY_MAX_INT +#define MIN_INT NPY_MIN_INT +#define MAX_UINT NPY_MAX_UINT +#define MAX_LONG NPY_MAX_LONG +#define MIN_LONG NPY_MIN_LONG +#define MAX_ULONG NPY_MAX_ULONG +#define MAX_LONGLONG NPY_MAX_LONGLONG +#define MIN_LONGLONG NPY_MIN_LONGLONG +#define MAX_ULONGLONG NPY_MAX_ULONGLONG +#define MIN_DATETIME NPY_MIN_DATETIME +#define MAX_DATETIME NPY_MAX_DATETIME +#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA +#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA + +#define BITSOF_BOOL NPY_BITSOF_BOOL +#define BITSOF_CHAR NPY_BITSOF_CHAR +#define BITSOF_SHORT NPY_BITSOF_SHORT +#define BITSOF_INT NPY_BITSOF_INT +#define BITSOF_LONG NPY_BITSOF_LONG +#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG +#define BITSOF_HALF NPY_BITSOF_HALF +#define BITSOF_FLOAT NPY_BITSOF_FLOAT +#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE +#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE +#define BITSOF_DATETIME NPY_BITSOF_DATETIME +#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA + +#define _pya_malloc PyArray_malloc +#define _pya_free PyArray_free +#define _pya_realloc PyArray_realloc + +#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF +#define BEGIN_THREADS NPY_BEGIN_THREADS +#define END_THREADS NPY_END_THREADS +#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF +#define ALLOW_C_API NPY_ALLOW_C_API +#define DISABLE_C_API NPY_DISABLE_C_API + +#define PY_FAIL NPY_FAIL +#define PY_SUCCEED NPY_SUCCEED + +#ifndef TRUE +#define TRUE NPY_TRUE +#endif + +#ifndef FALSE +#define FALSE NPY_FALSE +#endif + +#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT + +#define CONTIGUOUS NPY_CONTIGUOUS +#define C_CONTIGUOUS NPY_C_CONTIGUOUS +#define FORTRAN NPY_FORTRAN +#define F_CONTIGUOUS NPY_F_CONTIGUOUS +#define OWNDATA NPY_OWNDATA +#define FORCECAST NPY_FORCECAST +#define ENSURECOPY NPY_ENSURECOPY +#define ENSUREARRAY NPY_ENSUREARRAY +#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES +#define ALIGNED NPY_ALIGNED +#define NOTSWAPPED NPY_NOTSWAPPED +#define WRITEABLE NPY_WRITEABLE +#define UPDATEIFCOPY NPY_UPDATEIFCOPY +#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR +#define 
BEHAVED NPY_BEHAVED +#define BEHAVED_NS NPY_BEHAVED_NS +#define CARRAY NPY_CARRAY +#define CARRAY_RO NPY_CARRAY_RO +#define FARRAY NPY_FARRAY +#define FARRAY_RO NPY_FARRAY_RO +#define DEFAULT NPY_DEFAULT +#define IN_ARRAY NPY_IN_ARRAY +#define OUT_ARRAY NPY_OUT_ARRAY +#define INOUT_ARRAY NPY_INOUT_ARRAY +#define IN_FARRAY NPY_IN_FARRAY +#define OUT_FARRAY NPY_OUT_FARRAY +#define INOUT_FARRAY NPY_INOUT_FARRAY +#define UPDATE_ALL NPY_UPDATE_ALL + +#define OWN_DATA NPY_OWNDATA +#define BEHAVED_FLAGS NPY_BEHAVED +#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS +#define CARRAY_FLAGS_RO NPY_CARRAY_RO +#define CARRAY_FLAGS NPY_CARRAY +#define FARRAY_FLAGS NPY_FARRAY +#define FARRAY_FLAGS_RO NPY_FARRAY_RO +#define DEFAULT_FLAGS NPY_DEFAULT +#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS + +#ifndef MIN +#define MIN PyArray_MIN +#endif +#ifndef MAX +#define MAX PyArray_MAX +#endif +#define MAX_INTP NPY_MAX_INTP +#define MIN_INTP NPY_MIN_INTP +#define MAX_UINTP NPY_MAX_UINTP +#define INTP_FMT NPY_INTP_FMT + +#define REFCOUNT PyArray_REFCOUNT +#define MAX_ELSIZE NPY_MAX_ELSIZE + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_1_7_deprecated_api.h new file mode 100644 index 0000000000000..4c318bc4784c2 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_1_7_deprecated_api.h @@ -0,0 +1,130 @@ +#ifndef _NPY_1_7_DEPRECATED_API_H +#define _NPY_1_7_DEPRECATED_API_H + +#ifndef NPY_DEPRECATED_INCLUDES +#error "Should never include npy_*_*_deprecated_api directly." +#endif + +#if defined(_WIN32) +#define _WARN___STR2__(x) #x +#define _WARN___STR1__(x) _WARN___STR2__(x) +#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " +#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it by " \ + "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") +#elif defined(__GNUC__) +#warning "Using deprecated NumPy API, disable it by " \ + "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" +#endif +/* TODO: How to do this warning message for other compilers? */ + +/* + * This header exists to collect all dangerous/deprecated NumPy API + * as of NumPy 1.7. + * + * This is an attempt to remove bad API, the proliferation of macros, + * and namespace pollution currently produced by the NumPy headers. + */ + +/* These array flags are deprecated as of NumPy 1.7 */ +#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS +#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS + +/* + * The consistent NPY_ARRAY_* names which don't pollute the NPY_* + * namespace were added in NumPy 1.7. + * + * These versions of the carray flags are deprecated, but + * probably should only be removed after two releases instead of one. 
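+ *
+ * (An illustrative way to find uses of the old names in client code is
+ * to compile with the deprecation guard, e.g.
+ *
+ *     #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ *
+ * before including any NumPy header; the old names below are then no
+ * longer defined and any remaining uses fail to compile.)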
+ */
+#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
+#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS
+#define NPY_OWNDATA NPY_ARRAY_OWNDATA
+#define NPY_FORCECAST NPY_ARRAY_FORCECAST
+#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY
+#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY
+#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES
+#define NPY_ALIGNED NPY_ARRAY_ALIGNED
+#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED
+#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE
+#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY
+#define NPY_BEHAVED NPY_ARRAY_BEHAVED
+#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS
+#define NPY_CARRAY NPY_ARRAY_CARRAY
+#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO
+#define NPY_FARRAY NPY_ARRAY_FARRAY
+#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO
+#define NPY_DEFAULT NPY_ARRAY_DEFAULT
+#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY
+#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY
+#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY
+#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY
+#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY
+#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY
+#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL
+
+/* This way of accessing the default type is deprecated as of NumPy 1.7 */
+#define PyArray_DEFAULT NPY_DEFAULT_TYPE
+
+/* These DATETIME bits aren't used internally */
+#if PY_VERSION_HEX >= 0x03000000
+#define PyDataType_GetDatetimeMetaData(descr) \
+    ((descr->metadata == NULL) ? NULL : \
+     ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \
+        PyDict_GetItemString( \
+            descr->metadata, NPY_METADATA_DTSTR), NULL))))
+#else
+#define PyDataType_GetDatetimeMetaData(descr) \
+    ((descr->metadata == NULL) ? NULL : \
+     ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \
+        PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR)))))
+#endif
+
+/*
+ * Deprecated as of NumPy 1.7, this kind of shortcut doesn't
+ * belong in the public API.
+ */
+#define NPY_AO PyArrayObject
+
+/*
+ * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't
+ * belong in the public API.
+ */
+#define fortran fortran_
+
+/*
+ * Deprecated as of NumPy 1.7, as it is a namespace-polluting
+ * macro.
+ */
+#define FORTRAN_IF PyArray_FORTRAN_IF
+
+/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */
+#define NPY_METADATA_DTSTR "__timeunit__"
+
+/*
+ * Deprecated as of NumPy 1.7.
+ * The reasoning:
+ *  - These are for datetime, but there's no datetime "namespace".
+ *  - They just turn NPY_STR_<x> into "<x>", which is just
+ *    making something simple be indirected.
+ */
+#define NPY_STR_Y "Y"
+#define NPY_STR_M "M"
+#define NPY_STR_W "W"
+#define NPY_STR_D "D"
+#define NPY_STR_h "h"
+#define NPY_STR_m "m"
+#define NPY_STR_s "s"
+#define NPY_STR_ms "ms"
+#define NPY_STR_us "us"
+#define NPY_STR_ns "ns"
+#define NPY_STR_ps "ps"
+#define NPY_STR_fs "fs"
+#define NPY_STR_as "as"
+
+/*
+ * The macros in old_defines.h are deprecated as of NumPy 1.7 and will be
+ * removed in the next major release.
+ */
+#include "old_defines.h"
+
+#endif
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_3kcompat.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_3kcompat.h
new file mode 100644
index 0000000000000..fec95779a1dfd
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_3kcompat.h
@@ -0,0 +1,506 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting Python 2 and Python 3 in the same code base.
+ *
+ * If you want to use this for your own projects, it's recommended to make a
+ * copy of it. Although the stuff below is unlikely to change, we don't provide
+ * strong backwards compatibility guarantees at the moment.
+ */
+
+#ifndef _NPY_3KCOMPAT_H_
+#define _NPY_3KCOMPAT_H_
+
+#include <Python.h>
+#include <stdio.h>
+
+#if PY_VERSION_HEX >= 0x03000000
+#ifndef NPY_PY3K
+#define NPY_PY3K 1
+#endif
+#endif
+
+#include "numpy/npy_common.h"
+#include "numpy/ndarrayobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PyInt -> PyLong
+ */
+
+#if defined(NPY_PY3K)
+/* Return True only if the long fits in a C long */
+static NPY_INLINE int PyInt_Check(PyObject *op) {
+    int overflow = 0;
+    if (!PyLong_Check(op)) {
+        return 0;
+    }
+    PyLong_AsLongAndOverflow(op, &overflow);
+    return (overflow == 0);
+}
+
+#define PyInt_FromLong PyLong_FromLong
+#define PyInt_AsLong PyLong_AsLong
+#define PyInt_AS_LONG PyLong_AsLong
+#define PyInt_AsSsize_t PyLong_AsSsize_t
+
+/* NOTE:
+ *
+ * Since the PyLong type is very different from the fixed-range PyInt,
+ * we don't define PyInt_Type -> PyLong_Type.
+ */
+#endif /* NPY_PY3K */
+
+/*
+ * PyString -> PyBytes
+ */
+
+#if defined(NPY_PY3K)
+
+#define PyString_Type PyBytes_Type
+#define PyString_Check PyBytes_Check
+#define PyStringObject PyBytesObject
+#define PyString_FromString PyBytes_FromString
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_AsStringAndSize PyBytes_AsStringAndSize
+#define PyString_FromFormat PyBytes_FromFormat
+#define PyString_Concat PyBytes_Concat
+#define PyString_ConcatAndDel PyBytes_ConcatAndDel
+#define PyString_AsString PyBytes_AsString
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_Size PyBytes_Size
+
+#define PyUString_Type PyUnicode_Type
+#define PyUString_Check PyUnicode_Check
+#define PyUStringObject PyUnicodeObject
+#define PyUString_FromString PyUnicode_FromString
+#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize
+#define PyUString_FromFormat PyUnicode_FromFormat
+#define PyUString_Concat PyUnicode_Concat2
+#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel
+#define PyUString_GET_SIZE PyUnicode_GET_SIZE
+#define PyUString_Size PyUnicode_Size
+#define PyUString_InternFromString PyUnicode_InternFromString
+#define PyUString_Format PyUnicode_Format
+
+#else
+
+#define PyBytes_Type PyString_Type
+#define PyBytes_Check PyString_Check
+#define PyBytesObject PyStringObject
+#define PyBytes_FromString PyString_FromString
+#define PyBytes_FromStringAndSize PyString_FromStringAndSize
+#define PyBytes_AS_STRING PyString_AS_STRING
+#define PyBytes_AsStringAndSize PyString_AsStringAndSize
+#define PyBytes_FromFormat PyString_FromFormat
+#define PyBytes_Concat PyString_Concat
+#define PyBytes_ConcatAndDel PyString_ConcatAndDel
+#define PyBytes_AsString PyString_AsString
+#define PyBytes_GET_SIZE PyString_GET_SIZE
+#define PyBytes_Size PyString_Size
+
+#define PyUString_Type PyString_Type
+#define PyUString_Check PyString_Check
+#define PyUStringObject PyStringObject
+#define PyUString_FromString PyString_FromString
+#define PyUString_FromStringAndSize PyString_FromStringAndSize
+#define PyUString_FromFormat PyString_FromFormat
+#define PyUString_Concat PyString_Concat
+#define PyUString_ConcatAndDel PyString_ConcatAndDel
+#define PyUString_GET_SIZE PyString_GET_SIZE
+#define PyUString_Size PyString_Size
+#define PyUString_InternFromString PyString_InternFromString
+#define PyUString_Format PyString_Format
+
+#endif /* NPY_PY3K */
+
+
+static NPY_INLINE void
+PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
+{
+    PyObject
*newobj; + newobj = PyUnicode_Concat(*left, right); + Py_DECREF(*left); + Py_DECREF(right); + *left = newobj; +} + +static NPY_INLINE void +PyUnicode_Concat2(PyObject **left, PyObject *right) +{ + PyObject *newobj; + newobj = PyUnicode_Concat(*left, right); + Py_DECREF(*left); + *left = newobj; +} + +/* + * PyFile_* compatibility + */ +#if defined(NPY_PY3K) +/* + * Get a FILE* handle to the file represented by the Python object + */ +static NPY_INLINE FILE* +npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) +{ + int fd, fd2; + PyObject *ret, *os; + npy_off_t pos; + FILE *handle; + + /* Flush first to ensure things end up in the file in the correct order */ + ret = PyObject_CallMethod(file, "flush", ""); + if (ret == NULL) { + return NULL; + } + Py_DECREF(ret); + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return NULL; + } + + /* + * The handle needs to be dup'd because we have to call fclose + * at the end + */ + os = PyImport_ImportModule("os"); + if (os == NULL) { + return NULL; + } + ret = PyObject_CallMethod(os, "dup", "i", fd); + Py_DECREF(os); + if (ret == NULL) { + return NULL; + } + fd2 = PyNumber_AsSsize_t(ret, NULL); + Py_DECREF(ret); + + /* Convert to FILE* handle */ +#ifdef _WIN32 + handle = _fdopen(fd2, mode); +#else + handle = fdopen(fd2, mode); +#endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object failed"); + } + + /* Record the original raw file handle position */ + *orig_pos = npy_ftell(handle); + if (*orig_pos == -1) { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + fclose(handle); + return NULL; + } + + /* Seek raw handle to the Python-side position */ + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyLong_AsLongLong(ret); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; + } + if (npy_fseek(handle, pos, SEEK_SET) == -1) { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + fclose(handle); + return NULL; + } + return handle; +} + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static NPY_INLINE int +npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) +{ + int fd; + PyObject *ret; + npy_off_t position; + + position = npy_ftell(handle); + + /* Close the FILE* handle */ + fclose(handle); + + /* + * Restore original file handle position, in order to not confuse + * Python-side data structures + */ + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return -1; + } + if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + return -1; + } + + if (position == -1) { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + return -1; + } + + /* Seek Python-side handle to the FILE* handle position */ + ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static NPY_INLINE int +npy_PyFile_Check(PyObject *file) +{ + int fd; + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + PyErr_Clear(); + return 0; + } + return 1; +} + +/* + * DEPRECATED DO NOT USE + * use npy_PyFile_DupClose2 instead + * this function will mess ups python3 internal file object buffering + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static NPY_INLINE int +npy_PyFile_DupClose(PyObject *file, FILE* handle) +{ + PyObject *ret; + Py_ssize_t 
position; + position = npy_ftell(handle); + fclose(handle); + + ret = PyObject_CallMethod(file, "seek", NPY_SSIZE_T_PYFMT "i", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + + +#else + +/* DEPRECATED, DO NOT USE */ +#define npy_PyFile_DupClose(f, h, p) npy_PyFile_DupClose2((f), (h), (p)) + +/* use these */ +static NPY_INLINE FILE * +npy_PyFile_Dup2(PyObject *file, + const char *NPY_UNUSED(mode), npy_off_t *NPY_UNUSED(orig_pos)) +{ + return PyFile_AsFile(file); +} + +static NPY_INLINE int +npy_PyFile_DupClose2(PyObject *NPY_UNUSED(file), FILE* NPY_UNUSED(handle), + npy_off_t NPY_UNUSED(orig_pos)) +{ + return 0; +} + +#define npy_PyFile_Check PyFile_Check + +#endif + +/* + * DEPRECATED, DO NOT USE + * Use npy_PyFile_Dup2 instead. + * This function will mess up python3 internal file object buffering. + * Get a FILE* handle to the file represented by the Python object. + */ +static NPY_INLINE FILE* +npy_PyFile_Dup(PyObject *file, char *mode) +{ + npy_off_t orig; + if (DEPRECATE("npy_PyFile_Dup is deprecated, use " + "npy_PyFile_Dup2") < 0) { + return NULL; + } + + return npy_PyFile_Dup2(file, mode, &orig); +} + +static NPY_INLINE PyObject* +npy_PyFile_OpenFile(PyObject *filename, const char *mode) +{ + PyObject *open; + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + if (open == NULL) { + return NULL; + } + return PyObject_CallFunction(open, "Os", filename, mode); +} + +static NPY_INLINE int +npy_PyFile_CloseFile(PyObject *file) +{ + PyObject *ret; + + ret = PyObject_CallMethod(file, "close", NULL); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +/* + * PyObject_Cmp + */ +#if defined(NPY_PY3K) +static NPY_INLINE int +PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) +{ + int v; + v = PyObject_RichCompareBool(i1, i2, Py_LT); + if (v == 0) { + *cmp = -1; + return 1; + } + else if (v == -1) { + return -1; + } + + v = PyObject_RichCompareBool(i1, i2, Py_GT); + if (v == 0) { + *cmp = 1; + return 1; + } + else if (v == -1) { + return -1; + } + + v = PyObject_RichCompareBool(i1, i2, Py_EQ); + if (v == 0) { + *cmp = 0; + return 1; + } + else { + *cmp = 0; + return -1; + } +} +#endif + +/* + * PyCObject functions adapted to PyCapsules. + * + * The main job here is to get rid of the improved error handling + * of PyCapsules. It's a shame... 
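+ *
+ * Usage sketch (illustrative; `state`, `free_state` and `my_state` are
+ * hypothetical names, not part of this header):
+ *
+ *     PyObject *cap = NpyCapsule_FromVoidPtr(state, free_state);
+ *     ...
+ *     my_state *s = (my_state *)NpyCapsule_AsVoidPtr(cap);
+ *
+ * Note that the destructor signature differs between the two branches
+ * below: PyObject * for Python 3 capsules, void * for Python 2 CObjects.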
+
+/*
+ * PyCObject functions adapted to PyCapsules.
+ *
+ * The main job here is to get rid of the improved error handling
+ * of PyCapsules. It's a shame...
+ */
+#if PY_VERSION_HEX >= 0x03000000
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+    PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+    if (ret == NULL) {
+        PyErr_Clear();
+    }
+    return ret;
+}
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
+{
+    PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
+    if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
+        PyErr_Clear();
+        Py_DECREF(ret);
+        ret = NULL;
+    }
+    return ret;
+}
+
+static NPY_INLINE void *
+NpyCapsule_AsVoidPtr(PyObject *obj)
+{
+    void *ret = PyCapsule_GetPointer(obj, NULL);
+    if (ret == NULL) {
+        PyErr_Clear();
+    }
+    return ret;
+}
+
+static NPY_INLINE void *
+NpyCapsule_GetDesc(PyObject *obj)
+{
+    return PyCapsule_GetContext(obj);
+}
+
+static NPY_INLINE int
+NpyCapsule_Check(PyObject *ptr)
+{
+    return PyCapsule_CheckExact(ptr);
+}
+
+#else
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
+{
+    return PyCObject_FromVoidPtr(ptr, dtor);
+}
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context,
+                              void (*dtor)(void *, void *))
+{
+    return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor);
+}
+
+static NPY_INLINE void *
+NpyCapsule_AsVoidPtr(PyObject *ptr)
+{
+    return PyCObject_AsVoidPtr(ptr);
+}
+
+static NPY_INLINE void *
+NpyCapsule_GetDesc(PyObject *obj)
+{
+    return PyCObject_GetDesc(obj);
+}
+
+static NPY_INLINE int
+NpyCapsule_Check(PyObject *ptr)
+{
+    return PyCObject_Check(ptr);
+}
+
+#endif
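A sketch of the intended round trip through these wrappers (illustrative only; `state` is a hypothetical C object and no destructor is installed):

/* Sketch only: stash a C pointer on the Python side and read it back. */
static PyObject *
wrap_state(void *state)
{
    return NpyCapsule_FromVoidPtr(state, NULL);     /* NULL: no dtor */
}

static void *
unwrap_state(PyObject *obj)
{
    if (!NpyCapsule_Check(obj)) {
        return NULL;
    }
    return NpyCapsule_AsVoidPtr(obj);
}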
+
+/*
+ * Hash value compatibility.
+ * As of Python 3.2 hash values are of type Py_hash_t.
+ * Previous versions use C long.
+ */
+#if PY_VERSION_HEX < 0x03020000
+typedef long npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG
+#else
+typedef Py_hash_t npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _NPY_3KCOMPAT_H_ */
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_common.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_common.h
new file mode 100644
index 0000000000000..5cba8c9d2a3ce
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_common.h
@@ -0,0 +1,1046 @@
+#ifndef _NPY_COMMON_H_
+#define _NPY_COMMON_H_
+
+/* numpyconfig.h is auto-generated */
+#include "numpyconfig.h"
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+
+/*
+ * gcc does not unroll even with -O3
+ * use with care, unrolling on modern cpus rarely speeds things up
+ */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS
+#define NPY_GCC_UNROLL_LOOPS \
+    __attribute__((optimize("unroll-loops")))
+#else
+#define NPY_GCC_UNROLL_LOOPS
+#endif
+
+/* highest gcc optimization level, enabled autovectorizer */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3
+#define NPY_GCC_OPT_3 __attribute__((optimize("O3")))
+#else
+#define NPY_GCC_OPT_3
+#endif
+
+/*
+ * mark an argument (starting from 1) that must not be NULL and is not checked
+ * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check
+ */
+#ifdef HAVE_ATTRIBUTE_NONNULL
+#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n)))
+#else
+#define NPY_GCC_NONNULL(n)
+#endif
+
+#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS
+#define NPY_HAVE_SSE_INTRINSICS
+#endif
+
+#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD
+#define NPY_HAVE_SSE2_INTRINSICS
+#endif
+
+/*
+ * give a hint to the compiler which branch is more likely or unlikely
+ * to occur, e.g. rare error cases:
+ *
+ * if (NPY_UNLIKELY(failure == 0))
+ *     return NULL;
+ *
+ * the double !! is to cast the expression (e.g. NULL) to a boolean required by
+ * the intrinsic
+ */
+#ifdef HAVE___BUILTIN_EXPECT
+#define NPY_LIKELY(x) __builtin_expect(!!(x), 1)
+#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define NPY_LIKELY(x) (x)
+#define NPY_UNLIKELY(x) (x)
+#endif
+
+#if defined(_MSC_VER)
+    #define NPY_INLINE __inline
+#elif defined(__GNUC__)
+    #if defined(__STRICT_ANSI__)
+        #define NPY_INLINE __inline__
+    #else
+        #define NPY_INLINE inline
+    #endif
+#else
+    #define NPY_INLINE
+#endif
+
+#ifdef HAVE___THREAD
+    #define NPY_TLS __thread
+#else
+    #ifdef HAVE___DECLSPEC_THREAD_
+        #define NPY_TLS __declspec(thread)
+    #else
+        #define NPY_TLS
+    #endif
+#endif
+
+#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE
+    #define NPY_RETURNS_BORROWED_REF \
+        __attribute__((cpychecker_returns_borrowed_ref))
+#else
+    #define NPY_RETURNS_BORROWED_REF
+#endif
+
+#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE
+    #define NPY_STEALS_REF_TO_ARG(n) \
+        __attribute__((cpychecker_steals_reference_to_arg(n)))
+#else
+    #define NPY_STEALS_REF_TO_ARG(n)
+#endif
+
+/* 64 bit file position support, also on win-amd64. Ticket #1660 */
+#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \
+    defined(__MINGW32__) || defined(__MINGW64__)
+    #include <io.h>
+
+/* mingw based on 3.4.5 has lseek but not ftell/fseek */
+#if defined(__MINGW32__) || defined(__MINGW64__)
+extern int __cdecl _fseeki64(FILE *, long long, int);
+extern long long __cdecl _ftelli64(FILE *);
+#endif
+
+    #define npy_fseek _fseeki64
+    #define npy_ftell _ftelli64
+    #define npy_lseek _lseeki64
+    #define npy_off_t npy_int64
+
+    #if NPY_SIZEOF_INT == 8
+        #define NPY_OFF_T_PYFMT "i"
+    #elif NPY_SIZEOF_LONG == 8
+        #define NPY_OFF_T_PYFMT "l"
+    #elif NPY_SIZEOF_LONGLONG == 8
+        #define NPY_OFF_T_PYFMT "L"
+    #else
+        #error Unsupported size for type off_t
+    #endif
+#else
+#ifdef HAVE_FSEEKO
+    #define npy_fseek fseeko
+#else
+    #define npy_fseek fseek
+#endif
+#ifdef HAVE_FTELLO
+    #define npy_ftell ftello
+#else
+    #define npy_ftell ftell
+#endif
+    #define npy_lseek lseek
+    #define npy_off_t off_t
+
+    #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT
+        #define NPY_OFF_T_PYFMT "h"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT
+        #define NPY_OFF_T_PYFMT "i"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG
+        #define NPY_OFF_T_PYFMT "l"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG
+        #define NPY_OFF_T_PYFMT "L"
+    #else
+        #error Unsupported size for type off_t
+    #endif
+#endif
+
+/* enums for detected endianness */
+enum {
+    NPY_CPU_UNKNOWN_ENDIAN,
+    NPY_CPU_LITTLE,
+    NPY_CPU_BIG
+};
+
+/*
+ * This is to typedef npy_intp to the appropriate pointer size for this
+ * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h.
+ */
+typedef Py_intptr_t npy_intp;
+typedef Py_uintptr_t npy_uintp;
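A small sketch of the portable file-offset wrappers defined earlier in this header (illustrative; assumes `fp` was opened in binary mode and ignores error checking for brevity):

/* Sketch only: npy_off_t/npy_fseek/npy_ftell stay 64-bit capable on win-amd64. */
static npy_off_t
stream_size(FILE *fp)
{
    npy_off_t cur = npy_ftell(fp);
    npy_off_t end;

    npy_fseek(fp, 0, SEEK_END);
    end = npy_ftell(fp);
    npy_fseek(fp, cur, SEEK_SET);       /* restore the old position */
    return end;
}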
+
+/*
+ * Define sizes that were not defined in numpyconfig.h.
+ */
+#define NPY_SIZEOF_CHAR 1
+#define NPY_SIZEOF_BYTE 1
+#define NPY_SIZEOF_DATETIME 8
+#define NPY_SIZEOF_TIMEDELTA 8
+#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_HALF 2
+#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
+#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
+#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
+
+#ifdef constchar
+#undef constchar
+#endif
+
+#define NPY_SSIZE_T_PYFMT "n"
+#define constchar char
+
+/* NPY_INTP_FMT Note:
+ *      Unlike the other NPY_*_FMT macros which are used with
+ *      PyOS_snprintf, NPY_INTP_FMT is used with PyErr_Format and
+ *      PyString_Format. These functions use different formatting
+ *      codes which are portably specified according to the Python
+ *      documentation. See ticket #1795.
+ *
+ *      On Windows x64, the LONGLONG formatter should be used, but
+ *      in Python 2.6 the %lld formatter is not supported. In this
+ *      case we work around the problem by using the %zd formatter.
+ */
+#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT
+    #define NPY_INTP NPY_INT
+    #define NPY_UINTP NPY_UINT
+    #define PyIntpArrType_Type PyIntArrType_Type
+    #define PyUIntpArrType_Type PyUIntArrType_Type
+    #define NPY_MAX_INTP NPY_MAX_INT
+    #define NPY_MIN_INTP NPY_MIN_INT
+    #define NPY_MAX_UINTP NPY_MAX_UINT
+    #define NPY_INTP_FMT "d"
+#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG
+    #define NPY_INTP NPY_LONG
+    #define NPY_UINTP NPY_ULONG
+    #define PyIntpArrType_Type PyLongArrType_Type
+    #define PyUIntpArrType_Type PyULongArrType_Type
+    #define NPY_MAX_INTP NPY_MAX_LONG
+    #define NPY_MIN_INTP NPY_MIN_LONG
+    #define NPY_MAX_UINTP NPY_MAX_ULONG
+    #define NPY_INTP_FMT "ld"
+#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG)
+    #define NPY_INTP NPY_LONGLONG
+    #define NPY_UINTP NPY_ULONGLONG
+    #define PyIntpArrType_Type PyLongLongArrType_Type
+    #define PyUIntpArrType_Type PyULongLongArrType_Type
+    #define NPY_MAX_INTP NPY_MAX_LONGLONG
+    #define NPY_MIN_INTP NPY_MIN_LONGLONG
+    #define NPY_MAX_UINTP NPY_MAX_ULONGLONG
+    #if (PY_VERSION_HEX >= 0x02070000)
+        #define NPY_INTP_FMT "lld"
+    #else
+        #define NPY_INTP_FMT "zd"
+    #endif
+#endif
+
+/*
+ * We can only use C99 formats for npy_intp if it is the same as
+ * intp_t, hence the condition on HAVE_UINTPTR_T
+ */
+#if (NPY_USE_C99_FORMATS) == 1 \
+        && (defined HAVE_UINTPTR_T) \
+        && (defined HAVE_INTTYPES_H)
+    #include <inttypes.h>
+    #undef NPY_INTP_FMT
+    #define NPY_INTP_FMT PRIdPTR
+#endif
+
+
+/*
+ * Some platforms don't define bool, long long, or long double.
+ * Handle that here.
+ */ +#define NPY_BYTE_FMT "hhd" +#define NPY_UBYTE_FMT "hhu" +#define NPY_SHORT_FMT "hd" +#define NPY_USHORT_FMT "hu" +#define NPY_INT_FMT "d" +#define NPY_UINT_FMT "u" +#define NPY_LONG_FMT "ld" +#define NPY_ULONG_FMT "lu" +#define NPY_HALF_FMT "g" +#define NPY_FLOAT_FMT "g" +#define NPY_DOUBLE_FMT "g" + + +#ifdef PY_LONG_LONG +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +# ifdef _MSC_VER +# define NPY_LONGLONG_FMT "I64d" +# define NPY_ULONGLONG_FMT "I64u" +# elif defined(__APPLE__) || defined(__FreeBSD__) +/* "%Ld" only parses 4 bytes -- "L" is floating modifier on MacOS X/BSD */ +# define NPY_LONGLONG_FMT "lld" +# define NPY_ULONGLONG_FMT "llu" +/* + another possible variant -- *quad_t works on *BSD, but is deprecated: + #define LONGLONG_FMT "qd" + #define ULONGLONG_FMT "qu" +*/ +# else +# define NPY_LONGLONG_FMT "Ld" +# define NPY_ULONGLONG_FMT "Lu" +# endif +# ifdef _MSC_VER +# define NPY_LONGLONG_SUFFIX(x) (x##i64) +# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) +# else +# define NPY_LONGLONG_SUFFIX(x) (x##LL) +# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) +# endif +#else +typedef long npy_longlong; +typedef unsigned long npy_ulonglong; +# define NPY_LONGLONG_SUFFIX(x) (x##L) +# define NPY_ULONGLONG_SUFFIX(x) (x##UL) +#endif + + +typedef unsigned char npy_bool; +#define NPY_FALSE 0 +#define NPY_TRUE 1 + + +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + typedef double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "g" +#else + typedef long double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "Lg" +#endif + +#ifndef Py_USING_UNICODE +#error Must use Python with unicode enabled. +#endif + + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +/* + * Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being + * able to do .real/.imag. Will have to convert code first. + */ +#if 0 +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE) +typedef complex npy_cdouble; +#else +typedef struct { double real, imag; } npy_cdouble; +#endif + +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT) +typedef complex float npy_cfloat; +#else +typedef struct { float real, imag; } npy_cfloat; +#endif + +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE) +typedef complex long double npy_clongdouble; +#else +typedef struct {npy_longdouble real, imag;} npy_clongdouble; +#endif +#endif +#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE +#error npy_cdouble definition is not compatible with C99 complex definition ! \ + Please contact Numpy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { double real, imag; } npy_cdouble; + +#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT +#error npy_cfloat definition is not compatible with C99 complex definition ! \ + Please contact Numpy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { float real, imag; } npy_cfloat; + +#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE +#error npy_clongdouble definition is not compatible with C99 complex definition ! 
\
+    Please contact Numpy maintainers and give detailed information about your \
+    compiler and platform
+#endif
+typedef struct { npy_longdouble real, imag; } npy_clongdouble;
+
+/*
+ * numarray-style bit-width typedefs
+ */
+#define NPY_MAX_INT8 127
+#define NPY_MIN_INT8 -128
+#define NPY_MAX_UINT8 255
+#define NPY_MAX_INT16 32767
+#define NPY_MIN_INT16 -32768
+#define NPY_MAX_UINT16 65535
+#define NPY_MAX_INT32 2147483647
+#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)
+#define NPY_MAX_UINT32 4294967295U
+#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)
+#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)
+#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
+#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
+#define NPY_MIN_DATETIME NPY_MIN_INT64
+#define NPY_MAX_DATETIME NPY_MAX_INT64
+#define NPY_MIN_TIMEDELTA NPY_MIN_INT64
+#define NPY_MAX_TIMEDELTA NPY_MAX_INT64
+
+ /* Need to find the number of bits for each type and
+    make definitions accordingly.
+
+    C states that sizeof(char) == 1 by definition
+
+    So, just using the sizeof keyword won't help.
+
+    It also looks like Python itself uses sizeof(char) quite a
+    bit, which by definition should be 1 all the time.
+
+    Idea: Make Use of CHAR_BIT which should tell us how many
+    BITS per CHARACTER
+ */
+
+ /* Include platform definitions -- These are in the C89/90 standard */
+#include <limits.h>
+#define NPY_MAX_BYTE SCHAR_MAX
+#define NPY_MIN_BYTE SCHAR_MIN
+#define NPY_MAX_UBYTE UCHAR_MAX
+#define NPY_MAX_SHORT SHRT_MAX
+#define NPY_MIN_SHORT SHRT_MIN
+#define NPY_MAX_USHORT USHRT_MAX
+#define NPY_MAX_INT INT_MAX
+#ifndef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#endif
+#define NPY_MIN_INT INT_MIN
+#define NPY_MAX_UINT UINT_MAX
+#define NPY_MAX_LONG LONG_MAX
+#define NPY_MIN_LONG LONG_MIN
+#define NPY_MAX_ULONG ULONG_MAX
+
+#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
+#define NPY_BITSOF_CHAR CHAR_BIT
+#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
+#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)
+#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)
+#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)
+#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)
+#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)
+#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)
+#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)
+#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)
+#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)
+#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)
+#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)
+
+#if NPY_BITSOF_LONG == 8
+#define NPY_INT8 NPY_LONG
+#define NPY_UINT8 NPY_ULONG
+        typedef long npy_int8;
+        typedef unsigned long npy_uint8;
+#define PyInt8ScalarObject PyLongScalarObject
+#define PyInt8ArrType_Type PyLongArrType_Type
+#define 
PyUInt8ScalarObject PyULongScalarObject +#define PyUInt8ArrType_Type PyULongArrType_Type +#define NPY_INT8_FMT NPY_LONG_FMT +#define NPY_UINT8_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 16 +#define NPY_INT16 NPY_LONG +#define NPY_UINT16 NPY_ULONG + typedef long npy_int16; + typedef unsigned long npy_uint16; +#define PyInt16ScalarObject PyLongScalarObject +#define PyInt16ArrType_Type PyLongArrType_Type +#define PyUInt16ScalarObject PyULongScalarObject +#define PyUInt16ArrType_Type PyULongArrType_Type +#define NPY_INT16_FMT NPY_LONG_FMT +#define NPY_UINT16_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 32 +#define NPY_INT32 NPY_LONG +#define NPY_UINT32 NPY_ULONG + typedef long npy_int32; + typedef unsigned long npy_uint32; + typedef unsigned long npy_ucs4; +#define PyInt32ScalarObject PyLongScalarObject +#define PyInt32ArrType_Type PyLongArrType_Type +#define PyUInt32ScalarObject PyULongScalarObject +#define PyUInt32ArrType_Type PyULongArrType_Type +#define NPY_INT32_FMT NPY_LONG_FMT +#define NPY_UINT32_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 64 +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG + typedef long npy_int64; + typedef unsigned long npy_uint64; +#define PyInt64ScalarObject PyLongScalarObject +#define PyInt64ArrType_Type PyLongArrType_Type +#define PyUInt64ScalarObject PyULongScalarObject +#define PyUInt64ArrType_Type PyULongArrType_Type +#define NPY_INT64_FMT NPY_LONG_FMT +#define NPY_UINT64_FMT NPY_ULONG_FMT +#define MyPyLong_FromInt64 PyLong_FromLong +#define MyPyLong_AsInt64 PyLong_AsLong +#elif NPY_BITSOF_LONG == 128 +#define NPY_INT128 NPY_LONG +#define NPY_UINT128 NPY_ULONG + typedef long npy_int128; + typedef unsigned long npy_uint128; +#define PyInt128ScalarObject PyLongScalarObject +#define PyInt128ArrType_Type PyLongArrType_Type +#define PyUInt128ScalarObject PyULongScalarObject +#define PyUInt128ArrType_Type PyULongArrType_Type +#define NPY_INT128_FMT NPY_LONG_FMT +#define NPY_UINT128_FMT NPY_ULONG_FMT +#endif + +#if NPY_BITSOF_LONGLONG == 8 +# ifndef NPY_INT8 +# define NPY_INT8 NPY_LONGLONG +# define NPY_UINT8 NPY_ULONGLONG + typedef npy_longlong npy_int8; + typedef npy_ulonglong npy_uint8; +# define PyInt8ScalarObject PyLongLongScalarObject +# define PyInt8ArrType_Type PyLongLongArrType_Type +# define PyUInt8ScalarObject PyULongLongScalarObject +# define PyUInt8ArrType_Type PyULongLongArrType_Type +#define NPY_INT8_FMT NPY_LONGLONG_FMT +#define NPY_UINT8_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT8 +# define NPY_MIN_LONGLONG NPY_MIN_INT8 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 +#elif NPY_BITSOF_LONGLONG == 16 +# ifndef NPY_INT16 +# define NPY_INT16 NPY_LONGLONG +# define NPY_UINT16 NPY_ULONGLONG + typedef npy_longlong npy_int16; + typedef npy_ulonglong npy_uint16; +# define PyInt16ScalarObject PyLongLongScalarObject +# define PyInt16ArrType_Type PyLongLongArrType_Type +# define PyUInt16ScalarObject PyULongLongScalarObject +# define PyUInt16ArrType_Type PyULongLongArrType_Type +#define NPY_INT16_FMT NPY_LONGLONG_FMT +#define NPY_UINT16_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT16 +# define NPY_MIN_LONGLONG NPY_MIN_INT16 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 +#elif NPY_BITSOF_LONGLONG == 32 +# ifndef NPY_INT32 +# define NPY_INT32 NPY_LONGLONG +# define NPY_UINT32 NPY_ULONGLONG + typedef npy_longlong npy_int32; + typedef npy_ulonglong npy_uint32; + typedef npy_ulonglong npy_ucs4; +# define PyInt32ScalarObject PyLongLongScalarObject +# define PyInt32ArrType_Type PyLongLongArrType_Type +# define 
PyUInt32ScalarObject PyULongLongScalarObject +# define PyUInt32ArrType_Type PyULongLongArrType_Type +#define NPY_INT32_FMT NPY_LONGLONG_FMT +#define NPY_UINT32_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT32 +# define NPY_MIN_LONGLONG NPY_MIN_INT32 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 +#elif NPY_BITSOF_LONGLONG == 64 +# ifndef NPY_INT64 +# define NPY_INT64 NPY_LONGLONG +# define NPY_UINT64 NPY_ULONGLONG + typedef npy_longlong npy_int64; + typedef npy_ulonglong npy_uint64; +# define PyInt64ScalarObject PyLongLongScalarObject +# define PyInt64ArrType_Type PyLongLongArrType_Type +# define PyUInt64ScalarObject PyULongLongScalarObject +# define PyUInt64ArrType_Type PyULongLongArrType_Type +#define NPY_INT64_FMT NPY_LONGLONG_FMT +#define NPY_UINT64_FMT NPY_ULONGLONG_FMT +# define MyPyLong_FromInt64 PyLong_FromLongLong +# define MyPyLong_AsInt64 PyLong_AsLongLong +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT64 +# define NPY_MIN_LONGLONG NPY_MIN_INT64 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 +#elif NPY_BITSOF_LONGLONG == 128 +# ifndef NPY_INT128 +# define NPY_INT128 NPY_LONGLONG +# define NPY_UINT128 NPY_ULONGLONG + typedef npy_longlong npy_int128; + typedef npy_ulonglong npy_uint128; +# define PyInt128ScalarObject PyLongLongScalarObject +# define PyInt128ArrType_Type PyLongLongArrType_Type +# define PyUInt128ScalarObject PyULongLongScalarObject +# define PyUInt128ArrType_Type PyULongLongArrType_Type +#define NPY_INT128_FMT NPY_LONGLONG_FMT +#define NPY_UINT128_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT128 +# define NPY_MIN_LONGLONG NPY_MIN_INT128 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 +#elif NPY_BITSOF_LONGLONG == 256 +# define NPY_INT256 NPY_LONGLONG +# define NPY_UINT256 NPY_ULONGLONG + typedef npy_longlong npy_int256; + typedef npy_ulonglong npy_uint256; +# define PyInt256ScalarObject PyLongLongScalarObject +# define PyInt256ArrType_Type PyLongLongArrType_Type +# define PyUInt256ScalarObject PyULongLongScalarObject +# define PyUInt256ArrType_Type PyULongLongArrType_Type +#define NPY_INT256_FMT NPY_LONGLONG_FMT +#define NPY_UINT256_FMT NPY_ULONGLONG_FMT +# define NPY_MAX_LONGLONG NPY_MAX_INT256 +# define NPY_MIN_LONGLONG NPY_MIN_INT256 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 +#endif + +#if NPY_BITSOF_INT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_INT +#define NPY_UINT8 NPY_UINT + typedef int npy_int8; + typedef unsigned int npy_uint8; +# define PyInt8ScalarObject PyIntScalarObject +# define PyInt8ArrType_Type PyIntArrType_Type +# define PyUInt8ScalarObject PyUIntScalarObject +# define PyUInt8ArrType_Type PyUIntArrType_Type +#define NPY_INT8_FMT NPY_INT_FMT +#define NPY_UINT8_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_INT +#define NPY_UINT16 NPY_UINT + typedef int npy_int16; + typedef unsigned int npy_uint16; +# define PyInt16ScalarObject PyIntScalarObject +# define PyInt16ArrType_Type PyIntArrType_Type +# define PyUInt16ScalarObject PyIntUScalarObject +# define PyUInt16ArrType_Type PyIntUArrType_Type +#define NPY_INT16_FMT NPY_INT_FMT +#define NPY_UINT16_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT + typedef int npy_int32; + typedef unsigned int npy_uint32; + typedef unsigned int npy_ucs4; +# define PyInt32ScalarObject PyIntScalarObject +# define PyInt32ArrType_Type PyIntArrType_Type +# define PyUInt32ScalarObject PyUIntScalarObject +# define PyUInt32ArrType_Type PyUIntArrType_Type +#define 
NPY_INT32_FMT NPY_INT_FMT +#define NPY_UINT32_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_INT +#define NPY_UINT64 NPY_UINT + typedef int npy_int64; + typedef unsigned int npy_uint64; +# define PyInt64ScalarObject PyIntScalarObject +# define PyInt64ArrType_Type PyIntArrType_Type +# define PyUInt64ScalarObject PyUIntScalarObject +# define PyUInt64ArrType_Type PyUIntArrType_Type +#define NPY_INT64_FMT NPY_INT_FMT +#define NPY_UINT64_FMT NPY_UINT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_INT == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_INT +#define NPY_UINT128 NPY_UINT + typedef int npy_int128; + typedef unsigned int npy_uint128; +# define PyInt128ScalarObject PyIntScalarObject +# define PyInt128ArrType_Type PyIntArrType_Type +# define PyUInt128ScalarObject PyUIntScalarObject +# define PyUInt128ArrType_Type PyUIntArrType_Type +#define NPY_INT128_FMT NPY_INT_FMT +#define NPY_UINT128_FMT NPY_UINT_FMT +#endif +#endif + +#if NPY_BITSOF_SHORT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_SHORT +#define NPY_UINT8 NPY_USHORT + typedef short npy_int8; + typedef unsigned short npy_uint8; +# define PyInt8ScalarObject PyShortScalarObject +# define PyInt8ArrType_Type PyShortArrType_Type +# define PyUInt8ScalarObject PyUShortScalarObject +# define PyUInt8ArrType_Type PyUShortArrType_Type +#define NPY_INT8_FMT NPY_SHORT_FMT +#define NPY_UINT8_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT + typedef short npy_int16; + typedef unsigned short npy_uint16; +# define PyInt16ScalarObject PyShortScalarObject +# define PyInt16ArrType_Type PyShortArrType_Type +# define PyUInt16ScalarObject PyUShortScalarObject +# define PyUInt16ArrType_Type PyUShortArrType_Type +#define NPY_INT16_FMT NPY_SHORT_FMT +#define NPY_UINT16_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_SHORT +#define NPY_UINT32 NPY_USHORT + typedef short npy_int32; + typedef unsigned short npy_uint32; + typedef unsigned short npy_ucs4; +# define PyInt32ScalarObject PyShortScalarObject +# define PyInt32ArrType_Type PyShortArrType_Type +# define PyUInt32ScalarObject PyUShortScalarObject +# define PyUInt32ArrType_Type PyUShortArrType_Type +#define NPY_INT32_FMT NPY_SHORT_FMT +#define NPY_UINT32_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_SHORT +#define NPY_UINT64 NPY_USHORT + typedef short npy_int64; + typedef unsigned short npy_uint64; +# define PyInt64ScalarObject PyShortScalarObject +# define PyInt64ArrType_Type PyShortArrType_Type +# define PyUInt64ScalarObject PyUShortScalarObject +# define PyUInt64ArrType_Type PyUShortArrType_Type +#define NPY_INT64_FMT NPY_SHORT_FMT +#define NPY_UINT64_FMT NPY_USHORT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_SHORT == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_SHORT +#define NPY_UINT128 NPY_USHORT + typedef short npy_int128; + typedef unsigned short npy_uint128; +# define PyInt128ScalarObject PyShortScalarObject +# define PyInt128ArrType_Type PyShortArrType_Type +# define PyUInt128ScalarObject PyUShortScalarObject +# define PyUInt128ArrType_Type PyUShortArrType_Type +#define NPY_INT128_FMT NPY_SHORT_FMT +#define NPY_UINT128_FMT NPY_USHORT_FMT +#endif +#endif + + +#if NPY_BITSOF_CHAR == 8 +#ifndef NPY_INT8 +#define NPY_INT8 
NPY_BYTE +#define NPY_UINT8 NPY_UBYTE + typedef signed char npy_int8; + typedef unsigned char npy_uint8; +# define PyInt8ScalarObject PyByteScalarObject +# define PyInt8ArrType_Type PyByteArrType_Type +# define PyUInt8ScalarObject PyUByteScalarObject +# define PyUInt8ArrType_Type PyUByteArrType_Type +#define NPY_INT8_FMT NPY_BYTE_FMT +#define NPY_UINT8_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_BYTE +#define NPY_UINT16 NPY_UBYTE + typedef signed char npy_int16; + typedef unsigned char npy_uint16; +# define PyInt16ScalarObject PyByteScalarObject +# define PyInt16ArrType_Type PyByteArrType_Type +# define PyUInt16ScalarObject PyUByteScalarObject +# define PyUInt16ArrType_Type PyUByteArrType_Type +#define NPY_INT16_FMT NPY_BYTE_FMT +#define NPY_UINT16_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_BYTE +#define NPY_UINT32 NPY_UBYTE + typedef signed char npy_int32; + typedef unsigned char npy_uint32; + typedef unsigned char npy_ucs4; +# define PyInt32ScalarObject PyByteScalarObject +# define PyInt32ArrType_Type PyByteArrType_Type +# define PyUInt32ScalarObject PyUByteScalarObject +# define PyUInt32ArrType_Type PyUByteArrType_Type +#define NPY_INT32_FMT NPY_BYTE_FMT +#define NPY_UINT32_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_BYTE +#define NPY_UINT64 NPY_UBYTE + typedef signed char npy_int64; + typedef unsigned char npy_uint64; +# define PyInt64ScalarObject PyByteScalarObject +# define PyInt64ArrType_Type PyByteArrType_Type +# define PyUInt64ScalarObject PyUByteScalarObject +# define PyUInt64ArrType_Type PyUByteArrType_Type +#define NPY_INT64_FMT NPY_BYTE_FMT +#define NPY_UINT64_FMT NPY_UBYTE_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_CHAR == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_BYTE +#define NPY_UINT128 NPY_UBYTE + typedef signed char npy_int128; + typedef unsigned char npy_uint128; +# define PyInt128ScalarObject PyByteScalarObject +# define PyInt128ArrType_Type PyByteArrType_Type +# define PyUInt128ScalarObject PyUByteScalarObject +# define PyUInt128ArrType_Type PyUByteArrType_Type +#define NPY_INT128_FMT NPY_BYTE_FMT +#define NPY_UINT128_FMT NPY_UBYTE_FMT +#endif +#endif + + + +#if NPY_BITSOF_DOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_DOUBLE +#define NPY_COMPLEX64 NPY_CDOUBLE + typedef double npy_float32; + typedef npy_cdouble npy_complex64; +# define PyFloat32ScalarObject PyDoubleScalarObject +# define PyComplex64ScalarObject PyCDoubleScalarObject +# define PyFloat32ArrType_Type PyDoubleArrType_Type +# define PyComplex64ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX128 NPY_CDOUBLE + typedef double npy_float64; + typedef npy_cdouble npy_complex128; +# define PyFloat64ScalarObject PyDoubleScalarObject +# define PyComplex128ScalarObject PyCDoubleScalarObject +# define PyFloat64ArrType_Type PyDoubleArrType_Type +# define PyComplex128ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_DOUBLE +#define NPY_COMPLEX160 NPY_CDOUBLE + typedef double npy_float80; + typedef npy_cdouble npy_complex160; +# define 
PyFloat80ScalarObject PyDoubleScalarObject +# define PyComplex160ScalarObject PyCDoubleScalarObject +# define PyFloat80ArrType_Type PyDoubleArrType_Type +# define PyComplex160ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_DOUBLE +#define NPY_COMPLEX192 NPY_CDOUBLE + typedef double npy_float96; + typedef npy_cdouble npy_complex192; +# define PyFloat96ScalarObject PyDoubleScalarObject +# define PyComplex192ScalarObject PyCDoubleScalarObject +# define PyFloat96ArrType_Type PyDoubleArrType_Type +# define PyComplex192ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_DOUBLE +#define NPY_COMPLEX256 NPY_CDOUBLE + typedef double npy_float128; + typedef npy_cdouble npy_complex256; +# define PyFloat128ScalarObject PyDoubleScalarObject +# define PyComplex256ScalarObject PyCDoubleScalarObject +# define PyFloat128ArrType_Type PyDoubleArrType_Type +# define PyComplex256ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT +#endif +#endif + + + +#if NPY_BITSOF_FLOAT == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_COMPLEX64 NPY_CFLOAT + typedef float npy_float32; + typedef npy_cfloat npy_complex64; +# define PyFloat32ScalarObject PyFloatScalarObject +# define PyComplex64ScalarObject PyCFloatScalarObject +# define PyFloat32ArrType_Type PyFloatArrType_Type +# define PyComplex64ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT32_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_FLOAT +#define NPY_COMPLEX128 NPY_CFLOAT + typedef float npy_float64; + typedef npy_cfloat npy_complex128; +# define PyFloat64ScalarObject PyFloatScalarObject +# define PyComplex128ScalarObject PyCFloatScalarObject +# define PyFloat64ArrType_Type PyFloatArrType_Type +# define PyComplex128ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT64_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_FLOAT +#define NPY_COMPLEX160 NPY_CFLOAT + typedef float npy_float80; + typedef npy_cfloat npy_complex160; +# define PyFloat80ScalarObject PyFloatScalarObject +# define PyComplex160ScalarObject PyCFloatScalarObject +# define PyFloat80ArrType_Type PyFloatArrType_Type +# define PyComplex160ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT80_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_FLOAT +#define NPY_COMPLEX192 NPY_CFLOAT + typedef float npy_float96; + typedef npy_cfloat npy_complex192; +# define PyFloat96ScalarObject PyFloatScalarObject +# define PyComplex192ScalarObject PyCFloatScalarObject +# define PyFloat96ArrType_Type PyFloatArrType_Type +# define PyComplex192ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT96_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_FLOAT +#define NPY_COMPLEX256 NPY_CFLOAT + typedef float npy_float128; + typedef npy_cfloat npy_complex256; +# define PyFloat128ScalarObject PyFloatScalarObject +# define PyComplex256ScalarObject 
PyCFloatScalarObject +# define PyFloat128ArrType_Type PyFloatArrType_Type +# define PyComplex256ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT128_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT +#endif +#endif + +/* half/float16 isn't a floating-point type in C */ +#define NPY_FLOAT16 NPY_HALF +typedef npy_uint16 npy_half; +typedef npy_half npy_float16; + +#if NPY_BITSOF_LONGDOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_LONGDOUBLE +#define NPY_COMPLEX64 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float32; + typedef npy_clongdouble npy_complex64; +# define PyFloat32ScalarObject PyLongDoubleScalarObject +# define PyComplex64ScalarObject PyCLongDoubleScalarObject +# define PyFloat32ArrType_Type PyLongDoubleArrType_Type +# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_LONGDOUBLE +#define NPY_COMPLEX128 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float64; + typedef npy_clongdouble npy_complex128; +# define PyFloat64ScalarObject PyLongDoubleScalarObject +# define PyComplex128ScalarObject PyCLongDoubleScalarObject +# define PyFloat64ArrType_Type PyLongDoubleArrType_Type +# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_LONGDOUBLE +#define NPY_COMPLEX160 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float80; + typedef npy_clongdouble npy_complex160; +# define PyFloat80ScalarObject PyLongDoubleScalarObject +# define PyComplex160ScalarObject PyCLongDoubleScalarObject +# define PyFloat80ArrType_Type PyLongDoubleArrType_Type +# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_LONGDOUBLE +#define NPY_COMPLEX192 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float96; + typedef npy_clongdouble npy_complex192; +# define PyFloat96ScalarObject PyLongDoubleScalarObject +# define PyComplex192ScalarObject PyCLongDoubleScalarObject +# define PyFloat96ArrType_Type PyLongDoubleArrType_Type +# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_LONGDOUBLE +#define NPY_COMPLEX256 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float128; + typedef npy_clongdouble npy_complex256; +# define PyFloat128ScalarObject PyLongDoubleScalarObject +# define PyComplex256ScalarObject PyCLongDoubleScalarObject +# define PyFloat128ArrType_Type PyLongDoubleArrType_Type +# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 256 +#define NPY_FLOAT256 NPY_LONGDOUBLE +#define NPY_COMPLEX512 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float256; + typedef npy_clongdouble npy_complex512; +# define PyFloat256ScalarObject PyLongDoubleScalarObject +# define PyComplex512ScalarObject PyCLongDoubleScalarObject +# define PyFloat256ArrType_Type PyLongDoubleArrType_Type +# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type 
+#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT +#endif + +/* datetime typedefs */ +typedef npy_int64 npy_timedelta; +typedef npy_int64 npy_datetime; +#define NPY_DATETIME_FMT NPY_INT64_FMT +#define NPY_TIMEDELTA_FMT NPY_INT64_FMT + +/* End of typedefs for numarray style bit-width names */ + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_cpu.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_cpu.h new file mode 100644 index 0000000000000..24d4ce1fc8ce8 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_cpu.h @@ -0,0 +1,122 @@ +/* + * This set (target) cpu specific macros: + * - Possible values: + * NPY_CPU_X86 + * NPY_CPU_AMD64 + * NPY_CPU_PPC + * NPY_CPU_PPC64 + * NPY_CPU_PPC64LE + * NPY_CPU_SPARC + * NPY_CPU_S390 + * NPY_CPU_IA64 + * NPY_CPU_HPPA + * NPY_CPU_ALPHA + * NPY_CPU_ARMEL + * NPY_CPU_ARMEB + * NPY_CPU_SH_LE + * NPY_CPU_SH_BE + */ +#ifndef _NPY_CPUARCH_H_ +#define _NPY_CPUARCH_H_ + +#include "numpyconfig.h" + +#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) + /* + * __i386__ is defined by gcc and Intel compiler on Linux, + * _M_IX86 by VS compiler, + * i386 by Sun compilers on opensolaris at least + */ + #define NPY_CPU_X86 +#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) + /* + * both __x86_64__ and __amd64__ are defined by gcc + * __x86_64 defined by sun compiler on opensolaris at least + * _M_AMD64 defined by MS compiler + */ + #define NPY_CPU_AMD64 +#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) + /* + * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, + * but can't find it ATM + * _ARCH_PPC is used by at least gcc on AIX + */ + #define NPY_CPU_PPC +#elif defined(__ppc64le__) + #define NPY_CPU_PPC64LE +#elif defined(__ppc64__) + #define NPY_CPU_PPC64 +#elif defined(__sparc__) || defined(__sparc) + /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ + #define NPY_CPU_SPARC +#elif defined(__s390__) + #define NPY_CPU_S390 +#elif defined(__ia64) + #define NPY_CPU_IA64 +#elif defined(__hppa) + #define NPY_CPU_HPPA +#elif defined(__alpha__) + #define NPY_CPU_ALPHA +#elif defined(__arm__) && defined(__ARMEL__) + #define NPY_CPU_ARMEL +#elif defined(__arm__) && defined(__ARMEB__) + #define NPY_CPU_ARMEB +#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_SH_LE +#elif defined(__sh__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_SH_BE +#elif defined(__MIPSEL__) + #define NPY_CPU_MIPSEL +#elif defined(__MIPSEB__) + #define NPY_CPU_MIPSEB +#elif defined(__or1k__) + #define NPY_CPU_OR1K +#elif defined(__aarch64__) + #define NPY_CPU_AARCH64 +#elif defined(__mc68000__) + #define NPY_CPU_M68K +#else + #error Unknown CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) +#endif + +/* + This "white-lists" the architectures that we know don't require + pointer alignment. We white-list, since the memcpy version will + work everywhere, whereas assignment will only work where pointer + dereferencing doesn't require alignment. + + TODO: There may be more architectures we can white list. 
+*/
+#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)
+    #define NPY_COPY_PYOBJECT_PTR(dst, src) (*((PyObject **)(dst)) = *((PyObject **)(src)))
+#else
+    #if NPY_SIZEOF_PY_INTPTR_T == 4
+        #define NPY_COPY_PYOBJECT_PTR(dst, src) \
+            ((char*)(dst))[0] = ((char*)(src))[0]; \
+            ((char*)(dst))[1] = ((char*)(src))[1]; \
+            ((char*)(dst))[2] = ((char*)(src))[2]; \
+            ((char*)(dst))[3] = ((char*)(src))[3];
+    #elif NPY_SIZEOF_PY_INTPTR_T == 8
+        #define NPY_COPY_PYOBJECT_PTR(dst, src) \
+            ((char*)(dst))[0] = ((char*)(src))[0]; \
+            ((char*)(dst))[1] = ((char*)(src))[1]; \
+            ((char*)(dst))[2] = ((char*)(src))[2]; \
+            ((char*)(dst))[3] = ((char*)(src))[3]; \
+            ((char*)(dst))[4] = ((char*)(src))[4]; \
+            ((char*)(dst))[5] = ((char*)(src))[5]; \
+            ((char*)(dst))[6] = ((char*)(src))[6]; \
+            ((char*)(dst))[7] = ((char*)(src))[7];
+    #else
+        #error Unknown architecture, please report this to numpy maintainers with \
+        information about your platform (OS, CPU and compiler)
+    #endif
+#endif
+
+#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64))
+#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1
+#else
+#define NPY_CPU_HAVE_UNALIGNED_ACCESS 0
+#endif
+
+#endif
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_endian.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_endian.h
new file mode 100644
index 0000000000000..3ba03d0e38724
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_endian.h
@@ -0,0 +1,49 @@
+#ifndef _NPY_ENDIAN_H_
+#define _NPY_ENDIAN_H_
+
+/*
+ * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
+ * endian.h
+ */
+
+#ifdef NPY_HAVE_ENDIAN_H
+    /* Use endian.h if available */
+    #include <endian.h>
+
+    #define NPY_BYTE_ORDER __BYTE_ORDER
+    #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
+    #define NPY_BIG_ENDIAN __BIG_ENDIAN
+#else
+    /* Set endianness info using target CPU */
+    #include "npy_cpu.h"
+
+    #define NPY_LITTLE_ENDIAN 1234
+    #define NPY_BIG_ENDIAN 4321
+
+    #if defined(NPY_CPU_X86) \
+            || defined(NPY_CPU_AMD64) \
+            || defined(NPY_CPU_IA64) \
+            || defined(NPY_CPU_ALPHA) \
+            || defined(NPY_CPU_ARMEL) \
+            || defined(NPY_CPU_AARCH64) \
+            || defined(NPY_CPU_SH_LE) \
+            || defined(NPY_CPU_MIPSEL) \
+            || defined(NPY_CPU_PPC64LE)
+        #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
+    #elif defined(NPY_CPU_PPC) \
+            || defined(NPY_CPU_SPARC) \
+            || defined(NPY_CPU_S390) \
+            || defined(NPY_CPU_HPPA) \
+            || defined(NPY_CPU_PPC64) \
+            || defined(NPY_CPU_ARMEB) \
+            || defined(NPY_CPU_SH_BE) \
+            || defined(NPY_CPU_MIPSEB) \
+            || defined(NPY_CPU_OR1K) \
+            || defined(NPY_CPU_M68K)
+        #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
+    #else
+        #error Unknown CPU: cannot set endianness
+    #endif
+#endif
+
+#endif
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_interrupt.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_interrupt.h
new file mode 100644
index 0000000000000..f71fd689ebfb5
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_interrupt.h
@@ -0,0 +1,117 @@
+
+/* Signal handling:
+
+This header file defines macros that allow your code to handle
+interrupts received during processing. Interrupts that
+could reasonably be handled:
+
+SIGINT, SIGABRT, SIGALRM, SIGSEGV
+
+****Warning***************
+
+Do not allow code that creates temporary memory or increases reference
+counts of Python objects to be interrupted unless you handle it
+differently.
+ +************************** + +The mechanism for handling interrupts is conceptually simple: + + - replace the signal handler with our own home-grown version + and store the old one. + - run the code to be interrupted -- if an interrupt occurs + the handler should basically just cause a return to the + calling function for finish work. + - restore the old signal handler + +Of course, every code that allows interrupts must account for +returning via the interrupt and handle clean-up correctly. But, +even still, the simple paradigm is complicated by at least three +factors. + + 1) platform portability (i.e. Microsoft says not to use longjmp + to return from signal handling. They have a __try and __except + extension to C instead but what about mingw?). + + 2) how to handle threads: apparently whether signals are delivered to + every thread of the process or the "invoking" thread is platform + dependent. --- we don't handle threads for now. + + 3) do we need to worry about re-entrance. For now, assume the + code will not call-back into itself. + +Ideas: + + 1) Start by implementing an approach that works on platforms that + can use setjmp and longjmp functionality and does nothing + on other platforms. + + 2) Ignore threads --- i.e. do not mix interrupt handling and threads + + 3) Add a default signal_handler function to the C-API but have the rest + use macros. + + +Simple Interface: + + +In your C-extension: around a block of code you want to be interruptable +with a SIGINT + +NPY_SIGINT_ON +[code] +NPY_SIGINT_OFF + +In order for this to work correctly, the +[code] block must not allocate any memory or alter the reference count of any +Python objects. In other words [code] must be interruptible so that continuation +after NPY_SIGINT_OFF will only be "missing some computations" + +Interrupt handling does not work well with threads. 
+
+*/
+
+/* Add signal handling macros
+   Make the global variable and signal handler part of the C-API
+*/
+
+#ifndef NPY_INTERRUPT_H
+#define NPY_INTERRUPT_H
+
+#ifndef NPY_NO_SIGNAL
+
+#include <setjmp.h>
+#include <signal.h>
+
+#ifndef sigsetjmp
+
+#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1)
+#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2)
+#define NPY_SIGJMP_BUF jmp_buf
+
+#else
+
+#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2)
+#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2)
+#define NPY_SIGJMP_BUF sigjmp_buf
+
+#endif
+
+#    define NPY_SIGINT_ON {                                                   \
+                   PyOS_sighandler_t _npy_sig_save;                           \
+                   _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \
+                   if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \
+                                 1) == 0) {                                   \
+
+#    define NPY_SIGINT_OFF }                                      \
+        PyOS_setsig(SIGINT, _npy_sig_save);                       \
+        }
+
+#else /* NPY_NO_SIGNAL */
+
+#define NPY_SIGINT_ON
+#define NPY_SIGINT_OFF
+
+#endif /* NPY_NO_SIGNAL */
+
+#endif /* NPY_INTERRUPT_H */
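A usage sketch for NPY_SIGINT_ON/NPY_SIGINT_OFF (illustrative; `i`, `n`, `in`, `out` and `pure_c_step` are assumptions, and the macros rely on _PyArray_SigintHandler/_PyArray_GetSigintBuf from the array C-API). Per the warning above, the guarded block must not allocate Python objects or touch reference counts:

/* Sketch only: make a long pure-C loop interruptible by SIGINT. */
NPY_SIGINT_ON
for (i = 0; i < n; i++) {
    out[i] = pure_c_step(in[i]);
}
NPY_SIGINT_OFF
if (PyErr_CheckSignals() < 0) {
    return NULL;        /* a KeyboardInterrupt may be pending */
}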
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_math.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_math.h
new file mode 100644
index 0000000000000..b7920460d88ad
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_math.h
@@ -0,0 +1,479 @@
+#ifndef __NPY_MATH_C99_H_
+#define __NPY_MATH_C99_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <math.h>
+#ifdef __SUNPRO_CC
+#include <sunmath.h>
+#endif
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+#include <numpy/npy_common.h>
+
+
+/*
+ * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99
+ * for INFINITY)
+ *
+ * XXX: I should test whether INFINITY and NAN are available on the platform
+ */
+NPY_INLINE static float __npy_inff(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
+    return __bint.__f;
+}
+
+NPY_INLINE static float __npy_nanf(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
+    return __bint.__f;
+}
+
+NPY_INLINE static float __npy_pzerof(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
+    return __bint.__f;
+}
+
+NPY_INLINE static float __npy_nzerof(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
+    return __bint.__f;
+}
+
+#define NPY_INFINITYF __npy_inff()
+#define NPY_NANF __npy_nanf()
+#define NPY_PZEROF __npy_pzerof()
+#define NPY_NZEROF __npy_nzerof()
+
+#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
+#define NPY_NAN ((npy_double)NPY_NANF)
+#define NPY_PZERO ((npy_double)NPY_PZEROF)
+#define NPY_NZERO ((npy_double)NPY_NZEROF)
+
+#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
+#define NPY_NANL ((npy_longdouble)NPY_NANF)
+#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)
+#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)
+
+/*
+ * Useful constants
+ */
+#define NPY_E 2.718281828459045235360287471352662498 /* e */
+#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */
+#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */
+#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */
+#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */
+#define NPY_PI 3.141592653589793238462643383279502884 /* pi */
+#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */
+#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */
+#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */
+#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */
+#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */
+#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */
+#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */
+
+#define NPY_Ef 2.718281828459045235360287471352662498F /* e */
+#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */
+#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */
+#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */
+#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */
+#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */
+#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */
+#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */
+#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */
+#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */
+#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */
+#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */
+#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */
+
+#define NPY_El 2.718281828459045235360287471352662498L /* e */
+#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */
+#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */
+#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */
+#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */
+#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */
+#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */
+#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */
+#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */
+#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */
+#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */
+#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
+#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
+
+/*
+ * C99 double math funcs
+ */
+double npy_sin(double x);
+double npy_cos(double x);
+double npy_tan(double x);
+double npy_sinh(double x);
+double npy_cosh(double x);
+double npy_tanh(double x);
+
+double npy_asin(double x);
+double npy_acos(double x);
+double npy_atan(double x);
+double npy_aexp(double x);
+double npy_alog(double x);
+double npy_asqrt(double x);
+double npy_afabs(double x);
+
+double npy_log(double x);
+double npy_log10(double x);
+double npy_exp(double x);
+double npy_sqrt(double x);
+
+double npy_fabs(double x);
+double npy_ceil(double x);
+double npy_fmod(double x, double y);
+double npy_floor(double x);
+
+double npy_expm1(double x);
+double npy_log1p(double x);
+double npy_hypot(double x, double y);
+double npy_acosh(double x);
+double npy_asinh(double xx);
+double npy_atanh(double x);
+double npy_rint(double x);
+double npy_trunc(double x);
+double npy_exp2(double x);
+double npy_log2(double x);
+
+double npy_atan2(double x, double y);
+double npy_pow(double x, double y);
+double npy_modf(double x, double* y);
+
+double npy_copysign(double x, double y);
+double npy_nextafter(double x, double y);
+double npy_spacing(double x);
+
+/*
+ * IEEE 754 fpu handling.
Those are guaranteed to be macros + */ + +/* use builtins to avoid function calls in tight loops + * only available if npy_config.h is available (= numpys own build) */ +#if HAVE___BUILTIN_ISNAN + #define npy_isnan(x) __builtin_isnan(x) +#else + #ifndef NPY_HAVE_DECL_ISNAN + #define npy_isnan(x) ((x) != (x)) + #else + #ifdef _MSC_VER + #define npy_isnan(x) _isnan((x)) + #else + #define npy_isnan(x) isnan(x) + #endif + #endif +#endif + + +/* only available if npy_config.h is available (= numpys own build) */ +#if HAVE___BUILTIN_ISFINITE + #define npy_isfinite(x) __builtin_isfinite(x) +#else + #ifndef NPY_HAVE_DECL_ISFINITE + #ifdef _MSC_VER + #define npy_isfinite(x) _finite((x)) + #else + #define npy_isfinite(x) !npy_isnan((x) + (-x)) + #endif + #else + #define npy_isfinite(x) isfinite((x)) + #endif +#endif + +/* only available if npy_config.h is available (= numpys own build) */ +#if HAVE___BUILTIN_ISINF + #define npy_isinf(x) __builtin_isinf(x) +#else + #ifndef NPY_HAVE_DECL_ISINF + #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) + #else + #ifdef _MSC_VER + #define npy_isinf(x) (!_finite((x)) && !_isnan((x))) + #else + #define npy_isinf(x) isinf((x)) + #endif + #endif +#endif + +#ifndef NPY_HAVE_DECL_SIGNBIT + int _npy_signbit_f(float x); + int _npy_signbit_d(double x); + int _npy_signbit_ld(long double x); + #define npy_signbit(x) \ + (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ + : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \ + : _npy_signbit_f (x)) +#else + #define npy_signbit(x) signbit((x)) +#endif + +/* + * float C99 math functions + */ + +float npy_sinf(float x); +float npy_cosf(float x); +float npy_tanf(float x); +float npy_sinhf(float x); +float npy_coshf(float x); +float npy_tanhf(float x); +float npy_fabsf(float x); +float npy_floorf(float x); +float npy_ceilf(float x); +float npy_rintf(float x); +float npy_truncf(float x); +float npy_sqrtf(float x); +float npy_log10f(float x); +float npy_logf(float x); +float npy_expf(float x); +float npy_expm1f(float x); +float npy_asinf(float x); +float npy_acosf(float x); +float npy_atanf(float x); +float npy_asinhf(float x); +float npy_acoshf(float x); +float npy_atanhf(float x); +float npy_log1pf(float x); +float npy_exp2f(float x); +float npy_log2f(float x); + +float npy_atan2f(float x, float y); +float npy_hypotf(float x, float y); +float npy_powf(float x, float y); +float npy_fmodf(float x, float y); + +float npy_modff(float x, float* y); + +float npy_copysignf(float x, float y); +float npy_nextafterf(float x, float y); +float npy_spacingf(float x); + +/* + * float C99 math functions + */ + +npy_longdouble npy_sinl(npy_longdouble x); +npy_longdouble npy_cosl(npy_longdouble x); +npy_longdouble npy_tanl(npy_longdouble x); +npy_longdouble npy_sinhl(npy_longdouble x); +npy_longdouble npy_coshl(npy_longdouble x); +npy_longdouble npy_tanhl(npy_longdouble x); +npy_longdouble npy_fabsl(npy_longdouble x); +npy_longdouble npy_floorl(npy_longdouble x); +npy_longdouble npy_ceill(npy_longdouble x); +npy_longdouble npy_rintl(npy_longdouble x); +npy_longdouble npy_truncl(npy_longdouble x); +npy_longdouble npy_sqrtl(npy_longdouble x); +npy_longdouble npy_log10l(npy_longdouble x); +npy_longdouble npy_logl(npy_longdouble x); +npy_longdouble npy_expl(npy_longdouble x); +npy_longdouble npy_expm1l(npy_longdouble x); +npy_longdouble npy_asinl(npy_longdouble x); +npy_longdouble npy_acosl(npy_longdouble x); +npy_longdouble npy_atanl(npy_longdouble x); +npy_longdouble npy_asinhl(npy_longdouble x); +npy_longdouble 
npy_acoshl(npy_longdouble x);
+npy_longdouble npy_atanhl(npy_longdouble x);
+npy_longdouble npy_log1pl(npy_longdouble x);
+npy_longdouble npy_exp2l(npy_longdouble x);
+npy_longdouble npy_log2l(npy_longdouble x);
+
+npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y);
+
+npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
+
+npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_spacingl(npy_longdouble x);
+
+/*
+ * Non standard functions
+ */
+double npy_deg2rad(double x);
+double npy_rad2deg(double x);
+double npy_logaddexp(double x, double y);
+double npy_logaddexp2(double x, double y);
+
+float npy_deg2radf(float x);
+float npy_rad2degf(float x);
+float npy_logaddexpf(float x, float y);
+float npy_logaddexp2f(float x, float y);
+
+npy_longdouble npy_deg2radl(npy_longdouble x);
+npy_longdouble npy_rad2degl(npy_longdouble x);
+npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);
+
+#define npy_degrees npy_rad2deg
+#define npy_degreesf npy_rad2degf
+#define npy_degreesl npy_rad2degl
+
+#define npy_radians npy_deg2rad
+#define npy_radiansf npy_deg2radf
+#define npy_radiansl npy_deg2radl
+
+/*
+ * Complex declarations
+ */
+
+/*
+ * C99 specifies that complex numbers have the same representation as
+ * an array of two elements, where the first element is the real part
+ * and the second element is the imaginary part.
+ */
+#define __NPY_CPACK_IMP(x, y, type, ctype) \
+    union {                                \
+        ctype z;                           \
+        type a[2];                         \
+    } z1;                                  \
+                                           \
+    z1.a[0] = (x);                         \
+    z1.a[1] = (y);                         \
+                                           \
+    return z1.z;
+
+static NPY_INLINE npy_cdouble npy_cpack(double x, double y)
+{
+    __NPY_CPACK_IMP(x, y, double, npy_cdouble);
+}
+
+static NPY_INLINE npy_cfloat npy_cpackf(float x, float y)
+{
+    __NPY_CPACK_IMP(x, y, float, npy_cfloat);
+}
+
+static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
+{
+    __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CPACK_IMP
+
+/*
+ * Same remark as above, but in the other direction: extract first/second
+ * member of complex number, assuming a C99-compatible representation
+ *
+ * Those are defined as static inline, such that a reasonable compiler would
+ * most likely compile this to one or two instructions (on CISC at least)
+ */
+#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \
+    union {                                       \
+        ctype z;                                  \
+        type a[2];                                \
+    } __z_repr;                                   \
+    __z_repr.z = z;                               \
+                                                  \
+    return __z_repr.a[index];
+
+static NPY_INLINE double npy_creal(npy_cdouble z)
+{
+    __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
+}
+
+static NPY_INLINE double npy_cimag(npy_cdouble z)
+{
+    __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
+}
+
+static NPY_INLINE float npy_crealf(npy_cfloat z)
+{
+    __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
+}
+
+static NPY_INLINE float npy_cimagf(npy_cfloat z)
+{
+    __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
+}
+
+static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z)
+{
+    __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
+}
+
+static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z)
+{
+    __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CEXTRACT_IMP
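A two-line sketch of the pack/extract helpers above (illustrative only):

/* Sketch only: build an npy_cdouble and read its parts back. */
npy_cdouble z = npy_cpack(3.0, -4.0);
double re = npy_creal(z);       /* 3.0 */
double im = npy_cimag(z);       /* -4.0 */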
+/* + * Double precision complex functions + */ +double npy_cabs(npy_cdouble z); +double npy_carg(npy_cdouble z); + +npy_cdouble npy_cexp(npy_cdouble z); +npy_cdouble npy_clog(npy_cdouble z); +npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); + +npy_cdouble npy_csqrt(npy_cdouble z); + +npy_cdouble npy_ccos(npy_cdouble z); +npy_cdouble npy_csin(npy_cdouble z); + +/* + * Single precision complex functions + */ +float npy_cabsf(npy_cfloat z); +float npy_cargf(npy_cfloat z); + +npy_cfloat npy_cexpf(npy_cfloat z); +npy_cfloat npy_clogf(npy_cfloat z); +npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); + +npy_cfloat npy_csqrtf(npy_cfloat z); + +npy_cfloat npy_ccosf(npy_cfloat z); +npy_cfloat npy_csinf(npy_cfloat z); + +/* + * Extended precision complex functions + */ +npy_longdouble npy_cabsl(npy_clongdouble z); +npy_longdouble npy_cargl(npy_clongdouble z); + +npy_clongdouble npy_cexpl(npy_clongdouble z); +npy_clongdouble npy_clogl(npy_clongdouble z); +npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); + +npy_clongdouble npy_csqrtl(npy_clongdouble z); + +npy_clongdouble npy_ccosl(npy_clongdouble z); +npy_clongdouble npy_csinl(npy_clongdouble z); + +/* + * Functions that set the floating point error + * status word. + */ + +/* + * platform-dependent code translates floating point + * status to an integer sum of these values + */ +#define NPY_FPE_DIVIDEBYZERO 1 +#define NPY_FPE_OVERFLOW 2 +#define NPY_FPE_UNDERFLOW 4 +#define NPY_FPE_INVALID 8 + +int npy_get_floatstatus(void); +int npy_clear_floatstatus(void); +void npy_set_floatstatus_divbyzero(void); +void npy_set_floatstatus_overflow(void); +void npy_set_floatstatus_underflow(void); +void npy_set_floatstatus_invalid(void); + +#ifdef __cplusplus +} +#endif + +#endif
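The NPY_FPE_* values are single bits, so one `npy_get_floatstatus()` result can carry several conditions at once. A sketch of the typical check-and-reset pattern (illustrative; assumes the program is linked against the npymath library that provides these symbols, configured via the npymath.ini pkg-config snippet later in this patch):

    #include <stdio.h>
    #include <numpy/npy_math.h>

    int main(void)
    {
        int status;

        npy_clear_floatstatus();         /* start from a clean slate */
        npy_set_floatstatus_overflow();  /* pretend an overflow occurred */

        status = npy_get_floatstatus();  /* bitwise OR of NPY_FPE_* flags */
        if (status & NPY_FPE_OVERFLOW) {
            printf("overflow flag is set\n");
        }
        npy_clear_floatstatus();         /* reset before the next region */
        return 0;
    }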
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_no_deprecated_api.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_no_deprecated_api.h new file mode 100644 index 0000000000000..6183dc2784a78 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_no_deprecated_api.h @@ -0,0 +1,19 @@ +/* + * This include file is provided for inclusion in Cython *.pyd files where + * one would like to define the NPY_NO_DEPRECATED_API macro. It can be + * included by + * + * cdef extern from "npy_no_deprecated_api.h": pass + * + */ +#ifndef NPY_NO_DEPRECATED_API + +/* put this check here since there may be multiple includes in C extensions. */ +#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \ + defined(OLD_DEFINES_H) +#error "npy_no_deprecated_api.h" must be first among numpy includes. +#else +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_os.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_os.h new file mode 100644 index 0000000000000..9228c3916eab5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_os.h @@ -0,0 +1,30 @@ +#ifndef _NPY_OS_H_ +#define _NPY_OS_H_ + +#if defined(linux) || defined(__linux) || defined(__linux__) + #define NPY_OS_LINUX +#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__) || defined(__DragonFly__) + #define NPY_OS_BSD + #ifdef __FreeBSD__ + #define NPY_OS_FREEBSD + #elif defined(__NetBSD__) + #define NPY_OS_NETBSD + #elif defined(__OpenBSD__) + #define NPY_OS_OPENBSD + #elif defined(__DragonFly__) + #define NPY_OS_DRAGONFLY + #endif +#elif defined(sun) || defined(__sun) + #define NPY_OS_SOLARIS +#elif defined(__CYGWIN__) + #define NPY_OS_CYGWIN +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) + #define NPY_OS_WIN32 +#elif defined(__APPLE__) + #define NPY_OS_DARWIN +#else + #define NPY_OS_UNKNOWN +#endif + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/numpyconfig.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/numpyconfig.h new file mode 100644 index 0000000000000..9d6dce004ac59 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/numpyconfig.h @@ -0,0 +1,35 @@ +#ifndef _NPY_NUMPYCONFIG_H_ +#define _NPY_NUMPYCONFIG_H_ + +#include "_numpyconfig.h" + +/* + * On Mac OS X, because there is only one configuration stage for all the archs + * in universal builds, any macro which depends on the arch needs to be + * hardcoded + */ +#ifdef __APPLE__ + #undef NPY_SIZEOF_LONG + #undef NPY_SIZEOF_PY_INTPTR_T + + #ifdef __LP64__ + #define NPY_SIZEOF_LONG 8 + #define NPY_SIZEOF_PY_INTPTR_T 8 + #else + #define NPY_SIZEOF_LONG 4 + #define NPY_SIZEOF_PY_INTPTR_T 4 + #endif +#endif + +/** + * To help with the NPY_NO_DEPRECATED_API macro, we include API version + * numbers for specific versions of NumPy. To exclude all API that was + * deprecated as of 1.7, add the following before #including any NumPy + * headers: + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + */ +#define NPY_1_7_API_VERSION 0x00000007 +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_1_9_API_VERSION 0x00000008 + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/old_defines.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/old_defines.h new file mode 100644 index 0000000000000..abf81595ae160 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/old_defines.h @@ -0,0 +1,187 @@ +/* This header is deprecated as of NumPy 1.7 */ +#ifndef OLD_DEFINES_H +#define OLD_DEFINES_H + +#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION +#error The header "old_defines.h" is deprecated as of NumPy 1.7.
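Taken together, the headers above give the usual opt-out pattern: a C extension defines the macro once, before any NumPy include, using the version constants from numpyconfig.h, and deprecated headers such as old_defines.h then refuse to load. A minimal sketch of that consumer side (not code from this patch):

    /* must come before the first NumPy include in the extension */
    #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
    #include <numpy/arrayobject.h>

    /* with the macro set, a later #include of old_defines.h would now
     * hit the #error above instead of defining PyArray_* aliases */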
+#endif + +#define NDARRAY_VERSION NPY_VERSION + +#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE +#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE +#define PyArray_BUFSIZE NPY_BUFSIZE + +#define PyArray_PRIORITY NPY_PRIORITY +#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY +#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE + +#define NPY_MAX PyArray_MAX +#define NPY_MIN PyArray_MIN + +#define PyArray_TYPES NPY_TYPES +#define PyArray_BOOL NPY_BOOL +#define PyArray_BYTE NPY_BYTE +#define PyArray_UBYTE NPY_UBYTE +#define PyArray_SHORT NPY_SHORT +#define PyArray_USHORT NPY_USHORT +#define PyArray_INT NPY_INT +#define PyArray_UINT NPY_UINT +#define PyArray_LONG NPY_LONG +#define PyArray_ULONG NPY_ULONG +#define PyArray_LONGLONG NPY_LONGLONG +#define PyArray_ULONGLONG NPY_ULONGLONG +#define PyArray_HALF NPY_HALF +#define PyArray_FLOAT NPY_FLOAT +#define PyArray_DOUBLE NPY_DOUBLE +#define PyArray_LONGDOUBLE NPY_LONGDOUBLE +#define PyArray_CFLOAT NPY_CFLOAT +#define PyArray_CDOUBLE NPY_CDOUBLE +#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE +#define PyArray_OBJECT NPY_OBJECT +#define PyArray_STRING NPY_STRING +#define PyArray_UNICODE NPY_UNICODE +#define PyArray_VOID NPY_VOID +#define PyArray_DATETIME NPY_DATETIME +#define PyArray_TIMEDELTA NPY_TIMEDELTA +#define PyArray_NTYPES NPY_NTYPES +#define PyArray_NOTYPE NPY_NOTYPE +#define PyArray_CHAR NPY_CHAR +#define PyArray_USERDEF NPY_USERDEF +#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES + +#define PyArray_INTP NPY_INTP +#define PyArray_UINTP NPY_UINTP + +#define PyArray_INT8 NPY_INT8 +#define PyArray_UINT8 NPY_UINT8 +#define PyArray_INT16 NPY_INT16 +#define PyArray_UINT16 NPY_UINT16 +#define PyArray_INT32 NPY_INT32 +#define PyArray_UINT32 NPY_UINT32 + +#ifdef NPY_INT64 +#define PyArray_INT64 NPY_INT64 +#define PyArray_UINT64 NPY_UINT64 +#endif + +#ifdef NPY_INT128 +#define PyArray_INT128 NPY_INT128 +#define PyArray_UINT128 NPY_UINT128 +#endif + +#ifdef NPY_FLOAT16 +#define PyArray_FLOAT16 NPY_FLOAT16 +#define PyArray_COMPLEX32 NPY_COMPLEX32 +#endif + +#ifdef NPY_FLOAT80 +#define PyArray_FLOAT80 NPY_FLOAT80 +#define PyArray_COMPLEX160 NPY_COMPLEX160 +#endif + +#ifdef NPY_FLOAT96 +#define PyArray_FLOAT96 NPY_FLOAT96 +#define PyArray_COMPLEX192 NPY_COMPLEX192 +#endif + +#ifdef NPY_FLOAT128 +#define PyArray_FLOAT128 NPY_FLOAT128 +#define PyArray_COMPLEX256 NPY_COMPLEX256 +#endif + +#define PyArray_FLOAT32 NPY_FLOAT32 +#define PyArray_COMPLEX64 NPY_COMPLEX64 +#define PyArray_FLOAT64 NPY_FLOAT64 +#define PyArray_COMPLEX128 NPY_COMPLEX128 + + +#define PyArray_TYPECHAR NPY_TYPECHAR +#define PyArray_BOOLLTR NPY_BOOLLTR +#define PyArray_BYTELTR NPY_BYTELTR +#define PyArray_UBYTELTR NPY_UBYTELTR +#define PyArray_SHORTLTR NPY_SHORTLTR +#define PyArray_USHORTLTR NPY_USHORTLTR +#define PyArray_INTLTR NPY_INTLTR +#define PyArray_UINTLTR NPY_UINTLTR +#define PyArray_LONGLTR NPY_LONGLTR +#define PyArray_ULONGLTR NPY_ULONGLTR +#define PyArray_LONGLONGLTR NPY_LONGLONGLTR +#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR +#define PyArray_HALFLTR NPY_HALFLTR +#define PyArray_FLOATLTR NPY_FLOATLTR +#define PyArray_DOUBLELTR NPY_DOUBLELTR +#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR +#define PyArray_CFLOATLTR NPY_CFLOATLTR +#define PyArray_CDOUBLELTR NPY_CDOUBLELTR +#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR +#define PyArray_OBJECTLTR NPY_OBJECTLTR +#define PyArray_STRINGLTR NPY_STRINGLTR +#define PyArray_STRINGLTR2 NPY_STRINGLTR2 +#define PyArray_UNICODELTR NPY_UNICODELTR +#define PyArray_VOIDLTR NPY_VOIDLTR +#define PyArray_DATETIMELTR NPY_DATETIMELTR +#define 
PyArray_TIMEDELTALTR NPY_TIMEDELTALTR +#define PyArray_CHARLTR NPY_CHARLTR +#define PyArray_INTPLTR NPY_INTPLTR +#define PyArray_UINTPLTR NPY_UINTPLTR +#define PyArray_GENBOOLLTR NPY_GENBOOLLTR +#define PyArray_SIGNEDLTR NPY_SIGNEDLTR +#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR +#define PyArray_FLOATINGLTR NPY_FLOATINGLTR +#define PyArray_COMPLEXLTR NPY_COMPLEXLTR + +#define PyArray_QUICKSORT NPY_QUICKSORT +#define PyArray_HEAPSORT NPY_HEAPSORT +#define PyArray_MERGESORT NPY_MERGESORT +#define PyArray_SORTKIND NPY_SORTKIND +#define PyArray_NSORTS NPY_NSORTS + +#define PyArray_NOSCALAR NPY_NOSCALAR +#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR +#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR +#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR +#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR +#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR +#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR +#define PyArray_SCALARKIND NPY_SCALARKIND +#define PyArray_NSCALARKINDS NPY_NSCALARKINDS + +#define PyArray_ANYORDER NPY_ANYORDER +#define PyArray_CORDER NPY_CORDER +#define PyArray_FORTRANORDER NPY_FORTRANORDER +#define PyArray_ORDER NPY_ORDER + +#define PyDescr_ISBOOL PyDataType_ISBOOL +#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED +#define PyDescr_ISSIGNED PyDataType_ISSIGNED +#define PyDescr_ISINTEGER PyDataType_ISINTEGER +#define PyDescr_ISFLOAT PyDataType_ISFLOAT +#define PyDescr_ISNUMBER PyDataType_ISNUMBER +#define PyDescr_ISSTRING PyDataType_ISSTRING +#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX +#define PyDescr_ISPYTHON PyDataType_ISPYTHON +#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE +#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF +#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED +#define PyDescr_ISOBJECT PyDataType_ISOBJECT +#define PyDescr_HASFIELDS PyDataType_HASFIELDS + +#define PyArray_LITTLE NPY_LITTLE +#define PyArray_BIG NPY_BIG +#define PyArray_NATIVE NPY_NATIVE +#define PyArray_SWAP NPY_SWAP +#define PyArray_IGNORE NPY_IGNORE + +#define PyArray_NATBYTE NPY_NATBYTE +#define PyArray_OPPBYTE NPY_OPPBYTE + +#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE + +#define PyArray_USE_PYMEM NPY_USE_PYMEM + +#define PyArray_RemoveLargest PyArray_RemoveSmallest + +#define PyArray_UCS4 npy_ucs4 + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/oldnumeric.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/oldnumeric.h new file mode 100644 index 0000000000000..748f06da31d26 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/oldnumeric.h @@ -0,0 +1,23 @@ +#include "arrayobject.h" + +#ifndef REFCOUNT +# define REFCOUNT NPY_REFCOUNT +# define MAX_ELSIZE 16 +#endif + +#define PyArray_UNSIGNED_TYPES +#define PyArray_SBYTE NPY_BYTE +#define PyArray_CopyArray PyArray_CopyInto +#define _PyArray_multiply_list PyArray_MultiplyIntList +#define PyArray_ISSPACESAVER(m) NPY_FALSE +#define PyScalarArray_Check PyArray_CheckScalar + +#define CONTIGUOUS NPY_CONTIGUOUS +#define OWN_DIMENSIONS 0 +#define OWN_STRIDES 0 +#define OWN_DATA NPY_OWNDATA +#define SAVESPACE 0 +#define SAVESPACEBIT 0 + +#undef import_array +#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufunc_api.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufunc_api.txt new file mode 100644 index 0000000000000..606037f35108c --- /dev/null +++ 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufunc_api.txt @@ -0,0 +1,321 @@ + +================= +Numpy Ufunc C-API +================= +:: + + PyObject * + PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void + **data, char *types, int ntypes, int nin, int + nout, int identity, const char *name, const + char *doc, int check_return) + + +:: + + int + PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int + usertype, PyUFuncGenericFunction + function, int *arg_types, void *data) + + +:: + + int + PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject + *kwds, PyArrayObject **op) + + +This generic function is called with the ufunc object, the arguments to it, +and an array of (pointers to) PyArrayObjects which are NULL. + +'op' is an array of at least NPY_MAXARGS PyArrayObject *. + +:: + + void + PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + int + PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject + **errobj) + + +On return, if errobj is populated with a non-NULL value, the caller +owns a new reference to errobj. 
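For orientation, here is a minimal sketch of what a client extension passes to PyUFunc_FromFuncAndData: one strided inner loop per supported type signature, plus the parallel types/data arrays. The names (double_square, etc.) are hypothetical, the call belongs inside the module init function, and error handling is omitted::

    static void
    double_square(char **args, npy_intp *dimensions,
                  npy_intp *steps, void *data)
    {
        npy_intp i, n = dimensions[0];
        char *in = args[0], *out = args[1];

        for (i = 0; i < n; i++) {
            *(double *)out = *(double *)in * *(double *)in;
            in += steps[0];   /* strides are given in bytes */
            out += steps[1];
        }
    }

    static PyUFuncGenericFunction funcs[] = {&double_square};
    static char types[] = {NPY_DOUBLE, NPY_DOUBLE};  /* in, out */
    static void *data[] = {NULL};

    /* 1 type signature, 1 input, 1 output, no reduction identity */
    PyObject *square = PyUFunc_FromFuncAndData(
        funcs, data, types, 1, 1, 1,
        PyUFunc_None, "square", "elementwise x*x", 0);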
+ +:: + + int + PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) + + +:: + + void + PyUFunc_clearfperr() + + +:: + + int + PyUFunc_getfperr(void ) + + +:: + + int + PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int + *first) + + +:: + + int + PyUFunc_ReplaceLoopBySignature(PyUFuncObject + *func, PyUFuncGenericFunction + newfunc, int + *signature, PyUFuncGenericFunction + *oldfunc) + + +:: + + PyObject * + PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void + **data, char *types, int + ntypes, int nin, int nout, int + identity, const char *name, const + char *doc, int check_return, const + char *signature) + + +:: + + int + PyUFunc_SetUsesArraysAsData(void **data, size_t i) + + +:: + + void + PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void + *func) + + +:: + + void + PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + void + PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp + *steps, void *func) + + +:: + + int + PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING + casting, PyArrayObject + **operands, PyObject + *type_tup, PyArray_Descr **out_dtypes) + + +This function applies the default type resolution rules +for the provided ufunc. + +Returns 0 on success, -1 on error. + +:: + + int + PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING + casting, PyArrayObject + **operands, PyArray_Descr **dtypes) + + +Validates that the input operands can be cast to +the input types, and the output types can be cast to +the output operands where provided. + +Returns 0 on success, -1 (with exception raised) on validation failure. + +:: + + int + PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr + *user_dtype, PyUFuncGenericFunction + function, PyArray_Descr + **arg_dtypes, void *data) + + diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufuncobject.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufuncobject.h new file mode 100644 index 0000000000000..a24a0d83774fb --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufuncobject.h @@ -0,0 +1,375 @@ +#ifndef Py_UFUNCOBJECT_H +#define Py_UFUNCOBJECT_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * The legacy generic inner loop for a standard element-wise or + * generalized ufunc. + */ +typedef void (*PyUFuncGenericFunction) + (char **args, + npy_intp *dimensions, + npy_intp *strides, + void *innerloopdata); + +/* + * The most generic one-dimensional inner loop for + * a standard element-wise ufunc. This typedef is also + * more consistent with the other NumPy function pointer typedefs + * than PyUFuncGenericFunction. + */ +typedef void (PyUFunc_StridedInnerLoopFunc)( + char **dataptrs, npy_intp *strides, + npy_intp count, + NpyAuxData *innerloopdata); + +/* + * The most generic one-dimensional inner loop for + * a masked standard element-wise ufunc. "Masked" here means that it skips + * doing calculations on any items for which the maskptr array has a true + * value. 
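A sketch of a loop with this shape (a hypothetical masked_double_add, following the typedef that this comment introduces): per the description above, items whose mask byte is true are skipped.

    static void
    masked_double_add(char **dataptrs, npy_intp *strides,
                      char *maskptr, npy_intp mask_stride,
                      npy_intp count, NpyAuxData *innerloopdata)
    {
        char *in1 = dataptrs[0], *in2 = dataptrs[1], *out = dataptrs[2];
        npy_intp i;

        (void)innerloopdata;  /* unused in this sketch */
        for (i = 0; i < count; i++) {
            if (!*maskptr) {  /* true mask value => skip this item */
                *(double *)out = *(double *)in1 + *(double *)in2;
            }
            in1 += strides[0];
            in2 += strides[1];
            out += strides[2];
            maskptr += mask_stride;
        }
    }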
+ */ +typedef void (PyUFunc_MaskedStridedInnerLoopFunc)( + char **dataptrs, npy_intp *strides, + char *maskptr, npy_intp mask_stride, + npy_intp count, + NpyAuxData *innerloopdata); + +/* Forward declaration for the type resolver and loop selector typedefs */ +struct _tagPyUFuncObject; + +/* + * Given the operands for calling a ufunc, should determine the + * calculation input and output data types and return an inner loop function. + * This function should validate that the casting rule is being followed, + * and fail if it is not. + * + * For backwards compatibility, the regular type resolution function does not + * support auxiliary data with object semantics. The type resolution call + * which returns a masked generic function returns a standard NpyAuxData + * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros + * work. + * + * ufunc: The ufunc object. + * casting: The 'casting' parameter provided to the ufunc. + * operands: An array of length (ufunc->nin + ufunc->nout), + * with the output parameters possibly NULL. + * type_tup: Either NULL, or the type_tup passed to the ufunc. + * out_dtypes: An array which should be populated with new + * references to (ufunc->nin + ufunc->nout) new + * dtypes, one for each input and output. These + * dtypes should all be in native-endian format. + * + * Should return 0 on success, -1 on failure (with exception set), + * or -2 if Py_NotImplemented should be returned. + */ +typedef int (PyUFunc_TypeResolutionFunc)( + struct _tagPyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + +/* + * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc, + * and an array of fixed strides (the array will contain NPY_MAX_INTP for + * strides which are not necessarily fixed), returns an inner loop + * with associated auxiliary data. + * + * For backwards compatibility, there is a variant of the inner loop + * selection which returns an inner loop irrespective of the strides, + * and with a void* static auxiliary data instead of an NpyAuxData * + * dynamically allocatable auxiliary data. + * + * ufunc: The ufunc object. + * dtypes: An array which has been populated with dtypes, + * in most cases by the type resolution function + * for the same ufunc. + * fixed_strides: For each input/output, either the stride that + * will be used every time the function is called + * or NPY_MAX_INTP if the stride might change or + * is not known ahead of time. The loop selection + * function may use this stride to pick inner loops + * which are optimized for contiguous or 0-stride + * cases. + * out_innerloop: Should be populated with the correct ufunc inner + * loop for the given type. + * out_innerloopdata: Should be populated with the void* data to + * be passed into the out_innerloop function. + * out_needs_api: If the inner loop needs to use the Python API, + * should set this to 1, otherwise should leave + * it untouched.
+ */ +typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)( + struct _tagPyUFuncObject *ufunc, + PyArray_Descr **dtypes, + PyUFuncGenericFunction *out_innerloop, + void **out_innerloopdata, + int *out_needs_api); +typedef int (PyUFunc_InnerLoopSelectionFunc)( + struct _tagPyUFuncObject *ufunc, + PyArray_Descr **dtypes, + npy_intp *fixed_strides, + PyUFunc_StridedInnerLoopFunc **out_innerloop, + NpyAuxData **out_innerloopdata, + int *out_needs_api); +typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)( + struct _tagPyUFuncObject *ufunc, + PyArray_Descr **dtypes, + PyArray_Descr *mask_dtype, + npy_intp *fixed_strides, + npy_intp fixed_mask_stride, + PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop, + NpyAuxData **out_innerloopdata, + int *out_needs_api); + +typedef struct _tagPyUFuncObject { + PyObject_HEAD + /* + * nin: Number of inputs + * nout: Number of outputs + * nargs: Always nin + nout (Why is it stored?) + */ + int nin, nout, nargs; + + /* Identity for reduction, either PyUFunc_One or PyUFunc_Zero */ + int identity; + + /* Array of one-dimensional core loops */ + PyUFuncGenericFunction *functions; + /* Array of funcdata that gets passed into the functions */ + void **data; + /* The number of elements in 'functions' and 'data' */ + int ntypes; + + /* Does not appear to be used */ + int check_return; + + /* The name of the ufunc */ + const char *name; + + /* Array of type numbers, of size ('nargs' * 'ntypes') */ + char *types; + + /* Documentation string */ + const char *doc; + + void *ptr; + PyObject *obj; + PyObject *userloops; + + /* generalized ufunc parameters */ + + /* 0 for scalar ufunc; 1 for generalized ufunc */ + int core_enabled; + /* number of distinct dimension names in signature */ + int core_num_dim_ix; + + /* + * dimension indices of input/output argument k are stored in + * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] + */ + + /* numbers of core dimensions of each argument */ + int *core_num_dims; + /* + * dimension indices in a flatted form; indices + * are in the range of [0,core_num_dim_ix) + */ + int *core_dim_ixs; + /* + * positions of 1st core dimensions of each + * argument in core_dim_ixs + */ + int *core_offsets; + /* signature string for printing purpose */ + char *core_signature; + + /* + * A function which resolves the types and fills an array + * with the dtypes for the inputs and outputs. + */ + PyUFunc_TypeResolutionFunc *type_resolver; + /* + * A function which returns an inner loop written for + * NumPy 1.6 and earlier ufuncs. This is for backwards + * compatibility, and may be NULL if inner_loop_selector + * is specified. + */ + PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; + /* + * A function which returns an inner loop for the new mechanism + * in NumPy 1.7 and later. If provided, this is used, otherwise + * if NULL the legacy_inner_loop_selector is used instead. + */ + PyUFunc_InnerLoopSelectionFunc *inner_loop_selector; + /* + * A function which returns a masked inner loop for the ufunc. + */ + PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector; + + /* + * List of flags for each operand when ufunc is called by nditer object. + * These flags will be used in addition to the default flags for each + * operand set by nditer object. + */ + npy_uint32 *op_flags; + + /* + * List of global flags used when ufunc is called by nditer object. + * These flags will be used in addition to the default global flags + * set by nditer object. 
+ */ + npy_uint32 iter_flags; +} PyUFuncObject; + +#include "arrayobject.h" + +#define UFUNC_ERR_IGNORE 0 +#define UFUNC_ERR_WARN 1 +#define UFUNC_ERR_RAISE 2 +#define UFUNC_ERR_CALL 3 +#define UFUNC_ERR_PRINT 4 +#define UFUNC_ERR_LOG 5 + + /* Python side integer mask */ + +#define UFUNC_MASK_DIVIDEBYZERO 0x07 +#define UFUNC_MASK_OVERFLOW 0x3f +#define UFUNC_MASK_UNDERFLOW 0x1ff +#define UFUNC_MASK_INVALID 0xfff + +#define UFUNC_SHIFT_DIVIDEBYZERO 0 +#define UFUNC_SHIFT_OVERFLOW 3 +#define UFUNC_SHIFT_UNDERFLOW 6 +#define UFUNC_SHIFT_INVALID 9 + + +#define UFUNC_OBJ_ISOBJECT 1 +#define UFUNC_OBJ_NEEDS_API 2 + + /* Default user error mode */ +#define UFUNC_ERR_DEFAULT \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) + +#if NPY_ALLOW_THREADS +#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); +#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); +#else +#define NPY_LOOP_BEGIN_THREADS +#define NPY_LOOP_END_THREADS +#endif + +/* + * UFunc has unit of 1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_One 1 +/* + * UFunc has unit of 0, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_Zero 0 +/* + * UFunc has no unit, and the order of operations cannot be reordered. + * This case does not allow reduction with multiple axes at once. + */ +#define PyUFunc_None -1 +/* + * UFunc has no unit, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_ReorderableNone -2 + +#define UFUNC_REDUCE 0 +#define UFUNC_ACCUMULATE 1 +#define UFUNC_REDUCEAT 2 +#define UFUNC_OUTER 3 + + +typedef struct { + int nin; + int nout; + PyObject *callable; +} PyUFunc_PyFuncData; + +/* A linked-list of function information for + user-defined 1-d loops. + */ +typedef struct _loop1d_info { + PyUFuncGenericFunction func; + void *data; + int *arg_types; + struct _loop1d_info *next; + int nargs; + PyArray_Descr **arg_dtypes; +} PyUFunc_Loop1d; + + +#include "__ufunc_api.h" + +#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" + +#define UFUNC_CHECK_ERROR(arg) \ + do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \ + ((arg)->errormask && \ + PyUFunc_checkfperr((arg)->errormask, \ + (arg)->errobj, \ + &(arg)->first))) \ + goto fail;} while (0) + + +/* keep in sync with ieee754.c.src */ +#if defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \ + (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \ + defined(__NetBSD__) || \ + defined(__GLIBC__) || defined(__APPLE__) || \ + defined(__CYGWIN__) || defined(__MINGW32__) || \ + (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) || \ + defined(_AIX) || \ + defined(_MSC_VER) || \ + defined(__osf__) && defined(__alpha) +#else +#define NO_FLOATING_POINT_SUPPORT +#endif + + +/* + * THESE MACROS ARE DEPRECATED. + * Use npy_set_floatstatus_* in the npymath library. 
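For reference, the migration this deprecation notice asks for looks like the following (an illustrative fragment, not code from this header; `st` is a hypothetical local):

    int st;
    /* deprecated spelling: the macro below expands to the same call */
    UFUNC_CHECK_STATUS(st);
    /* preferred spelling: use npymath directly */
    st = npy_clear_floatstatus();
    if (st & NPY_FPE_DIVIDEBYZERO) {
        /* handle per the UFUNC_ERR_* mode in effect */
    }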
+ */ +#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO +#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW +#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW +#define UFUNC_FPE_INVALID NPY_FPE_INVALID + +#define UFUNC_CHECK_STATUS(ret) \ + { \ + ret = npy_clear_floatstatus(); \ + } +#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() +#define generate_overflow_error() npy_set_floatstatus_overflow() + + /* Make sure it gets defined if it isn't already */ +#ifndef UFUNC_NOFPE +/* Clear the floating point exception default of Borland C++ */ +#if defined(__BORLANDC__) +#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); +#else +#define UFUNC_NOFPE +#endif +#endif + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_UFUNCOBJECT_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/utils.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/utils.h new file mode 100644 index 0000000000000..cc968a35442d5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/utils.h @@ -0,0 +1,19 @@ +#ifndef __NUMPY_UTILS_HEADER__ +#define __NUMPY_UTILS_HEADER__ + +#ifndef __COMP_NPY_UNUSED + #if defined(__GNUC__) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + # elif defined(__ICC) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + #else + #define __COMP_NPY_UNUSED + #endif +#endif + +/* Use this to tag a variable as not used. It will remove the unused variable + * warning on supported platforms (see __COMP_NPY_UNUSED) and mangle the variable + * to avoid accidental use */ +#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED + +#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/info.py new file mode 100644 index 0000000000000..241f209b556ef --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/info.py @@ -0,0 +1,87 @@ +"""Defines a multi-dimensional array and useful procedures for Numerical computation. + +Functions + +- array - NumPy Array construction +- zeros - Return an array of all zeros +- empty - Return an uninitialized array +- shape - Return shape of sequence or array +- rank - Return number of dimensions +- size - Return number of elements in entire array or a + certain dimension +- fromstring - Construct array from (byte) string +- take - Select sub-arrays using sequence of indices +- put - Set sub-arrays using sequence of 1-D indices +- putmask - Set portion of arrays using a mask +- reshape - Return array with new shape +- repeat - Repeat elements of array +- choose - Construct new array from indexed array tuple +- correlate - Correlate two 1-d arrays +- searchsorted - Search for element in 1-d array +- sum - Total sum over a specified dimension +- average - Average, possibly weighted, over axis or array.
+- cumsum - Cumulative sum over a specified dimension +- product - Total product over a specified dimension +- cumproduct - Cumulative product over a specified dimension +- alltrue - Logical and over an entire axis +- sometrue - Logical or over an entire axis +- allclose - Tests if sequences are essentially equal + +More Functions: + +- arange - Return regularly spaced array +- asarray - Guarantee NumPy array +- convolve - Convolve two 1-d arrays +- swapaxes - Exchange axes +- concatenate - Join arrays together +- transpose - Permute axes +- sort - Sort elements of array +- argsort - Indices of sorted array +- argmax - Index of largest value +- argmin - Index of smallest value +- inner - Innerproduct of two arrays +- dot - Dot product (matrix multiplication) +- outer - Outerproduct of two arrays +- resize - Return array with arbitrary new shape +- indices - Tuple of indices +- fromfunction - Construct array from universal function +- diagonal - Return diagonal array +- trace - Trace of array +- dump - Dump array to file object (pickle) +- dumps - Return pickled string representing data +- load - Return array stored in file object +- loads - Return array from pickled string +- ravel - Return array as 1-D +- nonzero - Indices of nonzero elements for 1-D array +- shape - Shape of array +- where - Construct array from binary result +- compress - Elements of array where condition is true +- clip - Clip array between two values +- ones - Array of all ones +- identity - 2-D identity array (matrix) + +(Universal) Math Functions + + add logical_or exp + subtract logical_xor log + multiply logical_not log10 + divide maximum sin + divide_safe minimum sinh + conjugate bitwise_and sqrt + power bitwise_or tan + absolute bitwise_xor tanh + negative invert ceil + greater left_shift fabs + greater_equal right_shift floor + less arccos arctan2 + less_equal arcsin fmod + equal arctan hypot + not_equal cos around + logical_and cosh sign + arccosh arcsinh arctanh + +""" +from __future__ import division, absolute_import, print_function + +depends = ['testing'] +global_symbols = ['*'] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/mlib.ini b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/mlib.ini new file mode 100644 index 0000000000000..5840f5e1bc167 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/mlib.ini @@ -0,0 +1,12 @@ +[meta] +Name = mlib +Description = Math library used with this version of numpy +Version = 1.0 + +[default] +Libs=-lm +Cflags= + +[msvc] +Libs=m.lib +Cflags= diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/npymath.ini b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/npymath.ini new file mode 100644 index 0000000000000..3e465ad2aceaf --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/npymath.ini @@ -0,0 +1,20 @@ +[meta] +Name=npymath +Description=Portable, core math library implementing C99 standard +Version=0.1 + +[variables] +pkgname=numpy.core +prefix=${pkgdir} +libdir=${prefix}/lib +includedir=${prefix}/include + +[default] +Libs=-L${libdir} -lnpymath +Cflags=-I${includedir} +Requires=mlib + +[msvc] +Libs=/LIBPATH:${libdir} npymath.lib +Cflags=/INCLUDE:${includedir} +Requires=mlib diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/machar.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/machar.py new file mode 100644 index 0000000000000..9eb4430a62052 --- /dev/null +++ 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/machar.py @@ -0,0 +1,338 @@ +""" +Machine arithmetic - determine the parameters of the +floating-point arithmetic system + +Author: Pearu Peterson, September 2003 + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['MachAr'] + +from numpy.core.fromnumeric import any +from numpy.core.numeric import errstate + +# Need to speed this up...especially for longfloat + +class MachAr(object): + """ + Diagnosing machine parameters. + + Attributes + ---------- + ibeta : int + Radix in which numbers are represented. + it : int + Number of base-`ibeta` digits in the floating point mantissa M. + machep : int + Exponent of the smallest (most negative) power of `ibeta` that, + added to 1.0, gives something different from 1.0 + eps : float + Floating-point number ``beta**machep`` (floating point precision) + negep : int + Exponent of the smallest power of `ibeta` that, subtracted + from 1.0, gives something different from 1.0. + epsneg : float + Floating-point number ``beta**negep``. + iexp : int + Number of bits in the exponent (including its sign and bias). + minexp : int + Smallest (most negative) power of `ibeta` consistent with there + being no leading zeros in the mantissa. + xmin : float + Floating point number ``beta**minexp`` (the smallest [in + magnitude] usable floating value). + maxexp : int + Smallest (positive) power of `ibeta` that causes overflow. + xmax : float + ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] + usable floating value). + irnd : int + In ``range(6)``, information on what kind of rounding is done + in addition, and on how underflow is handled. + ngrd : int + Number of 'guard digits' used when truncating the product + of two mantissas to fit the representation. + epsilon : float + Same as `eps`. + tiny : float + Same as `xmin`. + huge : float + Same as `xmax`. + precision : float + ``- int(-log10(eps))`` + resolution : float + ``- 10**(-precision)`` + + Parameters + ---------- + float_conv : function, optional + Function that converts an integer or integer array to a float + or float array. Default is `float`. + int_conv : function, optional + Function that converts a float or float array to an integer or + integer array. Default is `int`. + float_to_float : function, optional + Function that converts a float array to float. Default is `float`. + Note that this does not seem to do anything useful in the current + implementation. + float_to_str : function, optional + Function that converts a single float to a string. Default is + ``lambda v:'%24.16e' %v``. + title : str, optional + Title that is printed in the string representation of `MachAr`. + + See Also + -------- + finfo : Machine limits for floating point types. + iinfo : Machine limits for integer types. + + References + ---------- + .. [1] Press, Teukolsky, Vetterling and Flannery, + "Numerical Recipes in C++," 2nd ed, + Cambridge University Press, 2002, p. 31. + + """ + def __init__(self, float_conv=float,int_conv=int, + float_to_float=float, + float_to_str = lambda v:'%24.16e' % v, + title = 'Python floating point number'): + """ + float_conv - convert integer to float (array) + int_conv - convert float (array) to integer + float_to_float - convert float array to float + float_to_str - convert array float to str + title - description of used floating point numbers + """ + # We ignore all errors here because we are purposely triggering + # underflow to detect the properties of the running arch.
+ with errstate(under='ignore'): + self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) + + def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): + max_iterN = 10000 + msg = "Did not converge after %d tries with %s" + one = float_conv(1) + two = one + one + zero = one - one + + # Do we really need to do this? Aren't they 2 and 2.0? + # Determine ibeta and beta + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + b = one + for _ in range(max_iterN): + b = b + b + temp = a + b + itemp = int_conv(temp-a) + if any(itemp != 0): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + ibeta = itemp + beta = float_conv(ibeta) + + # Determine it and irnd + it = -1 + b = one + for _ in range(max_iterN): + it = it + 1 + b = b * beta + temp = b + one + temp1 = temp - b + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + + betah = beta / two + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + temp = a + betah + irnd = 0 + if any(temp-a != zero): + irnd = 1 + tempa = a + beta + temp = tempa + betah + if irnd==0 and any(temp-tempa != zero): + irnd = 2 + + # Determine negep and epsneg + negep = it + 3 + betain = one / beta + a = one + for i in range(negep): + a = a * betain + b = a + for _ in range(max_iterN): + temp = one - a + if any(temp-one != zero): + break + a = a * beta + negep = negep - 1 + # Prevent infinite loop on PPC with gcc 4.0: + if negep < 0: + raise RuntimeError("could not determine machine tolerance " + "for 'negep', locals() -> %s" % (locals())) + else: + raise RuntimeError(msg % (_, one.dtype)) + negep = -negep + epsneg = a + + # Determine machep and eps + machep = - it - 3 + a = b + + for _ in range(max_iterN): + temp = one + a + if any(temp-one != zero): + break + a = a * beta + machep = machep + 1 + else: + raise RuntimeError(msg % (_, one.dtype)) + eps = a + + # Determine ngrd + ngrd = 0 + temp = one + eps + if irnd==0 and any(temp*one - one != zero): + ngrd = 1 + + # Determine iexp + i = 0 + k = 1 + z = betain + t = one + eps + nxres = 0 + for _ in range(max_iterN): + y = z + z = y*y + a = z*one # Check here for underflow + temp = z*t + if any(a+a == zero) or any(abs(z)>=y): + break + temp1 = temp * betain + if any(temp1*beta == z): + break + i = i + 1 + k = k + k + else: + raise RuntimeError(msg % (_, one.dtype)) + if ibeta != 10: + iexp = i + 1 + mx = k + k + else: + iexp = 2 + iz = ibeta + while k >= iz: + iz = iz * ibeta + iexp = iexp + 1 + mx = iz + iz - 1 + + # Determine minexp and xmin + for _ in range(max_iterN): + xmin = y + y = y * betain + a = y * one + temp = y * t + if any(a+a != zero) and any(abs(y) < xmin): + k = k + 1 + temp1 = temp * betain + if any(temp1*beta == y) and any(temp != y): + nxres = 3 + xmin = y + break + else: + break + else: + raise RuntimeError(msg % (_, one.dtype)) + minexp = -k + + # Determine maxexp, xmax + if mx <= k + k - 3 and ibeta != 10: + mx = mx + mx + iexp = iexp + 1 + maxexp = mx + minexp + irnd = irnd + nxres + if irnd >= 2: + maxexp = maxexp - 2 + i = maxexp + minexp + if ibeta == 2 and not i: + maxexp = maxexp - 1 + if i > 20: + maxexp = maxexp - 1 + if any(a != y): + maxexp = maxexp - 2 + xmax = one - epsneg + if any(xmax*one != xmax): + xmax = one - beta*epsneg + xmax = xmax / 
(xmin*beta*beta*beta) + i = maxexp + minexp + 3 + for j in range(i): + if ibeta==2: + xmax = xmax + xmax + else: + xmax = xmax * beta + + self.ibeta = ibeta + self.it = it + self.negep = negep + self.epsneg = float_to_float(epsneg) + self._str_epsneg = float_to_str(epsneg) + self.machep = machep + self.eps = float_to_float(eps) + self._str_eps = float_to_str(eps) + self.ngrd = ngrd + self.iexp = iexp + self.minexp = minexp + self.xmin = float_to_float(xmin) + self._str_xmin = float_to_str(xmin) + self.maxexp = maxexp + self.xmax = float_to_float(xmax) + self._str_xmax = float_to_str(xmax) + self.irnd = irnd + + self.title = title + # Commonly used parameters + self.epsilon = self.eps + self.tiny = self.xmin + self.huge = self.xmax + + import math + self.precision = int(-math.log10(float_to_float(self.eps))) + ten = two + two + two + two + two + resolution = ten ** (-self.precision) + self.resolution = float_to_float(resolution) + self._str_resolution = float_to_str(resolution) + + def __str__(self): + return '''\ +Machine parameters for %(title)s +--------------------------------------------------------------------- +ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s +machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon) +negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg) +minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny) +maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge) +--------------------------------------------------------------------- +''' % self.__dict__ + + +if __name__ == '__main__': + print(MachAr()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/memmap.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/memmap.py new file mode 100644 index 0000000000000..b1c96ee293ae8 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/memmap.py @@ -0,0 +1,308 @@ +from __future__ import division, absolute_import, print_function + +__all__ = ['memmap'] + +import warnings +import sys + +import numpy as np +from .numeric import uint8, ndarray, dtype +from numpy.compat import long, basestring + +dtypedescr = dtype +valid_filemodes = ["r", "c", "r+", "w+"] +writeable_filemodes = ["r+", "w+"] + +mode_equivalents = { + "readonly":"r", + "copyonwrite":"c", + "readwrite":"r+", + "write":"w+" + } + +class memmap(ndarray): + """ + Create a memory-map to an array stored in a *binary* file on disk. + + Memory-mapped files are used for accessing small segments of large files + on disk, without reading the entire file into memory. Numpy's + memmap's are array-like objects. This differs from Python's ``mmap`` + module, which uses file-like objects. + + This subclass of ndarray has some unpleasant interactions with + some operations, because it doesn't quite fit properly as a subclass. + An alternative to using this subclass is to create the ``mmap`` + object yourself, then create an ndarray with ndarray.__new__ directly, + passing the object created in its 'buffer=' parameter. + + This class may at some point be turned into a factory function + which returns a view into an mmap buffer. + + Delete the memmap instance to close. + + + Parameters + ---------- + filename : str or file-like object + The file name or file object to be used as the array data buffer. + dtype : data-type, optional + The data-type used to interpret the file contents. + Default is `uint8`. 
+ mode : {'r+', 'r', 'w+', 'c'}, optional + The file is opened in this mode: + + +------+-------------------------------------------------------------+ + | 'r' | Open existing file for reading only. | + +------+-------------------------------------------------------------+ + | 'r+' | Open existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'w+' | Create or overwrite existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'c' | Copy-on-write: assignments affect data in memory, but | + | | changes are not saved to disk. The file on disk is | + | | read-only. | + +------+-------------------------------------------------------------+ + + Default is 'r+'. + offset : int, optional + In the file, array data starts at this offset. Since `offset` is + measured in bytes, it should normally be a multiple of the byte-size + of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of + file are valid; The file will be extended to accommodate the + additional data. By default, ``memmap`` will start at the beginning of + the file, even if ``filename`` is a file pointer ``fp`` and + ``fp.tell() != 0``. + shape : tuple, optional + The desired shape of the array. If ``mode == 'r'`` and the number + of remaining bytes after `offset` is not a multiple of the byte-size + of `dtype`, you must specify `shape`. By default, the returned array + will be 1-D with the number of elements determined by file size + and data-type. + order : {'C', 'F'}, optional + Specify the order of the ndarray memory layout: C (row-major) or + Fortran (column-major). This only has an effect if the shape is + greater than 1-D. The default order is 'C'. + + Attributes + ---------- + filename : str + Path to the mapped file. + offset : int + Offset position in the file. + mode : str + File mode. + + Methods + ------- + flush + Flush any changes in memory to file on disk. + When you delete a memmap object, flush is called first to write + changes to disk before removing the object. + + + Notes + ----- + The memmap object can be used anywhere an ndarray is accepted. + Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns + ``True``. + + Memory-mapped arrays use the Python memory-map object which + (prior to Python 2.5) does not allow files to be larger than a + certain size depending on the platform. This size is always < 2GB + even on 64-bit systems. + + Examples + -------- + >>> data = np.arange(12, dtype='float32') + >>> data.resize((3,4)) + + This example uses a temporary file so that doctest doesn't write + files to your directory. You would use a 'normal' filename. 
+ + >>> from tempfile import mkdtemp + >>> import os.path as path + >>> filename = path.join(mkdtemp(), 'newfile.dat') + + Create a memmap with dtype and shape that matches our data: + + >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp + memmap([[ 0., 0., 0., 0.], + [ 0., 0., 0., 0.], + [ 0., 0., 0., 0.]], dtype=float32) + + Write data to memmap array: + + >>> fp[:] = data[:] + >>> fp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + >>> fp.filename == path.abspath(filename) + True + + Deletion flushes memory changes to disk before removing the object: + + >>> del fp + + Load the memmap and verify data was stored: + + >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Read-only memmap: + + >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr.flags.writeable + False + + Copy-on-write memmap: + + >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc.flags.writeable + True + + It's possible to assign to copy-on-write array, but values are only + written into the memory copy of the array, and not written to disk: + + >>> fpc + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + >>> fpc[0,:] = 0 + >>> fpc + memmap([[ 0., 0., 0., 0.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + File on disk is unchanged: + + >>> fpr + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Offset into a memmap: + + >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo + memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) + + """ + + __array_priority__ = -100.0 + def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + shape=None, order='C'): + # Import here to minimize 'import numpy' overhead + import mmap + import os.path + try: + mode = mode_equivalents[mode] + except KeyError: + if mode not in valid_filemodes: + raise ValueError("mode must be one of %s" % + (valid_filemodes + list(mode_equivalents.keys()))) + + if hasattr(filename, 'read'): + fid = filename + own_file = False + else: + fid = open(filename, (mode == 'c' and 'r' or mode)+'b') + own_file = True + + if (mode == 'w+') and shape is None: + raise ValueError("shape must be given") + + fid.seek(0, 2) + flen = fid.tell() + descr = dtypedescr(dtype) + _dbytes = descr.itemsize + + if shape is None: + bytes = flen - offset + if (bytes % _dbytes): + fid.close() + raise ValueError("Size of available data is not a " + "multiple of the data-type size.") + size = bytes // _dbytes + shape = (size,) + else: + if not isinstance(shape, tuple): + shape = (shape,) + size = 1 + for k in shape: + size *= k + + bytes = long(offset + size*_dbytes) + + if mode == 'w+' or (mode == 'r+' and flen < bytes): + fid.seek(bytes - 1, 0) + fid.write(np.compat.asbytes('\0')) + fid.flush() + + if mode == 'c': + acc = mmap.ACCESS_COPY + elif mode == 'r': + acc = mmap.ACCESS_READ + else: + acc = mmap.ACCESS_WRITE + + start = offset - offset % mmap.ALLOCATIONGRANULARITY + bytes -= start + offset -= start + mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) + + self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + offset=offset, order=order) + self._mmap = mm + self.offset = offset + self.mode = mode + + if isinstance(filename, basestring): + self.filename = os.path.abspath(filename) + # 
py3 returns int for TemporaryFile().name + elif (hasattr(filename, "name") and + isinstance(filename.name, basestring)): + self.filename = os.path.abspath(filename.name) + # same as memmap copies (e.g. memmap + 1) + else: + self.filename = None + + if own_file: + fid.close() + + return self + + def __array_finalize__(self, obj): + if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): + self._mmap = obj._mmap + self.filename = obj.filename + self.offset = obj.offset + self.mode = obj.mode + else: + self._mmap = None + self.filename = None + self.offset = None + self.mode = None + + def flush(self): + """ + Write any changes in the array to the file on disk. + + For further information, see `memmap`. + + Parameters + ---------- + None + + See Also + -------- + memmap + + """ + if self.base is not None and hasattr(self.base, 'flush'): + self.base.flush() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray.py new file mode 100644 index 0000000000000..123cb89678287 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, 'multiarray.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray_tests.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray_tests.py new file mode 100644 index 0000000000000..5f5ee01ef2a0d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray_tests.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, 'multiarray_tests.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numeric.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numeric.py new file mode 100644 index 0000000000000..5d7407ce0de9a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numeric.py @@ -0,0 +1,2842 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +import warnings +import collections +from . import multiarray +from . import umath +from .umath import (invert, sin, UFUNC_BUFSIZE_DEFAULT, ERR_IGNORE, + ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, + ERR_DEFAULT, PINF, NAN) +from . 
import numerictypes +from .numerictypes import longlong, intc, int_, float_, complex_, bool_ + +if sys.version_info[0] >= 3: + import pickle + basestring = str +else: + import cPickle as pickle + +loads = pickle.loads + + +__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', + 'arange', 'array', 'zeros', 'count_nonzero', + 'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile', + 'frombuffer', 'int_asbuffer', 'where', 'argwhere', 'copyto', + 'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops', + 'can_cast', 'promote_types', 'min_scalar_type', 'result_type', + 'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray', + 'isfortran', 'empty_like', 'zeros_like', 'ones_like', + 'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot', + 'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot', + 'array2string', 'get_printoptions', 'set_printoptions', + 'array_repr', 'array_str', 'set_string_function', + 'little_endian', 'require', + 'fromiter', 'array_equal', 'array_equiv', + 'indices', 'fromfunction', 'isclose', + 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', + 'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask', + 'seterr', 'geterr', 'setbufsize', 'getbufsize', + 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero', + 'Inf', 'inf', 'infty', 'Infinity', + 'nan', 'NaN', 'False_', 'True_', 'bitwise_not', + 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS', + 'ComplexWarning', 'may_share_memory', 'full', 'full_like'] + +if sys.version_info[0] < 3: + __all__.extend(['getbuffer', 'newbuffer']) + + +class ComplexWarning(RuntimeWarning): + """ + The warning raised when casting a complex dtype to a real dtype. + + As implemented, casting a complex number to a real discards its imaginary + part, but this behavior may not be what the user actually wants. + + """ + pass + +bitwise_not = invert + +CLIP = multiarray.CLIP +WRAP = multiarray.WRAP +RAISE = multiarray.RAISE +MAXDIMS = multiarray.MAXDIMS +ALLOW_THREADS = multiarray.ALLOW_THREADS +BUFSIZE = multiarray.BUFSIZE + +ndarray = multiarray.ndarray +flatiter = multiarray.flatiter +nditer = multiarray.nditer +nested_iters = multiarray.nested_iters +broadcast = multiarray.broadcast +dtype = multiarray.dtype +copyto = multiarray.copyto +ufunc = type(sin) + + +def zeros_like(a, dtype=None, order='K', subok=True): + """ + Return an array of zeros with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + .. versionadded:: 1.6.0 + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + .. versionadded:: 1.6.0 + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of 'a', otherwise it will be a base-class array. Defaults + to True. + + Returns + ------- + out : ndarray + Array of zeros with the same shape and type as `a`. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + empty_like : Return an empty array with shape and type of input. + zeros : Return a new array setting values to zero. + ones : Return a new array setting values to one. + empty : Return a new uninitialized array. 
+
+    Examples
+    --------
+    >>> x = np.arange(6)
+    >>> x = x.reshape((2, 3))
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.zeros_like(x)
+    array([[0, 0, 0],
+           [0, 0, 0]])
+
+    >>> y = np.arange(3, dtype=np.float)
+    >>> y
+    array([ 0.,  1.,  2.])
+    >>> np.zeros_like(y)
+    array([ 0.,  0.,  0.])
+
+    """
+    res = empty_like(a, dtype=dtype, order=order, subok=subok)
+    # needed instead of a 0 to get same result as zeros for string dtypes
+    z = zeros(1, dtype=res.dtype)
+    multiarray.copyto(res, z, casting='unsafe')
+    return res
+
+def ones(shape, dtype=None, order='C'):
+    """
+    Return a new array of given shape and type, filled with ones.
+
+    Parameters
+    ----------
+    shape : int or sequence of ints
+        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        The desired data-type for the array, e.g., `numpy.int8`. Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional
+        Whether to store multidimensional data in C- or Fortran-contiguous
+        (row- or column-wise) order in memory.
+
+    Returns
+    -------
+    out : ndarray
+        Array of ones with the given shape, dtype, and order.
+
+    See Also
+    --------
+    zeros, ones_like
+
+    Examples
+    --------
+    >>> np.ones(5)
+    array([ 1.,  1.,  1.,  1.,  1.])
+
+    >>> np.ones((5,), dtype=np.int)
+    array([1, 1, 1, 1, 1])
+
+    >>> np.ones((2, 1))
+    array([[ 1.],
+           [ 1.]])
+
+    >>> s = (2,2)
+    >>> np.ones(s)
+    array([[ 1.,  1.],
+           [ 1.,  1.]])
+
+    """
+    a = empty(shape, dtype, order)
+    multiarray.copyto(a, 1, casting='unsafe')
+    return a
+
+def ones_like(a, dtype=None, order='K', subok=True):
+    """
+    Return an array of ones with the same shape and type as a given array.
+
+    Parameters
+    ----------
+    a : array_like
+        The shape and data-type of `a` define these same attributes of
+        the returned array.
+    dtype : data-type, optional
+        .. versionadded:: 1.6.0
+        Overrides the data type of the result.
+    order : {'C', 'F', 'A', or 'K'}, optional
+        .. versionadded:: 1.6.0
+        Overrides the memory layout of the result. 'C' means C-order,
+        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+        'C' otherwise. 'K' means match the layout of `a` as closely
+        as possible.
+    subok : bool, optional.
+        If True, then the newly created array will use the sub-class
+        type of 'a', otherwise it will be a base-class array. Defaults
+        to True.
+
+    Returns
+    -------
+    out : ndarray
+        Array of ones with the same shape and type as `a`.
+
+    See Also
+    --------
+    zeros_like : Return an array of zeros with shape and type of input.
+    empty_like : Return an empty array with shape and type of input.
+    zeros : Return a new array setting values to zero.
+    ones : Return a new array setting values to one.
+    empty : Return a new uninitialized array.
+
+    Examples
+    --------
+    >>> x = np.arange(6)
+    >>> x = x.reshape((2, 3))
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.ones_like(x)
+    array([[1, 1, 1],
+           [1, 1, 1]])
+
+    >>> y = np.arange(3, dtype=np.float)
+    >>> y
+    array([ 0.,  1.,  2.])
+    >>> np.ones_like(y)
+    array([ 1.,  1.,  1.])
+
+    """
+    res = empty_like(a, dtype=dtype, order=order, subok=subok)
+    multiarray.copyto(res, 1, casting='unsafe')
+    return res
+
+def full(shape, fill_value, dtype=None, order='C'):
+    """
+    Return a new array of given shape and type, filled with `fill_value`.
+
+    Parameters
+    ----------
+    shape : int or sequence of ints
+        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+    fill_value : scalar
+        Fill value.
+    dtype : data-type, optional
+        The desired data-type for the array, e.g., `numpy.int8`. Default
+        is chosen as `np.array(fill_value).dtype`.
+ order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. + + Returns + ------- + out : ndarray + Array of `fill_value` with the given shape, dtype, and order. + + See Also + -------- + zeros_like : Return an array of zeros with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + empty_like : Return an empty array with shape and type of input. + full_like : Fill an array with shape and type of input. + zeros : Return a new array setting values to zero. + ones : Return a new array setting values to one. + empty : Return a new uninitialized array. + + Examples + -------- + >>> np.full((2, 2), np.inf) + array([[ inf, inf], + [ inf, inf]]) + >>> np.full((2, 2), 10, dtype=np.int) + array([[10, 10], + [10, 10]]) + + """ + a = empty(shape, dtype, order) + multiarray.copyto(a, fill_value, casting='unsafe') + return a + +def full_like(a, fill_value, dtype=None, order='K', subok=True): + """ + Return a full array with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + fill_value : scalar + Fill value. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of 'a', otherwise it will be a base-class array. Defaults + to True. + + Returns + ------- + out : ndarray + Array of `fill_value` with the same shape and type as `a`. + + See Also + -------- + zeros_like : Return an array of zeros with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + empty_like : Return an empty array with shape and type of input. + zeros : Return a new array setting values to zero. + ones : Return a new array setting values to one. + empty : Return a new uninitialized array. + full : Fill a new array. 
+ + Examples + -------- + >>> x = np.arange(6, dtype=np.int) + >>> np.full_like(x, 1) + array([1, 1, 1, 1, 1, 1]) + >>> np.full_like(x, 0.1) + array([0, 0, 0, 0, 0, 0]) + >>> np.full_like(x, 0.1, dtype=np.double) + array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> np.full_like(x, np.nan, dtype=np.double) + array([ nan, nan, nan, nan, nan, nan]) + + >>> y = np.arange(6, dtype=np.double) + >>> np.full_like(y, 0.1) + array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok) + multiarray.copyto(res, fill_value, casting='unsafe') + return res + + +def extend_all(module): + adict = {} + for a in __all__: + adict[a] = 1 + try: + mall = getattr(module, '__all__') + except AttributeError: + mall = [k for k in module.__dict__.keys() if not k.startswith('_')] + for a in mall: + if a not in adict: + __all__.append(a) + +newaxis = None + + +arange = multiarray.arange +array = multiarray.array +zeros = multiarray.zeros +count_nonzero = multiarray.count_nonzero +empty = multiarray.empty +empty_like = multiarray.empty_like +fromstring = multiarray.fromstring +fromiter = multiarray.fromiter +fromfile = multiarray.fromfile +frombuffer = multiarray.frombuffer +may_share_memory = multiarray.may_share_memory +if sys.version_info[0] < 3: + newbuffer = multiarray.newbuffer + getbuffer = multiarray.getbuffer +int_asbuffer = multiarray.int_asbuffer +where = multiarray.where +concatenate = multiarray.concatenate +fastCopyAndTranspose = multiarray._fastCopyAndTranspose +set_numeric_ops = multiarray.set_numeric_ops +can_cast = multiarray.can_cast +promote_types = multiarray.promote_types +min_scalar_type = multiarray.min_scalar_type +result_type = multiarray.result_type +lexsort = multiarray.lexsort +compare_chararrays = multiarray.compare_chararrays +putmask = multiarray.putmask +einsum = multiarray.einsum + +def asarray(a, dtype=None, order=None): + """ + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F' for FORTRAN) + memory representation. Defaults to 'C'. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. 
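+
+    Notes
+    -----
+    As the implementation below shows, `asarray` is a thin wrapper
+    around ``array(a, dtype, copy=False, order=order)``, which is why no
+    copy is made when `a` is already an ndarray of matching dtype and order.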
+ + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.matrix, np.ndarray) + True + >>> a = np.matrix([[1, 2]]) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order) + +def asanyarray(a, dtype=None, order=None): + """ + Convert the input to an ndarray, but pass ndarray subclasses through. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes scalars, lists, lists of tuples, tuples, tuples of tuples, + tuples of lists, and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('F') memory + representation. Defaults to 'C'. + + Returns + ------- + out : ndarray or an ndarray subclass + Array interpretation of `a`. If `a` is an ndarray or a subclass + of ndarray, it is returned as-is and no copy is performed. + + See Also + -------- + asarray : Similar function which always returns ndarrays. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and + Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asanyarray(a) + array([1, 2]) + + Instances of `ndarray` subclasses are passed through as-is: + + >>> a = np.matrix([1, 2]) + >>> np.asanyarray(a) is a + True + + """ + return array(a, dtype, copy=False, order=order, subok=True) + +def ascontiguousarray(a, dtype=None): + """ + Return a contiguous array in memory (C order). + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + Data-type of returned array. + + Returns + ------- + out : ndarray + Contiguous array of same shape and content as `a`, with type `dtype` + if specified. + + See Also + -------- + asfortranarray : Convert input to an ndarray with column-major + memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> np.ascontiguousarray(x, dtype=np.float32) + array([[ 0., 1., 2.], + [ 3., 4., 5.]], dtype=float32) + >>> x.flags['C_CONTIGUOUS'] + True + + """ + return array(a, dtype, copy=False, order='C', ndmin=1) + +def asfortranarray(a, dtype=None): + """ + Return an array laid out in Fortran order in memory. + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + By default, the data-type is inferred from the input data. + + Returns + ------- + out : ndarray + The input `a` in Fortran, or column-major, order. + + See Also + -------- + ascontiguousarray : Convert input to a contiguous (C order) array. 
+    asanyarray : Convert input to an ndarray with either row or
+        column-major memory order.
+    require : Return an ndarray that satisfies requirements.
+    ndarray.flags : Information about the memory layout of the array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2,3)
+    >>> y = np.asfortranarray(x)
+    >>> x.flags['F_CONTIGUOUS']
+    False
+    >>> y.flags['F_CONTIGUOUS']
+    True
+
+    """
+    return array(a, dtype, copy=False, order='F', ndmin=1)
+
+def require(a, dtype=None, requirements=None):
+    """
+    Return an ndarray of the provided type that satisfies requirements.
+
+    This function is useful for ensuring that an array with the correct
+    flags is returned for passing to compiled code (perhaps through ctypes).
+
+    Parameters
+    ----------
+    a : array_like
+        The object to be converted to a type-and-requirement-satisfying array.
+    dtype : data-type
+        The required data-type; the default data-type is float64.
+    requirements : str or list of str
+        The requirements list can be any of the following:
+
+        * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
+        * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
+        * 'ALIGNED' ('A') - ensure a data-type aligned array
+        * 'WRITEABLE' ('W') - ensure a writable array
+        * 'OWNDATA' ('O') - ensure an array that owns its own data
+
+    See Also
+    --------
+    asarray : Convert input to an ndarray.
+    asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfortranarray : Convert input to an ndarray with column-major
+        memory order.
+    ndarray.flags : Information about the memory layout of the array.
+
+    Notes
+    -----
+    The returned array will be guaranteed to have the listed requirements
+    by making a copy if needed.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2,3)
+    >>> x.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : False
+      WRITEABLE : True
+      ALIGNED : True
+      UPDATEIFCOPY : False
+
+    >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
+    >>> y.flags
+      C_CONTIGUOUS : False
+      F_CONTIGUOUS : True
+      OWNDATA : True
+      WRITEABLE : True
+      ALIGNED : True
+      UPDATEIFCOPY : False
+
+    """
+    if requirements is None:
+        requirements = []
+    else:
+        requirements = [x.upper() for x in requirements]
+
+    if not requirements:
+        return asanyarray(a, dtype=dtype)
+
+    if 'ENSUREARRAY' in requirements or 'E' in requirements:
+        subok = False
+    else:
+        subok = True
+
+    arr = array(a, dtype=dtype, copy=False, subok=subok)
+
+    copychar = 'A'
+    if 'FORTRAN' in requirements or \
+       'F_CONTIGUOUS' in requirements or \
+       'F' in requirements:
+        copychar = 'F'
+    elif 'CONTIGUOUS' in requirements or \
+         'C_CONTIGUOUS' in requirements or \
+         'C' in requirements:
+        copychar = 'C'
+
+    for prop in requirements:
+        if not arr.flags[prop]:
+            arr = arr.copy(copychar)
+            break
+    return arr
+
+def isfortran(a):
+    """
+    Returns True if the array is arranged in Fortran-order in memory
+    and not C-order.
+
+    Parameters
+    ----------
+    a : ndarray
+        Input array.
+
+    Examples
+    --------
+
+    np.array allows one to specify whether the array is written in
+    C-contiguous order (last index varies the fastest), or FORTRAN-contiguous
+    order in memory (first index varies the fastest).
+ + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + + >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN') + >>> b + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(b) + True + + + The transpose of a C-ordered array is a FORTRAN-ordered array. + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + >>> b = a.T + >>> b + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.isfortran(b) + True + + C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. + + >>> np.isfortran(np.array([1, 2], order='FORTRAN')) + False + + """ + return a.flags.fnc + +def argwhere(a): + """ + Find the indices of array elements that are non-zero, grouped by element. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + index_array : ndarray + Indices of elements that are non-zero. Indices are grouped by element. + + See Also + -------- + where, nonzero + + Notes + ----- + ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. + + The output of ``argwhere`` is not suitable for indexing arrays. + For this purpose use ``where(a)`` instead. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.argwhere(x>1) + array([[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + """ + return transpose(nonzero(a)) + +def flatnonzero(a): + """ + Return indices that are non-zero in the flattened version of a. + + This is equivalent to a.ravel().nonzero()[0]. + + Parameters + ---------- + a : ndarray + Input array. + + Returns + ------- + res : ndarray + Output array, containing the indices of the elements of `a.ravel()` + that are non-zero. + + See Also + -------- + nonzero : Return the indices of the non-zero elements of the input array. + ravel : Return a 1-D array containing the elements of the input array. + + Examples + -------- + >>> x = np.arange(-2, 3) + >>> x + array([-2, -1, 0, 1, 2]) + >>> np.flatnonzero(x) + array([0, 1, 3, 4]) + + Use the indices of the non-zero elements as an index array to extract + these elements: + + >>> x.ravel()[np.flatnonzero(x)] + array([-2, -1, 1, 2]) + + """ + return a.ravel().nonzero()[0] + +_mode_from_name_dict = {'v': 0, + 's' : 1, + 'f' : 2} + +def _mode_from_name(mode): + if isinstance(mode, basestring): + return _mode_from_name_dict[mode.lower()[0]] + return mode + +def correlate(a, v, mode='valid', old_behavior=False): + """ + Cross-correlation of two 1-dimensional sequences. + + This function computes the correlation as generally defined in signal + processing texts:: + + c_{av}[k] = sum_n a[n+k] * conj(v[n]) + + with a and v sequences being zero-padded where necessary and conj being + the conjugate. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `convolve` docstring. Note that the default + is `valid`, unlike `convolve`, which uses `full`. + old_behavior : bool + If True, uses the old behavior from Numeric, + (correlate(a,v) == correlate(v,a), and the conjugate is not taken + for complex arrays). If False, uses the conventional signal + processing definition. + + Returns + ------- + out : ndarray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + convolve : Discrete, linear convolution of two one-dimensional sequences. 
+ + Notes + ----- + The definition of correlation above is not unique and sometimes correlation + may be defined differently. Another common definition is:: + + c'_{av}[k] = sum_n a[n] conj(v[n+k]) + + which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``. + + Examples + -------- + >>> np.correlate([1, 2, 3], [0, 1, 0.5]) + array([ 3.5]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") + array([ 2. , 3.5, 3. ]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") + array([ 0.5, 2. , 3.5, 3. , 0. ]) + + Using complex sequences: + + >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') + array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) + + Note that you get the time reversed, complex conjugated result + when the two input sequences change places, i.e., + ``c_{va}[k] = c^{*}_{av}[-k]``: + + >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') + array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) + + """ + mode = _mode_from_name(mode) +# the old behavior should be made available under a different name, see thread +# http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630 + if old_behavior: + warnings.warn(""" +The old behavior of correlate was deprecated for 1.4.0, and will be completely removed +for NumPy 2.0. + +The new behavior fits the conventional definition of correlation: inputs are +never swapped, and the second argument is conjugated for complex arrays.""", + DeprecationWarning) + return multiarray.correlate(a, v, mode) + else: + return multiarray.correlate2(a, v, mode) + +def convolve(a,v,mode='full'): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + The convolution operator is often seen in signal processing, where it + models the effect of a linear time-invariant system on a signal [1]_. In + probability theory, the sum of two independent random variables is + distributed according to the convolution of their individual + distributions. + + If `v` is longer than `a`, the arrays are swapped before computation. + + Parameters + ---------- + a : (N,) array_like + First one-dimensional input array. + v : (M,) array_like + Second one-dimensional input array. + mode : {'full', 'valid', 'same'}, optional + 'full': + By default, mode is 'full'. This returns the convolution + at each point of overlap, with an output shape of (N+M-1,). At + the end-points of the convolution, the signals do not overlap + completely, and boundary effects may be seen. + + 'same': + Mode `same` returns output of length ``max(M, N)``. Boundary + effects are still visible. + + 'valid': + Mode `valid` returns output of length + ``max(M, N) - min(M, N) + 1``. The convolution product is only given + for points where the signals overlap completely. Values outside + the signal boundary have no effect. + + Returns + ------- + out : ndarray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier + Transform. + scipy.linalg.toeplitz : Used to construct the convolution operator. + polymul : Polynomial multiplication. Same output as convolve, but also + accepts poly1d objects as input. + + Notes + ----- + The discrete convolution operation is defined as + + .. 
math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m] + + It can be shown that a convolution :math:`x(t) * y(t)` in time/space + is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier + domain, after appropriate padding (padding is necessary to prevent + circular convolution). Since multiplication is more efficient (faster) + than convolution, the function `scipy.signal.fftconvolve` exploits the + FFT to calculate the convolution of large data-sets. + + References + ---------- + .. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution. + + Examples + -------- + Note how the convolution operator flips the second array + before "sliding" the two across one another: + + >>> np.convolve([1, 2, 3], [0, 1, 0.5]) + array([ 0. , 1. , 2.5, 4. , 1.5]) + + Only return the middle values of the convolution. + Contains boundary effects, where zeros are taken + into account: + + >>> np.convolve([1,2,3],[0,1,0.5], 'same') + array([ 1. , 2.5, 4. ]) + + The two arrays are of the same length, so there + is only one position where they completely overlap: + + >>> np.convolve([1,2,3],[0,1,0.5], 'valid') + array([ 2.5]) + + """ + a, v = array(a, ndmin=1), array(v, ndmin=1) + if (len(v) > len(a)): + a, v = v, a + if len(a) == 0 : + raise ValueError('a cannot be empty') + if len(v) == 0 : + raise ValueError('v cannot be empty') + mode = _mode_from_name(mode) + return multiarray.correlate(a, v[::-1], mode) + +def outer(a, b, out=None): + """ + Compute the outer product of two vectors. + + Given two vectors, ``a = [a0, a1, ..., aM]`` and + ``b = [b0, b1, ..., bN]``, + the outer product [1]_ is:: + + [[a0*b0 a0*b1 ... a0*bN ] + [a1*b0 . + [ ... . + [aM*b0 aM*bN ]] + + Parameters + ---------- + a : (M,) array_like + First input vector. Input is flattened if + not already 1-dimensional. + b : (N,) array_like + Second input vector. Input is flattened if + not already 1-dimensional. + out : (M, N) ndarray, optional + A location where the result is stored + + .. versionadded:: 1.9.0 + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + inner, einsum + + References + ---------- + .. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd + ed., Baltimore, MD, Johns Hopkins University Press, 1996, + pg. 8. 
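+
+    Notes
+    -----
+    As the implementation below shows, ``outer(a, b)`` is equivalent to
+    broadcasting the flattened inputs against each other:
+    ``a.ravel()[:, np.newaxis] * b.ravel()[np.newaxis, :]``.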
+ + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.outer(x, [1, 2, 3]) + array([[a, aa, aaa], + [b, bb, bbb], + [c, cc, ccc]], dtype=object) + + """ + a = asarray(a) + b = asarray(b) + return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis,:], out) + +# try to import blas optimized dot if available +envbak = os.environ.copy() +try: + # importing this changes the dot function for basic 4 types + # to blas-optimized versions. + + # disables openblas affinity setting of the main thread that limits + # python threads or processes to one core + if 'OPENBLAS_MAIN_FREE' not in os.environ: + os.environ['OPENBLAS_MAIN_FREE'] = '1' + if 'GOTOBLAS_MAIN_FREE' not in os.environ: + os.environ['GOTOBLAS_MAIN_FREE'] = '1' + from ._dotblas import dot, vdot, inner, alterdot, restoredot +except ImportError: + # docstrings are in add_newdocs.py + inner = multiarray.inner + dot = multiarray.dot + def vdot(a, b): + return dot(asarray(a).ravel().conj(), asarray(b).ravel()) + def alterdot(): + pass + def restoredot(): + pass +finally: + os.environ.clear() + os.environ.update(envbak) + del envbak + +def tensordot(a, b, axes=2): + """ + Compute tensor dot product along specified axes for arrays >= 1-D. + + Given two tensors (arrays of dimension greater than or equal to one), + `a` and `b`, and an array_like object containing two array_like + objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s + elements (components) over the axes specified by ``a_axes`` and + ``b_axes``. The third argument can be a single non-negative + integer_like scalar, ``N``; if it is such, then the last ``N`` + dimensions of `a` and the first ``N`` dimensions of `b` are summed + over. + + Parameters + ---------- + a, b : array_like, len(shape) >= 1 + Tensors to "dot". + axes : variable type + * integer_like scalar + Number of axes to sum over (applies to both arrays); or + * (2,) array_like, both elements array_like of the same length + List of axes to be summed over, first sequence applying to `a`, + second to `b`. + + See Also + -------- + dot, einsum + + Notes + ----- + When there is more than one axis to sum over - and they are not the last + (first) axes of `a` (`b`) - the argument `axes` should consist of + two sequences of the same length, with the first axis to sum over given + first in both sequences, the second axis second, and so forth. 
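+
+    For two 2-D arrays, ``axes=1`` reduces to the conventional matrix
+    product:
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> b = np.arange(6).reshape(3, 2)
+    >>> np.tensordot(a, b, axes=1)
+    array([[10, 13],
+           [28, 40]])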
+ + Examples + -------- + A "traditional" example: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) + >>> c.shape + (5, 2) + >>> c + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> # A slower but equivalent way of computing the same... + >>> d = np.zeros((5,2)) + >>> for i in range(5): + ... for j in range(2): + ... for k in range(3): + ... for n in range(4): + ... d[i,j] += a[k,n,i] * b[n,k,j] + >>> c == d + array([[ True, True], + [ True, True], + [ True, True], + [ True, True], + [ True, True]], dtype=bool) + + An extended example taking advantage of the overloading of + and \\*: + + >>> a = np.array(range(1, 9)) + >>> a.shape = (2, 2, 2) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A.shape = (2, 2) + >>> a; A + array([[[1, 2], + [3, 4]], + [[5, 6], + [7, 8]]]) + array([[a, b], + [c, d]], dtype=object) + + >>> np.tensordot(a, A) # third argument default is 2 + array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object) + + >>> np.tensordot(a, A, 1) + array([[[acc, bdd], + [aaacccc, bbbdddd]], + [[aaaaacccccc, bbbbbdddddd], + [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object) + + >>> np.tensordot(a, A, 0) # "Left for reader" (result too long to incl.) + array([[[[[a, b], + [c, d]], + ... + + >>> np.tensordot(a, A, (0, 1)) + array([[[abbbbb, cddddd], + [aabbbbbb, ccdddddd]], + [[aaabbbbbbb, cccddddddd], + [aaaabbbbbbbb, ccccdddddddd]]], dtype=object) + + >>> np.tensordot(a, A, (2, 1)) + array([[[abb, cdd], + [aaabbbb, cccdddd]], + [[aaaaabbbbbb, cccccdddddd], + [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object) + + >>> np.tensordot(a, A, ((0, 1), (0, 1))) + array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object) + + >>> np.tensordot(a, A, ((2, 1), (1, 0))) + array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object) + + """ + try: + iter(axes) + except: + axes_a = list(range(-axes, 0)) + axes_b = list(range(0, axes)) + else: + axes_a, axes_b = axes + try: + na = len(axes_a) + axes_a = list(axes_a) + except TypeError: + axes_a = [axes_a] + na = 1 + try: + nb = len(axes_b) + axes_b = list(axes_b) + except TypeError: + axes_b = [axes_b] + nb = 1 + + a, b = asarray(a), asarray(b) + as_ = a.shape + nda = len(a.shape) + bs = b.shape + ndb = len(b.shape) + equal = True + if (na != nb): equal = False + else: + for k in range(na): + if as_[axes_a[k]] != bs[axes_b[k]]: + equal = False + break + if axes_a[k] < 0: + axes_a[k] += nda + if axes_b[k] < 0: + axes_b[k] += ndb + if not equal: + raise ValueError("shape-mismatch for sum") + + # Move the axes to sum over to the end of "a" + # and to the front of "b" + notin = [k for k in range(nda) if k not in axes_a] + newaxes_a = notin + axes_a + N2 = 1 + for axis in axes_a: + N2 *= as_[axis] + newshape_a = (-1, N2) + olda = [as_[axis] for axis in notin] + + notin = [k for k in range(ndb) if k not in axes_b] + newaxes_b = axes_b + notin + N2 = 1 + for axis in axes_b: + N2 *= bs[axis] + newshape_b = (N2, -1) + oldb = [bs[axis] for axis in notin] + + at = a.transpose(newaxes_a).reshape(newshape_a) + bt = b.transpose(newaxes_b).reshape(newshape_b) + res = dot(at, bt) + return res.reshape(olda + oldb) + +def roll(a, shift, axis=None): + """ + Roll array elements along a given axis. + + Elements that roll beyond the last position are re-introduced at + the first. + + Parameters + ---------- + a : array_like + Input array. + shift : int + The number of places by which elements are shifted. 
+ axis : int, optional + The axis along which elements are shifted. By default, the array + is flattened before shifting, after which the original + shape is restored. + + Returns + ------- + res : ndarray + Output array, with the same shape as `a`. + + See Also + -------- + rollaxis : Roll the specified axis backwards, until it lies in a + given position. + + Examples + -------- + >>> x = np.arange(10) + >>> np.roll(x, 2) + array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) + + >>> x2 = np.reshape(x, (2,5)) + >>> x2 + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> np.roll(x2, 1) + array([[9, 0, 1, 2, 3], + [4, 5, 6, 7, 8]]) + >>> np.roll(x2, 1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, 1, axis=1) + array([[4, 0, 1, 2, 3], + [9, 5, 6, 7, 8]]) + + """ + a = asanyarray(a) + if axis is None: + n = a.size + reshape = True + else: + try: + n = a.shape[axis] + except IndexError: + raise ValueError('axis must be >= 0 and < %d' % a.ndim) + reshape = False + if n == 0: + return a + shift %= n + indexes = concatenate((arange(n - shift, n), arange(n - shift))) + res = a.take(indexes, axis) + if reshape: + res = res.reshape(a.shape) + return res + +def rollaxis(a, axis, start=0): + """ + Roll the specified axis backwards, until it lies in a given position. + + Parameters + ---------- + a : ndarray + Input array. + axis : int + The axis to roll backwards. The positions of the other axes do not + change relative to one another. + start : int, optional + The axis is rolled until it lies before this position. The default, + 0, results in a "complete" roll. + + Returns + ------- + res : ndarray + Output array. + + See Also + -------- + roll : Roll the elements of an array by a number of positions along a + given axis. + + Examples + -------- + >>> a = np.ones((3,4,5,6)) + >>> np.rollaxis(a, 3, 1).shape + (3, 6, 4, 5) + >>> np.rollaxis(a, 2).shape + (5, 3, 4, 6) + >>> np.rollaxis(a, 1, 4).shape + (3, 5, 6, 4) + + """ + n = a.ndim + if axis < 0: + axis += n + if start < 0: + start += n + msg = 'rollaxis: %s (%d) must be >=0 and < %d' + if not (0 <= axis < n): + raise ValueError(msg % ('axis', axis, n)) + if not (0 <= start < n+1): + raise ValueError(msg % ('start', start, n+1)) + if (axis < start): # it's been removed + start -= 1 + if axis==start: + return a + axes = list(range(0, n)) + axes.remove(axis) + axes.insert(start, axis) + return a.transpose(axes) + +# fix hack in scipy which imports this function +def _move_axis_to_0(a, axis): + return rollaxis(a, axis, 0) + +def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): + """ + Return the cross product of two (arrays of) vectors. + + The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular + to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors + are defined by the last axis of `a` and `b` by default, and these axes + can have dimensions 2 or 3. Where the dimension of either `a` or `b` is + 2, the third component of the input vector is assumed to be zero and the + cross product calculated accordingly. In cases where both input vectors + have dimension 2, the z-component of the cross product is returned. + + Parameters + ---------- + a : array_like + Components of the first vector(s). + b : array_like + Components of the second vector(s). + axisa : int, optional + Axis of `a` that defines the vector(s). By default, the last axis. + axisb : int, optional + Axis of `b` that defines the vector(s). By default, the last axis. + axisc : int, optional + Axis of `c` containing the cross product vector(s). 
By default, the + last axis. + axis : int, optional + If defined, the axis of `a`, `b` and `c` that defines the vector(s) + and cross product(s). Overrides `axisa`, `axisb` and `axisc`. + + Returns + ------- + c : ndarray + Vector cross product(s). + + Raises + ------ + ValueError + When the dimension of the vector(s) in `a` and/or `b` does not + equal 2 or 3. + + See Also + -------- + inner : Inner product + outer : Outer product. + ix_ : Construct index arrays. + + Notes + ----- + .. versionadded:: 1.9.0 + Supports full broadcasting of the inputs. + + Examples + -------- + Vector cross-product. + + >>> x = [1, 2, 3] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([-3, 6, -3]) + + One vector with dimension 2. + + >>> x = [1, 2] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Equivalently: + + >>> x = [1, 2, 0] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Both vectors with dimension 2. + + >>> x = [1,2] + >>> y = [4,5] + >>> np.cross(x, y) + -3 + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the `right-hand rule`. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + The orientation of `c` can be changed using the `axisc` keyword. + + >>> np.cross(x, y, axisc=0) + array([[-3, 3], + [ 6, -6], + [-3, 3]]) + + Change the vector definition of `x` and `y` using `axisa` and `axisb`. + + >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) + >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[ -6, 12, -6], + [ 0, 0, 0], + [ 6, -12, 6]]) + >>> np.cross(x, y, axisa=0, axisb=0) + array([[-24, 48, -24], + [-30, 60, -30], + [-36, 72, -36]]) + + """ + if axis is not None: + axisa, axisb, axisc = (axis,) * 3 + a = asarray(a) + b = asarray(b) + # Move working axis to the end of the shape + a = rollaxis(a, axisa, a.ndim) + b = rollaxis(b, axisb, b.ndim) + msg = ("incompatible dimensions for cross product\n" + "(dimension must be 2 or 3)") + if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): + raise ValueError(msg) + + # Create the output array + shape = broadcast(a[..., 0], b[..., 0]).shape + if a.shape[-1] == 3 or b.shape[-1] == 3: + shape += (3,) + dtype = promote_types(a.dtype, b.dtype) + cp = empty(shape, dtype) + + # create local aliases for readability + a0 = a[..., 0] + a1 = a[..., 1] + if a.shape[-1] == 3: + a2 = a[..., 2] + b0 = b[..., 0] + b1 = b[..., 1] + if b.shape[-1] == 3: + b2 = b[..., 2] + if cp.ndim != 0 and cp.shape[-1] == 3: + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + if a.shape[-1] == 2: + if b.shape[-1] == 2: + # a0 * b1 - a1 * b0 + multiply(a0, b1, out=cp) + cp -= a1 * b0 + if cp.ndim == 0: + return cp + else: + # This works because we are moving the last axis + return rollaxis(cp, -1, axisc) + else: + # cp0 = a1 * b2 - 0 (a2 = 0) + # cp1 = 0 - a0 * b2 (a2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + multiply(a0, b2, out=cp1) + negative(cp1, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + elif a.shape[-1] == 3: + if b.shape[-1] == 3: + # cp0 = a1 * b2 - a2 * b1 + # cp1 = a2 * b0 - a0 * b2 + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + tmp = array(a2 * b1) + cp0 -= tmp + multiply(a2, b0, out=cp1) + multiply(a0, b2, out=tmp) + cp1 -= tmp + multiply(a0, b1, out=cp2) + multiply(a1, b0, out=tmp) + cp2 -= tmp + else: + # cp0 = 0 - a2 * b1 (b2 = 0) + # cp1 = a2 * b0 - 0 (b2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a2, b1, 
out=cp0) + negative(cp0, out=cp0) + multiply(a2, b0, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + + if cp.ndim == 1: + return cp + else: + # This works because we are moving the last axis + return rollaxis(cp, -1, axisc) + +#Use numarray's printing function +from .arrayprint import array2string, get_printoptions, set_printoptions + +_typelessdata = [int_, float_, complex_] +if issubclass(intc, int): + _typelessdata.append(intc) + +if issubclass(longlong, int): + _typelessdata.append(longlong) + +def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): + """ + Return the string representation of an array. + + Parameters + ---------- + arr : ndarray + Input array. + max_line_width : int, optional + The maximum number of columns the string should span. Newline + characters split the string appropriately after array elements. + precision : int, optional + Floating point precision. Default is the current printing precision + (usually 8), which can be altered using `set_printoptions`. + suppress_small : bool, optional + Represent very small numbers as zero, default is False. Very small + is defined by `precision`, if the precision is 8 then + numbers smaller than 5e-9 are represented as zero. + + Returns + ------- + string : str + The string representation of an array. + + See Also + -------- + array_str, array2string, set_printoptions + + Examples + -------- + >>> np.array_repr(np.array([1,2])) + 'array([1, 2])' + >>> np.array_repr(np.ma.array([0.])) + 'MaskedArray([ 0.])' + >>> np.array_repr(np.array([], np.int32)) + 'array([], dtype=int32)' + + >>> x = np.array([1e-6, 4e-7, 2, 3]) + >>> np.array_repr(x, precision=6, suppress_small=True) + 'array([ 0.000001, 0. , 2. , 3. ])' + + """ + if arr.size > 0 or arr.shape==(0,): + lst = array2string(arr, max_line_width, precision, suppress_small, + ', ', "array(") + else: # show zero-length shape unless it is (0,) + lst = "[], shape=%s" % (repr(arr.shape),) + + if arr.__class__ is not ndarray: + cName= arr.__class__.__name__ + else: + cName = "array" + + skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0 + + if skipdtype: + return "%s(%s)" % (cName, lst) + else: + typename = arr.dtype.name + # Quote typename in the output if it is "complex". + if typename and not (typename[0].isalpha() and typename.isalnum()): + typename = "'%s'" % typename + + lf = '' + if issubclass(arr.dtype.type, flexible): + if arr.dtype.names: + typename = "%s" % str(arr.dtype) + else: + typename = "'%s'" % str(arr.dtype) + lf = '\n'+' '*len("array(") + return cName + "(%s, %sdtype=%s)" % (lst, lf, typename) + +def array_str(a, max_line_width=None, precision=None, suppress_small=None): + """ + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function is + similar to `array_repr`, the difference being that `array_repr` also + returns information on the kind of array and its data type. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. The + default is, indirectly, 75. + precision : int, optional + Floating point precision. Default is the current printing precision + (usually 8), which can be altered using `set_printoptions`. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. 
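+
+    Returns
+    -------
+    out : str
+        String representation of the data in `a`.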
+ + See Also + -------- + array2string, array_repr, set_printoptions + + Examples + -------- + >>> np.array_str(np.arange(3)) + '[0 1 2]' + + """ + return array2string(a, max_line_width, precision, suppress_small, ' ', "", str) + +def set_string_function(f, repr=True): + """ + Set a Python function to be used when pretty printing arrays. + + Parameters + ---------- + f : function or None + Function to be used to pretty print arrays. The function should expect + a single array argument and return a string of the representation of + the array. If None, the function is reset to the default NumPy function + to print arrays. + repr : bool, optional + If True (default), the function for pretty printing (``__repr__``) + is set, if False the function that returns the default string + representation (``__str__``) is set. + + See Also + -------- + set_printoptions, get_printoptions + + Examples + -------- + >>> def pprint(arr): + ... return 'HA! - What are you going to do now?' + ... + >>> np.set_string_function(pprint) + >>> a = np.arange(10) + >>> a + HA! - What are you going to do now? + >>> print a + [0 1 2 3 4 5 6 7 8 9] + + We can reset the function to the default: + + >>> np.set_string_function(None) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + `repr` affects either pretty printing or normal string representation. + Note that ``__repr__`` is still affected by setting ``__str__`` + because the width of each array element in the returned string becomes + equal to the length of the result of ``__str__()``. + + >>> x = np.arange(4) + >>> np.set_string_function(lambda x:'random', repr=False) + >>> x.__str__() + 'random' + >>> x.__repr__() + 'array([ 0, 1, 2, 3])' + + """ + if f is None: + if repr: + return multiarray.set_string_function(array_repr, 1) + else: + return multiarray.set_string_function(array_str, 0) + else: + return multiarray.set_string_function(f, repr) + +set_string_function(array_str, 0) +set_string_function(array_repr, 1) + +little_endian = (sys.byteorder == 'little') + + +def indices(dimensions, dtype=int): + """ + Return an array representing the indices of a grid. + + Compute an array where the subarrays contain index values 0,1,... + varying only along the corresponding axis. + + Parameters + ---------- + dimensions : sequence of ints + The shape of the grid. + dtype : dtype, optional + Data type of the result. + + Returns + ------- + grid : ndarray + The array of grid indices, + ``grid.shape = (len(dimensions),) + tuple(dimensions)``. + + See Also + -------- + mgrid, meshgrid + + Notes + ----- + The output shape is obtained by prepending the number of dimensions + in front of the tuple of dimensions, i.e. if `dimensions` is a tuple + ``(r0, ..., rN-1)`` of length ``N``, the output shape is + ``(N,r0,...,rN-1)``. + + The subarrays ``grid[k]`` contains the N-D array of indices along the + ``k-th`` axis. Explicitly:: + + grid[k,i0,i1,...,iN-1] = ik + + Examples + -------- + >>> grid = np.indices((2, 3)) + >>> grid.shape + (2, 2, 3) + >>> grid[0] # row indices + array([[0, 0, 0], + [1, 1, 1]]) + >>> grid[1] # column indices + array([[0, 1, 2], + [0, 1, 2]]) + + The indices can be used as an index into an array. + + >>> x = np.arange(20).reshape(5, 4) + >>> row, col = np.indices((2, 3)) + >>> x[row, col] + array([[0, 1, 2], + [4, 5, 6]]) + + Note that it would be more straightforward in the above example to + extract the required elements directly with ``x[:2, :3]``. 
+ + """ + dimensions = tuple(dimensions) + N = len(dimensions) + if N == 0: + return array([], dtype=dtype) + res = empty((N,)+dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + tmp = arange(dim, dtype=dtype) + tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1) + newdim = dimensions[:i] + (1,)+ dimensions[i+1:] + val = zeros(newdim, dtype) + add(tmp, val, res[i]) + return res + +def fromfunction(function, shape, **kwargs): + """ + Construct an array by executing a function over each coordinate. + + The resulting array therefore has a value ``fn(x, y, z)`` at + coordinate ``(x, y, z)``. + + Parameters + ---------- + function : callable + The function is called with N parameters, where N is the rank of + `shape`. Each parameter represents the coordinates of the array + varying along a specific axis. For example, if `shape` + were ``(2, 2)``, then the parameters in turn be (0, 0), (0, 1), + (1, 0), (1, 1). + shape : (N,) tuple of ints + Shape of the output array, which also determines the shape of + the coordinate arrays passed to `function`. + dtype : data-type, optional + Data-type of the coordinate arrays passed to `function`. + By default, `dtype` is float. + + Returns + ------- + fromfunction : any + The result of the call to `function` is passed back directly. + Therefore the shape of `fromfunction` is completely determined by + `function`. If `function` returns a scalar value, the shape of + `fromfunction` would match the `shape` parameter. + + See Also + -------- + indices, meshgrid + + Notes + ----- + Keywords other than `dtype` are passed to `function`. + + Examples + -------- + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + array([[ True, False, False], + [False, True, False], + [False, False, True]], dtype=bool) + + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4]]) + + """ + dtype = kwargs.pop('dtype', float) + args = indices(shape, dtype=dtype) + return function(*args,**kwargs) + +def isscalar(num): + """ + Returns True if the type of `num` is a scalar type. + + Parameters + ---------- + num : any + Input argument, can be of any type and shape. + + Returns + ------- + val : bool + True if `num` is a scalar type, False if it is not. + + Examples + -------- + >>> np.isscalar(3.1) + True + >>> np.isscalar([3.1]) + False + >>> np.isscalar(False) + True + + """ + if isinstance(num, generic): + return True + else: + return type(num) in ScalarType + +_lkup = { + '0':'0000', + '1':'0001', + '2':'0010', + '3':'0011', + '4':'0100', + '5':'0101', + '6':'0110', + '7':'0111', + '8':'1000', + '9':'1001', + 'a':'1010', + 'b':'1011', + 'c':'1100', + 'd':'1101', + 'e':'1110', + 'f':'1111', + 'A':'1010', + 'B':'1011', + 'C':'1100', + 'D':'1101', + 'E':'1110', + 'F':'1111', + 'L':''} + +def binary_repr(num, width=None): + """ + Return the binary representation of the input number as a string. + + For negative numbers, if width is not given, a minus sign is added to the + front. If width is given, the two's complement of the number is + returned, with respect to that width. + + In a two's-complement system negative numbers are represented by the two's + complement of the absolute value. This is the most common method of + representing signed integers on computers [1]_. A N-bit two's-complement + system can represent every integer in the range + :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + + Parameters + ---------- + num : int + Only an integer decimal number can be used. 
+ width : int, optional + The length of the returned string if `num` is positive, the length of + the two's complement if `num` is negative. + + Returns + ------- + bin : str + Binary representation of `num` or two's complement of `num`. + + See Also + -------- + base_repr: Return a string representation of a number in the given base + system. + + Notes + ----- + `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x + faster. + + References + ---------- + .. [1] Wikipedia, "Two's complement", + http://en.wikipedia.org/wiki/Two's_complement + + Examples + -------- + >>> np.binary_repr(3) + '11' + >>> np.binary_repr(-3) + '-11' + >>> np.binary_repr(3, width=4) + '0011' + + The two's complement is returned when the input number is negative and + width is specified: + + >>> np.binary_repr(-3, width=4) + '1101' + + """ + # ' <-- unbreak Emacs fontification + sign = '' + if num < 0: + if width is None: + sign = '-' + num = -num + else: + # replace num with its 2-complement + num = 2**width + num + elif num == 0: + return '0'*(width or 1) + ostr = hex(num) + bin = ''.join([_lkup[ch] for ch in ostr[2:]]) + bin = bin.lstrip('0') + if width is not None: + bin = bin.zfill(width) + return sign + bin + +def base_repr(number, base=2, padding=0): + """ + Return a string representation of a number in the given base system. + + Parameters + ---------- + number : int + The value to convert. Only positive values are handled. + base : int, optional + Convert `number` to the `base` number system. The valid range is 2-36, + the default value is 2. + padding : int, optional + Number of zeros padded on the left. Default is 0 (no padding). + + Returns + ------- + out : str + String representation of `number` in `base` system. + + See Also + -------- + binary_repr : Faster version of `base_repr` for base 2. + + Examples + -------- + >>> np.base_repr(5) + '101' + >>> np.base_repr(6, 5) + '11' + >>> np.base_repr(7, base=5, padding=3) + '00012' + + >>> np.base_repr(10, base=16) + 'A' + >>> np.base_repr(32, base=16) + '20' + + """ + digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + if base > len(digits): + raise ValueError("Bases greater than 36 not handled in base_repr.") + + num = abs(number) + res = [] + while num: + res.append(digits[num % base]) + num //= base + if padding: + res.append('0' * padding) + if number < 0: + res.append('-') + return ''.join(reversed(res or '0')) + + +def load(file): + """ + Wrapper around cPickle.load which accepts either a file-like object or + a filename. + + Note that the NumPy binary format is not based on pickle/cPickle anymore. + For details on the preferred way of loading and saving files, see `load` + and `save`. + + See Also + -------- + load, save + + """ + if isinstance(file, type("")): + file = open(file, "rb") + return pickle.load(file) + +# These are all essentially abbreviations +# These might wind up in a special abbreviations module + +def _maketup(descr, val): + dt = dtype(descr) + # Place val in all scalar tuples: + fields = dt.fields + if fields is None: + return val + else: + res = [_maketup(fields[name][0], val) for name in dt.names] + return tuple(res) + +def identity(n, dtype=None): + """ + Return the identity array. + + The identity array is a square array with ones on + the main diagonal. + + Parameters + ---------- + n : int + Number of rows (and columns) in `n` x `n` output. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. 
+ + Returns + ------- + out : ndarray + `n` x `n` array with its main diagonal set to one, + and all other elements 0. + + Examples + -------- + >>> np.identity(3) + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + """ + from numpy import eye + return eye(n, dtype=dtype) + +def allclose(a, b, rtol=1.e-5, atol=1.e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + If either array contains one or more NaNs, False is returned. + Infs are treated as equal if they are in the same place and of the same + sign in both arrays. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : float + The relative tolerance parameter (see Notes). + atol : float + The absolute tolerance parameter (see Notes). + + Returns + ------- + allclose : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. + + See Also + -------- + isclose, all, any + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + The above equation is not symmetric in `a` and `b`, so that + `allclose(a, b)` might be different from `allclose(b, a)` in + some rare cases. + + Examples + -------- + >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) + False + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) + True + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) + False + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) + False + + """ + x = array(a, copy=False, ndmin=1) + y = array(b, copy=False, ndmin=1) + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = multiarray.result_type(y, 1.) + y = array(y, dtype=dtype, copy=False) + + xinf = isinf(x) + yinf = isinf(y) + if any(xinf) or any(yinf): + # Check that x and y have inf's only in the same positions + if not all(xinf == yinf): + return False + # Check that sign of inf's in x and y is the same + if not all(x[xinf] == y[xinf]): + return False + + x = x[~xinf] + y = y[~xinf] + + # ignore invalid fpe's + with errstate(invalid='ignore'): + r = all(less_equal(abs(x - y), atol + rtol * abs(y))) + + return r + +def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns a boolean array where two arrays are element-wise equal within a + tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : float + The relative tolerance parameter (see Notes). + atol : float + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + y : array_like + Returns a boolean array of where `a` and `b` are equal within the + given tolerance. If both `a` and `b` are scalars, returns a single + boolean value. + + See Also + -------- + allclose + + Notes + ----- + .. 
versionadded:: 1.7.0 + + For finite values, isclose uses the following equation to test whether + two floating point values are equivalent. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + The above equation is not symmetric in `a` and `b`, so that + `isclose(a, b)` might be different from `isclose(b, a)` in + some rare cases. + + Examples + -------- + >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) + array([True, False]) + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) + array([True, True]) + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) + array([False, True]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) + array([True, False]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + array([True, True]) + """ + def within_tol(x, y, atol, rtol): + with errstate(invalid='ignore'): + result = less_equal(abs(x-y), atol + rtol * abs(y)) + if isscalar(a) and isscalar(b): + result = bool(result) + return result + + x = array(a, copy=False, subok=True, ndmin=1) + y = array(b, copy=False, subok=True, ndmin=1) + xfin = isfinite(x) + yfin = isfinite(y) + if all(xfin) and all(yfin): + return within_tol(x, y, atol, rtol) + else: + finite = xfin & yfin + cond = zeros_like(finite, subok=True) + # Because we're using boolean indexing, x & y must be the same shape. + # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in + # lib.stride_tricks, though, so we can't import it here. + x = x * ones_like(cond) + y = y * ones_like(cond) + # Avoid subtraction with infinite/nan values... + cond[finite] = within_tol(x[finite], y[finite], atol, rtol) + # Check for equality of infinite values... + cond[~finite] = (x[~finite] == y[~finite]) + if equal_nan: + # Make NaN == NaN + both_nan = isnan(x) & isnan(y) + cond[both_nan] = both_nan[both_nan] + return cond + +def array_equal(a1, a2): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + if a1.shape != a2.shape: + return False + return bool(asarray(a1 == a2).all()) + +def array_equiv(a1, a2): + """ + Returns True if input arrays are shape consistent and all elements equal. + + Shape consistent means they are either the same shape, or one input array + can be broadcasted to create the same shape as the other one. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + out : bool + True if equivalent, False otherwise. 
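+
+    See Also
+    --------
+    array_equal : True if two arrays have the same shape and elements.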
+ + Examples + -------- + >>> np.array_equiv([1, 2], [1, 2]) + True + >>> np.array_equiv([1, 2], [1, 3]) + False + + Showing the shape equivalence: + + >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) + True + >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) + False + + >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except: + return False + try: + multiarray.broadcast(a1, a2) + except: + return False + + return bool(asarray(a1 == a2).all()) + + +_errdict = {"ignore":ERR_IGNORE, + "warn":ERR_WARN, + "raise":ERR_RAISE, + "call":ERR_CALL, + "print":ERR_PRINT, + "log":ERR_LOG} + +_errdict_rev = {} +for key in _errdict.keys(): + _errdict_rev[_errdict[key]] = key +del key + +def seterr(all=None, divide=None, over=None, under=None, invalid=None): + """ + Set how floating-point errors are handled. + + Note that operations on integer scalar types (such as `int16`) are + handled like floating point, and are affected by these settings. + + Parameters + ---------- + all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Set treatment for all types of floating-point errors at once: + + - ignore: Take no action when the exception occurs. + - warn: Print a `RuntimeWarning` (via the Python `warnings` module). + - raise: Raise a `FloatingPointError`. + - call: Call a function specified using the `seterrcall` function. + - print: Print a warning directly to ``stdout``. + - log: Record error in a Log object specified by `seterrcall`. + + The default is not to change the current behavior. + divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for division by zero. + over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point overflow. + under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point underflow. + invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for invalid floating-point operation. + + Returns + ------- + old_settings : dict + Dictionary containing the old settings. + + See also + -------- + seterrcall : Set a callback function for the 'call' mode. + geterr, geterrcall, errstate + + Notes + ----- + The floating-point exceptions are defined in the IEEE 754 standard [1]: + + - Division by zero: infinite result obtained from finite numbers. + - Overflow: result too large to be expressed. + - Underflow: result so close to zero that some precision + was lost. + - Invalid operation: result is not an expressible number, typically + indicates that a NaN was produced. + + .. 
[1] http://en.wikipedia.org/wiki/IEEE_754
+
+    Examples
+    --------
+    >>> old_settings = np.seterr(all='ignore')  #seterr to known value
+    >>> np.seterr(over='raise')
+    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
+     'under': 'ignore'}
+    >>> np.seterr(**old_settings)  # reset to default
+    {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
+
+    >>> np.int16(32000) * np.int16(3)
+    30464
+    >>> old_settings = np.seterr(all='warn', over='raise')
+    >>> np.int16(32000) * np.int16(3)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    FloatingPointError: overflow encountered in short_scalars
+
+    >>> old_settings = np.seterr(all='print')
+    >>> np.geterr()
+    {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
+    >>> np.int16(32000) * np.int16(3)
+    Warning: overflow encountered in short_scalars
+    30464
+
+    """
+
+    pyvals = umath.geterrobj()
+    old = geterr()
+
+    if divide is None: divide = all or old['divide']
+    if over is None: over = all or old['over']
+    if under is None: under = all or old['under']
+    if invalid is None: invalid = all or old['invalid']
+
+    maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
+                 (_errdict[over] << SHIFT_OVERFLOW) +
+                 (_errdict[under] << SHIFT_UNDERFLOW) +
+                 (_errdict[invalid] << SHIFT_INVALID))
+
+    pyvals[1] = maskvalue
+    umath.seterrobj(pyvals)
+    return old
+
+
+def geterr():
+    """
+    Get the current way of handling floating-point errors.
+
+    Returns
+    -------
+    res : dict
+        A dictionary with keys "divide", "over", "under", and "invalid",
+        whose values are from the strings "ignore", "print", "log", "warn",
+        "raise", and "call". The keys represent possible floating-point
+        exceptions, and the values define how these exceptions are handled.
+
+    See Also
+    --------
+    geterrcall, seterr, seterrcall
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> np.geterr()
+    {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
+     'under': 'ignore'}
+    >>> np.arange(3.) / np.arange(3.)
+    array([ NaN,  1.,  1.])
+
+    >>> oldsettings = np.seterr(all='warn', over='raise')
+    >>> np.geterr()
+    {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
+    >>> np.arange(3.) / np.arange(3.)
+    __main__:1: RuntimeWarning: invalid value encountered in divide
+    array([ NaN,  1.,  1.])
+
+    """
+    maskvalue = umath.geterrobj()[1]
+    mask = 7
+    res = {}
+    val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
+    res['divide'] = _errdict_rev[val]
+    val = (maskvalue >> SHIFT_OVERFLOW) & mask
+    res['over'] = _errdict_rev[val]
+    val = (maskvalue >> SHIFT_UNDERFLOW) & mask
+    res['under'] = _errdict_rev[val]
+    val = (maskvalue >> SHIFT_INVALID) & mask
+    res['invalid'] = _errdict_rev[val]
+    return res
+
+def setbufsize(size):
+    """
+    Set the size of the buffer used in ufuncs.
+
+    Parameters
+    ----------
+    size : int
+        Size of buffer.
+
+    """
+    if size > 10e6:
+        raise ValueError("Buffer size, %s, is too big." % size)
+    if size < 5:
+        raise ValueError("Buffer size, %s, is too small." % size)
+    if size % 16 != 0:
+        raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
+
+    pyvals = umath.geterrobj()
+    old = getbufsize()
+    pyvals[0] = size
+    umath.seterrobj(pyvals)
+    return old
+
+def getbufsize():
+    """
+    Return the size of the buffer used in ufuncs.
+
+    Returns
+    -------
+    getbufsize : int
+        Size of ufunc buffer in bytes.
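+
+    Examples
+    --------
+    A small round-trip sketch; the 8192 shown is numpy's usual default
+    buffer size, not a guarantee (it assumes no earlier `setbufsize` call):
+
+    >>> np.getbufsize()
+    8192
+    >>> old = np.setbufsize(16384)  # setbufsize returns the previous size
+    >>> np.getbufsize()
+    16384
+    >>> _ = np.setbufsize(old)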
+ + """ + return umath.geterrobj()[0] + +def seterrcall(func): + """ + Set the floating-point error callback function or log object. + + There are two ways to capture floating-point error messages. The first + is to set the error-handler to 'call', using `seterr`. Then, set + the function to call using this function. + + The second is to set the error-handler to 'log', using `seterr`. + Floating-point errors then trigger a call to the 'write' method of + the provided object. + + Parameters + ---------- + func : callable f(err, flag) or object with write method + Function to call upon floating-point errors ('call'-mode) or + object whose 'write' method is used to log such message ('log'-mode). + + The call function takes two arguments. The first is the + type of error (one of "divide", "over", "under", or "invalid"), + and the second is the status flag. The flag is a byte, whose + least-significant bits indicate the status:: + + [0 0 0 0 invalid over under invalid] + + In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. + + If an object is provided, its write method should take one argument, + a string. + + Returns + ------- + h : callable, log instance or None + The old error handler. + + See Also + -------- + seterr, geterr, geterrcall + + Examples + -------- + Callback upon error: + + >>> def err_handler(type, flag): + ... print "Floating point error (%s), with flag %s" % (type, flag) + ... + + >>> saved_handler = np.seterrcall(err_handler) + >>> save_err = np.seterr(all='call') + + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([ Inf, Inf, Inf]) + + >>> np.seterrcall(saved_handler) + + >>> np.seterr(**save_err) + {'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'} + + Log error message: + + >>> class Log(object): + ... def write(self, msg): + ... print "LOG: %s" % msg + ... + + >>> log = Log() + >>> saved_handler = np.seterrcall(log) + >>> save_err = np.seterr(all='log') + + >>> np.array([1, 2, 3]) / 0.0 + LOG: Warning: divide by zero encountered in divide + + array([ Inf, Inf, Inf]) + + >>> np.seterrcall(saved_handler) + <__main__.Log object at 0x...> + >>> np.seterr(**save_err) + {'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'} + + """ + if func is not None and not isinstance(func, collections.Callable): + if not hasattr(func, 'write') or not isinstance(func.write, collections.Callable): + raise ValueError("Only callable can be used as callback") + pyvals = umath.geterrobj() + old = geterrcall() + pyvals[2] = func + umath.seterrobj(pyvals) + return old + +def geterrcall(): + """ + Return the current callback function used on floating-point errors. + + When the error handling for a floating-point error (one of "divide", + "over", "under", or "invalid") is set to 'call' or 'log', the function + that is called or the log instance that is written to is returned by + `geterrcall`. This function or log instance has been set with + `seterrcall`. + + Returns + ------- + errobj : callable, log instance or None + The current error handler. If no handler was set through `seterrcall`, + ``None`` is returned. + + See Also + -------- + seterrcall, seterr, geterr + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> np.geterrcall() # we did not yet set a handler, returns None + + >>> oldsettings = np.seterr(all='call') + >>> def err_handler(type, flag): + ... 
print "Floating point error (%s), with flag %s" % (type, flag) + >>> oldhandler = np.seterrcall(err_handler) + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([ Inf, Inf, Inf]) + + >>> cur_handler = np.geterrcall() + >>> cur_handler is err_handler + True + + """ + return umath.geterrobj()[2] + +class _unspecified(object): + pass +_Unspecified = _unspecified() + +class errstate(object): + """ + errstate(**kwargs) + + Context manager for floating-point error handling. + + Using an instance of `errstate` as a context manager allows statements in + that context to execute with a known error handling behavior. Upon entering + the context the error handling is set with `seterr` and `seterrcall`, and + upon exiting it is reset to what it was before. + + Parameters + ---------- + kwargs : {divide, over, under, invalid} + Keyword arguments. The valid keywords are the possible floating-point + exceptions. Each keyword should have a string value that defines the + treatment for the particular error. Possible values are + {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. + + See Also + -------- + seterr, geterr, seterrcall, geterrcall + + Notes + ----- + The ``with`` statement was introduced in Python 2.5, and can only be used + there by importing it: ``from __future__ import with_statement``. In + earlier Python versions the ``with`` statement is not available. + + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> from __future__ import with_statement # use 'with' in Python 2.5 + >>> olderr = np.seterr(all='ignore') # Set error handling to known state. + + >>> np.arange(3) / 0. + array([ NaN, Inf, Inf]) + >>> with np.errstate(divide='warn'): + ... np.arange(3) / 0. + ... + __main__:2: RuntimeWarning: divide by zero encountered in divide + array([ NaN, Inf, Inf]) + + >>> np.sqrt(-1) + nan + >>> with np.errstate(invalid='raise'): + ... np.sqrt(-1) + Traceback (most recent call last): + File "", line 2, in + FloatingPointError: invalid value encountered in sqrt + + Outside the context the error handling behavior has not changed: + + >>> np.geterr() + {'over': 'warn', 'divide': 'warn', 'invalid': 'warn', + 'under': 'ignore'} + + """ + # Note that we don't want to run the above doctests because they will fail + # without a from __future__ import with_statement + def __init__(self, **kwargs): + self.call = kwargs.pop('call', _Unspecified) + self.kwargs = kwargs + + def __enter__(self): + self.oldstate = seterr(**self.kwargs) + if self.call is not _Unspecified: + self.oldcall = seterrcall(self.call) + + def __exit__(self, *exc_info): + seterr(**self.oldstate) + if self.call is not _Unspecified: + seterrcall(self.oldcall) + + +def _setdef(): + defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None] + umath.seterrobj(defval) + +# set the default values +_setdef() + +Inf = inf = infty = Infinity = PINF +nan = NaN = NAN +False_ = bool_(False) +True_ = bool_(True) + +from .umath import * +from .numerictypes import * +from . 
import fromnumeric +from .fromnumeric import * +extend_all(fromnumeric) +extend_all(umath) +extend_all(numerictypes) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numerictypes.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numerictypes.py new file mode 100644 index 0000000000000..1545bc7348953 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numerictypes.py @@ -0,0 +1,1042 @@ +""" +numerictypes: Define the numeric type objects + +This module is designed so "from numerictypes import \\*" is safe. +Exported symbols include: + + Dictionary with all registered number types (including aliases): + typeDict + + Type objects (not all will be available, depends on platform): + see variable sctypes for which ones you have + + Bit-width names + + int8 int16 int32 int64 int128 + uint8 uint16 uint32 uint64 uint128 + float16 float32 float64 float96 float128 float256 + complex32 complex64 complex128 complex192 complex256 complex512 + datetime64 timedelta64 + + c-based names + + bool_ + + object_ + + void, str_, unicode_ + + byte, ubyte, + short, ushort + intc, uintc, + intp, uintp, + int_, uint, + longlong, ulonglong, + + single, csingle, + float_, complex_, + longfloat, clongfloat, + + As part of the type-hierarchy: xx -- is bit-width + + generic + +-> bool_ (kind=b) + +-> number (kind=i) + | integer + | signedinteger (intxx) + | byte + | short + | intc + | intp int0 + | int_ + | longlong + +-> unsignedinteger (uintxx) (kind=u) + | ubyte + | ushort + | uintc + | uintp uint0 + | uint_ + | ulonglong + +-> inexact + | +-> floating (floatxx) (kind=f) + | | half + | | single + | | float_ (double) + | | longfloat + | \\-> complexfloating (complexxx) (kind=c) + | csingle (singlecomplex) + | complex_ (cfloat, cdouble) + | clongfloat (longcomplex) + +-> flexible + | character + | void (kind=V) + | + | str_ (string_, bytes_) (kind=S) [Python 2] + | unicode_ (kind=U) [Python 2] + | + | bytes_ (string_) (kind=S) [Python 3] + | str_ (unicode_) (kind=U) [Python 3] + | + \\-> object_ (not used much) (kind=O) + +""" +from __future__ import division, absolute_import, print_function + +# we add more at the bottom +__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes', + 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', + 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', + 'issubdtype', 'datetime_data', 'datetime_as_string', + 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', + ] + +from numpy.core.multiarray import ( + typeinfo, ndarray, array, empty, dtype, datetime_data, + datetime_as_string, busday_offset, busday_count, is_busday, + busdaycalendar + ) +import types as _types +import sys +from numpy.compat import bytes, long +import numbers + +# we don't export these for import *, but we do want them accessible +# as numerictypes.bool, etc. +if sys.version_info[0] >= 3: + from builtins import bool, int, float, complex, object, str + unicode = str +else: + from __builtin__ import bool, int, float, complex, object, unicode, str + + +# String-handling utilities to avoid locale-dependence. + +# "import string" is costly to import! 
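+# (Illustration only; nothing below depends on this: on Python 3 a
+# behaviourally equivalent lowering table could be built with the builtin
+#     str.maketrans(''.join(_ascii_upper), ''.join(_ascii_lower))
+# using the _ascii_* lists defined below. The explicit chr()-based tables
+# also work for Python 2 byte strings, which is why they are used here.)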
+# Construct the translation tables directly +# "A" = chr(65), "a" = chr(97) +_all_chars = [chr(_m) for _m in range(256)] +_ascii_upper = _all_chars[65:65+26] +_ascii_lower = _all_chars[97:97+26] +LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) +UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) + +#import string +# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \ +# LOWER_TABLE) +# assert (string.maketrnas(string_ascii_lowercase, string.ascii_uppercase) == \ +# UPPER_TABLE) +#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase) +#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase) + +def english_lower(s): + """ Apply English case rules to convert ASCII strings to all lower case. + + This is an internal utility function to replace calls to str.lower() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + lowered : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_lower + >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' + >>> english_lower('') + '' + """ + lowered = s.translate(LOWER_TABLE) + return lowered + +def english_upper(s): + """ Apply English case rules to convert ASCII strings to all upper case. + + This is an internal utility function to replace calls to str.upper() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + uppered : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_upper + >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' + >>> english_upper('') + '' + """ + uppered = s.translate(UPPER_TABLE) + return uppered + +def english_capitalize(s): + """ Apply English case rules to convert the first character of an ASCII + string to upper case. + + This is an internal utility function to replace calls to str.capitalize() + such that we can avoid changing behavior with changing locales. 
+ + Parameters + ---------- + s : str + + Returns + ------- + capitalized : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_capitalize + >>> english_capitalize('int8') + 'Int8' + >>> english_capitalize('Int8') + 'Int8' + >>> english_capitalize('') + '' + """ + if s: + return english_upper(s[0]) + s[1:] + else: + return s + + +sctypeDict = {} # Contains all leaf-node scalar types with aliases +sctypeNA = {} # Contails all leaf-node types -> numarray type equivalences +allTypes = {} # Collect the types we will add to the module here + +def _evalname(name): + k = 0 + for ch in name: + if ch in '0123456789': + break + k += 1 + try: + bits = int(name[k:]) + except ValueError: + bits = 0 + base = name[:k] + return base, bits + +def bitname(obj): + """Return a bit-width name for a given type object""" + name = obj.__name__ + base = '' + char = '' + try: + if name[-1] == '_': + newname = name[:-1] + else: + newname = name + info = typeinfo[english_upper(newname)] + assert(info[-1] == obj) # sanity check + bits = info[2] + + except KeyError: # bit-width name + base, bits = _evalname(name) + char = base[0] + + if name == 'bool_': + char = 'b' + base = 'bool' + elif name=='void': + char = 'V' + base = 'void' + elif name=='object_': + char = 'O' + base = 'object' + bits = 0 + elif name=='datetime64': + char = 'M' + elif name=='timedelta64': + char = 'm' + + if sys.version_info[0] >= 3: + if name=='bytes_': + char = 'S' + base = 'bytes' + elif name=='str_': + char = 'U' + base = 'str' + else: + if name=='string_': + char = 'S' + base = 'string' + elif name=='unicode_': + char = 'U' + base = 'unicode' + + bytes = bits // 8 + + if char != '' and bytes != 0: + char = "%s%d" % (char, bytes) + + return base, bits, char + + +def _add_types(): + for a in typeinfo.keys(): + name = english_lower(a) + if isinstance(typeinfo[a], tuple): + typeobj = typeinfo[a][-1] + + # define C-name and insert typenum and typechar references also + allTypes[name] = typeobj + sctypeDict[name] = typeobj + sctypeDict[typeinfo[a][0]] = typeobj + sctypeDict[typeinfo[a][1]] = typeobj + + else: # generic class + allTypes[name] = typeinfo[a] +_add_types() + +def _add_aliases(): + for a in typeinfo.keys(): + name = english_lower(a) + if not isinstance(typeinfo[a], tuple): + continue + typeobj = typeinfo[a][-1] + # insert bit-width version for this class (if relevant) + base, bit, char = bitname(typeobj) + if base[-3:] == 'int' or char[0] in 'ui': continue + if base != '': + myname = "%s%d" % (base, bit) + if (name != 'longdouble' and name != 'clongdouble') or \ + myname not in allTypes.keys(): + allTypes[myname] = typeobj + sctypeDict[myname] = typeobj + if base == 'complex': + na_name = '%s%d' % (english_capitalize(base), bit//2) + elif base == 'bool': + na_name = english_capitalize(base) + sctypeDict[na_name] = typeobj + else: + na_name = "%s%d" % (english_capitalize(base), bit) + sctypeDict[na_name] = typeobj + sctypeNA[na_name] = typeobj + sctypeDict[na_name] = typeobj + sctypeNA[typeobj] = na_name + sctypeNA[typeinfo[a][0]] = na_name + if char != '': + sctypeDict[char] = typeobj + sctypeNA[char] = na_name +_add_aliases() + +# Integers handled so that +# The int32, int64 types should agree exactly with +# PyArray_INT32, PyArray_INT64 in C +# We need to enforce the same checking as is done +# in arrayobject.h where the order of getting a +# bit-width match is: +# long, longlong, int, short, char +# for int8, int16, int32, int64, int128 + +def _add_integer_aliases(): + _ctypes = ['LONG', 'LONGLONG', 'INT', 
'SHORT', 'BYTE'] + for ctype in _ctypes: + val = typeinfo[ctype] + bits = val[2] + charname = 'i%d' % (bits//8,) + ucharname = 'u%d' % (bits//8,) + intname = 'int%d' % bits + UIntname = 'UInt%d' % bits + Intname = 'Int%d' % bits + uval = typeinfo['U'+ctype] + typeobj = val[-1] + utypeobj = uval[-1] + if intname not in allTypes.keys(): + uintname = 'uint%d' % bits + allTypes[intname] = typeobj + allTypes[uintname] = utypeobj + sctypeDict[intname] = typeobj + sctypeDict[uintname] = utypeobj + sctypeDict[Intname] = typeobj + sctypeDict[UIntname] = utypeobj + sctypeDict[charname] = typeobj + sctypeDict[ucharname] = utypeobj + sctypeNA[Intname] = typeobj + sctypeNA[UIntname] = utypeobj + sctypeNA[charname] = typeobj + sctypeNA[ucharname] = utypeobj + sctypeNA[typeobj] = Intname + sctypeNA[utypeobj] = UIntname + sctypeNA[val[0]] = Intname + sctypeNA[uval[0]] = UIntname +_add_integer_aliases() + +# We use these later +void = allTypes['void'] +generic = allTypes['generic'] + +# +# Rework the Python names (so that float and complex and int are consistent +# with Python usage) +# +def _set_up_aliases(): + type_pairs = [('complex_', 'cdouble'), + ('int0', 'intp'), + ('uint0', 'uintp'), + ('single', 'float'), + ('csingle', 'cfloat'), + ('singlecomplex', 'cfloat'), + ('float_', 'double'), + ('intc', 'int'), + ('uintc', 'uint'), + ('int_', 'long'), + ('uint', 'ulong'), + ('cfloat', 'cdouble'), + ('longfloat', 'longdouble'), + ('clongfloat', 'clongdouble'), + ('longcomplex', 'clongdouble'), + ('bool_', 'bool'), + ('unicode_', 'unicode'), + ('object_', 'object')] + if sys.version_info[0] >= 3: + type_pairs.extend([('bytes_', 'string'), + ('str_', 'unicode'), + ('string_', 'string')]) + else: + type_pairs.extend([('str_', 'string'), + ('string_', 'string'), + ('bytes_', 'string')]) + for alias, t in type_pairs: + allTypes[alias] = allTypes[t] + sctypeDict[alias] = sctypeDict[t] + # Remove aliases overriding python types and modules + to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float', + 'complex', 'bool', 'string', 'datetime', 'timedelta'] + if sys.version_info[0] >= 3: + # Py3K + to_remove.append('bytes') + to_remove.append('str') + to_remove.remove('unicode') + to_remove.remove('long') + for t in to_remove: + try: + del allTypes[t] + del sctypeDict[t] + except KeyError: + pass +_set_up_aliases() + +# Now, construct dictionary to lookup character codes from types +_sctype2char_dict = {} +def _construct_char_code_lookup(): + for name in typeinfo.keys(): + tup = typeinfo[name] + if isinstance(tup, tuple): + if tup[0] not in ['p', 'P']: + _sctype2char_dict[tup[-1]] = tup[0] +_construct_char_code_lookup() + + +sctypes = {'int': [], + 'uint':[], + 'float':[], + 'complex':[], + 'others':[bool, object, str, unicode, void]} + +def _add_array_type(typename, bits): + try: + t = allTypes['%s%d' % (typename, bits)] + except KeyError: + pass + else: + sctypes[typename].append(t) + +def _set_array_types(): + ibytes = [1, 2, 4, 8, 16, 32, 64] + fbytes = [2, 4, 8, 10, 12, 16, 32, 64] + for bytes in ibytes: + bits = 8*bytes + _add_array_type('int', bits) + _add_array_type('uint', bits) + for bytes in fbytes: + bits = 8*bytes + _add_array_type('float', bits) + _add_array_type('complex', 2*bits) + _gi = dtype('p') + if _gi.type not in sctypes['int']: + indx = 0 + sz = _gi.itemsize + _lst = sctypes['int'] + while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): + indx += 1 + sctypes['int'].insert(indx, _gi.type) + sctypes['uint'].insert(indx, dtype('P').type) +_set_array_types() + + +genericTypeRank = 
['bool', 'int8', 'uint8', 'int16', 'uint16',
+                   'int32', 'uint32', 'int64', 'uint64', 'int128',
+                   'uint128', 'float16',
+                   'float32', 'float64', 'float80', 'float96', 'float128',
+                   'float256',
+                   'complex32', 'complex64', 'complex128', 'complex160',
+                   'complex192', 'complex256', 'complex512', 'object']
+
+def maximum_sctype(t):
+    """
+    Return the scalar type of highest precision of the same kind as the input.
+
+    Parameters
+    ----------
+    t : dtype or dtype specifier
+        The input data type. This can be a `dtype` object or an object that
+        is convertible to a `dtype`.
+
+    Returns
+    -------
+    out : dtype
+        The highest precision data type of the same kind (`dtype.kind`) as `t`.
+
+    See Also
+    --------
+    obj2sctype, mintypecode, sctype2char
+    dtype
+
+    Examples
+    --------
+    >>> np.maximum_sctype(np.int)
+    <type 'numpy.int64'>
+    >>> np.maximum_sctype(np.uint8)
+    <type 'numpy.uint64'>
+    >>> np.maximum_sctype(np.complex)
+    <type 'numpy.complex192'>
+
+    >>> np.maximum_sctype(str)
+    <type 'numpy.string_'>
+
+    >>> np.maximum_sctype('i2')
+    <type 'numpy.int64'>
+    >>> np.maximum_sctype('f4')
+    <type 'numpy.float96'>
+
+    """
+    g = obj2sctype(t)
+    if g is None:
+        return t
+    t = g
+    name = t.__name__
+    base, bits = _evalname(name)
+    if bits == 0:
+        return t
+    else:
+        return sctypes[base][-1]
+
+try:
+    buffer_type = _types.BufferType
+except AttributeError:
+    # Py3K
+    buffer_type = memoryview
+
+_python_types = {int: 'int_',
+                 float: 'float_',
+                 complex: 'complex_',
+                 bool: 'bool_',
+                 bytes: 'bytes_',
+                 unicode: 'unicode_',
+                 buffer_type: 'void',
+                 }
+
+if sys.version_info[0] >= 3:
+    def _python_type(t):
+        """returns the type corresponding to a certain Python type"""
+        if not isinstance(t, type):
+            t = type(t)
+        return allTypes[_python_types.get(t, 'object_')]
+else:
+    def _python_type(t):
+        """returns the type corresponding to a certain Python type"""
+        if not isinstance(t, _types.TypeType):
+            t = type(t)
+        return allTypes[_python_types.get(t, 'object_')]
+
+def issctype(rep):
+    """
+    Determines whether the given object represents a scalar data-type.
+
+    Parameters
+    ----------
+    rep : any
+        If `rep` is an instance of a scalar dtype, True is returned. If not,
+        False is returned.
+
+    Returns
+    -------
+    out : bool
+        Boolean result of check whether `rep` is a scalar dtype.
+
+    See Also
+    --------
+    issubsctype, issubdtype, obj2sctype, sctype2char
+
+    Examples
+    --------
+    >>> np.issctype(np.int32)
+    True
+    >>> np.issctype(list)
+    False
+    >>> np.issctype(1.1)
+    False
+
+    Strings are also a scalar type:
+
+    >>> np.issctype(np.dtype('str'))
+    True
+
+    """
+    if not isinstance(rep, (type, dtype)):
+        return False
+    try:
+        res = obj2sctype(rep)
+        if res and res != object_:
+            return True
+        return False
+    except:
+        return False
+
+def obj2sctype(rep, default=None):
+    """
+    Return the scalar dtype or NumPy equivalent of Python type of an object.
+
+    Parameters
+    ----------
+    rep : any
+        The object of which the type is returned.
+    default : any, optional
+        If given, this is returned for objects whose types can not be
+        determined. If not given, None is returned for those objects.
+
+    Returns
+    -------
+    dtype : dtype or Python type
+        The data type of `rep`.
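+
+    Objects that are neither scalar types, dtypes, types, nor arrays fall
+    through to ``dtype(rep)``; when that fails, `default` is returned. A
+    doctest-style sketch of that last branch (assuming numpy is imported
+    as ``np``):
+
+    >>> np.obj2sctype(1.0) is None   # dtype(1.0) raises, default is None
+    True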
+
+    See Also
+    --------
+    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
+
+    Examples
+    --------
+    >>> np.obj2sctype(np.int32)
+    <type 'numpy.int32'>
+    >>> np.obj2sctype(np.array([1., 2.]))
+    <type 'numpy.float64'>
+    >>> np.obj2sctype(np.array([1.j]))
+    <type 'numpy.complex128'>
+
+    >>> np.obj2sctype(dict)
+    <type 'numpy.object_'>
+    >>> np.obj2sctype('string')
+    <type 'numpy.string_'>
+
+    >>> np.obj2sctype(1, default=list)
+    <type 'list'>
+
+    """
+    try:
+        if issubclass(rep, generic):
+            return rep
+    except TypeError:
+        pass
+    if isinstance(rep, dtype):
+        return rep.type
+    if isinstance(rep, type):
+        return _python_type(rep)
+    if isinstance(rep, ndarray):
+        return rep.dtype.type
+    try:
+        res = dtype(rep)
+    except:
+        return default
+    return res.type
+
+
+def issubclass_(arg1, arg2):
+    """
+    Determine if a class is a subclass of a second class.
+
+    `issubclass_` is equivalent to the Python built-in ``issubclass``,
+    except that it returns False instead of raising a TypeError if one
+    of the arguments is not a class.
+
+    Parameters
+    ----------
+    arg1 : class
+        Input class. True is returned if `arg1` is a subclass of `arg2`.
+    arg2 : class or tuple of classes.
+        Input class. If a tuple of classes, True is returned if `arg1` is a
+        subclass of any of the tuple elements.
+
+    Returns
+    -------
+    out : bool
+        Whether `arg1` is a subclass of `arg2` or not.
+
+    See Also
+    --------
+    issubsctype, issubdtype, issctype
+
+    Examples
+    --------
+    >>> np.issubclass_(np.int32, np.int)
+    True
+    >>> np.issubclass_(np.int32, np.float)
+    False
+
+    """
+    try:
+        return issubclass(arg1, arg2)
+    except TypeError:
+        return False
+
+def issubsctype(arg1, arg2):
+    """
+    Determine if the first argument is a subclass of the second argument.
+
+    Parameters
+    ----------
+    arg1, arg2 : dtype or dtype specifier
+        Data-types.
+
+    Returns
+    -------
+    out : bool
+        The result.
+
+    See Also
+    --------
+    issctype, issubdtype, obj2sctype
+
+    Examples
+    --------
+    >>> np.issubsctype('S8', str)
+    True
+    >>> np.issubsctype(np.array([1]), np.int)
+    True
+    >>> np.issubsctype(np.array([1]), np.float)
+    False
+
+    """
+    return issubclass(obj2sctype(arg1), obj2sctype(arg2))
+
+def issubdtype(arg1, arg2):
+    """
+    Returns True if the first argument is a typecode lower/equal in the
+    type hierarchy.
+
+    Parameters
+    ----------
+    arg1, arg2 : dtype_like
+        dtype or string representing a typecode.
+
+    Returns
+    -------
+    out : bool
+
+    See Also
+    --------
+    issubsctype, issubclass_
+    numpy.core.numerictypes : Overview of numpy type hierarchy.
+
+    Examples
+    --------
+    >>> np.issubdtype('S1', str)
+    True
+    >>> np.issubdtype(np.float64, np.float32)
+    False
+
+    """
+    if issubclass_(arg2, generic):
+        return issubclass(dtype(arg1).type, arg2)
+    mro = dtype(arg2).type.mro()
+    if len(mro) > 1:
+        val = mro[1]
+    else:
+        val = mro[0]
+    return issubclass(dtype(arg1).type, val)
+
+
+# This dictionary allows look up based on any alias for an array data-type
+class _typedict(dict):
+    """
+    Base object for a dictionary for look-up with any alias for an array dtype.
+
+    Instances of `_typedict` can not be used as dictionaries directly,
+    first they have to be populated.
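+
+    For instance, once the `nbytes` instance defined below is populated,
+    every alias of a type reaches the same entry -- a sketch, assuming
+    numpy is imported as ``np``::
+
+        >>> from numpy.core.numerictypes import nbytes
+        >>> nbytes[np.int64]    # look-up by scalar type
+        8
+        >>> nbytes['i8']        # the same entry through a character code
+        8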
+ + """ + def __getitem__(self, obj): + return dict.__getitem__(self, obj2sctype(obj)) + +nbytes = _typedict() +_alignment = _typedict() +_maxvals = _typedict() +_minvals = _typedict() +def _construct_lookups(): + for name, val in typeinfo.items(): + if not isinstance(val, tuple): + continue + obj = val[-1] + nbytes[obj] = val[2] // 8 + _alignment[obj] = val[3] + if (len(val) > 5): + _maxvals[obj] = val[4] + _minvals[obj] = val[5] + else: + _maxvals[obj] = None + _minvals[obj] = None + +_construct_lookups() + +def sctype2char(sctype): + """ + Return the string representation of a scalar dtype. + + Parameters + ---------- + sctype : scalar dtype or object + If a scalar dtype, the corresponding string character is + returned. If an object, `sctype2char` tries to infer its scalar type + and then return the corresponding string character. + + Returns + ------- + typechar : str + The string character corresponding to the scalar type. + + Raises + ------ + ValueError + If `sctype` is an object for which the type can not be inferred. + + See Also + -------- + obj2sctype, issctype, issubsctype, mintypecode + + Examples + -------- + >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]: + ... print np.sctype2char(sctype) + l + d + D + S + O + + >>> x = np.array([1., 2-1.j]) + >>> np.sctype2char(x) + 'D' + >>> np.sctype2char(list) + 'O' + + """ + sctype = obj2sctype(sctype) + if sctype is None: + raise ValueError("unrecognized type") + return _sctype2char_dict[sctype] + +# Create dictionary of casting functions that wrap sequences +# indexed by type or type character + + +cast = _typedict() +try: + ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType, + _types.LongType, _types.BooleanType, + _types.StringType, _types.UnicodeType, _types.BufferType] +except AttributeError: + # Py3K + ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] + +ScalarType.extend(_sctype2char_dict.keys()) +ScalarType = tuple(ScalarType) +for key in _sctype2char_dict.keys(): + cast[key] = lambda x, k=key : array(x, copy=False).astype(k) + +# Create the typestring lookup dictionary +_typestr = _typedict() +for key in _sctype2char_dict.keys(): + if issubclass(key, allTypes['flexible']): + _typestr[key] = _sctype2char_dict[key] + else: + _typestr[key] = empty((1,), key).dtype.str[1:] + +# Make sure all typestrings are in sctypeDict +for key, val in _typestr.items(): + if val not in sctypeDict: + sctypeDict[val] = key + +# Add additional strings to the sctypeDict + +if sys.version_info[0] >= 3: + _toadd = ['int', 'float', 'complex', 'bool', 'object', + 'str', 'bytes', 'object', ('a', allTypes['bytes_'])] +else: + _toadd = ['int', 'float', 'complex', 'bool', 'object', 'string', + ('str', allTypes['string_']), + 'unicode', 'object', ('a', allTypes['string_'])] + +for name in _toadd: + if isinstance(name, tuple): + sctypeDict[name[0]] = name[1] + else: + sctypeDict[name] = allTypes['%s_' % name] + +del _toadd, name + +# Now add the types we've determined to this module +for key in allTypes: + globals()[key] = allTypes[key] + __all__.append(key) + +del key + +typecodes = {'Character':'c', + 'Integer':'bhilqp', + 'UnsignedInteger':'BHILQP', + 'Float':'efdg', + 'Complex':'FDG', + 'AllInteger':'bBhHiIlLqQpP', + 'AllFloat':'efdgFDG', + 'Datetime': 'Mm', + 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} + +# backwards compatibility --- deprecated name +typeDict = sctypeDict +typeNA = sctypeNA + +# b -> boolean +# u -> unsigned integer +# i -> signed integer +# f -> floating point +# c -> complex +# 
M -> datetime +# m -> timedelta +# S -> string +# U -> Unicode string +# V -> record +# O -> Python object +_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm'] + +__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O' +__len_test_types = len(__test_types) + +# Keep incrementing until a common type both can be coerced to +# is found. Otherwise, return None +def _find_common_coerce(a, b): + if a > b: + return a + try: + thisind = __test_types.index(a.char) + except ValueError: + return None + return _can_coerce_all([a, b], start=thisind) + +# Find a data-type that all data-types in a list can be coerced to +def _can_coerce_all(dtypelist, start=0): + N = len(dtypelist) + if N == 0: + return None + if N == 1: + return dtypelist[0] + thisind = start + while thisind < __len_test_types: + newdtype = dtype(__test_types[thisind]) + numcoerce = len([x for x in dtypelist if newdtype >= x]) + if numcoerce == N: + return newdtype + thisind += 1 + return None + +def _register_types(): + numbers.Integral.register(integer) + numbers.Complex.register(inexact) + numbers.Real.register(floating) +_register_types() + +def find_common_type(array_types, scalar_types): + """ + Determine common type following standard coercion rules. + + Parameters + ---------- + array_types : sequence + A list of dtypes or dtype convertible objects representing arrays. + scalar_types : sequence + A list of dtypes or dtype convertible objects representing scalars. + + Returns + ------- + datatype : dtype + The common data type, which is the maximum of `array_types` ignoring + `scalar_types`, unless the maximum of `scalar_types` is of a + different kind (`dtype.kind`). If the kind is not understood, then + None is returned. + + See Also + -------- + dtype, common_type, can_cast, mintypecode + + Examples + -------- + >>> np.find_common_type([], [np.int64, np.float32, np.complex]) + dtype('complex128') + >>> np.find_common_type([np.int64, np.float32], []) + dtype('float64') + + The standard casting rules ensure that a scalar cannot up-cast an + array unless the scalar is of a fundamentally different kind of data + (i.e. 
under a different hierarchy in the data type hierarchy) than
+    the array:
+
+    >>> np.find_common_type([np.float32], [np.int64, np.float64])
+    dtype('float32')
+
+    Complex is of a different type, so it up-casts the float in the
+    `array_types` argument:
+
+    >>> np.find_common_type([np.float32], [np.complex])
+    dtype('complex128')
+
+    Type specifier strings are convertible to dtypes and can therefore
+    be used instead of dtypes:
+
+    >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
+    dtype('complex128')
+
+    """
+    array_types = [dtype(x) for x in array_types]
+    scalar_types = [dtype(x) for x in scalar_types]
+
+    maxa = _can_coerce_all(array_types)
+    maxsc = _can_coerce_all(scalar_types)
+
+    if maxa is None:
+        return maxsc
+
+    if maxsc is None:
+        return maxa
+
+    try:
+        index_a = _kind_list.index(maxa.kind)
+        index_sc = _kind_list.index(maxsc.kind)
+    except ValueError:
+        return None
+
+    if index_sc > index_a:
+        return _find_common_coerce(maxsc, maxa)
+    else:
+        return maxa
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/operand_flag_tests.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/operand_flag_tests.py
new file mode 100644
index 0000000000000..55dedb776b147
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/operand_flag_tests.py
@@ -0,0 +1,7 @@
+def __bootstrap__():
+    global __bootstrap__, __loader__, __file__
+    import sys, pkg_resources, imp
+    __file__ = pkg_resources.resource_filename(__name__, 'operand_flag_tests.cpython-34m.so')
+    __loader__ = None; del __bootstrap__, __loader__
+    imp.load_dynamic(__name__,__file__)
+__bootstrap__()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/records.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/records.py
new file mode 100644
index 0000000000000..d0f82a25c6d52
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/records.py
@@ -0,0 +1,808 @@
+"""
+Record Arrays
+=============
+Record arrays expose the fields of structured arrays as properties.
+
+Most commonly, ndarrays contain elements of a single type, e.g. floats,
+integers, bools etc. However, it is possible for elements to be combinations
+of these, such as::
+
+  >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
+  >>> a
+  array([(1, 2.0), (1, 2.0)],
+        dtype=[('x', '<i4'), ('y', '<f8')])
+
+Here, each element consists of two fields: x (an int), and y (a float).
+This is known as a structured array. The different fields are analogous
+to columns in a spread-sheet. The different fields can be accessed as
+one would a dictionary::
+
+  >>> a['x']
+  array([1, 1])
+
+  >>> a['y']
+  array([ 2.,  2.])
+
+Record arrays allow us to access fields as properties::
+
+  >>> ar = a.view(np.recarray)
+
+  >>> ar.x
+  array([1, 1])
+
+  >>> ar.y
+  array([ 2.,  2.])
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import os
+
+from . import numeric as sb
+from .defchararray import chararray
+from .
import numerictypes as nt +from numpy.compat import isfileobj, bytes, long + +# All of the functions allow formats to be a dtype +__all__ = ['record', 'recarray', 'format_parser'] + + +ndarray = sb.ndarray + +_byteorderconv = {'b':'>', + 'l':'<', + 'n':'=', + 'B':'>', + 'L':'<', + 'N':'=', + 'S':'s', + 's':'s', + '>':'>', + '<':'<', + '=':'=', + '|':'|', + 'I':'|', + 'i':'|'} + +# formats regular expression +# allows multidimension spec with a tuple syntax in front +# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' +# are equally allowed + +numfmt = nt.typeDict +_typestr = nt._typestr + +def find_duplicate(list): + """Find duplication in a list, return a list of duplicated elements""" + dup = [] + for i in range(len(list)): + if (list[i] in list[i + 1:]): + if (list[i] not in dup): + dup.append(list[i]) + return dup + +class format_parser: + """ + Class to convert formats, names, titles description to a dtype. + + After constructing the format_parser object, the dtype attribute is + the converted data-type: + ``dtype = format_parser(formats, names, titles).dtype`` + + Attributes + ---------- + dtype : dtype + The converted data-type. + + Parameters + ---------- + formats : str or list of str + The format description, either specified as a string with + comma-separated format descriptions in the form ``'f8, i4, a5'``, or + a list of format description strings in the form + ``['f8', 'i4', 'a5']``. + names : str or list/tuple of str + The field names, either specified as a comma-separated string in the + form ``'col1, col2, col3'``, or as a list or tuple of strings in the + form ``['col1', 'col2', 'col3']``. + An empty list can be used, in that case default field names + ('f0', 'f1', ...) are used. + titles : sequence + Sequence of title strings. An empty list can be used to leave titles + out. + aligned : bool, optional + If True, align the fields by padding as the C-compiler would. + Default is False. + byteorder : str, optional + If specified, all the fields will be changed to the + provided byte-order. Otherwise, the default byte-order is + used. For all available string specifiers, see `dtype.newbyteorder`. + + See Also + -------- + dtype, typename, sctype2char + + Examples + -------- + >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + ... ['T1', 'T2', 'T3']).dtype + dtype([(('T1', 'col1'), '>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + ... []).dtype + dtype([('col1', '>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype + dtype([('f0', ' len(titles)): + self._titles += [None] * (self._nfields - len(titles)) + + def _createdescr(self, byteorder): + descr = sb.dtype({'names':self._names, + 'formats':self._f_formats, + 'offsets':self._offsets, + 'titles':self._titles}) + if (byteorder is not None): + byteorder = _byteorderconv[byteorder[0]] + descr = descr.newbyteorder(byteorder) + + self._descr = descr + +class record(nt.void): + """A data-type scalar that allows field access as attribute lookup. 
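+
+    A short sketch of the attribute access provided here, using the
+    ``array`` constructor from this module (exposed as ``np.rec.array``)::
+
+        >>> r = np.rec.array([(1, 2.0)], dtype=[('x', 'i4'), ('y', 'f8')])[0]
+        >>> r.x, r.y
+        (1, 2.0)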
+ """ + def __repr__(self): + return self.__str__() + + def __str__(self): + return str(self.item()) + + def __getattribute__(self, attr): + if attr in ['setfield', 'getfield', 'dtype']: + return nt.void.__getattribute__(self, attr) + try: + return nt.void.__getattribute__(self, attr) + except AttributeError: + pass + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + obj = self.getfield(*res[:2]) + # if it has fields return a recarray, + # if it's a string ('SU') return a chararray + # otherwise return the object + try: + dt = obj.dtype + except AttributeError: + return obj + if dt.fields: + return obj.view(obj.__class__) + if dt.char in 'SU': + return obj.view(chararray) + return obj + else: + raise AttributeError("'record' object has no " + "attribute '%s'" % attr) + + + def __setattr__(self, attr, val): + if attr in ['setfield', 'getfield', 'dtype']: + raise AttributeError("Cannot set '%s' attribute" % attr) + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + return self.setfield(val, *res[:2]) + else: + if getattr(self, attr, None): + return nt.void.__setattr__(self, attr, val) + else: + raise AttributeError("'record' object has no " + "attribute '%s'" % attr) + + def pprint(self): + """Pretty-print all fields.""" + # pretty-print all fields + names = self.dtype.names + maxlen = max([len(name) for name in names]) + rows = [] + fmt = '%% %ds: %%s' % maxlen + for name in names: + rows.append(fmt % (name, getattr(self, name))) + return "\n".join(rows) + +# The recarray is almost identical to a standard array (which supports +# named fields already) The biggest difference is that it can use +# attribute-lookup to find the fields and it is constructed using +# a record. + +# If byteorder is given it forces a particular byteorder on all +# the fields (and any subfields) + +class recarray(ndarray): + """ + Construct an ndarray that allows field access using attributes. + + Arrays may have a data-types containing fields, analogous + to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, + where each entry in the array is a pair of ``(int, float)``. Normally, + these attributes are accessed using dictionary lookups such as ``arr['x']`` + and ``arr['y']``. Record arrays allow the fields to be accessed as members + of the array, using ``arr.x`` and ``arr.y``. + + Parameters + ---------- + shape : tuple + Shape of output array. + dtype : data-type, optional + The desired data-type. By default, the data-type is determined + from `formats`, `names`, `titles`, `aligned` and `byteorder`. + formats : list of data-types, optional + A list containing the data-types for the different columns, e.g. + ``['i4', 'f8', 'i4']``. `formats` does *not* support the new + convention of using types directly, i.e. ``(int, float, int)``. + Note that `formats` must be a list, not a tuple. + Given that `formats` is somewhat limited, we recommend specifying + `dtype` instead. + names : tuple of str, optional + The name of each column, e.g. ``('x', 'y', 'z')``. + buf : buffer, optional + By default, a new array is created of the given shape and data-type. + If `buf` is specified and is an object exposing the buffer interface, + the array will use the memory from the existing buffer. In this case, + the `offset` and `strides` keywords are available. + + Other Parameters + ---------------- + titles : tuple of str, optional + Aliases for column names. 
For example, if `names` were + ``('x', 'y', 'z')`` and `titles` is + ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then + ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. + byteorder : {'<', '>', '='}, optional + Byte-order for all fields. + aligned : bool, optional + Align the fields in memory as the C-compiler would. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + offset : int, optional + Start reading buffer (`buf`) from this offset onwards. + order : {'C', 'F'}, optional + Row-major or column-major order. + + Returns + ------- + rec : recarray + Empty array of the given shape and type. + + See Also + -------- + rec.fromrecords : Construct a record array from data. + record : fundamental data-type for `recarray`. + format_parser : determine a data-type from formats, names, titles. + + Notes + ----- + This constructor can be compared to ``empty``: it creates a new record + array but does not fill it with data. To create a record array from data, + use one of the following methods: + + 1. Create a standard ndarray and convert it to a record array, + using ``arr.view(np.recarray)`` + 2. Use the `buf` keyword. + 3. Use `np.rec.fromrecords`. + + Examples + -------- + Create an array with two fields, ``x`` and ``y``: + + >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)]) + >>> x + array([(1.0, 2), (3.0, 4)], + dtype=[('x', '>> x['x'] + array([ 1., 3.]) + + View the array as a record array: + + >>> x = x.view(np.recarray) + + >>> x.x + array([ 1., 3.]) + + >>> x.y + array([2, 4]) + + Create a new, empty record array: + + >>> np.recarray((2,), + ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP + rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), + (3471280, 1.2134086255804012e-316, 0)], + dtype=[('x', '>> x1=np.array([1,2,3,4]) + >>> x2=np.array(['a','dd','xyz','12']) + >>> x3=np.array([1.1,2,3,4]) + >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') + >>> print r[1] + (2, 'dd', 2.0) + >>> x1[1]=34 + >>> r.a + array([1, 2, 3, 4]) + """ + + arrayList = [sb.asarray(x) for x in arrayList] + + if shape is None or shape == 0: + shape = arrayList[0].shape + + if isinstance(shape, int): + shape = (shape,) + + if formats is None and dtype is None: + # go through each object in the list to see if it is an ndarray + # and determine the formats. + formats = '' + for obj in arrayList: + if not isinstance(obj, ndarray): + raise ValueError("item in the array list must be an ndarray.") + formats += _typestr[obj.dtype.type] + if issubclass(obj.dtype.type, nt.flexible): + formats += repr(obj.itemsize) + formats += ',' + formats = formats[:-1] + + if dtype is not None: + descr = sb.dtype(dtype) + _names = descr.names + else: + parsed = format_parser(formats, names, titles, aligned, byteorder) + _names = parsed._names + descr = parsed._descr + + # Determine shape from data-type. 
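+    # (Illustrative note on the shape handling below: with a hypothetical
+    # descr of dtype([('pos', 'f8', (3,))]) and input arrays of shape
+    # (10, 3), descr[0].shape is (3,), so one trailing dimension is trimmed
+    # and the resulting record shape is (10,).)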
+ if len(descr) != len(arrayList): + raise ValueError("mismatch between the number of fields " + "and the number of arrays") + + d0 = descr[0].shape + nn = len(d0) + if nn > 0: + shape = shape[:-nn] + + for k, obj in enumerate(arrayList): + nn = len(descr[k].shape) + testshape = obj.shape[:len(obj.shape) - nn] + if testshape != shape: + raise ValueError("array-shape mismatch in array %d" % k) + + _array = recarray(shape, descr) + + # populate the record array (makes a copy) + for i in range(len(arrayList)): + _array[_names[i]] = arrayList[i] + + return _array + +# shape must be 1-d if you use list of lists... +def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None): + """ create a recarray from a list of records in text form + + The data in the same field can be heterogeneous, they will be promoted + to the highest data type. This method is intended for creating + smaller record arrays. If used to create large array without formats + defined + + r=fromrecords([(2,3.,'abc')]*100000) + + it can be slow. + + If formats is None, then this will auto-detect formats. Use list of + tuples rather than list of lists for faster processing. + + >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], + ... names='col1,col2,col3') + >>> print r[0] + (456, 'dbe', 1.2) + >>> r.col1 + array([456, 2]) + >>> r.col2 + chararray(['dbe', 'de'], + dtype='|S3') + >>> import pickle + >>> print pickle.loads(pickle.dumps(r)) + [(456, 'dbe', 1.2) (2, 'de', 1.3)] + """ + + nfields = len(recList[0]) + if formats is None and dtype is None: # slower + obj = sb.array(recList, dtype=object) + arrlist = [sb.array(obj[..., i].tolist()) for i in range(nfields)] + return fromarrays(arrlist, formats=formats, shape=shape, names=names, + titles=titles, aligned=aligned, byteorder=byteorder) + + if dtype is not None: + descr = sb.dtype((record, dtype)) + else: + descr = format_parser(formats, names, titles, aligned, byteorder)._descr + + try: + retval = sb.array(recList, dtype=descr) + except TypeError: # list of lists instead of list of tuples + if (shape is None or shape == 0): + shape = len(recList) + if isinstance(shape, (int, long)): + shape = (shape,) + if len(shape) > 1: + raise ValueError("Can only deal with 1-d array.") + _array = recarray(shape, descr) + for k in range(_array.size): + _array[k] = tuple(recList[k]) + return _array + else: + if shape is not None and retval.shape != shape: + retval.shape = shape + + res = retval.view(recarray) + + return res + + +def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """ create a (read-only) record array from binary data contained in + a string""" + + + if dtype is None and formats is None: + raise ValueError("Must have dtype= or formats=") + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder)._descr + + itemsize = descr.itemsize + if (shape is None or shape == 0 or shape == -1): + shape = (len(datastring) - offset) / itemsize + + _array = recarray(shape, descr, buf=datastring, offset=offset) + return _array + +def get_remaining_size(fd): + try: + fn = fd.fileno() + except AttributeError: + return os.path.getsize(fd.name) - fd.tell() + st = os.fstat(fn) + size = st.st_size - fd.tell() + return size + +def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create an array from 
binary file data + + If file is a string then that file is opened, else it is assumed + to be a file object. + + >>> from tempfile import TemporaryFile + >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a[5] = (0.5,10,'abcde') + >>> + >>> fd=TemporaryFile() + >>> a = a.newbyteorder('<') + >>> a.tofile(fd) + >>> + >>> fd.seek(0) + >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, + ... byteorder='<') + >>> print r[5] + (0.5, 10, 'abcde') + >>> r.shape + (10,) + """ + + if (shape is None or shape == 0): + shape = (-1,) + elif isinstance(shape, (int, long)): + shape = (shape,) + + name = 0 + if isinstance(fd, str): + name = 1 + fd = open(fd, 'rb') + if (offset > 0): + fd.seek(offset, 1) + size = get_remaining_size(fd) + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder)._descr + + itemsize = descr.itemsize + + shapeprod = sb.array(shape).prod() + shapesize = shapeprod * itemsize + if shapesize < 0: + shape = list(shape) + shape[ shape.index(-1) ] = size / -shapesize + shape = tuple(shape) + shapeprod = sb.array(shape).prod() + + nbytes = shapeprod * itemsize + + if nbytes > size: + raise ValueError( + "Not enough bytes left in file for specified shape and type") + + # create the array + _array = recarray(shape, descr) + nbytesread = fd.readinto(_array.data) + if nbytesread != nbytes: + raise IOError("Didn't read as many bytes as expected") + if name: + fd.close() + + return _array + +def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, copy=True): + """Construct a record array from a wide-variety of objects. + """ + + if (isinstance(obj, (type(None), str)) or isfileobj(obj)) \ + and (formats is None) \ + and (dtype is None): + raise ValueError("Must define formats (or dtype) if object is "\ + "None, string, or an open file") + + kwds = {} + if dtype is not None: + dtype = sb.dtype(dtype) + elif formats is not None: + dtype = format_parser(formats, names, titles, + aligned, byteorder)._descr + else: + kwds = {'formats': formats, + 'names' : names, + 'titles' : titles, + 'aligned' : aligned, + 'byteorder' : byteorder + } + + if obj is None: + if shape is None: + raise ValueError("Must define a shape if obj is None") + return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) + + elif isinstance(obj, bytes): + return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) + + elif isinstance(obj, (list, tuple)): + if isinstance(obj[0], (tuple, list)): + return fromrecords(obj, dtype=dtype, shape=shape, **kwds) + else: + return fromarrays(obj, dtype=dtype, shape=shape, **kwds) + + elif isinstance(obj, recarray): + if dtype is not None and (obj.dtype != dtype): + new = obj.view(dtype) + else: + new = obj + if copy: + new = new.copy() + return new + + elif isfileobj(obj): + return fromfile(obj, dtype=dtype, shape=shape, offset=offset) + + elif isinstance(obj, ndarray): + if dtype is not None and (obj.dtype != dtype): + new = obj.view(dtype) + else: + new = obj + if copy: + new = new.copy() + res = new.view(recarray) + if issubclass(res.dtype.type, nt.void): + res.dtype = sb.dtype((record, res.dtype)) + return res + + else: + interface = getattr(obj, "__array_interface__", None) + if interface is None or not isinstance(interface, dict): + raise ValueError("Unknown input type") + obj = sb.array(obj) + if dtype is not None and (obj.dtype != dtype): + obj = obj.view(dtype) + res = obj.view(recarray) + if 
issubclass(res.dtype.type, nt.void): + res.dtype = sb.dtype((record, res.dtype)) + return res diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/scalarmath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/scalarmath.py new file mode 100644 index 0000000000000..0bb7dbbf6397e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/scalarmath.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, 'scalarmath.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup.py new file mode 100644 index 0000000000000..5da04241317eb --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup.py @@ -0,0 +1,1013 @@ +from __future__ import division, print_function + +import imp +import os +import sys +import shutil +import pickle +import copy +import warnings +import re +from os.path import join +from numpy.distutils import log +from distutils.dep_util import newer +from distutils.sysconfig import get_config_var + +from setup_common import * + +# Set to True to enable multiple file compilations (experimental) +ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0") +# Set to True to enable relaxed strides checking. This (mostly) means +# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags. +NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0") + +# XXX: ugly, we use a class to avoid calling twice some expensive functions in +# config.h/numpyconfig.h. I don't see a better way because distutils force +# config.h generation inside an Extension class, and as such sharing +# configuration informations between extensions is not easy. +# Using a pickled-based memoize does not work because config_cmd is an instance +# method, which cPickle does not like. +# +# Use pickle in all cases, as cPickle is gone in python3 and the difference +# in time is only in build. 
-- Charles Harris, 2013-03-30 + +class CallOnceOnly(object): + def __init__(self): + self._check_types = None + self._check_ieee_macros = None + self._check_complex = None + + def check_types(self, *a, **kw): + if self._check_types is None: + out = check_types(*a, **kw) + self._check_types = pickle.dumps(out) + else: + out = copy.deepcopy(pickle.loads(self._check_types)) + return out + + def check_ieee_macros(self, *a, **kw): + if self._check_ieee_macros is None: + out = check_ieee_macros(*a, **kw) + self._check_ieee_macros = pickle.dumps(out) + else: + out = copy.deepcopy(pickle.loads(self._check_ieee_macros)) + return out + + def check_complex(self, *a, **kw): + if self._check_complex is None: + out = check_complex(*a, **kw) + self._check_complex = pickle.dumps(out) + else: + out = copy.deepcopy(pickle.loads(self._check_complex)) + return out + +PYTHON_HAS_UNICODE_WIDE = True + +def pythonlib_dir(): + """return path where libpython* is.""" + if sys.platform == 'win32': + return os.path.join(sys.prefix, "libs") + else: + return get_config_var('LIBDIR') + +def is_npy_no_signal(): + """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration + header.""" + return sys.platform == 'win32' + +def is_npy_no_smp(): + """Return True if the NPY_NO_SMP symbol must be defined in public + header (when SMP support cannot be reliably enabled).""" + # Python 2.3 causes a segfault when + # trying to re-acquire the thread-state + # which is done in error-handling + # ufunc code. NPY_ALLOW_C_API and friends + # cause the segfault. So, we disable threading + # for now. + if sys.version[:5] < '2.4.2': + nosmp = 1 + else: + # Perhaps a fancier check is in order here. + # so that threads are only enabled if there + # are actually multiple CPUS? -- but + # threaded code can be nice even on a single + # CPU so that long-calculating code doesn't + # block. + try: + nosmp = os.environ['NPY_NOSMP'] + nosmp = 1 + except KeyError: + nosmp = 0 + return nosmp == 1 + +def win32_checks(deflist): + from numpy.distutils.misc_util import get_build_architecture + a = get_build_architecture() + + # Distutils hack on AMD64 on windows + print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % + (a, os.name, sys.platform)) + if a == 'AMD64': + deflist.append('DISTUTILS_USE_SDK') + + # On win32, force long double format string to be 'g', not + # 'Lg', since the MS runtime does not support long double whose + # size is > sizeof(double) + if a == "Intel" or a == "AMD64": + deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') + +def check_math_capabilities(config, moredefs, mathlibs): + def check_func(func_name): + return config.check_func(func_name, libraries=mathlibs, + decl=True, call=True) + + def check_funcs_once(funcs_name): + decl = dict([(f, True) for f in funcs_name]) + st = config.check_funcs_once(funcs_name, libraries=mathlibs, + decl=decl, call=decl) + if st: + moredefs.extend([(fname2def(f), 1) for f in funcs_name]) + return st + + def check_funcs(funcs_name): + # Use check_funcs_once first, and if it does not work, test func per + # func. Return success only if all the functions are available + if not check_funcs_once(funcs_name): + # Global check failed, check func per func + for f in funcs_name: + if check_func(f): + moredefs.append((fname2def(f), 1)) + return 0 + else: + return 1 + + #use_msvc = config.check_decl("_MSC_VER") + + if not check_funcs_once(MANDATORY_FUNCS): + raise SystemError("One of the required function to build numpy is not" + " available (the list is %s)." 
% str(MANDATORY_FUNCS)) + + # Standard functions which may not be available and for which we have a + # replacement implementation. Note that some of these are C99 functions. + + # XXX: hack to circumvent cpp pollution from python: python put its + # config.h in the public namespace, so we have a clash for the common + # functions we test. We remove every function tested by python's + # autoconf, hoping their own test are correct + for f in OPTIONAL_STDFUNCS_MAYBE: + if config.check_decl(fname2def(f), + headers=["Python.h", "math.h"]): + OPTIONAL_STDFUNCS.remove(f) + + check_funcs(OPTIONAL_STDFUNCS) + + for h in OPTIONAL_HEADERS: + if config.check_func("", decl=False, call=False, headers=[h]): + moredefs.append((fname2def(h).replace(".", "_"), 1)) + + for tup in OPTIONAL_INTRINSICS: + headers = None + if len(tup) == 2: + f, args = tup + else: + f, args, headers = tup[0], tup[1], [tup[2]] + if config.check_func(f, decl=False, call=True, call_args=args, + headers=headers): + moredefs.append((fname2def(f), 1)) + + for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES: + if config.check_func(fn, decl='int %s %s(void *);' % (dec, fn), + call=False): + moredefs.append((fname2def(fn), 1)) + + for fn in OPTIONAL_VARIABLE_ATTRIBUTES: + if config.check_func(fn, decl='int %s a;' % (fn), call=False): + m = fn.replace("(", "_").replace(")", "_") + moredefs.append((fname2def(m), 1)) + + # C99 functions: float and long double versions + check_funcs(C99_FUNCS_SINGLE) + check_funcs(C99_FUNCS_EXTENDED) + +def check_complex(config, mathlibs): + priv = [] + pub = [] + + try: + if os.uname()[0] == "Interix": + warnings.warn("Disabling broken complex support. See #1365") + return priv, pub + except: + # os.uname not available on all platforms. blanket except ugly but safe + pass + + # Check for complex support + st = config.check_header('complex.h') + if st: + priv.append(('HAVE_COMPLEX_H', 1)) + pub.append(('NPY_USE_C99_COMPLEX', 1)) + + for t in C99_COMPLEX_TYPES: + st = config.check_type(t, headers=["complex.h"]) + if st: + pub.append(('NPY_HAVE_%s' % type2def(t), 1)) + + def check_prec(prec): + flist = [f + prec for f in C99_COMPLEX_FUNCS] + decl = dict([(f, True) for f in flist]) + if not config.check_funcs_once(flist, call=decl, decl=decl, + libraries=mathlibs): + for f in flist: + if config.check_func(f, call=True, decl=True, + libraries=mathlibs): + priv.append((fname2def(f), 1)) + else: + priv.extend([(fname2def(f), 1) for f in flist]) + + check_prec('') + check_prec('f') + check_prec('l') + + return priv, pub + +def check_ieee_macros(config): + priv = [] + pub = [] + + macros = [] + + def _add_decl(f): + priv.append(fname2def("decl_%s" % f)) + pub.append('NPY_%s' % fname2def("decl_%s" % f)) + + # XXX: hack to circumvent cpp pollution from python: python put its + # config.h in the public namespace, so we have a clash for the common + # functions we test. We remove every function tested by python's + # autoconf, hoping their own test are correct + _macros = ["isnan", "isinf", "signbit", "isfinite"] + for f in _macros: + py_symbol = fname2def("decl_%s" % f) + already_declared = config.check_decl(py_symbol, + headers=["Python.h", "math.h"]) + if already_declared: + if config.check_macro_true(py_symbol, + headers=["Python.h", "math.h"]): + pub.append('NPY_%s' % fname2def("decl_%s" % f)) + else: + macros.append(f) + # Normally, isnan and isinf are macro (C99), but some platforms only have + # func, or both func and macro version. Check for macro only, and define + # replacement ones if not found. 
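
As a quick orientation on the naming convention these checks rely on (a sketch, not part of the build): each probed symbol is turned into a config-header macro by fname2def, which is defined in setup_common.py further down in this patch. For the decl_-prefixed names used by check_ieee_macros:

    >>> def fname2def(name):                      # mirrors setup_common.fname2def
    ...     return "HAVE_%s" % name.upper()
    >>> fname2def("decl_isnan")                   # private define collected in priv
    'HAVE_DECL_ISNAN'
    >>> 'NPY_%s' % fname2def("decl_isnan")        # public variant collected in pub
    'NPY_HAVE_DECL_ISNAN'
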
+ # Note: including Python.h is necessary because it modifies some math.h + # definitions + for f in macros: + st = config.check_decl(f, headers = ["Python.h", "math.h"]) + if st: + _add_decl(f) + + return priv, pub + +def check_types(config_cmd, ext, build_dir): + private_defines = [] + public_defines = [] + + # Expected size (in number of bytes) for each type. This is an + # optimization: those are only hints, and an exhaustive search for the size + # is done if the hints are wrong. + expected = {} + expected['short'] = [2] + expected['int'] = [4] + expected['long'] = [8, 4] + expected['float'] = [4] + expected['double'] = [8] + expected['long double'] = [8, 12, 16] + expected['Py_intptr_t'] = [4, 8] + expected['PY_LONG_LONG'] = [8] + expected['long long'] = [8] + expected['off_t'] = [4, 8] + + # Check we have the python header (-dev* packages on Linux) + result = config_cmd.check_header('Python.h') + if not result: + raise SystemError( + "Cannot compile 'Python.h'. Perhaps you need to "\ + "install python-dev|python-devel.") + res = config_cmd.check_header("endian.h") + if res: + private_defines.append(('HAVE_ENDIAN_H', 1)) + public_defines.append(('NPY_HAVE_ENDIAN_H', 1)) + + # Check basic types sizes + for type in ('short', 'int', 'long'): + res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"]) + if res: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type))) + else: + res = config_cmd.check_type_size(type, expected=expected[type]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + for type in ('float', 'double', 'long double'): + already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), + headers = ["Python.h"]) + res = config_cmd.check_type_size(type, expected=expected[type]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + if not already_declared and not type == 'long double': + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + # Compute size of corresponding complex type: used to check that our + # definition is binary compatible with C99 complex type (check done at + # build time in npy_common.h) + complex_def = "struct {%s __x; %s __y;}" % (type, type) + res = config_cmd.check_type_size(complex_def, expected=2*expected[type]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % complex_def) + + + for type in ('Py_intptr_t', 'off_t'): + res = config_cmd.check_type_size(type, headers=["Python.h"], + library_dirs=[pythonlib_dir()], + expected=expected[type]) + + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + # We check declaration AND type because that's how distutils does it. 
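
To make the "declaration AND type" remark concrete: for PY_LONG_LONG below, check_decl first confirms the symbol is declared in Python.h, and only then is its size measured, so a size-probe failure cannot be mistaken for a missing declaration. The complex-size probe just above rests on a layout assumption that a small ctypes sketch (an aside, not part of the build; `_cplx` is a hypothetical name) makes explicit -- a C99 complex double must occupy exactly the space of a struct of two doubles:

    >>> import ctypes
    >>> class _cplx(ctypes.Structure):            # struct {double __x; double __y;}
    ...     _fields_ = [("__x", ctypes.c_double), ("__y", ctypes.c_double)]
    >>> ctypes.sizeof(_cplx)                      # NPY_SIZEOF_COMPLEX_DOUBLE on typical platforms
    16
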
+ if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): + res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], + library_dirs=[pythonlib_dir()], + expected=expected['PY_LONG_LONG']) + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') + + res = config_cmd.check_type_size('long long', + expected=expected['long long']) + if res >= 0: + #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % 'long long') + + if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): + raise RuntimeError( + "Config wo CHAR_BIT is not supported"\ + ", please contact the maintainers") + + return private_defines, public_defines + +def check_mathlib(config_cmd): + # Testing the C math library + mathlibs = [] + mathlibs_choices = [[], ['m'], ['cpml']] + mathlib = os.environ.get('MATHLIB') + if mathlib: + mathlibs_choices.insert(0, mathlib.split(',')) + for libs in mathlibs_choices: + if config_cmd.check_func("exp", libraries=libs, decl=True, call=True): + mathlibs = libs + break + else: + raise EnvironmentError("math library missing; rerun " + "setup.py after setting the " + "MATHLIB env variable") + return mathlibs + +def visibility_define(config): + """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty + string).""" + if config.check_compiler_gcc4(): + return '__attribute__((visibility("hidden")))' + else: + return '' + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration, dot_join + from numpy.distutils.system_info import get_info, default_lib_dirs + + config = Configuration('core', parent_package, top_path) + local_dir = config.local_path + codegen_dir = join(local_dir, 'code_generators') + + if is_released(config): + warnings.simplefilter('error', MismatchCAPIWarning) + + # Check whether we have a mismatch between the set C API VERSION and the + # actual C API VERSION + check_api_version(C_API_VERSION, codegen_dir) + + generate_umath_py = join(codegen_dir, 'generate_umath.py') + n = dot_join(config.name, 'generate_umath') + generate_umath = imp.load_module('_'.join(n.split('.')), + open(generate_umath_py, 'U'), generate_umath_py, + ('.py', 'U', 1)) + + header_dir = 'include/numpy' # this is relative to config.path_in_package + + cocache = CallOnceOnly() + + def generate_config_h(ext, build_dir): + target = join(build_dir, header_dir, 'config.h') + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) + + if newer(__file__, target): + config_cmd = config.get_config_cmd() + log.info('Generating %s', target) + + # Check sizeof + moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) + + # Check math library and C99 math funcs availability + mathlibs = check_mathlib(config_cmd) + moredefs.append(('MATHLIB', ','.join(mathlibs))) + + check_math_capabilities(config_cmd, moredefs, mathlibs) + moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) + moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) + + # Signal check + if is_npy_no_signal(): + moredefs.append('__NPY_PRIVATE_NO_SIGNAL') + + # Windows checks + if sys.platform=='win32' or os.name=='nt': + win32_checks(moredefs) + + # Inline check + inline = 
config_cmd.check_inline() + + # Check whether we need our own wide character support + if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']): + PYTHON_HAS_UNICODE_WIDE = True + else: + PYTHON_HAS_UNICODE_WIDE = False + + if ENABLE_SEPARATE_COMPILATION: + moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1)) + + if NPY_RELAXED_STRIDES_CHECKING: + moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) + + # Get long double representation + if sys.platform != 'darwin': + rep = check_long_double_representation(config_cmd) + if rep in ['INTEL_EXTENDED_12_BYTES_LE', + 'INTEL_EXTENDED_16_BYTES_LE', + 'MOTOROLA_EXTENDED_12_BYTES_BE', + 'IEEE_QUAD_LE', 'IEEE_QUAD_BE', + 'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE', + 'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']: + moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) + else: + raise ValueError("Unrecognized long double format: %s" % rep) + + # Py3K check + if sys.version_info[0] == 3: + moredefs.append(('NPY_PY3K', 1)) + + # Generate the config.h file from moredefs + target_f = open(target, 'w') + for d in moredefs: + if isinstance(d, str): + target_f.write('#define %s\n' % (d)) + else: + target_f.write('#define %s %s\n' % (d[0], d[1])) + + # define inline to our keyword, or nothing + target_f.write('#ifndef __cplusplus\n') + if inline == 'inline': + target_f.write('/* #undef inline */\n') + else: + target_f.write('#define inline %s\n' % inline) + target_f.write('#endif\n') + + # add the guard to make sure config.h is never included directly, + # but always through npy_config.h + target_f.write(""" +#ifndef _NPY_NPY_CONFIG_H_ +#error config.h should never be included directly, include npy_config.h instead +#endif +""") + + target_f.close() + print('File:', target) + target_f = open(target) + print(target_f.read()) + target_f.close() + print('EOF') + else: + mathlibs = [] + target_f = open(target) + for line in target_f: + s = '#define MATHLIB' + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) + target_f.close() + + # Ugly: this can be called within a library and not an extension, + # in which case there is no libraries attributes (and none is + # needed). 
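
For reference, the readback branch above recovers MATHLIB from an already-generated config.h by plain string parsing; a worked example of that parse, with hypothetical file contents:

    >>> line = "#define MATHLIB m,cpml\n"         # hypothetical config.h line
    >>> s = '#define MATHLIB'
    >>> line[len(s):].strip().split(',')
    ['m', 'cpml']
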
+ if hasattr(ext, 'libraries'): + ext.libraries.extend(mathlibs) + + incl_dir = os.path.dirname(target) + if incl_dir not in config.numpy_include_dirs: + config.numpy_include_dirs.append(incl_dir) + + return target + + def generate_numpyconfig_h(ext, build_dir): + """Depends on config.h: generate_config_h has to be called before !""" + # put private include directory in build_dir on search path + # allows using code generation in headers headers + config.add_include_dirs(join(build_dir, "src", "private")) + + target = join(build_dir, header_dir, '_numpyconfig.h') + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) + if newer(__file__, target): + config_cmd = config.get_config_cmd() + log.info('Generating %s', target) + + # Check sizeof + ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) + + if is_npy_no_signal(): + moredefs.append(('NPY_NO_SIGNAL', 1)) + + if is_npy_no_smp(): + moredefs.append(('NPY_NO_SMP', 1)) + else: + moredefs.append(('NPY_NO_SMP', 0)) + + mathlibs = check_mathlib(config_cmd) + moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) + moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1]) + + if ENABLE_SEPARATE_COMPILATION: + moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1)) + + if NPY_RELAXED_STRIDES_CHECKING: + moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) + + # Check wether we can use inttypes (C99) formats + if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']): + moredefs.append(('NPY_USE_C99_FORMATS', 1)) + + # visibility check + hidden_visibility = visibility_define(config_cmd) + moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility)) + + # Add the C API/ABI versions + moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION)) + moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION)) + + # Add moredefs to header + target_f = open(target, 'w') + for d in moredefs: + if isinstance(d, str): + target_f.write('#define %s\n' % (d)) + else: + target_f.write('#define %s %s\n' % (d[0], d[1])) + + # Define __STDC_FORMAT_MACROS + target_f.write(""" +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif +""") + target_f.close() + + # Dump the numpyconfig.h header to stdout + print('File: %s' % target) + target_f = open(target) + print(target_f.read()) + target_f.close() + print('EOF') + config.add_data_files((header_dir, target)) + return target + + def generate_api_func(module_name): + def generate_api(ext, build_dir): + script = join(codegen_dir, module_name + '.py') + sys.path.insert(0, codegen_dir) + try: + m = __import__(module_name) + log.info('executing %s', script) + h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir)) + finally: + del sys.path[0] + config.add_data_files((header_dir, h_file), + (header_dir, doc_file)) + return (h_file,) + return generate_api + + generate_numpy_api = generate_api_func('generate_numpy_api') + generate_ufunc_api = generate_api_func('generate_ufunc_api') + + config.add_include_dirs(join(local_dir, "src", "private")) + config.add_include_dirs(join(local_dir, "src")) + config.add_include_dirs(join(local_dir)) + + config.add_data_files('include/numpy/*.h') + config.add_include_dirs(join('src', 'npymath')) + config.add_include_dirs(join('src', 'multiarray')) + config.add_include_dirs(join('src', 'umath')) + config.add_include_dirs(join('src', 'npysort')) + + config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")]) + config.add_define_macros([("_FILE_OFFSET_BITS", "64")]) + 
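+    # (_FILE_OFFSET_BITS above plus the two _LARGEFILE*_SOURCE defines below
+    # enable 64-bit file offsets -- large-file support -- on 32-bit POSIX builds)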
config.add_define_macros([('_LARGEFILE_SOURCE', '1')]) + config.add_define_macros([('_LARGEFILE64_SOURCE', '1')]) + + config.numpy_include_dirs.extend(config.paths('include')) + + deps = [join('src', 'npymath', '_signbit.c'), + join('include', 'numpy', '*object.h'), + 'include/numpy/fenv/fenv.c', + 'include/numpy/fenv/fenv.h', + join(codegen_dir, 'genapi.py'), + ] + + # Don't install fenv unless we need them. + if sys.platform == 'cygwin': + config.add_data_dir('include/numpy/fenv') + + ####################################################################### + # dummy module # + ####################################################################### + + # npymath needs the config.h and numpyconfig.h files to be generated, but + # build_clib cannot handle generate_config_h and generate_numpyconfig_h + # (don't ask). Because clib are generated before extensions, we have to + # explicitly add an extension which has generate_config_h and + # generate_numpyconfig_h as sources *before* adding npymath. + + config.add_extension('_dummy', + sources = [join('src', 'dummymodule.c'), + generate_config_h, + generate_numpyconfig_h, + generate_numpy_api] + ) + + ####################################################################### + # npymath library # + ####################################################################### + + subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")]) + def get_mathlib_info(*args): + # Another ugly hack: the mathlib info is known once build_src is run, + # but we cannot use add_installed_pkg_config here either, so we only + # update the substition dictionary during npymath build + config_cmd = config.get_config_cmd() + + # Check that the toolchain works, to fail early if it doesn't + # (avoid late errors with MATHLIB which are confusing if the + # compiler does not work). 
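+        # (try_link compiles and links the minimal program below; if even that
+        # fails, the toolchain itself is broken rather than the MATHLIB choice)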
+ st = config_cmd.try_link('int main(void) { return 0;}') + if not st: + raise RuntimeError("Broken toolchain: cannot link a simple C program") + mlibs = check_mathlib(config_cmd) + + posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) + msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs]) + subst_dict["posix_mathlib"] = posix_mlib + subst_dict["msvc_mathlib"] = msvc_mlib + + npymath_sources = [join('src', 'npymath', 'npy_math.c.src'), + join('src', 'npymath', 'ieee754.c.src'), + join('src', 'npymath', 'npy_math_complex.c.src'), + join('src', 'npymath', 'halffloat.c')] + config.add_installed_library('npymath', + sources=npymath_sources + [get_mathlib_info], + install_dir='lib') + config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", + subst_dict) + config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", + subst_dict) + + ####################################################################### + # npysort library # + ####################################################################### + + # This library is created for the build but it is not installed + npysort_sources=[join('src', 'npysort', 'quicksort.c.src'), + join('src', 'npysort', 'mergesort.c.src'), + join('src', 'npysort', 'heapsort.c.src'), + join('src', 'private', 'npy_partition.h.src'), + join('src', 'npysort', 'selection.c.src'), + join('src', 'private', 'npy_binsearch.h.src'), + join('src', 'npysort', 'binsearch.c.src'), + ] + config.add_library('npysort', + sources=npysort_sources, + include_dirs=[]) + + + ####################################################################### + # multiarray module # + ####################################################################### + + # Multiarray version: this function is needed to build foo.c from foo.c.src + # when foo.c is included in another file and as such not in the src + # argument of build_ext command + def generate_multiarray_templated_sources(ext, build_dir): + from numpy.distutils.misc_util import get_cmd + + subpath = join('src', 'multiarray') + sources = [join(local_dir, subpath, 'scalartypes.c.src'), + join(local_dir, subpath, 'arraytypes.c.src'), + join(local_dir, subpath, 'nditer_templ.c.src'), + join(local_dir, subpath, 'lowlevel_strided_loops.c.src'), + join(local_dir, subpath, 'einsum.c.src')] + + # numpy.distutils generate .c from .c.src in weird directories, we have + # to add them there as they depend on the build_dir + config.add_include_dirs(join(build_dir, subpath)) + cmd = get_cmd('build_src') + cmd.ensure_finalized() + cmd.template_sources(sources, ext) + + multiarray_deps = [ + join('src', 'multiarray', 'arrayobject.h'), + join('src', 'multiarray', 'arraytypes.h'), + join('src', 'multiarray', 'array_assign.h'), + join('src', 'multiarray', 'buffer.h'), + join('src', 'multiarray', 'calculation.h'), + join('src', 'multiarray', 'common.h'), + join('src', 'multiarray', 'convert_datatype.h'), + join('src', 'multiarray', 'convert.h'), + join('src', 'multiarray', 'conversion_utils.h'), + join('src', 'multiarray', 'ctors.h'), + join('src', 'multiarray', 'descriptor.h'), + join('src', 'multiarray', 'getset.h'), + join('src', 'multiarray', 'hashdescr.h'), + join('src', 'multiarray', 'iterators.h'), + join('src', 'multiarray', 'mapping.h'), + join('src', 'multiarray', 'methods.h'), + join('src', 'multiarray', 'multiarraymodule.h'), + join('src', 'multiarray', 'nditer_impl.h'), + join('src', 'multiarray', 'numpymemoryview.h'), + join('src', 'multiarray', 'number.h'), + join('src', 'multiarray', 'numpyos.h'), + join('src', 'multiarray', 
'refcount.h'), + join('src', 'multiarray', 'scalartypes.h'), + join('src', 'multiarray', 'sequence.h'), + join('src', 'multiarray', 'shape.h'), + join('src', 'multiarray', 'ucsnarrow.h'), + join('src', 'multiarray', 'usertypes.h'), + join('src', 'private', 'lowlevel_strided_loops.h'), + join('include', 'numpy', 'arrayobject.h'), + join('include', 'numpy', '_neighborhood_iterator_imp.h'), + join('include', 'numpy', 'npy_endian.h'), + join('include', 'numpy', 'arrayscalars.h'), + join('include', 'numpy', 'noprefix.h'), + join('include', 'numpy', 'npy_interrupt.h'), + join('include', 'numpy', 'npy_3kcompat.h'), + join('include', 'numpy', 'npy_math.h'), + join('include', 'numpy', 'halffloat.h'), + join('include', 'numpy', 'npy_common.h'), + join('include', 'numpy', 'npy_os.h'), + join('include', 'numpy', 'utils.h'), + join('include', 'numpy', 'ndarrayobject.h'), + join('include', 'numpy', 'npy_cpu.h'), + join('include', 'numpy', 'numpyconfig.h'), + join('include', 'numpy', 'ndarraytypes.h'), + join('include', 'numpy', 'npy_1_7_deprecated_api.h'), + join('include', 'numpy', '_numpyconfig.h.in'), + # add library sources as distuils does not consider libraries + # dependencies + ] + npysort_sources + npymath_sources + + multiarray_src = [ + join('src', 'multiarray', 'alloc.c'), + join('src', 'multiarray', 'arrayobject.c'), + join('src', 'multiarray', 'arraytypes.c.src'), + join('src', 'multiarray', 'array_assign.c'), + join('src', 'multiarray', 'array_assign_scalar.c'), + join('src', 'multiarray', 'array_assign_array.c'), + join('src', 'multiarray', 'buffer.c'), + join('src', 'multiarray', 'calculation.c'), + join('src', 'multiarray', 'common.c'), + join('src', 'multiarray', 'convert.c'), + join('src', 'multiarray', 'convert_datatype.c'), + join('src', 'multiarray', 'conversion_utils.c'), + join('src', 'multiarray', 'ctors.c'), + join('src', 'multiarray', 'datetime.c'), + join('src', 'multiarray', 'datetime_strings.c'), + join('src', 'multiarray', 'datetime_busday.c'), + join('src', 'multiarray', 'datetime_busdaycal.c'), + join('src', 'multiarray', 'descriptor.c'), + join('src', 'multiarray', 'dtype_transfer.c'), + join('src', 'multiarray', 'einsum.c.src'), + join('src', 'multiarray', 'flagsobject.c'), + join('src', 'multiarray', 'getset.c'), + join('src', 'multiarray', 'hashdescr.c'), + join('src', 'multiarray', 'item_selection.c'), + join('src', 'multiarray', 'iterators.c'), + join('src', 'multiarray', 'lowlevel_strided_loops.c.src'), + join('src', 'multiarray', 'mapping.c'), + join('src', 'multiarray', 'methods.c'), + join('src', 'multiarray', 'multiarraymodule.c'), + join('src', 'multiarray', 'nditer_templ.c.src'), + join('src', 'multiarray', 'nditer_api.c'), + join('src', 'multiarray', 'nditer_constr.c'), + join('src', 'multiarray', 'nditer_pywrap.c'), + join('src', 'multiarray', 'number.c'), + join('src', 'multiarray', 'numpymemoryview.c'), + join('src', 'multiarray', 'numpyos.c'), + join('src', 'multiarray', 'refcount.c'), + join('src', 'multiarray', 'sequence.c'), + join('src', 'multiarray', 'shape.c'), + join('src', 'multiarray', 'scalarapi.c'), + join('src', 'multiarray', 'scalartypes.c.src'), + join('src', 'multiarray', 'usertypes.c'), + join('src', 'multiarray', 'ucsnarrow.c')] + + + if not ENABLE_SEPARATE_COMPILATION: + multiarray_deps.extend(multiarray_src) + multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')] + multiarray_src.append(generate_multiarray_templated_sources) + + config.add_extension('multiarray', + sources = multiarray_src + + [generate_config_h, 
+ generate_numpyconfig_h, + generate_numpy_api, + join(codegen_dir, 'generate_numpy_api.py'), + join('*.py')], + depends = deps + multiarray_deps, + libraries = ['npymath', 'npysort']) + + ####################################################################### + # umath module # + ####################################################################### + + # umath version: this function is needed to build foo.c from foo.c.src + # when foo.c is included in another file and as such not in the src + # argument of build_ext command + def generate_umath_templated_sources(ext, build_dir): + from numpy.distutils.misc_util import get_cmd + + subpath = join('src', 'umath') + sources = [ + join(local_dir, subpath, 'loops.h.src'), + join(local_dir, subpath, 'loops.c.src'), + join(local_dir, subpath, 'simd.inc.src')] + + # numpy.distutils generate .c from .c.src in weird directories, we have + # to add them there as they depend on the build_dir + config.add_include_dirs(join(build_dir, subpath)) + cmd = get_cmd('build_src') + cmd.ensure_finalized() + cmd.template_sources(sources, ext) + + + def generate_umath_c(ext, build_dir): + target = join(build_dir, header_dir, '__umath_generated.c') + dir = os.path.dirname(target) + if not os.path.exists(dir): + os.makedirs(dir) + script = generate_umath_py + if newer(script, target): + f = open(target, 'w') + f.write(generate_umath.make_code(generate_umath.defdict, + generate_umath.__file__)) + f.close() + return [] + + umath_src = [ + join('src', 'umath', 'umathmodule.c'), + join('src', 'umath', 'reduction.c'), + join('src', 'umath', 'funcs.inc.src'), + join('src', 'umath', 'simd.inc.src'), + join('src', 'umath', 'loops.h.src'), + join('src', 'umath', 'loops.c.src'), + join('src', 'umath', 'ufunc_object.c'), + join('src', 'umath', 'ufunc_type_resolution.c')] + + umath_deps = [ + generate_umath_py, + join('src', 'multiarray', 'common.h'), + join('src', 'umath', 'simd.inc.src'), + join(codegen_dir, 'generate_ufunc_api.py'), + join('src', 'private', 'ufunc_override.h')] + npymath_sources + + if not ENABLE_SEPARATE_COMPILATION: + umath_deps.extend(umath_src) + umath_src = [join('src', 'umath', 'umathmodule_onefile.c')] + umath_src.append(generate_umath_templated_sources) + umath_src.append(join('src', 'umath', 'funcs.inc.src')) + umath_src.append(join('src', 'umath', 'simd.inc.src')) + + config.add_extension('umath', + sources = umath_src + + [generate_config_h, + generate_numpyconfig_h, + generate_umath_c, + generate_ufunc_api], + depends = deps + umath_deps, + libraries = ['npymath'], + ) + + ####################################################################### + # scalarmath module # + ####################################################################### + + config.add_extension('scalarmath', + sources = [join('src', 'scalarmathmodule.c.src'), + join('src', 'private', 'scalarmathmodule.h.src'), + generate_config_h, + generate_numpyconfig_h, + generate_numpy_api, + generate_ufunc_api], + depends = deps + npymath_sources, + libraries = ['npymath'], + ) + + ####################################################################### + # _dotblas module # + ####################################################################### + + # Configure blasdot + blas_info = get_info('blas_opt', 0) + #blas_info = {} + def get_dotblas_sources(ext, build_dir): + if blas_info: + if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []): + return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient. 
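+            # (a usable ATLAS-backed BLAS was found: build _dotblas from the
+            # first entry of the depends list below, blasdot/_dotblas.c)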
+ return ext.depends[:1] + return None # no extension module will be built + + config.add_extension('_dotblas', + sources = [get_dotblas_sources], + depends = [join('blasdot', '_dotblas.c'), + join('blasdot', 'cblas.h'), + ], + include_dirs = ['blasdot'], + extra_info = blas_info + ) + + ####################################################################### + # umath_tests module # + ####################################################################### + + config.add_extension('umath_tests', + sources = [join('src', 'umath', 'umath_tests.c.src')]) + + ####################################################################### + # custom rational dtype module # + ####################################################################### + + config.add_extension('test_rational', + sources = [join('src', 'umath', 'test_rational.c.src')]) + + ####################################################################### + # struct_ufunc_test module # + ####################################################################### + + config.add_extension('struct_ufunc_test', + sources = [join('src', 'umath', 'struct_ufunc_test.c.src')]) + + ####################################################################### + # multiarray_tests module # + ####################################################################### + + config.add_extension('multiarray_tests', + sources = [join('src', 'multiarray', 'multiarray_tests.c.src')]) + + ####################################################################### + # operand_flag_tests module # + ####################################################################### + + config.add_extension('operand_flag_tests', + sources = [join('src', 'umath', 'operand_flag_tests.c.src')]) + + config.add_data_dir('tests') + config.add_data_dir('tests/data') + + config.make_svn_version_py() + + return config + +if __name__=='__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup_common.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup_common.py new file mode 100644 index 0000000000000..be5673a478733 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup_common.py @@ -0,0 +1,321 @@ +from __future__ import division, absolute_import, print_function + +# Code common to build tools +import sys +from os.path import join +import warnings +import copy +import binascii + +from distutils.ccompiler import CompileError + +#------------------- +# Versioning support +#------------------- +# How to change C_API_VERSION ? +# - increase C_API_VERSION value +# - record the hash for the new C API with the script cversions.py +# and add the hash to cversions.txt +# The hash values are used to remind developers when the C API number was not +# updated - generates a MismatchCAPIWarning warning which is turned into an +# exception for released version. + +# Binary compatibility version number. This number is increased whenever the +# C-API is changed such that binary compatibility is broken, i.e. whenever a +# recompile of extension modules is needed. +C_ABI_VERSION = 0x01000009 + +# Minor API version. This number is increased whenever a change is made to the +# C-API -- whether it breaks binary compatibility or not. Some changes, such +# as adding a function pointer to the end of the function table, can be made +# without breaking binary compatibility. In this case, only the C_API_VERSION +# (*not* C_ABI_VERSION) would be increased. 
Whenever binary compatibility is
+# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
+#
+# 0x00000008 - 1.7.x
+# 0x00000009 - 1.8.x
+# 0x00000009 - 1.9.x
+C_API_VERSION = 0x00000009
+
+class MismatchCAPIWarning(Warning):
+    pass
+
+def is_released(config):
+    """Return True if a released version of numpy is detected."""
+    from distutils.version import LooseVersion
+
+    v = config.get_version('../version.py')
+    if v is None:
+        raise ValueError("Could not get version")
+    pv = LooseVersion(vstring=v).version
+    if len(pv) > 3:
+        return False
+    return True
+
+def get_api_versions(apiversion, codegen_dir):
+    """Return the current C API checksum and the recorded checksum for the
+    given version of the C API."""
+    api_files = [join(codegen_dir, 'numpy_api_order.txt'),
+                 join(codegen_dir, 'ufunc_api_order.txt')]
+
+    # Compute the hash of the current API as defined in the .txt files in
+    # code_generators
+    sys.path.insert(0, codegen_dir)
+    try:
+        m = __import__('genapi')
+        numpy_api = __import__('numpy_api')
+        curapi_hash = m.fullapi_hash(numpy_api.full_api)
+        apis_hash = m.get_versions_hash()
+    finally:
+        del sys.path[0]
+
+    return curapi_hash, apis_hash[apiversion]
+
+def check_api_version(apiversion, codegen_dir):
+    """Emit a MismatchCAPIWarning if the C API version needs updating."""
+    curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
+
+    # If the hashes differ, the API .txt files in codegen_dir were updated
+    # without the API version being bumped. Any modification to those .txt
+    # files must be reflected in the API (and eventually ABI) version numbers.
+    # To compute the checksum of the current API, use the
+    # code_generators/cversions.py script.
+    if curapi_hash != api_hash:
+        msg = "API mismatch detected, the C API version " \
+              "numbers have to be updated. Current C API version is %d, " \
+              "with checksum %s, but recorded checksum for C API version %d in " \
+              "codegen_dir/cversions.txt is %s. If functions were added in the " \
+              "C API, you have to update C_API_VERSION in %s."
+        warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
+                             __file__),
+                      MismatchCAPIWarning)
+# Mandatory functions: if not found, fail the build
+MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
+                   "floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
+                   "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
+
+# Standard functions which may not be available and for which we have a
+# replacement implementation. Note that some of these are C99 functions.
+OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", + "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", + "copysign", "nextafter", "ftello", "fseeko", + "strtoll", "strtoull"] + + +OPTIONAL_HEADERS = [ +# sse headers only enabled automatically on amd64/x32 builds + "xmmintrin.h", # SSE + "emmintrin.h", # SSE2 +] + +# optional gcc compiler builtins and their call arguments and optional a +# required header +# call arguments are required as the compiler will do strict signature checking +OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'), + ("__builtin_isinf", '5.'), + ("__builtin_isfinite", '5.'), + ("__builtin_bswap32", '5u'), + ("__builtin_bswap64", '5u'), + ("__builtin_expect", '5, 0'), + ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE + ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2 + ] + +# function attributes +# tested via "int %s %s(void *);" % (attribute, name) +# function name will be converted to HAVE_ preprocessor macro +OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))', + 'attribute_optimize_unroll_loops'), + ('__attribute__((optimize("O3")))', + 'attribute_optimize_opt_3'), + ('__attribute__((nonnull (1)))', + 'attribute_nonnull'), + ] + +# variable attributes tested via "int %s a" % attribute +OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"] + +# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h +OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot", + "copysign", "ftello", "fseeko"] + +# C99 functions: float and long double versions +C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", + "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", + "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", + "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', + "exp2", "log2", "copysign", "nextafter"] + +C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] +C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] + +C99_COMPLEX_TYPES = ['complex double', 'complex float', 'complex long double'] + +C99_COMPLEX_FUNCS = ['creal', 'cimag', 'cabs', 'carg', 'cexp', 'csqrt', 'clog', + 'ccos', 'csin', 'cpow'] + +def fname2def(name): + return "HAVE_%s" % name.upper() + +def sym2def(symbol): + define = symbol.replace(' ', '') + return define.upper() + +def type2def(symbol): + define = symbol.replace(' ', '_') + return define.upper() + +# Code to detect long double representation taken from MPFR m4 macro +def check_long_double_representation(cmd): + cmd._check_compiler() + body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} + + # We need to use _compile because we need the object filename + src, object = cmd._compile(body, None, None, 'c') + try: + type = long_double_representation(pyod(object)) + return type + finally: + cmd._clean() + +LONG_DOUBLE_REPRESENTATION_SRC = r""" +/* "before" is 16 bytes to ensure there's no padding between it and "x". + * We're not expecting any "long double" bigger than 16 bytes or with + * alignment requirements stricter than 16 bytes. */ +typedef %(type)s test_type; + +struct { + char before[16]; + test_type x; + char after[8]; +} foo = { + { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, + -123456789.0, + { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } +}; +""" + +def pyod(filename): + """Python implementation of the od UNIX utility (od -b, more exactly). 
+ + Parameters + ---------- + filename : str + name of the file to get the dump from. + + Returns + ------- + out : seq + list of lines of od output + + Note + ---- + We only implement enough to get the necessary information for long double + representation, this is not intended as a compatible replacement for od. + """ + def _pyod2(): + out = [] + + fid = open(filename, 'rb') + try: + yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()] + for i in range(0, len(yo), 16): + line = ['%07d' % int(oct(i))] + line.extend(['%03d' % c for c in yo[i:i+16]]) + out.append(" ".join(line)) + return out + finally: + fid.close() + + def _pyod3(): + out = [] + + fid = open(filename, 'rb') + try: + yo2 = [oct(o)[2:] for o in fid.read()] + for i in range(0, len(yo2), 16): + line = ['%07d' % int(oct(i)[2:])] + line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) + out.append(" ".join(line)) + return out + finally: + fid.close() + + if sys.version_info[0] < 3: + return _pyod2() + else: + return _pyod3() + +_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', + '001', '043', '105', '147', '211', '253', '315', '357'] +_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] + +_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] +_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] +_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', + '031', '300', '000', '000'] +_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', + '031', '300', '000', '000', '000', '000', '000', '000'] +_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171', + '242', '240', '000', '000', '000', '000'] +_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', + '000', '000', '000', '000', '000', '000', '000', '000'] +_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] +_DOUBLE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] + \ + ['000'] * 8 +_DOUBLE_DOUBLE_LE = ['000', '000', '000', '124', '064', '157', '235', '301'] + \ + ['000'] * 8 + +def long_double_representation(lines): + """Given a binary dump as given by GNU od -b, look for long double + representation.""" + + # Read contains a list of 32 items, each item is a byte (in octal + # representation, as a string). 
We 'slide' over the output until read is of + # the form before_seq + content + after_sequence, where content is the long double + # representation: + # - content is 12 bytes: 80 bits Intel representation + # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision + # - content is 8 bytes: same as double (not implemented yet) + read = [''] * 32 + saw = None + for line in lines: + # we skip the first word, as od -b output an index at the beginning of + # each line + for w in line.split()[1:]: + read.pop(0) + read.append(w) + + # If the end of read is equal to the after_sequence, read contains + # the long double + if read[-8:] == _AFTER_SEQ: + saw = copy.copy(read) + if read[:12] == _BEFORE_SEQ[4:]: + if read[12:-8] == _INTEL_EXTENDED_12B: + return 'INTEL_EXTENDED_12_BYTES_LE' + if read[12:-8] == _MOTOROLA_EXTENDED_12B: + return 'MOTOROLA_EXTENDED_12_BYTES_BE' + elif read[:8] == _BEFORE_SEQ[8:]: + if read[8:-8] == _INTEL_EXTENDED_16B: + return 'INTEL_EXTENDED_16_BYTES_LE' + elif read[8:-8] == _IEEE_QUAD_PREC_BE: + return 'IEEE_QUAD_BE' + elif read[8:-8] == _IEEE_QUAD_PREC_LE: + return 'IEEE_QUAD_LE' + elif read[8:-8] == _DOUBLE_DOUBLE_BE: + return 'DOUBLE_DOUBLE_BE' + elif read[8:-8] == _DOUBLE_DOUBLE_LE: + return 'DOUBLE_DOUBLE_LE' + elif read[:16] == _BEFORE_SEQ: + if read[16:-8] == _IEEE_DOUBLE_LE: + return 'IEEE_DOUBLE_LE' + elif read[16:-8] == _IEEE_DOUBLE_BE: + return 'IEEE_DOUBLE_BE' + + if saw is not None: + raise ValueError("Unrecognized format (%s)" % saw) + else: + # We never detected the after_sequence + raise ValueError("Could not lock sequences (%s)" % saw) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/shape_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/shape_base.py new file mode 100644 index 0000000000000..ae684fb423949 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/shape_base.py @@ -0,0 +1,277 @@ +from __future__ import division, absolute_import, print_function + +__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'vstack', 'hstack'] + +from . import numeric as _nx +from .numeric import array, asanyarray, newaxis + +def atleast_1d(*arys): + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst + higher-dimensional inputs are preserved. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or sequence of arrays, each with ``a.ndim >= 1``. + Copies are made only if necessary. + + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> np.atleast_1d(1.0) + array([ 1.]) + + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[ 0., 1., 2.], + [ 3., 4., 5.], + [ 6., 7., 8.]]) + >>> np.atleast_1d(x) is x + True + + >>> np.atleast_1d(1, [3, 4]) + [array([1]), array([3, 4])] + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if len(ary.shape) == 0 : + result = ary.reshape(1) + else : + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + +def atleast_2d(*arys): + """ + View inputs as arrays with at least two dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have two or more dimensions are + preserved. + + Returns + ------- + res, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 2``. 
+ Copies are avoided where possible, and views with two or more + dimensions are returned. + + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> np.atleast_2d(3.0) + array([[ 3.]]) + + >>> x = np.arange(3.0) + >>> np.atleast_2d(x) + array([[ 0., 1., 2.]]) + >>> np.atleast_2d(x).base is x + True + + >>> np.atleast_2d(1, [1, 2], [[1, 2]]) + [array([[1]]), array([[1, 2]]), array([[1, 2]])] + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if len(ary.shape) == 0 : + result = ary.reshape(1, 1) + elif len(ary.shape) == 1 : + result = ary[newaxis,:] + else : + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + +def atleast_3d(*arys): + """ + View inputs as arrays with at least three dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted to + arrays. Arrays that already have three or more dimensions are + preserved. + + Returns + ------- + res1, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are + avoided where possible, and views with three or more dimensions are + returned. For example, a 1-D array of shape ``(N,)`` becomes a view + of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a + view of shape ``(M, N, 1)``. + + See Also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> np.atleast_3d(3.0) + array([[[ 3.]]]) + + >>> x = np.arange(3.0) + >>> np.atleast_3d(x).shape + (1, 3, 1) + + >>> x = np.arange(12.0).reshape(4,3) + >>> np.atleast_3d(x).shape + (4, 3, 1) + >>> np.atleast_3d(x).base is x + True + + >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): + ... print arr, arr.shape + ... + [[[1] + [2]]] (1, 2, 1) + [[[1] + [2]]] (1, 2, 1) + [[[1 2]]] (1, 1, 2) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if len(ary.shape) == 0: + result = ary.reshape(1, 1, 1) + elif len(ary.shape) == 1: + result = ary[newaxis,:, newaxis] + elif len(ary.shape) == 2: + result = ary[:,:, newaxis] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def vstack(tup): + """ + Stack arrays in sequence vertically (row wise). + + Take a sequence of arrays and stack them vertically to make a single + array. Rebuild arrays divided by `vsplit`. + + Parameters + ---------- + tup : sequence of ndarrays + Tuple containing arrays to be stacked. The arrays must have the same + shape along all but the first axis. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays. + + See Also + -------- + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + concatenate : Join a sequence of arrays together. + vsplit : Split array into a list of multiple sub-arrays vertically. + + Notes + ----- + Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that + are at least 2-dimensional. + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> b = np.array([2, 3, 4]) + >>> np.vstack((a,b)) + array([[1, 2, 3], + [2, 3, 4]]) + + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[2], [3], [4]]) + >>> np.vstack((a,b)) + array([[1], + [2], + [3], + [2], + [3], + [4]]) + + """ + return _nx.concatenate([atleast_2d(_m) for _m in tup], 0) + +def hstack(tup): + """ + Stack arrays in sequence horizontally (column wise). + + Take a sequence of arrays and stack them horizontally to make + a single array. 
Rebuild arrays divided by `hsplit`.
+
+    Parameters
+    ----------
+    tup : sequence of ndarrays
+        All arrays must have the same shape along all but the second axis.
+
+    Returns
+    -------
+    stacked : ndarray
+        The array formed by stacking the given arrays.
+
+    See Also
+    --------
+    vstack : Stack arrays in sequence vertically (row wise).
+    dstack : Stack arrays in sequence depth wise (along third axis).
+    concatenate : Join a sequence of arrays together.
+    hsplit : Split array along second axis.
+
+    Notes
+    -----
+    Equivalent to ``np.concatenate(tup, axis=1)``
+
+    Examples
+    --------
+    >>> a = np.array((1,2,3))
+    >>> b = np.array((2,3,4))
+    >>> np.hstack((a,b))
+    array([1, 2, 3, 2, 3, 4])
+    >>> a = np.array([[1],[2],[3]])
+    >>> b = np.array([[2],[3],[4]])
+    >>> np.hstack((a,b))
+    array([[1, 2],
+           [2, 3],
+           [3, 4]])
+
+    """
+    arrs = [atleast_1d(_m) for _m in tup]
+    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
+    if arrs[0].ndim == 1:
+        return _nx.concatenate(arrs, 0)
+    else:
+        return _nx.concatenate(arrs, 1)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/struct_ufunc_test.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/struct_ufunc_test.py
new file mode 100644
index 0000000000000..7eec360249bb9
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/struct_ufunc_test.py
@@ -0,0 +1,7 @@
+def __bootstrap__():
+    global __bootstrap__, __loader__, __file__
+    import sys, pkg_resources, imp
+    __file__ = pkg_resources.resource_filename(__name__, 'struct_ufunc_test.cpython-34m.so')
+    __loader__ = None; del __bootstrap__, __loader__
+    imp.load_dynamic(__name__,__file__)
+__bootstrap__()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/test_rational.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/test_rational.py
new file mode 100644
index 0000000000000..1fee9627eb0da
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/test_rational.py
@@ -0,0 +1,7 @@
+def __bootstrap__():
+    global __bootstrap__, __loader__, __file__
+    import sys, pkg_resources, imp
+    __file__ = pkg_resources.resource_filename(__name__, 'test_rational.cpython-34m.so')
+    __loader__ = None; del __bootstrap__, __loader__
+    imp.load_dynamic(__name__,__file__)
+__bootstrap__()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/data/astype_copy.pkl b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/data/astype_copy.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..7397c978297b3f64c7e6540b23f448f280e30bcc
GIT binary patch
literal 716
[binary patch data omitted: the base85 payload for tests/data/astype_copy.pkl (716 bytes) and the following binary diff for tests/data/recarray_from_file.fits (8640 bytes) are not reproducible as text, and the headers of the next few hunks were lost with them. Content resumes inside numpy/core/tests/test_api.py, within test_array_array's buffer test.]
+    if _buffer and sys.version_info[:3] >= (2, 7, 5):
+        # This test fails for earlier versions of Python.
+        # Evidently a bug got fixed in 2.7.5.
+        dat = np.array(_buffer('1.0'), dtype=np.float64)
+        assert_equal(dat, [49.0, 46.0, 48.0])
+        assert_(dat.dtype.type is np.float64)
+
+        dat = np.array(_buffer(b'1.0'))
+        assert_equal(dat, [49, 46, 48])
+        assert_(dat.dtype.type is np.uint8)
+
+    # test memoryview, new version of buffer
+    _memoryview = builtins.get("memoryview")
+    if _memoryview:
+        dat = np.array(_memoryview(b'1.0'), dtype=np.float64)
+        assert_equal(dat, [49.0, 46.0, 48.0])
+        assert_(dat.dtype.type is np.float64)
+
+        dat = np.array(_memoryview(b'1.0'))
+        assert_equal(dat, [49, 46, 48])
+        assert_(dat.dtype.type is np.uint8)
+
+    # test array interface
+    a = np.array(100.0, dtype=np.float64)
+    o = type("o", (object,),
+             dict(__array_interface__=a.__array_interface__))
+    assert_equal(np.array(o, dtype=np.float64), a)
+
+    # test array_struct interface
+    a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
+                 dtype=[('f0', int), ('f1', float), ('f2', str)])
+    o = type("o", (object,),
+             dict(__array_struct__=a.__array_struct__))
+    ## wasn't what I expected... is np.array(o) supposed to equal a ?
+    ## instead we get a array([...], dtype=">V18")
+    assert_equal(str(np.array(o).data), str(a.data))
+
+    # test array
+    o = type("o", (object,),
+             dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
+    assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
+
+    # test recursion
+    nested = 1.5
+    for i in range(np.MAXDIMS):
+        nested = [nested]
+
+    # no error
+    np.array(nested)
+
+    # Exceeds recursion limit
+    assert_raises(ValueError, np.array, [nested], dtype=np.float64)
+
+    # Try with lists...
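+    # (each None is expected to coerce to np.nan under dtype=np.float64,
+    # per the np.full() comparisons below)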
+ assert_equal(np.array([None] * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([[None]] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array([1.0] * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([[1.0]] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + # Try with tuples + assert_equal(np.array((None,) * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,)] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array((1.0,) * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + +def test_fastCopyAndTranspose(): + # 0D array + a = np.array(2) + b = np.fastCopyAndTranspose(a) + assert_equal(b, a.T) + assert_(b.flags.owndata) + + # 1D array + a = np.array([3, 2, 7, 0]) + b = np.fastCopyAndTranspose(a) + assert_equal(b, a.T) + assert_(b.flags.owndata) + + # 2D array + a = np.arange(6).reshape(2, 3) + b = np.fastCopyAndTranspose(a) + assert_equal(b, a.T) + assert_(b.flags.owndata) + +def test_array_astype(): + a = np.arange(6, dtype='f4').reshape(2, 3) + # Default behavior: allows unsafe casts, keeps memory layout, + # always copies. 
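+    # (float32 -> int32 here: the itemsizes match, which is why the strides
+    # comparisons below hold even though the data is always copied)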
+ b = a.astype('i4') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.strides, b.strides) + b = a.T.astype('i4') + assert_equal(a.T, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.T.strides, b.strides) + b = a.astype('f4') + assert_equal(a, b) + assert_(not (a is b)) + + # copy=False parameter can sometimes skip a copy + b = a.astype('f4', copy=False) + assert_(a is b) + + # order parameter allows overriding of the memory layout, + # forcing a copy if the layout is wrong + b = a.astype('f4', order='F', copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(b.flags.f_contiguous) + + b = a.astype('f4', order='C', copy=False) + assert_equal(a, b) + assert_(a is b) + assert_(b.flags.c_contiguous) + + # casting parameter allows catching bad casts + b = a.astype('c8', casting='safe') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('c8')) + + assert_raises(TypeError, a.astype, 'i4', casting='safe') + + # subok=False passes through a non-subclassed array + b = a.astype('f4', subok=0, copy=False) + assert_(a is b) + + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + + # subok=True passes through a matrix + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + # Make sure converting from string object to fixed length string + # does not truncate. + a = np.array([b'a'*100], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S100')) + a = np.array([sixu('a')*100], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U100')) + + # Same test as above but for strings shorter than 64 characters + a = np.array([b'a'*10], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S10')) + a = np.array([sixu('a')*10], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U10')) + + a = np.array(123456789012345678901234567890, dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='O').astype('U') + assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30')) + + a = np.array([123456789012345678901234567890], dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array([123456789012345678901234567890], dtype='O').astype('U') + assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30')) + + a = np.array(123456789012345678901234567890, dtype='S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='U') + assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30')) + + a = np.array(sixu('a\u0140'), dtype='U') + b = np.ndarray(buffer=a, dtype='uint32', shape=2) + assert_(b.size == 2) + + a = np.array([1000], dtype='i4') + assert_raises(TypeError, a.astype, 'S1', casting='safe') + + a = np.array(1000, dtype='i4') + assert_raises(TypeError, a.astype, 'U1', casting='safe') + +def test_copyto_fromscalar(): + a = np.arange(6, dtype='f4').reshape(2, 3) + + # Simple copy + np.copyto(a, 1.5) + assert_equal(a, 1.5) + np.copyto(a.T, 2.5) + assert_equal(a, 2.5) + + # Where-masked copy + mask = 
np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
+    np.copyto(a, 3.5, where=mask)
+    assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
+    mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
+    np.copyto(a.T, 4.5, where=mask)
+    assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
+
+def test_copyto():
+    a = np.arange(6, dtype='i4').reshape(2, 3)
+
+    # Simple copy
+    np.copyto(a, [[3, 1, 5], [6, 2, 1]])
+    assert_equal(a, [[3, 1, 5], [6, 2, 1]])
+
+    # Overlapping copy should work
+    np.copyto(a[:, :2], a[::-1, 1::-1])
+    assert_equal(a, [[2, 6, 5], [1, 3, 1]])
+
+    # Defaults to 'same_kind' casting
+    assert_raises(TypeError, np.copyto, a, 1.5)
+
+    # Force a copy with 'unsafe' casting, truncating 1.5 to 1
+    np.copyto(a, 1.5, casting='unsafe')
+    assert_equal(a, 1)
+
+    # Copying with a mask
+    np.copyto(a, 3, where=[True, False, True])
+    assert_equal(a, [[3, 1, 3], [3, 1, 3]])
+
+    # Casting rule still applies with a mask
+    assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
+
+    # Lists of integer 0's and 1's are ok too
+    np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
+    assert_equal(a, [[3, 4, 4], [4, 1, 3]])
+
+    # Overlapping copy with mask should work
+    np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
+    assert_equal(a, [[3, 4, 4], [4, 3, 3]])
+
+    # 'dst' must be an array
+    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
+
+def test_copyto_permut():
+    # test explicit overflow case
+    pad = 500
+    l = [True] * pad + [True, True, True, True]
+    r = np.zeros(len(l)-pad)
+    d = np.ones(len(l)-pad)
+    mask = np.array(l)[pad:]
+    np.copyto(r, d, where=mask[::-1])
+
+    # test all permutations of possible masks, 9 should be sufficient for
+    # the current 4-byte unrolled code
+    power = 9
+    d = np.ones(power)
+    for i in range(2**power):
+        r = np.zeros(power)
+        l = [(i & x) != 0 for x in range(power)]
+        mask = np.array(l)
+        np.copyto(r, d, where=mask)
+        assert_array_equal(r == 1, l)
+        assert_equal(r.sum(), sum(l))
+
+        r = np.zeros(power)
+        np.copyto(r, d, where=mask[::-1])
+        assert_array_equal(r == 1, l[::-1])
+        assert_equal(r.sum(), sum(l))
+
+        r = np.zeros(power)
+        np.copyto(r[::2], d[::2], where=mask[::2])
+        assert_array_equal(r[::2] == 1, l[::2])
+        assert_equal(r[::2].sum(), sum(l[::2]))
+
+        r = np.zeros(power)
+        np.copyto(r[::2], d[::2], where=mask[::-2])
+        assert_array_equal(r[::2] == 1, l[::-2])
+        assert_equal(r[::2].sum(), sum(l[::-2]))
+
+        for c in [0xFF, 0x7F, 0x02, 0x10]:
+            r = np.zeros(power)
+            mask = np.array(l)
+            imask = np.array(l).view(np.uint8)
+            imask[mask != 0] = 0xFF
+            np.copyto(r, d, where=mask)
+            assert_array_equal(r == 1, l)
+            assert_equal(r.sum(), sum(l))
+
+    r = np.zeros(power)
+    np.copyto(r, d, where=True)
+    assert_equal(r.sum(), r.size)
+    r = np.ones(power)
+    d = np.zeros(power)
+    np.copyto(r, d, where=False)
+    assert_equal(r.sum(), r.size)
+
+def test_copy_order():
+    a = np.arange(24).reshape(2, 1, 3, 4)
+    b = a.copy(order='F')
+    c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
+
+    def check_copy_result(x, y, ccontig, fcontig, strides=False):
+        assert_(not (x is y))
+        assert_equal(x, y)
+        assert_equal(x.flags.c_contiguous, ccontig)
+        assert_equal(x.flags.f_contiguous, fcontig)
+        # Strides are not reliable when NPY_RELAXED_STRIDES_CHECKING
+        # changes them actively
+        if not NPY_RELAXED_STRIDES_CHECKING:
+            if strides:
+                assert_equal(x.strides, y.strides)
+            else:
+                assert_(x.strides != y.strides)
+
+    # Validate the initial state of a, b, and c
+    assert_(a.flags.c_contiguous)
+    assert_(not a.flags.f_contiguous)
+    assert_(not
b.flags.c_contiguous) + assert_(b.flags.f_contiguous) + assert_(not c.flags.c_contiguous) + assert_(not c.flags.f_contiguous) + + # Copy with order='C' + res = a.copy(order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = c.copy(order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + res = np.copy(a, order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = np.copy(c, order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + + # Copy with order='F' + res = a.copy(order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = b.copy(order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + res = np.copy(a, order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = np.copy(b, order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + + # Copy with order='K' + res = a.copy(order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + res = np.copy(a, order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + +def test_contiguous_flags(): + a = np.ones((4, 4, 1))[::2,:,:] + if NPY_RELAXED_STRIDES_CHECKING: + a.strides = a.strides[:2] + (-123,) + b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) + + def check_contig(a, ccontig, fcontig): + assert_(a.flags.c_contiguous == ccontig) + assert_(a.flags.f_contiguous == fcontig) + + # Check if new arrays are correct: + check_contig(a, False, False) + check_contig(b, False, False) + if NPY_RELAXED_STRIDES_CHECKING: + check_contig(np.empty((2, 2, 0, 2, 2)), True, True) + check_contig(np.array([[[1], [2]]], order='F'), True, True) + else: + check_contig(np.empty((2, 2, 0, 2, 2)), True, False) + check_contig(np.array([[[1], [2]]], order='F'), False, True) + check_contig(np.empty((2, 2)), True, False) + check_contig(np.empty((2, 2), order='F'), False, True) + + # Check that np.array creates correct contiguous flags: + check_contig(np.array(a, copy=False), False, False) + check_contig(np.array(a, copy=False, order='C'), True, False) + check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True) + + if NPY_RELAXED_STRIDES_CHECKING: + # Check slicing update of flags and : + check_contig(a[0], True, True) + check_contig(a[None, ::4, ..., None], True, True) + check_contig(b[0, 0, ...], False, True) + check_contig(b[:,:, 0:0,:,:], True, True) + else: + # Check slicing update of flags: + check_contig(a[0], True, False) + # Would be nice if this was C-Contiguous: + check_contig(a[None, 0, ..., None], False, False) + check_contig(b[0, 0, 0, ...], False, True) + + # Test ravel and squeeze. 
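+    # (A contiguous 1-d array is both C- and F-contiguous, so ravel() and
+    # the squeeze of np.ones((1, 3, 1)) down to shape (3,) should both
+    # report True for both flags.)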
+ check_contig(a.ravel(), True, True) + check_contig(np.ones((1, 3, 1)).squeeze(), True, True) + +def test_broadcast_arrays(): + # Test user defined dtypes + a = np.array([(1, 2, 3)], dtype='u4,u4,u4') + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + result = np.broadcast_arrays(a, b) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_arrayprint.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_arrayprint.py new file mode 100644 index 0000000000000..44bf5f3978ffe --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_arrayprint.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import division, absolute_import, print_function + +import sys +import numpy as np +from numpy.testing import * +from numpy.compat import sixu + +class TestArrayRepr(object): + def test_nan_inf(self): + x = np.array([np.nan, np.inf]) + assert_equal(repr(x), 'array([ nan, inf])') + +class TestComplexArray(TestCase): + def test_str(self): + rvals = [0, 1, -1, np.inf, -np.inf, np.nan] + cvals = [complex(rp, ip) for rp in rvals for ip in rvals] + dtypes = [np.complex64, np.cdouble, np.clongdouble] + actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] + wanted = [ + '[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]', + '[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]', + '[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]', + '[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]', + '[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]', + '[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]', + '[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]', + '[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]', + '[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]', + '[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]', + '[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]', + '[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]', + '[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]', + '[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]', + '[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]', + '[-1.+infj]', '[-1.+infj]', '[-1.0+infj]', + '[-1.-infj]', '[-1.-infj]', '[-1.0-infj]', + '[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]', + '[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]', + '[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]', + '[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]', + '[ inf+infj]', '[ inf+infj]', '[ inf+infj]', + '[ inf-infj]', '[ inf-infj]', '[ inf-infj]', + '[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]', + '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]', + '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]', + '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]', + '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', + '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', + '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', + '[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]', + '[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]', + '[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]', + '[ nan+infj]', '[ nan+infj]', '[ nan+infj]', + '[ nan-infj]', '[ nan-infj]', '[ nan-infj]', + '[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]'] + + for res, val in zip(actual, wanted): + assert_(res == val) + +class TestArray2String(TestCase): + def test_basic(self): + """Basic test of array2string.""" + a = np.arange(3) + assert_(np.array2string(a) == '[0 1 2]') + assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]') + + def test_style_keyword(self): + """This should only apply to 0-D arrays. 
See #1218.""" + stylestr = np.array2string(np.array(1.5), + style=lambda x: "Value in 0-D array: " + str(x)) + assert_(stylestr == 'Value in 0-D array: 1.5') + + def test_format_function(self): + """Test custom format function for each element in array.""" + def _format_function(x): + if np.abs(x) < 1: + return '.' + elif np.abs(x) < 2: + return 'o' + else: + return 'O' + x = np.arange(3) + if sys.version_info[0] >= 3: + x_hex = "[0x0 0x1 0x2]" + x_oct = "[0o0 0o1 0o2]" + else: + x_hex = "[0x0L 0x1L 0x2L]" + x_oct = "[0L 01L 02L]" + assert_(np.array2string(x, formatter={'all':_format_function}) == \ + "[. o O]") + assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==\ + "[. o O]") + assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == \ + "[0.0000 1.0000 2.0000]") + assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), \ + x_hex) + assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), \ + x_oct) + + x = np.arange(3.) + assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == \ + "[0.00 1.00 2.00]") + assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == \ + "[0.00 1.00 2.00]") + + s = np.array(['abc', 'def']) + assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == \ + '[abcabc defdef]') + + +class TestPrintOptions: + """Test getting and setting global print options.""" + def setUp(self): + self.oldopts = np.get_printoptions() + + def tearDown(self): + np.set_printoptions(**self.oldopts) + + def test_basic(self): + x = np.array([1.5, 0, 1.234567890]) + assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])") + np.set_printoptions(precision=4) + assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])") + + def test_formatter(self): + x = np.arange(3) + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + + def test_formatter_reset(self): + x = np.arange(3) + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'int':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'all':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + np.set_printoptions(formatter={'int':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'int_kind':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + x = np.arange(3.) 
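+        # (Float formatters reset the same way: assigning None to the
+        # 'float' or 'float_kind' key restores the default formatting.)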
+ np.set_printoptions(formatter={'float':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1.0, 0.0, 1.0])") + np.set_printoptions(formatter={'float_kind':None}) + assert_equal(repr(x), "array([ 0., 1., 2.])") + +def test_unicode_object_array(): + import sys + if sys.version_info[0] >= 3: + expected = "array(['é'], dtype=object)" + else: + expected = "array([u'\\xe9'], dtype=object)" + x = np.array([sixu('\xe9')], dtype=object) + assert_equal(repr(x), expected) + + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_blasdot.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_blasdot.py new file mode 100644 index 0000000000000..264663835644d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_blasdot.py @@ -0,0 +1,172 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +import sys +from numpy.core import zeros, float64 +from numpy.testing import dec, TestCase, assert_almost_equal, assert_, \ + assert_raises, assert_array_equal, assert_allclose, assert_equal +from numpy.core.multiarray import inner as inner_ + +DECPREC = 14 + +class TestInner(TestCase): + def test_vecself(self): + """Ticket 844.""" + # Inner product of a vector with itself segfaults or give meaningless + # result + a = zeros(shape = (1, 80), dtype = float64) + p = inner_(a, a) + assert_almost_equal(p, 0, decimal = DECPREC) + +try: + import numpy.core._dotblas as _dotblas +except ImportError: + _dotblas = None + +@dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas") +def test_blasdot_used(): + from numpy.core import dot, vdot, inner, alterdot, restoredot + assert_(dot is _dotblas.dot) + assert_(vdot is _dotblas.vdot) + assert_(inner is _dotblas.inner) + assert_(alterdot is _dotblas.alterdot) + assert_(restoredot is _dotblas.restoredot) + + +def test_dot_2args(): + from numpy.core import dot + + a = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([[1, 0], [1, 1]], dtype=float) + c = np.array([[3, 2], [7, 4]], dtype=float) + + d = dot(a, b) + assert_allclose(c, d) + +def test_dot_3args(): + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 32)) + for i in range(12): + np.dot(f, v, r) + assert_equal(sys.getrefcount(r), 2) + r2 = np.dot(f, v, out=None) + assert_array_equal(r2, r) + assert_(r is np.dot(f, v, out=r)) + + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = np.dot(f, v) + assert_(r is np.dot(f, v, r)) + assert_array_equal(r2, r) + +def test_dot_3args_errors(): + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 31)) + assert_raises(ValueError, np.dot, f, v, r) + + r = np.empty((1024,)) + assert_raises(ValueError, np.dot, f, v, r) + + r = np.empty((32,)) + assert_raises(ValueError, np.dot, f, v, r) + + r = np.empty((32, 1024)) + assert_raises(ValueError, np.dot, f, v, r) + assert_raises(ValueError, np.dot, f, v, r.T) + + r = np.empty((1024, 64)) + assert_raises(ValueError, np.dot, f, v, r[:, ::2]) + assert_raises(ValueError, np.dot, f, v, r[:, :32]) + + r = np.empty((1024, 32), dtype=np.float32) + assert_raises(ValueError, np.dot, f, v, r) + + r = np.empty((1024, 32), dtype=int) + assert_raises(ValueError, np.dot, f, v, r) + +def test_dot_array_order(): + """ Test numpy dot with different order C, F + + Comparing results with multiarray dot. 
+ Double and single precisions array are compared using relative + precision of 7 and 5 decimals respectively. + Use 30 decimal when comparing exact operations like: + (a.b)' = b'.a' + """ + _dot = np.core.multiarray.dot + a_dim, b_dim, c_dim = 10, 4, 7 + orders = ["C", "F"] + dtypes_prec = {np.float64: 7, np.float32: 5} + np.random.seed(7) + + for arr_type, prec in dtypes_prec.items(): + for a_order in orders: + a = np.asarray(np.random.randn(a_dim, a_dim), + dtype=arr_type, order=a_order) + assert_array_equal(np.dot(a, a), a.dot(a)) + # (a.a)' = a'.a', note that mse~=1e-31 needs almost_equal + assert_almost_equal(a.dot(a), a.T.dot(a.T).T, decimal=prec) + + # + # Check with making explicit copy + # + a_T = a.T.copy(order=a_order) + assert_almost_equal(a_T.dot(a_T), a.T.dot(a.T), decimal=prec) + assert_almost_equal(a.dot(a_T), a.dot(a.T), decimal=prec) + assert_almost_equal(a_T.dot(a), a.T.dot(a), decimal=prec) + + # + # Compare with multiarray dot + # + assert_almost_equal(a.dot(a), _dot(a, a), decimal=prec) + assert_almost_equal(a.T.dot(a), _dot(a.T, a), decimal=prec) + assert_almost_equal(a.dot(a.T), _dot(a, a.T), decimal=prec) + assert_almost_equal(a.T.dot(a.T), _dot(a.T, a.T), decimal=prec) + for res in a.dot(a), a.T.dot(a), a.dot(a.T), a.T.dot(a.T): + assert res.flags.c_contiguous + + for b_order in orders: + b = np.asarray(np.random.randn(a_dim, b_dim), + dtype=arr_type, order=b_order) + b_T = b.T.copy(order=b_order) + assert_almost_equal(a_T.dot(b), a.T.dot(b), decimal=prec) + assert_almost_equal(b_T.dot(a), b.T.dot(a), decimal=prec) + # (b'.a)' = a'.b + assert_almost_equal(b.T.dot(a), a.T.dot(b).T, decimal=prec) + assert_almost_equal(a.dot(b), _dot(a, b), decimal=prec) + assert_almost_equal(b.T.dot(a), _dot(b.T, a), decimal=prec) + + + for c_order in orders: + c = np.asarray(np.random.randn(b_dim, c_dim), + dtype=arr_type, order=c_order) + c_T = c.T.copy(order=c_order) + assert_almost_equal(c.T.dot(b.T), c_T.dot(b_T), decimal=prec) + assert_almost_equal(c.T.dot(b.T).T, b.dot(c), decimal=prec) + assert_almost_equal(b.dot(c), _dot(b, c), decimal=prec) + assert_almost_equal(c.T.dot(b.T), _dot(c.T, b.T), decimal=prec) + +@dec.skipif(True) # ufunc override disabled for 1.9 +def test_dot_override(): + class A(object): + def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): + return "A" + + class B(object): + def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): + return NotImplemented + + a = A() + b = B() + c = np.array([[1]]) + + assert_equal(np.dot(a, b), "A") + assert_equal(c.dot(a), "A") + assert_raises(TypeError, np.dot, b, c) + assert_raises(TypeError, c.dot, b) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_datetime.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_datetime.py new file mode 100644 index 0000000000000..bf0ba68073940 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_datetime.py @@ -0,0 +1,1771 @@ +from __future__ import division, absolute_import, print_function + +import os, pickle +import numpy +import numpy as np +from numpy.testing import * +from numpy.compat import asbytes +import datetime + +# Use pytz to test out various time zones if available +try: + from pytz import timezone as tz + _has_pytz = True +except ImportError: + _has_pytz = False + + +class TestDateTime(TestCase): + def test_datetime_dtype_creation(self): + for unit in ['Y', 'M', 'W', 'D', + 'h', 'm', 's', 'ms', 'us', + 'ns', 'ps', 'fs', 'as']: + dt1 = np.dtype('M8[750%s]'%unit) + assert_(dt1 == 
np.dtype('datetime64[750%s]' % unit))
+            dt2 = np.dtype('m8[%s]' % unit)
+            assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
+
+        # Generic units shouldn't add [] to the end
+        assert_equal(str(np.dtype("M8")), "datetime64")
+
+        # Should be possible to specify the endianness
+        assert_equal(np.dtype("=M8"), np.dtype("M8"))
+        assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
+        assert_(np.dtype(">M8") == np.dtype("M8") or
+                np.dtype("<M8") == np.dtype("M8"))
+        assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
+                np.dtype("<M8[D]") == np.dtype("M8[D]"))
+        assert_(np.dtype(">M8") != np.dtype("<M8"))
+
+        assert_(np.dtype(">m8") == np.dtype("m8") or
+                np.dtype("<m8") == np.dtype("m8"))
+        assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
+                np.dtype("<m8[D]") == np.dtype("m8[D]"))
+        assert_(np.dtype(">m8") != np.dtype("<m8"))

[intervening lines of test_datetime.py lost in extraction]

+    def test_datetime_nat_casting(self):
+        a = np.array('NaT', dtype='M8[D]')
+        b = np.datetime64('NaT', '[D]')
+
+        # Scalars -> Scalars
+        assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
+        assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
+        assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
+        assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
+        assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
+
+        # Arrays -> Scalars
+        assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
+        assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
+        assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
+        assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
+        assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
+
+    def test_days_creation(self):
+        assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)/4 + 3 - 365)
+        assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)/4 + 3)
+        assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)/4 + 3 + 366)
+        assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
+                     (1900-1970)*365 - (1970-1900)//4)
+        assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
+                     (1900-1970)*365 - (1970-1900)//4 + 365)
+        assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
+        assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
+        assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
+        assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
+        assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
+        assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
+        assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
+        assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
+        assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
+                     (2000 - 1970)*365 + (2000 - 1972)//4)
+        assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
+                     (2000 - 1970)*365 + (2000 - 1972)//4 + 366)
+        assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
+                     (2400 - 1970)*365 + (2400 - 1972)//4 - 3)
+        assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
+                     (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
+
+        assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
+        assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
+        assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
+                     (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
+        assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
+                     (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
+        assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
+                     (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
+
+    def test_days_to_pydate(self):
+        assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
+
datetime.date(1599, 1, 1)) + assert_equal(np.array('1600', dtype='M8[D]').astype('O'), + datetime.date(1600, 1, 1)) + assert_equal(np.array('1601', dtype='M8[D]').astype('O'), + datetime.date(1601, 1, 1)) + assert_equal(np.array('1900', dtype='M8[D]').astype('O'), + datetime.date(1900, 1, 1)) + assert_equal(np.array('1901', dtype='M8[D]').astype('O'), + datetime.date(1901, 1, 1)) + assert_equal(np.array('2000', dtype='M8[D]').astype('O'), + datetime.date(2000, 1, 1)) + assert_equal(np.array('2001', dtype='M8[D]').astype('O'), + datetime.date(2001, 1, 1)) + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), + datetime.date(1600, 2, 29)) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), + datetime.date(1600, 3, 1)) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), + datetime.date(2001, 3, 22)) + + def test_dtype_comparison(self): + assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) + assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) + assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) + assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) + + def test_pydatetime_creation(self): + a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') + assert_equal(a[0], a[1]) + # Will fail if the date changes during the exact right moment + a = np.array(['today', datetime.date.today()], dtype='M8[D]') + assert_equal(a[0], a[1]) + # datetime.datetime.now() returns local time, not UTC + #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') + #assert_equal(a[0], a[1]) + + # A datetime.date will raise if you try to give it time units + assert_raises(TypeError, np.array, datetime.date(1960, 3, 12), + dtype='M8[s]') + + def test_datetime_string_conversion(self): + a = ['2011-03-16', '1920-01-01', '2013-05-19'] + str_a = np.array(a, dtype='S') + dt_a = np.array(a, dtype='M') + str_b = np.empty_like(str_a) + dt_b = np.empty_like(dt_a) + + # String to datetime + assert_equal(dt_a, str_a.astype('M')) + assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b[...] = str_a + assert_equal(dt_a, dt_b) + # Datetime to string + assert_equal(str_a, dt_a.astype('S0')) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + # Convert the 'S' to 'U' + str_a = str_a.astype('U') + str_b = str_b.astype('U') + + # Unicode to datetime + assert_equal(dt_a, str_a.astype('M')) + assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b[...] = str_a + assert_equal(dt_a, dt_b) + # Datetime to unicode + assert_equal(str_a, dt_a.astype('U')) + str_b[...] 
= dt_a
+        assert_equal(str_a, str_b)
+
+    def test_datetime_array_str(self):
+        a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
+        assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
+
+        a = np.array(['2011-03-16T13:55Z', '1920-01-01T03:12Z'], dtype='M')
+        assert_equal(np.array2string(a, separator=', ',
+                         formatter={'datetime': lambda x:
+                             "'%s'" % np.datetime_as_string(x, timezone='UTC')}),
+                     "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
+
+        # Check that one NaT doesn't corrupt subsequent entries
+        a = np.array(['2010', 'NaT', '2030']).astype('M')
+        assert_equal(str(a), "['2010' 'NaT' '2030']")
+
+    def test_pickle(self):
+        # Check that pickle roundtripping works
+        dt = np.dtype('M8[7D]')
+        assert_equal(pickle.loads(pickle.dumps(dt)), dt)
+        dt = np.dtype('M8[W]')
+        assert_equal(pickle.loads(pickle.dumps(dt)), dt)
+
+        # Check that loading pickles from 1.6 works
+        pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+              "(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
+              "I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
+        assert_equal(pickle.loads(asbytes(pkl)), np.dtype('<M8[7D]'))
+        pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+              "(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
+              "I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
+        assert_equal(pickle.loads(asbytes(pkl)), np.dtype('>M8[us]'))
+
+    def test_setstate(self):
+        "Verify that datetime dtype __setstate__ can handle bad arguments"
+        dt = np.dtype('>M8[us]')
+        assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
+        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+        assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
+        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+
+    def test_dtype_promotion(self):
+        # datetime <op> datetime computes the metadata gcd
+        # timedelta <op> timedelta computes the metadata gcd
+        for mM in ['m', 'M']:
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
+                np.dtype(mM+'8[2Y]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
+                np.dtype(mM+'8[3Y]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
+                np.dtype(mM+'8[2M]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
+                np.dtype(mM+'8[1D]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
+                np.dtype(mM+'8[s]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
+                np.dtype(mM+'8[7s]'))
+        # timedelta <op> timedelta raises when there is no reasonable gcd
+        assert_raises(TypeError, np.promote_types,
+                      np.dtype('m8[Y]'), np.dtype('m8[D]'))
+        assert_raises(TypeError, np.promote_types,
+                      np.dtype('m8[M]'), np.dtype('m8[W]'))
+        # timedelta <op> timedelta may overflow with big unit ranges
+        assert_raises(OverflowError, np.promote_types,
+                      np.dtype('m8[W]'), np.dtype('m8[fs]'))
+        assert_raises(OverflowError, np.promote_types,
+                      np.dtype('m8[s]'), np.dtype('m8[as]'))
+
+    def test_cast_overflow(self):
+        # gh-4486
+        def cast():
+            numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
+        assert_raises(OverflowError, cast)

[remainder of test_datetime.py and the beginning of test_defchararray.py
lost in extraction]

+        assert_(all(self.A >= self.B))
+        assert_(all(self.A <= self.B))
+        assert_(not any(self.A > self.B))
+        assert_(not any(self.A < self.B))
+        assert_(not any(self.A != self.B))
+
+class TestChar(TestCase):
+    def setUp(self):
+        self.A = np.array('abc1', dtype='c').view(np.chararray)
+
+    def test_it(self):
+        assert_equal(self.A.shape, (4,))
+        assert_equal(self.A.upper()[:2].tobytes(), asbytes('AB'))
+
+class TestComparisons(TestCase):
+    def setUp(self):
+        self.A = np.array([['abc',
'123'], + ['789', 'xyz']]).view(np.chararray) + self.B = np.array([['efg', '123 '], + ['051', 'tuv']]).view(np.chararray) + + def test_not_equal(self): + assert_array_equal((self.A != self.B), [[True, False], [True, True]]) + + def test_equal(self): + assert_array_equal((self.A == self.B), [[False, True], [False, False]]) + + def test_greater_equal(self): + assert_array_equal((self.A >= self.B), [[False, True], [True, True]]) + + def test_less_equal(self): + assert_array_equal((self.A <= self.B), [[True, True], [False, False]]) + + def test_greater(self): + assert_array_equal((self.A > self.B), [[False, False], [True, True]]) + + def test_less(self): + assert_array_equal((self.A < self.B), [[True, False], [False, False]]) + +class TestComparisonsMixed1(TestComparisons): + """Ticket #1276""" + + def setUp(self): + TestComparisons.setUp(self) + self.B = np.array([['efg', '123 '], + ['051', 'tuv']], np.unicode_).view(np.chararray) + +class TestComparisonsMixed2(TestComparisons): + """Ticket #1276""" + + def setUp(self): + TestComparisons.setUp(self) + self.A = np.array([['abc', '123'], + ['789', 'xyz']], np.unicode_).view(np.chararray) + +class TestInformation(TestCase): + def setUp(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) + self.B = np.array([[sixu(' \u03a3 '), sixu('')], + [sixu('12345'), sixu('MixedCase')], + [sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray) + + def test_len(self): + assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + + def test_count(self): + assert_(issubclass(self.A.count('').dtype.type, np.integer)) + assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + # Python doesn't seem to like counting NULL characters + # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) + # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + + def test_endswith(self): + assert_(issubclass(self.A.endswith('').dtype.type, np.bool_)) + assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + def fail(): + self.A.endswith('3', 'fdjk') + self.assertRaises(TypeError, fail) + + def test_find(self): + assert_(issubclass(self.A.find('a').dtype.type, np.integer)) + assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]]) + assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]]) + + def test_index(self): + def fail(): + self.A.index('a') + self.assertRaises(ValueError, fail) + assert_(np.char.index('abcba', 'b') == 1) + assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) + + def test_isalnum(self): + assert_(issubclass(self.A.isalnum().dtype.type, np.bool_)) + assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + + def test_isalpha(self): + assert_(issubclass(self.A.isalpha().dtype.type, np.bool_)) + assert_array_equal(self.A.isalpha(), 
[[False, False], [False, True], [False, True]]) + + def test_isdigit(self): + assert_(issubclass(self.A.isdigit().dtype.type, np.bool_)) + assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + + def test_islower(self): + assert_(issubclass(self.A.islower().dtype.type, np.bool_)) + assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + + def test_isspace(self): + assert_(issubclass(self.A.isspace().dtype.type, np.bool_)) + assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + + def test_istitle(self): + assert_(issubclass(self.A.istitle().dtype.type, np.bool_)) + assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + + def test_isupper(self): + assert_(issubclass(self.A.isupper().dtype.type, np.bool_)) + assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + + def test_rfind(self): + assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) + assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + + def test_rindex(self): + def fail(): + self.A.rindex('a') + self.assertRaises(ValueError, fail) + assert_(np.char.rindex('abcba', 'b') == 3) + assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) + + def test_startswith(self): + assert_(issubclass(self.A.startswith('').dtype.type, np.bool_)) + assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + def fail(): + self.A.startswith('3', 'fdjk') + self.assertRaises(TypeError, fail) + + +class TestMethods(TestCase): + def setUp(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.chararray) + self.B = np.array([[sixu(' \u03a3 '), sixu('')], + [sixu('12345'), sixu('MixedCase')], + [sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray) + + def test_capitalize(self): + assert_(issubclass(self.A.capitalize().dtype.type, np.string_)) + assert_array_equal(self.A.capitalize(), asbytes_nested([ + [' abc ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']])) + assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_)) + assert_array_equal(self.B.capitalize(), [ + [sixu(' \u03c3 '), ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']]) + + def test_center(self): + assert_(issubclass(self.A.center(10).dtype.type, np.string_)) + widths = np.array([[10, 20]]) + C = self.A.center([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + C = self.A.center(20, asbytes('#')) + assert_(np.all(C.startswith(asbytes('#')))) + assert_(np.all(C.endswith(asbytes('#')))) + C = np.char.center(asbytes('FOO'), [[10, 20], [15, 8]]) + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, asbytes_nested([ + [' FOO ', ' FOO '], + [' FOO ', ' FOO ']])) + + def test_decode(self): + if sys.version_info[0] >= 3: + A = np.char.array([asbytes('\\u03a3')]) + assert_(A.decode('unicode-escape')[0] == '\u03a3') + else: + A = np.char.array(['736563726574206d657373616765']) + assert_(A.decode('hex_codec')[0] == 'secret message') + + def test_encode(self): + B = self.B.encode('unicode_escape') + assert_(B[0][0] == str(' \\u03a3 
').encode('latin1')) + + def test_expandtabs(self): + T = self.A.expandtabs() + assert_(T[2][0] == asbytes('123 345')) + + def test_join(self): + if sys.version_info[0] >= 3: + # NOTE: list(b'123') == [49, 50, 51] + # so that b','.join(b'123') results to an error on Py3 + A0 = self.A.decode('ascii') + else: + A0 = self.A + + A = np.char.join([',', '#'], A0) + if sys.version_info[0] >= 3: + assert_(issubclass(A.dtype.type, np.unicode_)) + else: + assert_(issubclass(A.dtype.type, np.string_)) + assert_array_equal(np.char.join([',', '#'], A0), + [ + [' ,a,b,c, ', ''], + ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], + ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) + + def test_ljust(self): + assert_(issubclass(self.A.ljust(10).dtype.type, np.string_)) + widths = np.array([[10, 20]]) + C = self.A.ljust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + C = self.A.ljust(20, asbytes('#')) + assert_array_equal(C.startswith(asbytes('#')), [ + [False, True], [False, False], [False, False]]) + assert_(np.all(C.endswith(asbytes('#')))) + C = np.char.ljust(asbytes('FOO'), [[10, 20], [15, 8]]) + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, asbytes_nested([ + ['FOO ', 'FOO '], + ['FOO ', 'FOO ']])) + + def test_lower(self): + assert_(issubclass(self.A.lower().dtype.type, np.string_)) + assert_array_equal(self.A.lower(), asbytes_nested([ + [' abc ', ''], + ['12345', 'mixedcase'], + ['123 \t 345 \0 ', 'upper']])) + assert_(issubclass(self.B.lower().dtype.type, np.unicode_)) + assert_array_equal(self.B.lower(), [ + [sixu(' \u03c3 '), sixu('')], + [sixu('12345'), sixu('mixedcase')], + [sixu('123 \t 345 \0 '), sixu('upper')]]) + + def test_lstrip(self): + assert_(issubclass(self.A.lstrip().dtype.type, np.string_)) + assert_array_equal(self.A.lstrip(), asbytes_nested([ + ['abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']])) + assert_array_equal(self.A.lstrip(asbytes_nested(['1', 'M'])), + asbytes_nested([ + [' abc', ''], + ['2345', 'ixedCase'], + ['23 \t 345 \x00', 'UPPER']])) + assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_)) + assert_array_equal(self.B.lstrip(), [ + [sixu('\u03a3 '), ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) + + def test_partition(self): + P = self.A.partition(asbytes_nested(['3', 'M'])) + assert_(issubclass(P.dtype.type, np.string_)) + assert_array_equal(P, asbytes_nested([ + [(' abc ', '', ''), ('', '', '')], + [('12', '3', '45'), ('', 'M', 'ixedCase')], + [('12', '3', ' \t 345 \0 '), ('UPPER', '', '')]])) + + def test_replace(self): + R = self.A.replace(asbytes_nested(['3', 'a']), + asbytes_nested(['##########', '@'])) + assert_(issubclass(R.dtype.type, np.string_)) + assert_array_equal(R, asbytes_nested([ + [' abc ', ''], + ['12##########45', 'MixedC@se'], + ['12########## \t ##########45 \x00', 'UPPER']])) + + if sys.version_info[0] < 3: + # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3 + R = self.A.replace(asbytes('a'), sixu('\u03a3')) + assert_(issubclass(R.dtype.type, np.unicode_)) + assert_array_equal(R, [ + [sixu(' \u03a3bc '), ''], + ['12345', sixu('MixedC\u03a3se')], + ['123 \t 345 \x00', 'UPPER']]) + + def test_rjust(self): + assert_(issubclass(self.A.rjust(10).dtype.type, np.string_)) + widths = np.array([[10, 20]]) + C = self.A.rjust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + C = self.A.rjust(20, asbytes('#')) + assert_(np.all(C.startswith(asbytes('#')))) + assert_array_equal(C.endswith(asbytes('#')), + [[False, True], [False, 
False], [False, False]]) + C = np.char.rjust(asbytes('FOO'), [[10, 20], [15, 8]]) + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, asbytes_nested([ + [' FOO', ' FOO'], + [' FOO', ' FOO']])) + + def test_rpartition(self): + P = self.A.rpartition(asbytes_nested(['3', 'M'])) + assert_(issubclass(P.dtype.type, np.string_)) + assert_array_equal(P, asbytes_nested([ + [('', '', ' abc '), ('', '', '')], + [('12', '3', '45'), ('', 'M', 'ixedCase')], + [('123 \t ', '3', '45 \0 '), ('', '', 'UPPER')]])) + + def test_rsplit(self): + A = self.A.rsplit(asbytes('3')) + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), asbytes_nested([ + [[' abc '], ['']], + [['12', '45'], ['MixedCase']], + [['12', ' \t ', '45 \x00 '], ['UPPER']]])) + + def test_rstrip(self): + assert_(issubclass(self.A.rstrip().dtype.type, np.string_)) + assert_array_equal(self.A.rstrip(), asbytes_nested([ + [' abc', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']])) + assert_array_equal(self.A.rstrip(asbytes_nested(['5', 'ER'])), + asbytes_nested([ + [' abc ', ''], + ['1234', 'MixedCase'], + ['123 \t 345 \x00', 'UPP']])) + assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_)) + assert_array_equal(self.B.rstrip(), [ + [sixu(' \u03a3'), ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']]) + + def test_strip(self): + assert_(issubclass(self.A.strip().dtype.type, np.string_)) + assert_array_equal(self.A.strip(), asbytes_nested([ + ['abc', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']])) + assert_array_equal(self.A.strip(asbytes_nested(['15', 'EReM'])), + asbytes_nested([ + [' abc ', ''], + ['234', 'ixedCas'], + ['23 \t 345 \x00', 'UPP']])) + assert_(issubclass(self.B.strip().dtype.type, np.unicode_)) + assert_array_equal(self.B.strip(), [ + [sixu('\u03a3'), ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']]) + + def test_split(self): + A = self.A.split(asbytes('3')) + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), asbytes_nested([ + [[' abc '], ['']], + [['12', '45'], ['MixedCase']], + [['12', ' \t ', '45 \x00 '], ['UPPER']]])) + + def test_splitlines(self): + A = np.char.array(['abc\nfds\nwer']).splitlines() + assert_(issubclass(A.dtype.type, np.object_)) + assert_(A.shape == (1,)) + assert_(len(A[0]) == 3) + + def test_swapcase(self): + assert_(issubclass(self.A.swapcase().dtype.type, np.string_)) + assert_array_equal(self.A.swapcase(), asbytes_nested([ + [' ABC ', ''], + ['12345', 'mIXEDcASE'], + ['123 \t 345 \0 ', 'upper']])) + assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_)) + assert_array_equal(self.B.swapcase(), [ + [sixu(' \u03c3 '), sixu('')], + [sixu('12345'), sixu('mIXEDcASE')], + [sixu('123 \t 345 \0 '), sixu('upper')]]) + + def test_title(self): + assert_(issubclass(self.A.title().dtype.type, np.string_)) + assert_array_equal(self.A.title(), asbytes_nested([ + [' Abc ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']])) + assert_(issubclass(self.B.title().dtype.type, np.unicode_)) + assert_array_equal(self.B.title(), [ + [sixu(' \u03a3 '), sixu('')], + [sixu('12345'), sixu('Mixedcase')], + [sixu('123 \t 345 \0 '), sixu('Upper')]]) + + def test_upper(self): + assert_(issubclass(self.A.upper().dtype.type, np.string_)) + assert_array_equal(self.A.upper(), asbytes_nested([ + [' ABC ', ''], + ['12345', 'MIXEDCASE'], + ['123 \t 345 \0 ', 'UPPER']])) + assert_(issubclass(self.B.upper().dtype.type, np.unicode_)) + assert_array_equal(self.B.upper(), [ + [sixu(' \u03a3 '), sixu('')], + [sixu('12345'), 
sixu('MIXEDCASE')], + [sixu('123 \t 345 \0 '), sixu('UPPER')]]) + + def test_isnumeric(self): + def fail(): + self.A.isnumeric() + self.assertRaises(TypeError, fail) + assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_)) + assert_array_equal(self.B.isnumeric(), [ + [False, False], [True, False], [False, False]]) + + def test_isdecimal(self): + def fail(): + self.A.isdecimal() + self.assertRaises(TypeError, fail) + assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_)) + assert_array_equal(self.B.isdecimal(), [ + [False, False], [True, False], [False, False]]) + + +class TestOperations(TestCase): + def setUp(self): + self.A = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.chararray) + self.B = np.array([['efg', '456'], + ['051', 'tuv']]).view(np.chararray) + + def test_add(self): + AB = np.array([['abcefg', '123456'], + ['789051', 'xyztuv']]).view(np.chararray) + assert_array_equal(AB, (self.A + self.B)) + assert_(len((self.A + self.B)[0][0]) == 6) + + def test_radd(self): + QA = np.array([['qabc', 'q123'], + ['q789', 'qxyz']]).view(np.chararray) + assert_array_equal(QA, ('q' + self.A)) + + def test_mul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) + + assert_array_equal(Ar, (self.A * r)) + + for ob in [object(), 'qrs']: + try: + A * ob + except ValueError: + pass + else: + self.fail("chararray can only be multiplied by integers") + + def test_rmul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) + assert_array_equal(Ar, (r * self.A)) + + for ob in [object(), 'qrs']: + try: + ob * A + except ValueError: + pass + else: + self.fail("chararray can only be multiplied by integers") + + def test_mod(self): + """Ticket #856""" + F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray) + C = np.array([[3, 7], [19, 1]]) + FC = np.array([['3', '7.000000'], + ['19', '1']]).view(np.chararray) + assert_array_equal(FC, F % C) + + A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray) + A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray) + assert_array_equal(A1, (A % 1)) + + A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray) + assert_array_equal(A2, (A % [[1, 2], [3, 4]])) + + def test_rmod(self): + assert_(("%s" % self.A) == str(self.A)) + assert_(("%r" % self.A) == repr(self.A)) + + for ob in [42, object()]: + try: + ob % self.A + except TypeError: + pass + else: + self.fail("chararray __rmod__ should fail with " \ + "non-string objects") + + +def test_empty_indexing(): + """Regression test for ticket 1948.""" + # Check that indexing a chararray with an empty list/array returns an + # empty chararray instead of a chararray with a single empty string in it. + s = np.chararray((4,)) + assert_(s[[]].size == 0) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_deprecations.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_deprecations.py new file mode 100644 index 0000000000000..ef56766f5f415 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_deprecations.py @@ -0,0 +1,512 @@ +""" +Tests related to deprecation warnings. Also a convenient place +to document how deprecations should eventually be turned into errors. 
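+The tests below share one pattern: run the call under
+warnings.catch_warnings(record=True), count the recorded
+DeprecationWarnings, and then re-run with the warning escalated to an
+error to check the expected exception type.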
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import operator
+import warnings
+from nose.plugins.skip import SkipTest
+
+import numpy as np
+from numpy.testing import (dec, run_module_suite, assert_raises,
+                           assert_warns, assert_array_equal, assert_)
+
+
+class _DeprecationTestCase(object):
+    # Just as a warning: warnings uses re.match, so the start of this message
+    # must match.
+    message = ''
+
+    def setUp(self):
+        self.warn_ctx = warnings.catch_warnings(record=True)
+        self.log = self.warn_ctx.__enter__()
+
+        # Do *not* ignore other DeprecationWarnings. Ignoring warnings
+        # can give very confusing results because of
+        # http://bugs.python.org/issue4180 and it is probably simplest to
+        # try to keep the tests cleanly giving only the right warning type.
+        # (While checking them set to "error" those are ignored anyway.)
+        # We still have them show up, because otherwise they would be raised.
+        warnings.filterwarnings("always", category=DeprecationWarning)
+        warnings.filterwarnings("always", message=self.message,
+                                category=DeprecationWarning)
+
+    def tearDown(self):
+        self.warn_ctx.__exit__()
+
+    def assert_deprecated(self, function, num=1, ignore_others=False,
+                          function_fails=False,
+                          exceptions=(DeprecationWarning,), args=(), kwargs={}):
+        """Test if DeprecationWarnings are given and raised.
+
+        This first checks if the function when called gives `num`
+        DeprecationWarnings, after that it tries to raise these
+        DeprecationWarnings and compares them with `exceptions`.
+        The exceptions can be different for cases where this code path
+        is simply not anticipated and the exception is replaced.
+
+        Parameters
+        ----------
+        function : callable
+            The function to test.
+        num : int
+            Number of DeprecationWarnings to expect. This should normally be 1.
+        ignore_others : bool
+            Whether warnings of the wrong type should be ignored (note that
+            the message is not checked).
+        function_fails : bool
+            If the function would normally fail, setting this will check for
+            warnings inside a try/except block.
+        exceptions : Exception or tuple of Exceptions
+            Exception to expect when turning the warnings into an error.
+            The default checks for DeprecationWarnings. If exceptions is
+            empty the function is expected to run successfully.
+        args : tuple
+            Arguments for `function`.
+        kwargs : dict
+            Keyword arguments for `function`.
+        """
+        # reset the log
+        self.log[:] = []
+
+        try:
+            function(*args, **kwargs)
+        except (Exception if function_fails else tuple()):
+            pass
+        # just in case, clear the registry
+        num_found = 0
+        for warning in self.log:
+            if warning.category is DeprecationWarning:
+                num_found += 1
+            elif not ignore_others:
+                raise AssertionError("expected DeprecationWarning but %s given"
+                                     % warning.category)
+        if num is not None and num_found != num:
+            raise AssertionError("%i warnings found but %i expected"
+                                 % (num_found, num))
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings("error", message=self.message,
+                                    category=DeprecationWarning)
+
+            try:
+                function(*args, **kwargs)
+                if exceptions != tuple():
+                    raise AssertionError("No error raised during function call")
+            except exceptions:
+                if exceptions == tuple():
+                    raise AssertionError("Error raised during function call")
+
+    def assert_not_deprecated(self, function, args=(), kwargs={}):
+        """Test that no DeprecationWarnings are given.
+
+        This is just a shorthand for:
+
+        self.assert_deprecated(function, num=0, ignore_others=True,
+                        exceptions=tuple(), args=args, kwargs=kwargs)
+        """
+        self.assert_deprecated(function, num=0, ignore_others=True,
+                               exceptions=tuple(), args=args, kwargs=kwargs)
+
+
+class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase):
+    """
+    These tests check that a ``DeprecationWarning`` is given when you try
+    to use non-integers as arguments for indexing and slicing, e.g.
+    ``a[0.0:5]`` and ``a[0.5]``, or in other functions like
+    ``array.reshape(1., -1)``.
+
+    After deprecation, changes need to be done inside conversion_utils.c
+    in PyArray_PyIntAsIntp and possibly PyArray_IntpConverter.
+    In iterators.c the function slice_GetIndices could be removed in favor
+    of its python equivalent and in mapping.c the function _tuple_of_integers
+    can be simplified (if ``np.array([1]).__index__()`` is also deprecated).
+
+    As for the deprecation time-frame: via Ralf Gommers,
+
+    "Hard to put that as a version number, since we don't know if the
+    version after 1.8 will be 6 months or 2 years after. I'd say 2
+    years is reasonable."
+
+    I interpret this to mean 2 years after the 1.8 release. Possibly
+    giving a PendingDeprecationWarning before that (which is visible
+    by default).
+
+    """
+    message = "using a non-integer number instead of an integer " \
+              "will result in an error in the future"
+
+    def test_indexing(self):
+        a = np.array([[[5]]])
+        def assert_deprecated(*args, **kwargs):
+            self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
+
+        assert_deprecated(lambda: a[0.0])
+        assert_deprecated(lambda: a[0, 0.0])
+        assert_deprecated(lambda: a[0.0, 0])
+        assert_deprecated(lambda: a[0.0, :])
+        assert_deprecated(lambda: a[:, 0.0])
+        assert_deprecated(lambda: a[:, 0.0, :])
+        assert_deprecated(lambda: a[0.0, :, :])
+        assert_deprecated(lambda: a[0, 0, 0.0])
+        assert_deprecated(lambda: a[0.0, 0, 0])
+        assert_deprecated(lambda: a[0, 0.0, 0])
+        assert_deprecated(lambda: a[-1.4])
+        assert_deprecated(lambda: a[0, -1.4])
+        assert_deprecated(lambda: a[-1.4, 0])
+        assert_deprecated(lambda: a[-1.4, :])
+        assert_deprecated(lambda: a[:, -1.4])
+        assert_deprecated(lambda: a[:, -1.4, :])
+        assert_deprecated(lambda: a[-1.4, :, :])
+        assert_deprecated(lambda: a[0, 0, -1.4])
+        assert_deprecated(lambda: a[-1.4, 0, 0])
+        assert_deprecated(lambda: a[0, -1.4, 0])
+
+        # Test that the slice parameter deprecation warning doesn't mask
+        # the scalar index warning.
+        assert_deprecated(lambda: a[0.0:, 0.0], num=2)
+        assert_deprecated(lambda: a[0.0:, 0.0, :], num=2)
+
+    def test_valid_indexing(self):
+        a = np.array([[[5]]])
+        assert_not_deprecated = self.assert_not_deprecated
+
+        assert_not_deprecated(lambda: a[np.array([0])])
+        assert_not_deprecated(lambda: a[[0, 0]])
+        assert_not_deprecated(lambda: a[:, [0, 0]])
+        assert_not_deprecated(lambda: a[:, 0, :])
+        assert_not_deprecated(lambda: a[:, :, :])
+
+    def test_slicing(self):
+        a = np.array([[5]])
+        def assert_deprecated(*args, **kwargs):
+            self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
+
+        # start as float.
+        assert_deprecated(lambda: a[0.0:])
+        assert_deprecated(lambda: a[0:, 0.0:2])
+        assert_deprecated(lambda: a[0.0::2, :0])
+        assert_deprecated(lambda: a[0.0:1:2, :])
+        assert_deprecated(lambda: a[:, 0.0:])
+        # stop as float.
+        assert_deprecated(lambda: a[:0.0])
+        assert_deprecated(lambda: a[:0, 1:2.0])
+        assert_deprecated(lambda: a[:0.0:2, :0])
+        assert_deprecated(lambda: a[:0.0, :])
+        assert_deprecated(lambda: a[:, 0:4.0:2])
+        # step as float.
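+        # (Only the type of the step matters, not its value: a[::1.0] must
+        # warn even though 1.0 is the unit step.)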
+ assert_deprecated(lambda: a[::1.0])
+ assert_deprecated(lambda: a[0:, :2:2.0])
+ assert_deprecated(lambda: a[1::4.0, :0])
+ assert_deprecated(lambda: a[::5.0,:])
+ assert_deprecated(lambda: a[:, 0:4:2.0])
+ # mixed.
+ assert_deprecated(lambda: a[1.0:2:2.0], num=2)
+ assert_deprecated(lambda: a[1.0::2.0], num=2)
+ assert_deprecated(lambda: a[0:, :2.0:2.0], num=2)
+ assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2)
+ assert_deprecated(lambda: a[1.0:5.0:5.0,:], num=3)
+ assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3)
+ # should still get the DeprecationWarning if step = 0.
+ assert_deprecated(lambda: a[::0.0], function_fails=True)
+
+
+ def test_valid_slicing(self):
+ a = np.array([[[5]]])
+ assert_not_deprecated = self.assert_not_deprecated
+
+ assert_not_deprecated(lambda: a[::])
+ assert_not_deprecated(lambda: a[0:])
+ assert_not_deprecated(lambda: a[:2])
+ assert_not_deprecated(lambda: a[0:2])
+ assert_not_deprecated(lambda: a[::2])
+ assert_not_deprecated(lambda: a[1::2])
+ assert_not_deprecated(lambda: a[:2:2])
+ assert_not_deprecated(lambda: a[1:2:2])
+
+
+ def test_non_integer_argument_deprecations(self):
+ a = np.array([[5]])
+
+ self.assert_deprecated(np.reshape, args=(a, (1., 1., -1)), num=2)
+ self.assert_deprecated(np.reshape, args=(a, (np.array(1.), -1)))
+ self.assert_deprecated(np.take, args=(a, [0], 1.))
+ self.assert_deprecated(np.take, args=(a, [0], np.float64(1.)))
+
+
+ def test_non_integer_sequence_multiplication(self):
+ # NumPy scalar sequence multiplication should not work with non-integers
+ def mult(a, b):
+ return a * b
+ self.assert_deprecated(mult, args=([1], np.float_(3)))
+ self.assert_not_deprecated(mult, args=([1], np.int_(3)))
+
+
+ def test_reduce_axis_float_index(self):
+ d = np.zeros((3,3,3))
+ self.assert_deprecated(np.min, args=(d, 0.5))
+ self.assert_deprecated(np.min, num=1, args=(d, (0.5, 1)))
+ self.assert_deprecated(np.min, num=1, args=(d, (1, 2.2)))
+ self.assert_deprecated(np.min, num=2, args=(d, (.2, 1.2)))
+
+
+class TestBooleanArgumentDeprecation(_DeprecationTestCase):
+ """This tests that using a boolean as integer argument/indexing is
+ deprecated.
+
+ This should be kept in sync with TestFloatNonIntegerArgumentDeprecation
+ and like it is handled in PyArray_PyIntAsIntp.
+ """
+ message = "using a boolean instead of an integer " \
+ "will result in an error in the future"
+
+ def test_bool_as_int_argument(self):
+ a = np.array([[[1]]])
+
+ self.assert_deprecated(np.reshape, args=(a, (True, -1)))
+ self.assert_deprecated(np.reshape, args=(a, (np.bool_(True), -1)))
+ # Note that operator.index(np.array(True)) does not work, a boolean
+ # array is thus also deprecated, but not with the same message:
+ assert_raises(TypeError, operator.index, np.array(True))
+ self.assert_deprecated(np.take, args=(a, [0], False))
+ self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3)
+ self.assert_deprecated(lambda: a[False, 0], exceptions=IndexError)
+ self.assert_deprecated(lambda: a[False, 0, 0], exceptions=IndexError)
+
+
+class TestArrayToIndexDeprecation(_DeprecationTestCase):
+ """This tests that creating an index from an array is deprecated
+ if the array is not 0d.
+
+ This can probably be deprecated somewhat faster than the integer
+ deprecations. The deprecation period started with NumPy 1.8.
+ Deprecating this requires changing array_index in number.c.
+ """
+ message = "converting an array with ndim \> 0 to an index will result " \
+ "in an error in the future"
+
+ def test_array_to_index_deprecation(self):
+ # This drops into the non-integer deprecation, which is ignored here,
+ # so no exception is expected. The raising is effectively tested above.
+ a = np.array([[[1]]])
+
+ self.assert_deprecated(operator.index, args=(np.array([1]),))
+ self.assert_deprecated(np.reshape, args=(a, (a, -1)), exceptions=())
+ self.assert_deprecated(np.take, args=(a, [0], a), exceptions=())
+ # Check slicing. Normal indexing checks arrays specifically.
+ self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3)
+
+
+class TestNonIntegerArrayLike(_DeprecationTestCase):
+ """Tests that array-likes, i.e. lists, give a deprecation warning
+ when they cannot be safely cast to an integer.
+ """
+ message = "non integer \(and non boolean\) array-likes will not be " \
+ "accepted as indices in the future"
+
+ def test_basic(self):
+ a = np.arange(10)
+ self.assert_deprecated(a.__getitem__, args=([0.5, 1.5],),
+ exceptions=IndexError)
+ self.assert_deprecated(a.__getitem__, args=((['1', '2'],),),
+ exceptions=IndexError)
+
+ self.assert_not_deprecated(a.__getitem__, ([],))
+
+
+ def test_boolean_futurewarning(self):
+ a = np.arange(10)
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always')
+ assert_warns(FutureWarning, a.__getitem__, [True])
+ # Unfortunately, the deprecation warning takes precedence:
+ #assert_warns(FutureWarning, a.__getitem__, True)
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings('error')
+ assert_raises(FutureWarning, a.__getitem__, [True])
+ #assert_raises(FutureWarning, a.__getitem__, True)
+
+
+class TestMultipleEllipsisDeprecation(_DeprecationTestCase):
+ message = "an index can only have a single Ellipsis \(`...`\); replace " \
+ "all but one with slices \(`:`\)."
+
+ def test_basic(self):
+ a = np.arange(10)
+ self.assert_deprecated(a.__getitem__, args=((Ellipsis, Ellipsis),))
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', '', DeprecationWarning)
+ # Just check that this works:
+ b = a[...,...]
+ assert_array_equal(a, b)
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis, ) * 3,))
+
+
+class TestBooleanSubtractDeprecations(_DeprecationTestCase):
+ """Test deprecation of boolean `-`. While + and * are well
+ defined, - is not and even a corrected form seems to have
+ no real uses.
+
+ The deprecation process was started in NumPy 1.9.
+ """
+ message = r"numpy boolean .* \(the .* `-` operator\) is deprecated, " \
+ "use the bitwise"
+
+ def test_operator_deprecation(self):
+ array = np.array([True])
+ generic = np.bool_(True)
+
+ # Minus operator/subtract ufunc:
+ self.assert_deprecated(operator.sub, args=(array, array))
+ self.assert_deprecated(operator.sub, args=(generic, generic))
+
+ # Unary minus/negative ufunc:
+ self.assert_deprecated(operator.neg, args=(array,))
+ self.assert_deprecated(operator.neg, args=(generic,))
+
+
+class TestRankDeprecation(_DeprecationTestCase):
+ """Test that np.rank is deprecated. The function should simply be
+ removed. The VisibleDeprecationWarning may become unnecessary.
+ """
+ def test(self):
+ a = np.arange(10)
+ assert_warns(np.VisibleDeprecationWarning, np.rank, a)
+
+
+class TestComparisonDeprecations(_DeprecationTestCase):
+ """This tests the deprecation of non-element-wise comparison logic.
+ This used to mean that when an error occurred during element-wise
+ comparison (i.e. broadcasting) NotImplemented was returned, but also
+ that in the comparison itself False was given instead of the error.
+
+ Also test FutureWarning for the None comparison.
+ """
+
+ message = "elementwise comparison failed; " \
+ "this will raise the error in the future."
+
+ def test_normal_types(self):
+ for op in (operator.eq, operator.ne):
+ # Broadcasting errors:
+ self.assert_deprecated(op, args=(np.zeros(3), []))
+ a = np.zeros(3, dtype='i,i')
+ # (warning is issued a couple of times here)
+ self.assert_deprecated(op, args=(a, a[:-1]), num=None)
+
+ # Element comparison error (numpy array can't be compared).
+ a = np.array([1, np.array([1,2,3])], dtype=object)
+ b = np.array([1, np.array([1,2,3])], dtype=object)
+ self.assert_deprecated(op, args=(a, b), num=None)
+
+
+ def test_string(self):
+ # For two string arrays, strings always raised the broadcasting error:
+ a = np.array(['a', 'b'])
+ b = np.array(['a', 'b', 'c'])
+ assert_raises(ValueError, lambda x, y: x == y, a, b)
+
+ # The empty list is not cast to string; this is only to document
+ # that fact (it likely should be changed). This means that the
+ # following works (and returns False) due to dtype mismatch:
+ a == []
+
+
+ def test_none_comparison(self):
+ # Test comparison of None, which should result in elementwise
+ # comparison in the future. [1, 2] == None should be [False, False].
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always', '', FutureWarning)
+ assert_warns(FutureWarning, operator.eq, np.arange(3), None)
+ assert_warns(FutureWarning, operator.ne, np.arange(3), None)
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings('error', '', FutureWarning)
+ assert_raises(FutureWarning, operator.eq, np.arange(3), None)
+ assert_raises(FutureWarning, operator.ne, np.arange(3), None)
+
+ def test_scalar_none_comparison(self):
+ # Scalars should still just return False and not give a warning.
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', FutureWarning)
+ assert_(not np.float32(1) == None)
+ assert_(not np.str_('test') == None)
+ # This is dubious (see below):
+ assert_(not np.datetime64('NaT') == None)
+
+ assert_(np.float32(1) != None)
+ assert_(np.str_('test') != None)
+ # This is dubious (see below):
+ assert_(np.datetime64('NaT') != None)
+ assert_(len(w) == 0)
+
+ # For documentation purposes, this is why the datetime is dubious.
+ # At the time of deprecation this was not a behaviour change, but
+ # it has to be considered when the deprecation is completed.
+ assert_(np.equal(np.datetime64('NaT'), None))
+
+
+class TestIdentityComparisonDeprecations(_DeprecationTestCase):
+ """This tests the deprecation of the identity check in the equal and
+ not_equal object ufuncs. The check was due to the usage of
+ PyObject_RichCompareBool.
+
+ This tests that, for example for `a = np.array([np.nan], dtype=object)`,
+ `a == a` warns that in the future False will be returned rather than
+ the result of `np.nan is np.nan`.
+
+ Should be kept in sync with TestComparisonDeprecations, with new tests
+ added when the deprecation is over. It then requires only removing
+ @identity@ (and blocks) from the ufunc loops.c.src of the OBJECT
+ comparisons.
+ """
+
+ message = "numpy .* will not check object identity in the future."
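+
+ # For illustration (a minimal sketch, not itself part of the test
+ # machinery): the identity shortcut under deprecation mirrors how
+ # PyObject_RichCompareBool short-circuits on ``x is x`` before ever
+ # calling ``__eq__``; plain Python containers show the same effect:
+ #
+ # >>> nan = float("nan")
+ # >>> nan == nan # IEEE 754 comparison
+ # False
+ # >>> [nan] == [nan] # identity shortcut in the list compare
+ # True
+ # >>> a = np.array([nan], dtype=object)
+ # >>> np.equal(a, a) # warns; after the deprecation this is elementwise False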
+
+ def test_identity_equality_mismatch(self):
+ a = np.array([np.nan], dtype=object)
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always', '', FutureWarning)
+ assert_warns(FutureWarning, np.equal, a, a)
+ assert_warns(FutureWarning, np.not_equal, a, a)
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings('error', '', FutureWarning)
+ assert_raises(FutureWarning, np.equal, a, a)
+ assert_raises(FutureWarning, np.not_equal, a, a)
+ # And the others do not warn:
+ with np.errstate(invalid='ignore'):
+ np.less(a, a)
+ np.greater(a, a)
+ np.less_equal(a, a)
+ np.greater_equal(a, a)
+
+
+ def test_comparison_error(self):
+ class FunkyType(object):
+ def __eq__(self, other):
+ raise TypeError("I won't compare")
+ def __ne__(self, other):
+ raise TypeError("I won't compare")
+
+ a = np.array([FunkyType()])
+ self.assert_deprecated(np.equal, args=(a, a))
+ self.assert_deprecated(np.not_equal, args=(a, a))
+
+
+ def test_bool_error(self):
+ # The comparison result cannot be interpreted as a bool
+ a = np.array([np.array([1, 2, 3]), None], dtype=object)
+ self.assert_deprecated(np.equal, args=(a, a))
+ self.assert_deprecated(np.not_equal, args=(a, a))
+
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_dtype.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_dtype.py
new file mode 100644
index 0000000000000..18660351cb6a8
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_dtype.py
@@ -0,0 +1,542 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import numpy as np
+from numpy.testing import *
+
+def assert_dtype_equal(a, b):
+ assert_equal(a, b)
+ assert_equal(hash(a), hash(b),
+ "two equivalent types do not hash to the same value!")
+
+def assert_dtype_not_equal(a, b):
+ assert_(a != b)
+ assert_(hash(a) != hash(b),
+ "two different types hash to the same value!")
+
+class TestBuiltin(TestCase):
+ def test_run(self):
+ """Only test hash runs at all."""
+ for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
+ np.unicode]:
+ dt = np.dtype(t)
+ hash(dt)
+
+ def test_dtype(self):
+ # Make sure equivalent byte order chars hash the same (e.g. < and = on
+ # little endian)
+ for t in [np.int, np.float]:
+ dt = np.dtype(t)
+ dt2 = dt.newbyteorder("<")
+ dt3 = dt.newbyteorder(">")
+ if dt == dt2:
+ self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt2)
+ else:
+ self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt3)
+
+ def test_equivalent_dtype_hashing(self):
+ # Make sure equivalent dtypes with different type num hash equal
+ uintp = np.dtype(np.uintp)
+ if uintp.itemsize == 4:
+ left = uintp
+ right = np.dtype(np.uint32)
+ else:
+ left = uintp
+ right = np.dtype(np.ulonglong)
+ self.assertTrue(left == right)
+ self.assertTrue(hash(left) == hash(right))
+
+ def test_invalid_types(self):
+ # Make sure invalid type strings raise a warning.
+ # For now, display a deprecation warning for invalid
+ # type sizes. In the future this should be changed
+ # to an exception.
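+ #
+ # For illustration, a sketch of what "invalid type sizes" means
+ # here: a dtype string pairs a kind character with an itemsize in
+ # bytes, so 'f4' and 'f8' exist while 'f5' would be a five-byte
+ # float that no platform provides:
+ #
+ # >>> np.dtype('f4').itemsize, np.dtype('f8').itemsize
+ # (4, 8)
+ # >>> np.dtype('f5') # currently warns DeprecationWarning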
+
+ assert_warns(DeprecationWarning, np.dtype, 'O3')
+ assert_warns(DeprecationWarning, np.dtype, 'O5')
+ assert_warns(DeprecationWarning, np.dtype, 'O7')
+ assert_warns(DeprecationWarning, np.dtype, 'b3')
+ assert_warns(DeprecationWarning, np.dtype, 'h4')
+ assert_warns(DeprecationWarning, np.dtype, 'I5')
+ assert_warns(DeprecationWarning, np.dtype, 'e3')
+ assert_warns(DeprecationWarning, np.dtype, 'f5')
+
+ if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
+ assert_warns(DeprecationWarning, np.dtype, 'g12')
+ elif np.dtype('g').itemsize == 12:
+ assert_warns(DeprecationWarning, np.dtype, 'g16')
+
+ if np.dtype('l').itemsize == 8:
+ assert_warns(DeprecationWarning, np.dtype, 'l4')
+ assert_warns(DeprecationWarning, np.dtype, 'L4')
+ else:
+ assert_warns(DeprecationWarning, np.dtype, 'l8')
+ assert_warns(DeprecationWarning, np.dtype, 'L8')
+
+ if np.dtype('q').itemsize == 8:
+ assert_warns(DeprecationWarning, np.dtype, 'q4')
+ assert_warns(DeprecationWarning, np.dtype, 'Q4')
+ else:
+ assert_warns(DeprecationWarning, np.dtype, 'q8')
+ assert_warns(DeprecationWarning, np.dtype, 'Q8')
+
+ def test_bad_param(self):
+ # Can't give a size that's too small
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i4', 'i1'],
+ 'offsets':[0, 4],
+ 'itemsize':4})
+ # If alignment is enabled, the alignment (4) must divide the itemsize
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i4', 'i1'],
+ 'offsets':[0, 4],
+ 'itemsize':9}, align=True)
+ # If alignment is enabled, the individual fields must be aligned
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i1', 'f4'],
+ 'offsets':[0, 2]}, align=True)
+
+class TestRecord(TestCase):
+ def test_equivalent_record(self):
+ """Test whether equivalent record dtypes hash the same."""
+ a = np.dtype([('yo', np.int)])
+ b = np.dtype([('yo', np.int)])
+ assert_dtype_equal(a, b)
+
+ def test_different_names(self):
+ # In theory, they may hash the same (collision)?
+ a = np.dtype([('yo', np.int)])
+ b = np.dtype([('ye', np.int)])
+ assert_dtype_not_equal(a, b)
+
+ def test_different_titles(self):
+ # In theory, they may hash the same (collision)?
+ a = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ b = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['RRed pixel', 'Blue pixel']})
+ assert_dtype_not_equal(a, b)
+
+ def test_not_lists(self):
+ """Test if an appropriate exception is raised when passing bad values to
+ the dtype constructor.
+ """ + self.assertRaises(TypeError, np.dtype, + dict(names=set(['A', 'B']), formats=['f8', 'i4'])) + self.assertRaises(TypeError, np.dtype, + dict(names=['A', 'B'], formats=set(['f8', 'i4']))) + + def test_aligned_size(self): + # Check that structured dtypes get padded to an aligned size + dt = np.dtype('i4, i1', align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype({'names':['f0', 'f1'], + 'formats':['i4', 'u1'], + 'offsets':[0, 4]}, align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True) + assert_equal(dt.itemsize, 8) + # Nesting should preserve that alignment + dt1 = np.dtype([('f0', 'i4'), + ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), + ('f2', 'i1')], align=True) + assert_equal(dt1.itemsize, 20) + dt2 = np.dtype({'names':['f0', 'f1', 'f2'], + 'formats':['i4', + [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], + 'i1'], + 'offsets':[0, 4, 16]}, align=True) + assert_equal(dt2.itemsize, 20) + dt3 = np.dtype({'f0': ('i4', 0), + 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), + 'f2': ('i1', 16)}, align=True) + assert_equal(dt3.itemsize, 20) + assert_equal(dt1, dt2) + assert_equal(dt2, dt3) + # Nesting should preserve packing + dt1 = np.dtype([('f0', 'i4'), + ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), + ('f2', 'i1')], align=False) + assert_equal(dt1.itemsize, 11) + dt2 = np.dtype({'names':['f0', 'f1', 'f2'], + 'formats':['i4', + [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], + 'i1'], + 'offsets':[0, 4, 10]}, align=False) + assert_equal(dt2.itemsize, 11) + dt3 = np.dtype({'f0': ('i4', 0), + 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), + 'f2': ('i1', 10)}, align=False) + assert_equal(dt3.itemsize, 11) + assert_equal(dt1, dt2) + assert_equal(dt2, dt3) + + def test_union_struct(self): + # Should be able to create union dtypes + dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(str(dt), + "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])]") + + # If the sticky aligned flag is set to True, it makes the + # str() function use a dict representation with an 'aligned' flag + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], + (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])], + align=True) + assert_equal(str(dt), + "{'names':['top','bottom'], " + "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,))," + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]], " + "'offsets':[0,76800], " + "'itemsize':80000, " + "'aligned':True}") + assert_equal(np.dtype(eval(str(dt))), dt) + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) + assert_equal(str(dt), + "[(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')]") + + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(repr(dt), + "dtype([('top', [('tiles', ('>f4', (64, 
64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])])") + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, + align=True) + assert_equal(repr(dt), + "dtype([(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')], align=True)") + + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['= 3) + def test_dtype_str_with_long_in_shape(self): + # Pull request #376 + dt = np.dtype('(1L,)i4') + + def test_base_dtype_with_object_type(self): + # Issue gh-2798 + a = np.array(['a'], dtype="O").astype(("O", [("name", "O")])) + + def test_empty_string_to_object(self): + # Pull request #4722 + np.array(["", ""]).astype(object) + +class TestDtypeAttributeDeletion(object): + + def test_dtype_non_writable_attributes_deletion(self): + dt = np.dtype(np.double) + attr = ["subdtype", "descr", "str", "name", "base", "shape", + "isbuiltin", "isnative", "isalignedstruct", "fields", + "metadata", "hasobject"] + + if sys.version[:3] == '2.4': + error = TypeError + else: + error = AttributeError + + for s in attr: + assert_raises(error, delattr, dt, s) + + + def test_dtype_writable_attributes_deletion(self): + dt = np.dtype(np.double) + attr = ["names"] + for s in attr: + assert_raises(AttributeError, delattr, dt, s) + +class TestDtypeAttributes(TestCase): + + def test_name_builtin(self): + for t in np.typeDict.values(): + name = t.__name__ + if name.endswith('_'): + name = name[:-1] + assert_equal(np.dtype(t).name, name) + + def test_name_dtype_subclass(self): + # Ticket #4357 + class user_def_subcls(np.void): pass + assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls') + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_einsum.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_einsum.py new file mode 100644 index 0000000000000..226bde0a3fadc --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_einsum.py @@ -0,0 +1,573 @@ +from __future__ import division, absolute_import, print_function + +import sys +import warnings +from decimal import Decimal + +import numpy as np +from numpy.testing import * + +class TestEinSum(TestCase): + def test_einsum_errors(self): + # Need enough arguments + assert_raises(ValueError, np.einsum) + assert_raises(ValueError, np.einsum, "") + + # subscripts must be a string + assert_raises(TypeError, np.einsum, 0, 0) + + # out parameter must be an array + assert_raises(TypeError, np.einsum, "", 0, out='test') + + # order parameter must be a valid order + assert_raises(TypeError, np.einsum, "", 0, order='W') + + # casting parameter must be a valid casting + assert_raises(ValueError, np.einsum, "", 0, casting='blah') + + # dtype parameter must be a valid dtype + assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type') + + # other keyword arguments are rejected + assert_raises(TypeError, np.einsum, "", 0, bad_arg=0) + + # issue 4528 revealed a segfault with this call + assert_raises(TypeError, np.einsum, *(None,)*63) + + # number of operands must match count in subscripts string + assert_raises(ValueError, np.einsum, "", 0, 0) + assert_raises(ValueError, np.einsum, ",", 0, [0], [0]) + assert_raises(ValueError, np.einsum, ",", [0]) + + # can't have more subscripts than dimensions in the operand + assert_raises(ValueError, np.einsum, 
"i", 0) + assert_raises(ValueError, np.einsum, "ij", [0, 0]) + assert_raises(ValueError, np.einsum, "...i", 0) + assert_raises(ValueError, np.einsum, "i...j", [0, 0]) + assert_raises(ValueError, np.einsum, "i...", 0) + assert_raises(ValueError, np.einsum, "ij...", [0, 0]) + + # invalid ellipsis + assert_raises(ValueError, np.einsum, "i..", [0, 0]) + assert_raises(ValueError, np.einsum, ".i...", [0, 0]) + assert_raises(ValueError, np.einsum, "j->..j", [0, 0]) + assert_raises(ValueError, np.einsum, "j->.j...", [0, 0]) + + # invalid subscript character + assert_raises(ValueError, np.einsum, "i%...", [0, 0]) + assert_raises(ValueError, np.einsum, "...j$", [0, 0]) + assert_raises(ValueError, np.einsum, "i->&", [0, 0]) + + # output subscripts must appear in input + assert_raises(ValueError, np.einsum, "i->ij", [0, 0]) + + # output subscripts may only be specified once + assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]]) + + # dimensions much match when being collapsed + assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2, 3)) + assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2, 3)) + + # broadcasting to new dimensions must be enabled explicitly + assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3)) + assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], + out=np.arange(4).reshape(2, 2)) + + def test_einsum_views(self): + # pass-through + a = np.arange(6) + a.shape = (2, 3) + + b = np.einsum("...", a) + assert_(b.base is a) + + b = np.einsum(a, [Ellipsis]) + assert_(b.base is a) + + b = np.einsum("ij", a) + assert_(b.base is a) + assert_equal(b, a) + + b = np.einsum(a, [0, 1]) + assert_(b.base is a) + assert_equal(b, a) + + # transpose + a = np.arange(6) + a.shape = (2, 3) + + b = np.einsum("ji", a) + assert_(b.base is a) + assert_equal(b, a.T) + + b = np.einsum(a, [1, 0]) + assert_(b.base is a) + assert_equal(b, a.T) + + # diagonal + a = np.arange(9) + a.shape = (3, 3) + + b = np.einsum("ii->i", a) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0], [0]) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + # diagonal with various ways of broadcasting an additional dimension + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("...ii->...i", a) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0]) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum("ii...->...i", a) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0]) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum("...ii->i...", a) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis]) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("jii->ij", a) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [1, 0, 0], [0, 1]) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("ii...->i...", a) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis]) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for 
i in range(3)]) + + b = np.einsum("i...i->i...", a) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis]) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->...i", a) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0]) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + # triple diagonal + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("iii->i", a) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, 0], [0]) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + # swap axes + a = np.arange(24) + a.shape = (2, 3, 4) + + b = np.einsum("ijk->jik", a) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + b = np.einsum(a, [0, 1, 2], [1, 0, 2]) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + def check_einsum_sums(self, dtype): + # Check various sums. Does many sizes to exercise unrolled loops. + + # sum(a, axis=-1) + for n in range(1, 17): + a = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype)) + assert_equal(np.einsum(a, [0], []), + np.sum(a, axis=-1).astype(dtype)) + + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("...i->...", a), + np.sum(a, axis=-1).astype(dtype)) + assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis]), + np.sum(a, axis=-1).astype(dtype)) + + # sum(a, axis=0) + for n in range(1, 17): + a = np.arange(2*n, dtype=dtype).reshape(2, n) + assert_equal(np.einsum("i...->...", a), + np.sum(a, axis=0).astype(dtype)) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]), + np.sum(a, axis=0).astype(dtype)) + + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("i...->...", a), + np.sum(a, axis=0).astype(dtype)) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]), + np.sum(a, axis=0).astype(dtype)) + + # trace(a) + for n in range(1, 17): + a = np.arange(n*n, dtype=dtype).reshape(n, n) + assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype)) + assert_equal(np.einsum(a, [0, 0]), np.trace(a).astype(dtype)) + + # multiply(a, b) + assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case + for n in range(1, 17): + a = np.arange(3*n, dtype=dtype).reshape(3, n) + b = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b)) + assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]), + np.multiply(a, b)) + + # inner(a,b) + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b)) + assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0]), + np.inner(a, b)) + + for n in range(1, 11): + a = np.arange(n*3*2, dtype=dtype).reshape(n, 3, 2) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T) + assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis]), + np.inner(a.T, b.T).T) + + # outer(a,b) + for n in range(1, 17): + a = np.arange(3, dtype=dtype)+1 + b = np.arange(n, dtype=dtype)+1 + assert_equal(np.einsum("i,j", a, b), np.outer(a, b)) + assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b)) + + # Suppress the 
complex warnings for the 'as f8' tests + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.ComplexWarning) + + # matvec(a,b) / a.dot(b) where a is matrix, b is vector + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ij, j", a, b), np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1]), np.dot(a, b)) + + c = np.arange(4, dtype=dtype) + np.einsum("ij,j", a, b, out=c, + dtype='f8', casting='unsafe') + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1], out=c, + dtype='f8', casting='unsafe') + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T)) + assert_equal(np.einsum(a.T, [1, 0], b.T, [1]), np.dot(b.T, a.T)) + + c = np.arange(4, dtype=dtype) + np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe') + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a.T, [1, 0], b.T, [1], out=c, + dtype='f8', casting='unsafe') + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + + # matmat(a,b) / a.dot(b) where a is matrix, b is matrix + for n in range(1, 17): + if n < 8 or dtype != 'f2': + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2]), np.dot(a, b)) + + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + c = np.arange(24, dtype=dtype).reshape(4, 6) + np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe') + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], out=c, + dtype='f8', casting='unsafe') + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + # matrix triple product (note this is not currently an efficient + # way to multiply 3 matrices) + a = np.arange(12, dtype=dtype).reshape(3, 4) + b = np.arange(20, dtype=dtype).reshape(4, 5) + c = np.arange(30, dtype=dtype).reshape(5, 6) + if dtype != 'f2': + assert_equal(np.einsum("ij,jk,kl", a, b, c), + a.dot(b).dot(c)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3]), + a.dot(b).dot(c)) + + d = np.arange(18, dtype=dtype).reshape(3, 6) + np.einsum("ij,jk,kl", a, b, c, out=d, + dtype='f8', casting='unsafe') + assert_equal(d, a.astype('f8').dot(b.astype('f8') + ).dot(c.astype('f8')).astype(dtype)) + d[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, + dtype='f8', casting='unsafe') + assert_equal(d, a.astype('f8').dot(b.astype('f8') + ).dot(c.astype('f8')).astype(dtype)) + + # tensordot(a, b) + if np.dtype(dtype) != np.dtype('f2'): + a = np.arange(60, dtype=dtype).reshape(3, 4, 5) + b = np.arange(24, dtype=dtype).reshape(4, 3, 2) + assert_equal(np.einsum("ijk, jil -> kl", a, b), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + + c = np.arange(10, dtype=dtype).reshape(5, 2) + np.einsum("ijk,jil->kl", a, b, out=c, + dtype='f8', casting='unsafe') + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, + dtype='f8', casting='unsafe') + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + + # logical_and(logical_and(a!=0, b!=0), c!=0) + a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) + b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) + c = np.array([True, True, False, True, True, False, True, True]) + assert_equal(np.einsum("i,i,i->i", a, b, c, + dtype='?', casting='unsafe'), + np.logical_and(np.logical_and(a!=0, b!=0), c!=0)) + assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], + dtype='?', casting='unsafe'), + np.logical_and(np.logical_and(a!=0, b!=0), c!=0)) + + a = np.arange(9, dtype=dtype) + assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) + + # Various stride0, contiguous, and SSE aligned variants + for n in range(1, 25): + a = np.arange(n, dtype=dtype) + if np.dtype(dtype).itemsize > 1: + assert_equal(np.einsum("...,...", a, a), np.multiply(a, a)) + assert_equal(np.einsum("i,i", a, a), np.dot(a, a)) + assert_equal(np.einsum("i,->i", a, 2), 2*a) + assert_equal(np.einsum(",i->i", 2, a), 2*a) + assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a)) + assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a)) + + assert_equal(np.einsum("...,...", a[1:], a[:-1]), + np.multiply(a[1:], a[:-1])) + assert_equal(np.einsum("i,i", a[1:], a[:-1]), + np.dot(a[1:], a[:-1])) + assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:]) + assert_equal(np.einsum("i,->", a[1:], 2), 2*np.sum(a[1:])) + assert_equal(np.einsum(",i->", 2, a[1:]), 2*np.sum(a[1:])) + + # An object array, summed as the data type + a = np.arange(9, dtype=object) + + b = np.einsum("i->", a, dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + assert_equal(b.dtype, np.dtype(dtype)) + + b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + assert_equal(b.dtype, np.dtype(dtype)) + + # A case which was failing (ticket #1885) + p = np.arange(2) + 1 + q = np.arange(4).reshape(2, 2) + 3 + r = np.arange(4).reshape(2, 2) + 7 + assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) + + def test_einsum_sums_int8(self): + self.check_einsum_sums('i1'); + + def test_einsum_sums_uint8(self): + self.check_einsum_sums('u1'); + + def test_einsum_sums_int16(self): + self.check_einsum_sums('i2'); + + def test_einsum_sums_uint16(self): + self.check_einsum_sums('u2'); + + def test_einsum_sums_int32(self): + self.check_einsum_sums('i4'); + + def test_einsum_sums_uint32(self): + self.check_einsum_sums('u4'); + + def test_einsum_sums_int64(self): + self.check_einsum_sums('i8'); + + def test_einsum_sums_uint64(self): + self.check_einsum_sums('u8'); + + def test_einsum_sums_float16(self): + self.check_einsum_sums('f2'); + + def test_einsum_sums_float32(self): + self.check_einsum_sums('f4'); + + def test_einsum_sums_float64(self): + self.check_einsum_sums('f8'); + + def test_einsum_sums_longdouble(self): + self.check_einsum_sums(np.longdouble); + + def test_einsum_sums_cfloat64(self): + self.check_einsum_sums('c8'); + + def test_einsum_sums_cfloat128(self): + self.check_einsum_sums('c16'); + + def test_einsum_sums_clongdouble(self): + self.check_einsum_sums(np.clongdouble); + + def test_einsum_misc(self): + # This call used to crash because of a bug in + # PyArray_AssignZero + a = np.ones((1, 
2)) + b = np.ones((2, 2, 1)) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + + # The iterator had an issue with buffering this reduction + a = np.ones((5, 12, 4, 2, 3), np.int64) + b = np.ones((5, 12, 11), np.int64) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), + np.einsum('ijklm,ijn->', a, b)) + + # Issue #2027, was a problem in the contiguous 3-argument + # inner loop implementation + a = np.arange(1, 3) + b = np.arange(1, 5).reshape(2, 2) + c = np.arange(1, 9).reshape(4, 2) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + + def test_einsum_broadcast(self): + # Issue #2455 change in handling ellipsis + # remove the 'middle broadcast' error + # only use the 'RIGHT' iteration in prepare_op_axes + # adds auto broadcast on left where it belongs + # broadcast on right has to be explicit + + A = np.arange(2*3*4).reshape(2,3,4) + B = np.arange(3) + ref = np.einsum('ijk,j->ijk',A, B) + assert_equal(np.einsum('ij...,j...->ij...',A, B), ref) + assert_equal(np.einsum('ij...,...j->ij...',A, B), ref) + assert_equal(np.einsum('ij...,j->ij...',A, B), ref) # used to raise error + + A = np.arange(12).reshape((4,3)) + B = np.arange(6).reshape((3,2)) + ref = np.einsum('ik,kj->ij', A, B) + assert_equal(np.einsum('ik...,k...->i...', A, B), ref) + assert_equal(np.einsum('ik...,...kj->i...j', A, B), ref) + assert_equal(np.einsum('...k,kj', A, B), ref) # used to raise error + assert_equal(np.einsum('ik,k...->i...', A, B), ref) # used to raise error + + dims=[2,3,4,5]; + a = np.arange(np.prod(dims)).reshape(dims) + v = np.arange(dims[2]) + ref = np.einsum('ijkl,k->ijl', a, v) + assert_equal(np.einsum('ijkl,k', a, v), ref) + assert_equal(np.einsum('...kl,k', a, v), ref) # used to raise error + assert_equal(np.einsum('...kl,k...', a, v), ref) + # no real diff from 1st + + J,K,M=160,160,120; + A=np.arange(J*K*M).reshape(1,1,1,J,K,M) + B=np.arange(J*K*M*3).reshape(J,K,M,3) + ref = np.einsum('...lmn,...lmno->...o', A, B) + assert_equal(np.einsum('...lmn,lmno->...o', A, B), ref) # used to raise error + + def test_einsum_fixedstridebug(self): + # Issue #4485 obscure einsum bug + # This case revealed a bug in nditer where it reported a stride + # as 'fixed' (0) when it was in fact not fixed during processing + # (0 or 4). The reason for the bug was that the check for a fixed + # stride was using the information from the 2D inner loop reuse + # to restrict the iteration dimensions it had to validate to be + # the same, but that 2D inner loop reuse logic is only triggered + # during the buffer copying step, and hence it was invalid to + # rely on those values. The fix is to check all the dimensions + # of the stride in question, which in the test case reveals that + # the stride is not fixed. + # + # NOTE: This test is triggered by the fact that the default buffersize, + # used by einsum, is 8192, and 3*2731 = 8193, is larger than that + # and results in a mismatch between the buffering and the + # striding for operand A. + A = np.arange(2*3).reshape(2,3).astype(np.float32) + B = np.arange(2*3*2731).reshape(2,3,2731).astype(np.int16) + es = np.einsum('cl,cpx->lpx', A, B) + tp = np.tensordot(A, B, axes=(0, 0)) + assert_equal(es, tp) + # The following is the original test case from the bug report, + # made repeatable by changing random arrays to aranges. 
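+ #
+ # A quick arithmetic check of the trigger described above: one
+ # buffered chunk of operand A's stride pattern spans 3*2731
+ # elements, one more than the 8192-element default buffer:
+ #
+ # >>> 3 * 2731
+ # 8193
+ # >>> 8193 > 8192
+ # True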
+ A = np.arange(3*3).reshape(3,3).astype(np.float64) + B = np.arange(3*3*64*64).reshape(3,3,64,64).astype(np.float32) + es = np.einsum ('cl,cpxy->lpxy', A,B) + tp = np.tensordot(A,B, axes=(0,0)) + assert_equal(es, tp) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_errstate.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_errstate.py new file mode 100644 index 0000000000000..7eb0aba2ef767 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_errstate.py @@ -0,0 +1,51 @@ +from __future__ import division, absolute_import, print_function + +import platform + +import numpy as np +from numpy.testing import TestCase, assert_, run_module_suite, dec + + +class TestErrstate(TestCase): + @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") + def test_invalid(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(invalid='ignore'): + np.sqrt(a) + # While this should fail! + try: + np.sqrt(a) + except FloatingPointError: + pass + else: + self.fail("Did not raise an invalid error") + + def test_divide(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(divide='ignore'): + a // 0 + # While this should fail! + try: + a // 0 + except FloatingPointError: + pass + else: + self.fail("Did not raise divide by zero error") + + def test_errcall(self): + def foo(*args): + print(args) + olderrcall = np.geterrcall() + with np.errstate(call=foo): + assert_(np.geterrcall() is foo, 'call is not foo') + with np.errstate(call=None): + assert_(np.geterrcall() is None, 'call is not None') + assert_(np.geterrcall() is olderrcall, 'call is not olderrcall') + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_function_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_function_base.py new file mode 100644 index 0000000000000..f6ffd5a1048af --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_function_base.py @@ -0,0 +1,111 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy import logspace, linspace, dtype, array + +class TestLogspace(TestCase): + + def test_basic(self): + y = logspace(0, 6) + assert_(len(y) == 50) + y = logspace(0, 6, num=100) + assert_(y[-1] == 10 ** 6) + y = logspace(0, 6, endpoint=0) + assert_(y[-1] < 10 ** 6) + y = logspace(0, 6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + def test_dtype(self): + y = logspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = logspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = logspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + +class TestLinspace(TestCase): + + def test_basic(self): + y = linspace(0, 10) + assert_(len(y) == 50) + y = linspace(2, 10, num=100) + assert_(y[-1] == 10) + y = linspace(2, 10, endpoint=0) + assert_(y[-1] < 10) + + def test_corner(self): + y = list(linspace(0, 1, 1)) + assert_(y == [0.0], y) + y = list(linspace(0, 1, 2.5)) + assert_(y == [0.0, 1.0]) + + def test_type(self): + t1 = linspace(0, 1, 0).dtype + t2 = linspace(0, 1, 1).dtype + t3 = linspace(0, 1, 2).dtype + assert_equal(t1, t2) + assert_equal(t2, t3) + + def test_dtype(self): + y = linspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = linspace(0, 6, 
dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = linspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_array_scalar(self): + lim1 = array([-120, 100], dtype="int8") + lim2 = array([120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = linspace(lim1[0], lim1[1], 5) + t2 = linspace(lim2[0], lim2[1], 5) + t3 = linspace(lim3[0], lim3[1], 5) + t4 = linspace(-120.0, 100.0, 5) + t5 = linspace(120.0, -100.0, 5) + t6 = linspace(1200.0, 1000.0, 5) + assert_equal(t1, t4) + assert_equal(t2, t5) + assert_equal(t3, t6) + + def test_complex(self): + lim1 = linspace(1 + 2j, 3 + 4j, 5) + t1 = array([ 1.0+2.j , 1.5+2.5j, 2.0+3.j , 2.5+3.5j, 3.0+4.j]) + lim2 = linspace(1j, 10, 5) + t2 = array([ 0.0+1.j , 2.5+0.75j, 5.0+0.5j , 7.5+0.25j, 10.0+0.j]) + assert_equal(lim1, t1) + assert_equal(lim2, t2) + + def test_physical_quantities(self): + class PhysicalQuantity(float): + def __new__(cls, value): + return float.__new__(cls, value) + + def __add__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) + float(self)) + __radd__ = __add__ + + def __sub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(self) - float(x)) + + def __rsub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) - float(self)) + + def __mul__(self, x): + return PhysicalQuantity(float(x) * float(self)) + __rmul__ = __mul__ + + def __div__(self, x): + return PhysicalQuantity(float(self) / float(x)) + + def __rdiv__(self, x): + return PhysicalQuantity(float(x) / float(self)) + + + a = PhysicalQuantity(0.0) + b = PhysicalQuantity(1.0) + assert_equal(linspace(a, b), linspace(0.0, 1.0)) \ No newline at end of file diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_getlimits.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_getlimits.py new file mode 100644 index 0000000000000..6ccdbd5ded129 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_getlimits.py @@ -0,0 +1,86 @@ +""" Test functions for limits module. 
+ +""" +from __future__ import division, absolute_import, print_function + +from numpy.testing import * + +from numpy.core import finfo, iinfo +from numpy import half, single, double, longdouble +import numpy as np + +################################################## + +class TestPythonFloat(TestCase): + def test_singleton(self): + ftype = finfo(float) + ftype2 = finfo(float) + assert_equal(id(ftype), id(ftype2)) + +class TestHalf(TestCase): + def test_singleton(self): + ftype = finfo(half) + ftype2 = finfo(half) + assert_equal(id(ftype), id(ftype2)) + +class TestSingle(TestCase): + def test_singleton(self): + ftype = finfo(single) + ftype2 = finfo(single) + assert_equal(id(ftype), id(ftype2)) + +class TestDouble(TestCase): + def test_singleton(self): + ftype = finfo(double) + ftype2 = finfo(double) + assert_equal(id(ftype), id(ftype2)) + +class TestLongdouble(TestCase): + def test_singleton(self,level=2): + ftype = finfo(longdouble) + ftype2 = finfo(longdouble) + assert_equal(id(ftype), id(ftype2)) + +class TestIinfo(TestCase): + def test_basic(self): + dts = list(zip(['i1', 'i2', 'i4', 'i8', + 'u1', 'u2', 'u4', 'u8'], + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64])) + for dt1, dt2 in dts: + assert_equal(iinfo(dt1).min, iinfo(dt2).min) + assert_equal(iinfo(dt1).max, iinfo(dt2).max) + self.assertRaises(ValueError, iinfo, 'f4') + + def test_unsigned_max(self): + types = np.sctypes['uint'] + for T in types: + assert_equal(iinfo(T).max, T(-1)) + +class TestRepr(TestCase): + def test_iinfo_repr(self): + expected = "iinfo(min=-32768, max=32767, dtype=int16)" + assert_equal(repr(np.iinfo(np.int16)), expected) + + def test_finfo_repr(self): + expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \ + " max=3.4028235e+38, dtype=float32)" + # Python 2.5 float formatting on Windows adds an extra 0 to the + # exponent. So test for both. Once 2.5 compatibility is dropped, this + # can simply use `assert_equal(repr(np.finfo(np.float32)), expected)`. 
+ expected_win25 = "finfo(resolution=1e-006, min=-3.4028235e+038," + \
+ " max=3.4028235e+038, dtype=float32)"
+
+ actual = repr(np.finfo(np.float32))
+ if not actual == expected:
+ if not actual == expected_win25:
+ msg = build_err_msg([actual, expected], verbose=True)
+ raise AssertionError(msg)
+
+
+def test_instances():
+ iinfo(10)
+ finfo(3.0)
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_half.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_half.py
new file mode 100644
index 0000000000000..928db48b70834
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_half.py
@@ -0,0 +1,439 @@
+from __future__ import division, absolute_import, print_function
+
+import platform
+
+import numpy as np
+from numpy import uint16, float16, float32, float64
+from numpy.testing import TestCase, run_module_suite, assert_, assert_equal, \
+ dec
+
+
+def assert_raises_fpe(strmatch, callable, *args, **kwargs):
+ try:
+ callable(*args, **kwargs)
+ except FloatingPointError as exc:
+ assert_(str(exc).find(strmatch) >= 0,
+ "Did not raise floating point %s error" % strmatch)
+ else:
+ assert_(False,
+ "Did not raise floating point %s error" % strmatch)
+
+class TestHalf(TestCase):
+ def setUp(self):
+ # An array of all possible float16 values
+ self.all_f16 = np.arange(0x10000, dtype=uint16)
+ self.all_f16.dtype = float16
+ self.all_f32 = np.array(self.all_f16, dtype=float32)
+ self.all_f64 = np.array(self.all_f16, dtype=float64)
+
+ # An array of all non-NaN float16 values, in sorted order
+ self.nonan_f16 = np.concatenate(
+ (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
+ np.arange(0x0000, 0x7c01, 1, dtype=uint16))
+ )
+ self.nonan_f16.dtype = float16
+ self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
+ self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
+
+ # An array of all finite float16 values, in sorted order
+ self.finite_f16 = self.nonan_f16[1:-1]
+ self.finite_f32 = self.nonan_f32[1:-1]
+ self.finite_f64 = self.nonan_f64[1:-1]
+
+ def test_half_conversions(self):
+ """Checks that all 16-bit values survive conversion
+ to/from 32-bit and 64-bit float"""
+ # Because the underlying routines preserve the NaN bits, every
+ # value is preserved when converting to/from other floats.
+
+ # Convert from float32 back to float16
+ b = np.array(self.all_f32, dtype=float16)
+ assert_equal(self.all_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Convert from float64 back to float16
+ b = np.array(self.all_f64, dtype=float16)
+ assert_equal(self.all_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Convert float16 to longdouble and back
+ # This doesn't necessarily preserve the extra NaN bits,
+ # so exclude NaNs.
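+ # For illustration: a float16 NaN is any bit pattern with all
+ # exponent bits set and a nonzero mantissa, so several patterns
+ # encode a NaN and a roundtrip may legitimately return another one:
+ #
+ # >>> bits = np.array([0x7e00, 0x7c01], dtype=uint16)
+ # >>> np.isnan(bits.view(float16)).all()
+ # True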
+ a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
+ b = np.array(a_ld, dtype=float16)
+ assert_equal(self.nonan_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Check the range for which all integers can be represented
+ i_int = np.arange(-2048, 2049)
+ i_f16 = np.array(i_int, dtype=float16)
+ j = np.array(i_f16, dtype=np.int)
+ assert_equal(i_int, j)
+
+ def test_nans_infs(self):
+ with np.errstate(all='ignore'):
+ # Check some of the ufuncs
+ assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
+ assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
+ assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
+ assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
+ assert_equal(np.spacing(float16(65504)), np.inf)
+
+ # Check comparisons of all values with NaN
+ nan = float16(np.nan)
+
+ assert_(not (self.all_f16 == nan).any())
+ assert_(not (nan == self.all_f16).any())
+
+ assert_((self.all_f16 != nan).all())
+ assert_((nan != self.all_f16).all())
+
+ assert_(not (self.all_f16 < nan).any())
+ assert_(not (nan < self.all_f16).any())
+
+ assert_(not (self.all_f16 <= nan).any())
+ assert_(not (nan <= self.all_f16).any())
+
+ assert_(not (self.all_f16 > nan).any())
+ assert_(not (nan > self.all_f16).any())
+
+ assert_(not (self.all_f16 >= nan).any())
+ assert_(not (nan >= self.all_f16).any())
+
+ def test_half_values(self):
+ """Confirms a small number of known half values"""
+ a = np.array([1.0, -1.0,
+ 2.0, -2.0,
+ 0.0999755859375, 0.333251953125, # 1/10, 1/3
+ 65504, -65504, # Maximum magnitude
+ 2.0**(-14), -2.0**(-14), # Minimum normal
+ 2.0**(-24), -2.0**(-24), # Minimum subnormal
+ 0, -1/1e1000, # Signed zeros
+ np.inf, -np.inf])
+ b = np.array([0x3c00, 0xbc00,
+ 0x4000, 0xc000,
+ 0x2e66, 0x3555,
+ 0x7bff, 0xfbff,
+ 0x0400, 0x8400,
+ 0x0001, 0x8001,
+ 0x0000, 0x8000,
+ 0x7c00, 0xfc00], dtype=uint16)
+ b.dtype = float16
+ assert_equal(a, b)
+
+ def test_half_rounding(self):
+ """Checks that rounding when converting to half is correct"""
+ a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
+ 2.0**-25, # Underflows to zero (nearest even mode)
+ 2.0**-26, # Underflows to zero
+ 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
+ 1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
+ 1.0+2.0**-12, # rounds to 1.0
+ 65519, # rounds to 65504
+ 65520], # rounds to inf
+ dtype=float64)
+ rounded = [2.0**-24,
+ 0.0,
+ 0.0,
+ 1.0+2.0**(-10),
+ 1.0,
+ 1.0,
+ 65504,
+ np.inf]
+
+ # Check float64->float16 rounding
+ b = np.array(a, dtype=float16)
+ assert_equal(b, rounded)
+
+ # Check float32->float16 rounding
+ a = np.array(a, dtype=float32)
+ b = np.array(a, dtype=float16)
+ assert_equal(b, rounded)
+
+ def test_half_correctness(self):
+ """Take every finite float16, and check the casting functions with
+ a manual conversion."""
+
+ # Create an array of all finite float16s
+ a_f16 = self.finite_f16
+ a_bits = a_f16.view(dtype=uint16)
+
+ # Convert to 64-bit float manually
+ a_sgn = (-1.0)**((a_bits&0x8000) >> 15)
+ a_exp = np.array((a_bits&0x7c00) >> 10, dtype=np.int32) - 15
+ a_man = (a_bits&0x03ff) * 2.0**(-10)
+ # Implicit bit of normalized floats
+ a_man[a_exp!=-15] += 1
+ # Denormalized exponent is -14
+ a_exp[a_exp==-15] = -14
+
+ a_manual = a_sgn * a_man * 2.0**a_exp
+
+ a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
+ if len(a32_fail) != 0:
+ bad_index = a32_fail[0]
+ assert_equal(self.finite_f32, a_manual,
+ "First non-equal is half value %x -> %g != %g" %
+ (a_bits[bad_index],
+ self.finite_f32[bad_index],
+ a_manual[bad_index]))
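+
+ # A worked instance of the decoding above (sketch, matching the
+ # 1/3 entry in test_half_values): 0x3555 has sign bit 0, stored
+ # exponent 0b01101 = 13 (unbiased 13 - 15 = -2) and mantissa 0x155,
+ # so it decodes as
+ #
+ # >>> (-1.0)**0 * (1 + 0x155 * 2.0**-10) * 2.0**-2
+ # 0.333251953125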
+
+ a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
+ if len(a64_fail) != 0:
+ bad_index = a64_fail[0]
+ assert_equal(self.finite_f64, a_manual,
+ "First non-equal is half value %x -> %g != %g" %
+ (a_bits[bad_index],
+ self.finite_f64[bad_index],
+ a_manual[bad_index]))
+
+ def test_half_ordering(self):
+ """Make sure comparisons are working right"""
+
+ # All non-NaN float16 values in reverse order
+ a = self.nonan_f16[::-1].copy()
+
+ # 32-bit float copy
+ b = np.array(a, dtype=float32)
+
+ # Should sort the same
+ a.sort()
+ b.sort()
+ assert_equal(a, b)
+
+ # Comparisons should work
+ assert_((a[:-1] <= a[1:]).all())
+ assert_(not (a[:-1] > a[1:]).any())
+ assert_((a[1:] >= a[:-1]).all())
+ assert_(not (a[1:] < a[:-1]).any())
+ # All != except for +/-0
+ assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
+ assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
+
+ def test_half_funcs(self):
+ """Test the various ArrFuncs"""
+
+ # fill
+ assert_equal(np.arange(10, dtype=float16),
+ np.arange(10, dtype=float32))
+
+ # fillwithscalar
+ a = np.zeros((5,), dtype=float16)
+ a.fill(1)
+ assert_equal(a, np.ones((5,), dtype=float16))
+
+ # nonzero and copyswap
+ a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
+ assert_equal(a.nonzero()[0],
+ [2, 5, 6])
+ a = a.byteswap().newbyteorder()
+ assert_equal(a.nonzero()[0],
+ [2, 5, 6])
+
+ # dot
+ a = np.arange(0, 10, 0.5, dtype=float16)
+ b = np.ones((20,), dtype=float16)
+ assert_equal(np.dot(a, b),
+ 95)
+
+ # argmax
+ a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
+ assert_equal(a.argmax(),
+ 4)
+ a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
+ assert_equal(a.argmax(),
+ 5)
+
+ # getitem
+ a = np.arange(10, dtype=float16)
+ for i in range(10):
+ assert_equal(a.item(i), i)
+
+ def test_spacing_nextafter(self):
+ """Test np.spacing and np.nextafter"""
+ # All non-negative finite #'s
+ a = np.arange(0x7c00, dtype=uint16)
+ hinf = np.array((np.inf,), dtype=float16)
+ a_f16 = a.view(dtype=float16)
+
+ assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
+
+ assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
+ assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
+ assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
+
+ # switch to negatives
+ a |= 0x8000
+
+ assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
+ assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
+
+ assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
+ assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
+ assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
+
+
+ def test_half_ufuncs(self):
+ """Test the various ufuncs"""
+
+ a = np.array([0, 1, 2, 4, 2], dtype=float16)
+ b = np.array([-2, 5, 1, 4, 3], dtype=float16)
+ c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
+
+ assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
+ assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
+ assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
+ assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
+
+ assert_equal(np.equal(a, b), [False, False, False, True, False])
+ assert_equal(np.not_equal(a, b), [True, True, True, False, True])
+ assert_equal(np.less(a, b), [False, True, False, False, True])
+ assert_equal(np.less_equal(a, b), [False, True, False, True, True])
+ assert_equal(np.greater(a, b), [True, False, True, False, False])
+ assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
+ assert_equal(np.logical_and(a, b), [False,
True, True, True, True]) + assert_equal(np.logical_or(a, b), [True, True, True, True, True]) + assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) + assert_equal(np.logical_not(a), [True, False, False, False, False]) + + assert_equal(np.isnan(c), [False, False, False, True, False]) + assert_equal(np.isinf(c), [False, False, True, False, False]) + assert_equal(np.isfinite(c), [True, True, False, False, True]) + assert_equal(np.signbit(b), [True, False, False, False, False]) + + assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) + + assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) + x = np.maximum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [0, 5, 1, 0, 6]) + assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) + x = np.minimum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [-2, -1, -np.inf, 0, 3]) + assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) + assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) + assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) + assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) + + assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) + assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) + assert_equal(np.square(b), [4, 25, 1, 16, 9]) + assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) + assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) + assert_equal(np.conjugate(b), b) + assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) + assert_equal(np.negative(b), [2, -5, -1, -4, -3]) + assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) + assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) + assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) + assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) + + def test_half_coercion(self): + """Test that half gets coerced properly with the other types""" + a16 = np.array((1,), dtype=float16) + a32 = np.array((1,), dtype=float32) + b16 = float16(1) + b32 = float32(1) + + assert_equal(np.power(a16, 2).dtype, float16) + assert_equal(np.power(a16, 2.0).dtype, float16) + assert_equal(np.power(a16, b16).dtype, float16) + assert_equal(np.power(a16, b32).dtype, float16) + assert_equal(np.power(a16, a16).dtype, float16) + assert_equal(np.power(a16, a32).dtype, float32) + + assert_equal(np.power(b16, 2).dtype, float64) + assert_equal(np.power(b16, 2.0).dtype, float64) + assert_equal(np.power(b16, b16).dtype, float16) + assert_equal(np.power(b16, b32).dtype, float32) + assert_equal(np.power(b16, a16).dtype, float16) + assert_equal(np.power(b16, a32).dtype, float32) + + assert_equal(np.power(a32, a16).dtype, float32) + assert_equal(np.power(a32, b16).dtype, float32) + assert_equal(np.power(b32, a16).dtype, float16) + assert_equal(np.power(b32, b16).dtype, float32) + + @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") + def test_half_fpe(self): + with np.errstate(all='raise'): + sx16 = np.array((1e-4,), dtype=float16) + bx16 = np.array((1e4,), dtype=float16) + sy16 = float16(1e-4) + by16 = float16(1e4) + + # Underflow errors + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, + 
float16(2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(-2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14+2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(-2.**-14-2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14+2**-23), float16(4)) + + # Overflow errors + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b:a+b, + float16(65504), float16(17)) + assert_raises_fpe('overflow', lambda a, b:a-b, + float16(-65504), float16(17)) + assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.spacing, float16(65504)) + + # Invalid value errors + assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.nan)) + assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0)) + assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0)) + assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan)) + + # These should not raise + float16(65472)+float16(32) + float16(2**-13)/float16(2) + float16(2**-14)/float16(2**10) + np.spacing(float16(-65504)) + np.nextafter(float16(65504), float16(-np.inf)) + np.nextafter(float16(-65504), float16(np.inf)) + float16(2**-14)/float16(2**10) + float16(-2**-14)/float16(2**10) + float16(2**-14+2**-23)/float16(2) + float16(-2**-14-2**-23)/float16(2) + + def test_half_array_interface(self): + """Test that half is compatible with __array_interface__""" + class Dummy: + pass + + a = np.ones((1,), dtype=float16) + b = Dummy() + b.__array_interface__ = a.__array_interface__ + c = np.array(b) + assert_(c.dtype == float16) + assert_equal(a, c) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexerrors.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexerrors.py new file mode 100644 index 0000000000000..e5dc9dbab6d1e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexerrors.py @@ -0,0 +1,127 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import TestCase, run_module_suite, assert_raises, assert_equal, assert_ +import sys + +class TestIndexErrors(TestCase): + '''Tests to exercise indexerrors not covered by other tests.''' + + def test_arraytypes_fasttake(self): + 'take from a 0-length dimension' + x = np.empty((2, 3, 0, 4)) + assert_raises(IndexError, x.take, [0], axis=2) + assert_raises(IndexError, x.take, [1], axis=2) + assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') + assert_raises(IndexError, x.take, [0], axis=2, mode='clip') + + def test_take_from_object(self): + # Check exception taking from object array + d = np.zeros(5, dtype=object) + assert_raises(IndexError, d.take, 
[6]) + + # Check exception taking from 0-d array + d = np.zeros((5, 0), dtype=object) + assert_raises(IndexError, d.take, [1], axis=1) + assert_raises(IndexError, d.take, [0], axis=1) + assert_raises(IndexError, d.take, [0]) + assert_raises(IndexError, d.take, [0], mode='wrap') + assert_raises(IndexError, d.take, [0], mode='clip') + + def test_multiindex_exceptions(self): + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.item, 20) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.item, (0, 0)) + + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.itemset, 20, 0) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.itemset, (0, 0), 0) + + def test_put_exceptions(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + + def test_iterators_exceptions(self): + "cases in iterators.c" + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a[0, 5, None, 2]) + assert_raises(IndexError, lambda: a[0, 5, 0, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) + assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) + + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a[0, 0, None, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + + def test_mapping(self): + "cases from mapping.c" + + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros((0, 10)) + assert_raises(IndexError, lambda: a[12]) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(10, 20)]) + assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, 0)]) + assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) + + a = np.zeros((10,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + a = np.zeros((0,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(1, [1, 20])]) + assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, [0, 1])]) + assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) + + def test_methods(self): + "cases from methods.c" + + a = np.zeros((3, 3)) + assert_raises(IndexError, lambda: a.item(100)) + assert_raises(IndexError, lambda: a.itemset(100, 1)) + a = np.zeros((0, 3)) + assert_raises(IndexError, lambda: 
a.item(100)) + assert_raises(IndexError, lambda: a.itemset(100, 1)) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexing.py new file mode 100644 index 0000000000000..7f6fab72e5579 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexing.py @@ -0,0 +1,983 @@ +from __future__ import division, absolute_import, print_function + +import sys +import warnings +import functools + +import numpy as np +from numpy.core.multiarray_tests import array_indexing +from itertools import product +from numpy.testing import * + + +try: + cdll = np.ctypeslib.load_library('multiarray', np.core.multiarray.__file__) + _HAS_CTYPE = True +except ImportError: + _HAS_CTYPE = False + + +class TestIndexing(TestCase): + def test_none_index(self): + # `None` index adds newaxis + a = np.array([1, 2, 3]) + assert_equal(a[None], a[np.newaxis]) + assert_equal(a[None].ndim, a.ndim + 1) + + def test_empty_tuple_index(self): + # Empty tuple index creates a view + a = np.array([1, 2, 3]) + assert_equal(a[()], a) + assert_(a[()].base is a) + a = np.array(0) + assert_(isinstance(a[()], np.int_)) + + # Regression, it needs to fall through integer and fancy indexing + # cases, so need the with statement to ignore the non-integer error. + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', '', DeprecationWarning) + a = np.array([1.]) + assert_(isinstance(a[0.], np.float_)) + + a = np.array([np.array(1)], dtype=object) + assert_(isinstance(a[0.], np.ndarray)) + + def test_same_kind_index_casting(self): + # Indexes should be cast with same-kind and not safe, even if + # that is somewhat unsafe. So test various different code paths. + index = np.arange(5) + u_index = index.astype(np.uintp) + arr = np.arange(10) + + assert_array_equal(arr[index], arr[u_index]) + arr[u_index] = np.arange(5) + assert_array_equal(arr, np.arange(10)) + + arr = np.arange(10).reshape(5, 2) + assert_array_equal(arr[index], arr[u_index]) + + arr[u_index] = np.arange(5)[:,None] + assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) + + arr = np.arange(25).reshape(5, 5) + assert_array_equal(arr[u_index, u_index], arr[index, index]) + + def test_empty_fancy_index(self): + # Empty list index creates an empty array + # with the same dtype (but with weird shape) + a = np.array([1, 2, 3]) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([], dtype=np.intp) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([]) + assert_raises(IndexError, a.__getitem__, b) + + def test_ellipsis_index(self): + # Ellipsis index does not create a view + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + assert_equal(a[...], a) + assert_(a[...].base is a) # `a[...]` was `a` in numpy <1.9.) 
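+        # (That is, `a[...]` now returns a genuine full view: it is a
+        # distinct object from `a`, but shares its memory, hence `base`.)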
+ + # Slicing with ellipsis can skip an + # arbitrary number of dimensions + assert_equal(a[0, ...], a[0]) + assert_equal(a[0, ...], a[0,:]) + assert_equal(a[..., 0], a[:, 0]) + + # Slicing with ellipsis always results + # in an array, not a scalar + assert_equal(a[0, ..., 1], np.array(2)) + + # Assignment with `(Ellipsis,)` on 0-d arrays + b = np.array(1) + b[(Ellipsis,)] = 2 + assert_equal(b, 2) + + def test_single_int_index(self): + # Single integer index selects one row + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[0], [1, 2, 3]) + assert_equal(a[-1], [7, 8, 9]) + + # Index out of bounds produces IndexError + assert_raises(IndexError, a.__getitem__, 1<<30) + # Index overflow produces IndexError + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', DeprecationWarning) + assert_raises(IndexError, a.__getitem__, 1<<64) + + def test_single_bool_index(self): + # Single boolean index + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + # Python boolean converts to integer + # These are being deprecated (and test in test_deprecations) + #assert_equal(a[True], a[1]) + #assert_equal(a[False], a[0]) + + # Same with NumPy boolean scalar + # Before DEPRECATE, this is an error (as always, but telling about + # future change): + assert_raises(IndexError, a.__getitem__, np.array(True)) + assert_raises(IndexError, a.__getitem__, np.array(False)) + # After DEPRECATE, this behaviour can be enabled: + #assert_equal(a[np.array(True)], a[None]) + #assert_equal(a[np.array(False), a[None][0:0]]) + + + def test_boolean_indexing_onedim(self): + # Indexing a 2-dimensional array with + # boolean array of length one + a = np.array([[ 0., 0., 0.]]) + b = np.array([ True], dtype=bool) + assert_equal(a[b], a) + # boolean assignment + a[b] = 1. + assert_equal(a, [[1., 1., 1.]]) + + + def test_boolean_assignment_value_mismatch(self): + # A boolean assignment should fail when the shape of the values + # cannot be broadcast to the subscription. (see also gh-3458) + a = np.arange(4) + def f(a, v): + a[a > -1] = v + + assert_raises(ValueError, f, a, []) + assert_raises(ValueError, f, a, [1, 2, 3]) + assert_raises(ValueError, f, a[:1], [1, 2, 3]) + + + def test_boolean_indexing_twodim(self): + # Indexing a 2-dimensional array with + # 2-dimensional boolean array + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) + assert_equal(a[b], [1, 3, 5, 7, 9]) + assert_equal(a[b[1]], [[4, 5, 6]]) + assert_equal(a[b[0]], a[b[2]]) + + # boolean assignment + a[b] = 0 + assert_equal(a, [[0, 2, 0], + [4, 0, 6], + [0, 8, 0]]) + + + def test_reverse_strides_and_subspace_bufferinit(self): + # This tests that the strides are not reversed for simple and + # subspace fancy indexing. + a = np.ones(5) + b = np.zeros(5, dtype=np.intp)[::-1] + c = np.arange(5)[::-1] + + a[b] = c + # If the strides are not reversed, the 0 in the arange comes last. 
+ assert_equal(a[0], 0) + + # This also tests that the subspace buffer is initialized: + a = np.ones((5, 2)) + c = np.arange(10).reshape(5, 2)[::-1] + a[b, :] = c + assert_equal(a[0], [0, 1]) + + def test_reversed_strides_result_allocation(self): + # Test a bug when calculating the output strides for a result array + # when the subspace size was 1 (and test other cases as well) + a = np.arange(10)[:, None] + i = np.arange(10)[::-1] + assert_array_equal(a[i], a[i.copy('C')]) + + a = np.arange(20).reshape(-1, 2) + + + def test_uncontiguous_subspace_assignment(self): + # During development there was a bug activating a skip logic + # based on ndim instead of size. + a = np.full((3, 4, 2), -1) + b = np.full((3, 4, 2), -1) + + a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T + b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() + + assert_equal(a, b) + + + def test_too_many_fancy_indices_special_case(self): + # Just documents behaviour, this is a small limitation. + a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS + assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32) + + + def test_scalar_array_bool(self): + # Numpy bools can be used as boolean index (python ones as of yet not) + a = np.array(1) + assert_equal(a[np.bool_(True)], a[np.array(True)]) + assert_equal(a[np.bool_(False)], a[np.array(False)]) + + # After deprecating bools as integers: + #a = np.array([0,1,2]) + #assert_equal(a[True, :], a[None, :]) + #assert_equal(a[:, True], a[:, None]) + # + #assert_(not np.may_share_memory(a, a[True, :])) + + + def test_everything_returns_views(self): + # Before `...` would return a itself. + a = np.arange(5) + + assert_(a is not a[()]) + assert_(a is not a[...]) + assert_(a is not a[:]) + + + def test_broaderrors_indexing(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) + assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) + + + def test_trivial_fancy_out_of_bounds(self): + a = np.zeros(5) + ind = np.ones(20, dtype=np.intp) + ind[-1] = 10 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + ind = np.ones(20, dtype=np.intp) + ind[0] = 11 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + + + def test_nonbaseclass_values(self): + class SubClass(np.ndarray): + def __array_finalize__(self, old): + # Have array finalize do funny things + self.fill(99) + + a = np.zeros((5, 5)) + s = a.copy().view(type=SubClass) + s.fill(1) + + a[[0, 1, 2, 3, 4], :] = s + assert_((a == 1).all()) + + # Subspace is last, so transposing might want to finalize + a[:, [0, 1, 2, 3, 4]] = s + assert_((a == 1).all()) + + a.fill(0) + a[...] = s + assert_((a == 1).all()) + + + def test_subclass_writeable(self): + d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], + dtype=[('target', 'S20'), ('V_mag', '>f4')]) + ind = np.array([False, True, True], dtype=bool) + assert_(d[ind].flags.writeable) + ind = np.array([0, 1]) + assert_(d[ind].flags.writeable) + assert_(d[...].flags.writeable) + assert_(d[0].flags.writeable) + + + def test_memory_order(self): + # This is not necessary to preserve. Memory layouts for + # more complex indices are not as simple. 
+ a = np.arange(10) + b = np.arange(10).reshape(5,2).T + assert_(a[b].flags.f_contiguous) + + # Takes a different implementation branch: + a = a.reshape(-1, 1) + assert_(a[b, 0].flags.f_contiguous) + + + def test_scalar_return_type(self): + # Full scalar indices should return scalars and object + # arrays should not call PyArray_Return on their items + class Zero(object): + # The most basic valid indexing + def __index__(self): + return 0 + z = Zero() + + class ArrayLike(object): + # Simple array, should behave like the array + def __array__(self): + return np.array(0) + + a = np.zeros(()) + assert_(isinstance(a[()], np.float_)) + a = np.zeros(1) + assert_(isinstance(a[z], np.float_)) + a = np.zeros((1, 1)) + assert_(isinstance(a[z, np.array(0)], np.float_)) + assert_(isinstance(a[z, ArrayLike()], np.float_)) + + # And object arrays do not call it too often: + b = np.array(0) + a = np.array(0, dtype=object) + a[()] = b + assert_(isinstance(a[()], np.ndarray)) + a = np.array([b, None]) + assert_(isinstance(a[z], np.ndarray)) + a = np.array([[b, None]]) + assert_(isinstance(a[z, np.array(0)], np.ndarray)) + assert_(isinstance(a[z, ArrayLike()], np.ndarray)) + + + def test_small_regressions(self): + # Reference count of intp for index checks + a = np.array([0]) + refcount = sys.getrefcount(np.dtype(np.intp)) + # item setting always checks indices in separate function: + a[np.array([0], dtype=np.intp)] = 1 + a[np.array([0], dtype=np.uint8)] = 1 + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.intp), 1) + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.uint8), 1) + + assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) + + def test_unaligned(self): + v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] + d = v.view(np.dtype("S8")) + # unaligned source + x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] + x = x.view(np.dtype("S8")) + x[...] = np.array("b" * 8, dtype="S") + b = np.arange(d.size) + #trivial + assert_equal(d[b], d) + d[b] = x + # nontrivial + # unaligned index array + b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] + b = b.view(np.intp)[:d.size] + b[...] = np.arange(d.size) + assert_equal(d[b.astype(np.int16)], d) + d[b.astype(np.int16)] = x + # boolean + d[b % 2 == 0] + d[b % 2 == 0] = x[::2] + + +class TestFieldIndexing(TestCase): + def test_scalar_return_type(self): + # Field access on an array should return an array, even if it + # is 0-d. + a = np.zeros((), [('a','f8')]) + assert_(isinstance(a['a'], np.ndarray)) + assert_(isinstance(a[['a']], np.ndarray)) + + +class TestBroadcastedAssignments(TestCase): + def assign(self, a, ind, val): + a[ind] = val + return a + + + def test_prepending_ones(self): + a = np.zeros((3, 2)) + + a[...] = np.ones((1, 3, 2)) + # Fancy with subspace with and without transpose + a[[0, 1, 2], :] = np.ones((1, 3, 2)) + a[:, [0, 1]] = np.ones((1, 3, 2)) + # Fancy without subspace (with broadcasting) + a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) + + + def test_prepend_not_one(self): + assign = self.assign + s_ = np.s_ + + a = np.zeros(5) + + # Too large and not only ones. + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + + with warnings.catch_warnings(): + # Will be a ValueError as well. 
+ warnings.simplefilter("error", DeprecationWarning) + assert_raises(DeprecationWarning, assign, a, s_[[1, 2, 3],], + np.ones((2, 1))) + assert_raises(DeprecationWarning, assign, a, s_[[[1], [2]],], + np.ones((2,2,1))) + + + def test_simple_broadcasting_errors(self): + assign = self.assign + s_ = np.s_ + + a = np.zeros((5, 1)) + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) + + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) + + assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) + + + def test_index_is_larger(self): + # Simple case of fancy index broadcasting of the index. + a = np.zeros((5, 5)) + a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] + + assert_((a[:3, :3] == [2, 3, 4]).all()) + + + def test_broadcast_subspace(self): + a = np.zeros((100, 100)) + v = np.arange(100)[:,None] + b = np.arange(100)[::-1] + a[b] = v + assert_((a[::-1] == v).all()) + + +class TestSubclasses(TestCase): + def test_basic(self): + class SubClass(np.ndarray): + pass + + s = np.arange(5).view(SubClass) + assert_(isinstance(s[:3], SubClass)) + assert_(s[:3].base is s) + + assert_(isinstance(s[[0, 1, 2]], SubClass)) + assert_(isinstance(s[s > 0], SubClass)) + + + def test_matrix_fancy(self): + # The matrix class messes with the shape. While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0,1,0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5,10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + + def test_finalize_gets_full_info(self): + # Array finalize should be called on the filled array. + class SubClass(np.ndarray): + def __array_finalize__(self, old): + self.finalize_status = np.array(self) + self.old = old + + s = np.arange(10).view(SubClass) + new_s = s[:3] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[[0,1,2,3]] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[s > 0] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + +class TestFancyIndexingEquivalence(TestCase): + def test_object_assign(self): + # Check that the field and object special case using copyto is active. + # The right hand side cannot be converted to an array here. + a = np.arange(5, dtype=object) + b = a.copy() + a[:3] = [1, (1,2), 3] + b[[0, 1, 2]] = [1, (1,2), 3] + assert_array_equal(a, b) + + # test same for subspace fancy indexing + b = np.arange(5, dtype=object)[None, :] + b[[0], :3] = [[1, (1,2), 3]] + assert_array_equal(a, b[0]) + + + def test_cast_equivalence(self): + # Yes, normal slicing uses unsafe casting. + a = np.arange(5) + b = a.copy() + + a[:3] = np.array(['2', '-3', '-1']) + b[[0, 2, 1]] = np.array(['2', '-1', '-3']) + assert_array_equal(a, b) + + # test the same for subspace fancy indexing + b = np.arange(5)[None, :] + b[[0], :3] = np.array([['2', '-3', '-1']]) + assert_array_equal(a, b[0]) + + +class TestMultiIndexingAutomated(TestCase): + """ + These test use code to mimic the C-Code indexing for selection. + + NOTE: * This still lacks tests for complex item setting. 
+           * If you change behavior of indexing, you might want to modify
+             these tests to try more combinations.
+           * Behavior was written to match numpy version 1.8. (though a
+             first version matched 1.7.)
+           * Only tuple indices are supported by the mimicking code.
+             (and tested as of writing this)
+           * Error types should match most of the time as long as there
+             is only one error. For multiple errors, what gets raised
+             will usually not be the same one. They are *not* tested.
+    """
+    def setUp(self):
+        self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
+        self.b = np.empty((3, 0, 5, 6))
+        self.complex_indices = ['skip', Ellipsis,
+            0,
+            # Boolean indices, up to 3-d for some special cases of eating up
+            # dimensions, also need to test all False
+            np.array(False),
+            np.array([True, False, False]),
+            np.array([[True, False], [False, True]]),
+            np.array([[[False, False], [False, False]]]),
+            # Some slices:
+            slice(-5, 5, 2),
+            slice(1, 1, 100),
+            slice(4, -1, -2),
+            slice(None, None, -3),
+            # Some fancy indexes:
+            np.empty((0, 1, 1), dtype=np.intp),  # empty and can be broadcast
+            np.array([0, 1, -2]),
+            np.array([[2], [0], [1]]),
+            np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
+            np.array([2, -1], dtype=np.int8),
+            np.zeros([1]*31, dtype=int),  # trigger too large array.
+            np.array([0., 1.])]  # invalid datatype
+        # Some simpler indices that still cover a bit more
+        self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip']
+        # Very simple ones to fill the rest:
+        self.fill_indices = [slice(None, None), 0]
+
+
+    def _get_multi_index(self, arr, indices):
+        """Mimic multi dimensional indexing.
+
+        Parameters
+        ----------
+        arr : ndarray
+            Array to be indexed.
+        indices : tuple of index objects
+
+        Returns
+        -------
+        out : ndarray
+            An array equivalent to the indexing operation (but always a copy).
+            `arr[indices]` should be identical.
+        no_copy : bool
+            Whether the indexing operation requires a copy. If this is `True`,
+            `np.may_share_memory(arr, arr[indices])` should be `True` (with
+            some exceptions for scalars and possibly 0-d arrays).
+
+        Notes
+        -----
+        The errors raised by this function mostly match those of normal
+        indexing, but this is not guaranteed.
+        """
+        in_indices = list(indices)
+        indices = []
+        # if False, this is a fancy or boolean index
+        no_copy = True
+        # number of fancy/scalar indexes that are not consecutive
+        num_fancy = 0
+        # number of dimensions indexed by a "fancy" index
+        fancy_dim = 0
+        # NOTE: This is a funny twist (and probably OK to change).
+        # The boolean array has illegal indexes, but this is
+        # allowed if the broadcast fancy-indices are 0-sized.
+        # This variable is to catch that case.
+        error_unless_broadcast_to_empty = False
+
+        # We need to handle Ellipsis and make arrays from indices, also
+        # check if this is fancy indexing (set no_copy).
+        ndim = 0
+        ellipsis_pos = None  # define here mostly to replace all but the first
+        for i, indx in enumerate(in_indices):
+            if indx is None:
+                continue
+            if isinstance(indx, np.ndarray) and indx.dtype == bool:
+                no_copy = False
+                if indx.ndim == 0:
+                    raise IndexError
+                # boolean indices can have higher dimensions
+                ndim += indx.ndim
+                fancy_dim += indx.ndim
+                continue
+            if indx is Ellipsis:
+                if ellipsis_pos is None:
+                    ellipsis_pos = i
+                    continue  # do not increment ndim counter
+                raise IndexError
+            if isinstance(indx, slice):
+                ndim += 1
+                continue
+            if not isinstance(indx, np.ndarray):
+                # This could be open for changes in numpy.
+                # numpy should maybe raise an error if casting to intp
+                # is not safe. It rejects np.array([1., 2.]) but not
+                # [1., 2.] as an index (same for e.g. np.take).
+                # (Note the importance of empty lists if changing this here)
+                indx = np.array(indx, dtype=np.intp)
+                in_indices[i] = indx
+            elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
+                raise IndexError('arrays used as indices must be of integer (or boolean) type')
+            if indx.ndim != 0:
+                no_copy = False
+            ndim += 1
+            fancy_dim += 1
+
+        if arr.ndim - ndim < 0:
+            # We can't take more dimensions than we have, not even for 0-d
+            # arrays, since a[()] makes sense but a[(),] does not. We will
+            # raise an error later on, unless a broadcasting error occurs
+            # first.
+            raise IndexError
+
+        if ndim == 0 and not None in in_indices:
+            # We have no indices, or only an Ellipsis. This is legal.
+            return arr.copy(), no_copy
+
+        if ellipsis_pos is not None:
+            in_indices[ellipsis_pos:ellipsis_pos+1] = [slice(None, None)] * (arr.ndim - ndim)
+
+        for ax, indx in enumerate(in_indices):
+            if isinstance(indx, slice):
+                # convert to an index array
+                indx = np.arange(*indx.indices(arr.shape[ax]))
+                indices.append(['s', indx])
+                continue
+            elif indx is None:
+                # this is like taking a slice with one element from a new axis:
+                indices.append(['n', np.array([0], dtype=np.intp)])
+                arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
+                continue
+            if isinstance(indx, np.ndarray) and indx.dtype == bool:
+                # This may be open for improvement in numpy.
+                # numpy should probably cast boolean lists to boolean indices
+                # instead of intp!
+
+                # Numpy supports a boolean index with a non-matching shape
+                # as long as the True values are not out of bounds. Numpy
+                # should maybe not allow this (at least not for arrays that
+                # are larger than the original one).
+                try:
+                    flat_indx = np.ravel_multi_index(np.nonzero(indx),
+                                    arr.shape[ax:ax+indx.ndim], mode='raise')
+                except:
+                    error_unless_broadcast_to_empty = True
+                    # fill with 0s instead, and raise error later
+                    flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
+                # concatenate axis into a single one:
+                if indx.ndim != 0:
+                    arr = arr.reshape((arr.shape[:ax]
+                                       + (np.prod(arr.shape[ax:ax+indx.ndim]),)
+                                       + arr.shape[ax+indx.ndim:]))
+                    indx = flat_indx
+                else:
+                    # This could be changed: a 0-d boolean index can
+                    # make sense (even outside the 0-d indexed array case).
+                    # Note that originally this could be interpreted as an
+                    # integer in the full integer special case.
+                    raise IndexError
+            else:
+                # If the index is a singleton, the bounds check is done
+                # before the broadcasting. This used to be different in <1.9.
+                if indx.ndim == 0:
+                    if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
+                        raise IndexError
+            if indx.ndim == 0:
+                # The index is a scalar. This used to be twofold: if fancy
+                # indexing was active, the check was done later, possibly
+                # after broadcasting it away (1.7. or earlier). Now it is
+                # always done.
+                if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
+                    raise IndexError
+            if len(indices) > 0 and indices[-1][0] == 'f' and ax != ellipsis_pos:
+                # NOTE: There could still have been a 0-sized Ellipsis
+                # between them. Checked that with ellipsis_pos.
+                indices[-1].append(indx)
+            else:
+                # We have a fancy index that is not after an existing one.
+                # NOTE: A 0-d array triggers this as well, while one may
+                # expect it not to, since a scalar would not be considered
+                # fancy indexing.
+ num_fancy += 1 + indices.append(['f', indx]) + + if num_fancy > 1 and not no_copy: + # We have to flush the fancy indexes left + new_indices = indices[:] + axes = list(range(arr.ndim)) + fancy_axes = [] + new_indices.insert(0, ['f']) + ni = 0 + ai = 0 + for indx in indices: + ni += 1 + if indx[0] == 'f': + new_indices[0].extend(indx[1:]) + del new_indices[ni] + ni -= 1 + for ax in range(ai, ai + len(indx[1:])): + fancy_axes.append(ax) + axes.remove(ax) + ai += len(indx) - 1 # axis we are at + indices = new_indices + # and now we need to transpose arr: + arr = arr.transpose(*(fancy_axes + axes)) + + # We only have one 'f' index now and arr is transposed accordingly. + # Now handle newaxis by reshaping... + ax = 0 + for indx in indices: + if indx[0] == 'f': + if len(indx) == 1: + continue + # First of all, reshape arr to combine fancy axes into one: + orig_shape = arr.shape + orig_slice = orig_shape[ax:ax + len(indx[1:])] + arr = arr.reshape((arr.shape[:ax] + + (np.prod(orig_slice).astype(int),) + + arr.shape[ax + len(indx[1:]):])) + + # Check if broadcasting works + if len(indx[1:]) != 1: + res = np.broadcast(*indx[1:]) # raises ValueError... + else: + res = indx[1] + # unfortunately the indices might be out of bounds. So check + # that first, and use mode='wrap' then. However only if + # there are any indices... + if res.size != 0: + if error_unless_broadcast_to_empty: + raise IndexError + for _indx, _size in zip(indx[1:], orig_slice): + if _indx.size == 0: + continue + if np.any(_indx >= _size) or np.any(_indx < -_size): + raise IndexError + if len(indx[1:]) == len(orig_slice): + if np.product(orig_slice) == 0: + # Work around for a crash or IndexError with 'wrap' + # in some 0-sized cases. + try: + mi = np.ravel_multi_index(indx[1:], orig_slice, mode='raise') + except: + # This happens with 0-sized orig_slice (sometimes?) + # here it is a ValueError, but indexing gives a: + raise IndexError('invalid index into 0-sized') + else: + mi = np.ravel_multi_index(indx[1:], orig_slice, mode='wrap') + else: + # Maybe never happens... + raise ValueError + arr = arr.take(mi.ravel(), axis=ax) + arr = arr.reshape((arr.shape[:ax] + + mi.shape + + arr.shape[ax+1:])) + ax += mi.ndim + continue + + # If we are here, we have a 1D array for take: + arr = arr.take(indx[1], axis=ax) + ax += 1 + + return arr, no_copy + + + def _check_multi_index(self, arr, index): + """Check a multi index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be a reshaped arange. + index : tuple of indexing objects + Index being tested. + """ + # Test item getting + try: + mimic_get, no_copy = self._get_multi_index(arr, index) + except Exception as e: + prev_refcount = sys.getrefcount(arr) + assert_raises(Exception, arr.__getitem__, index) + assert_raises(Exception, arr.__setitem__, index, 0) + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + + def _check_single_index(self, arr, index): + """Check a single index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be an arange. + index : indexing object + Index being tested. Must be a single index and not a tuple + of indexing objects (see also `_check_multi_index`). 
+ """ + try: + mimic_get, no_copy = self._get_multi_index(arr, (index,)) + except Exception as e: + prev_refcount = sys.getrefcount(arr) + assert_raises(Exception, arr.__getitem__, index) + assert_raises(Exception, arr.__setitem__, index, 0) + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + + def _compare_index_result(self, arr, index, mimic_get, no_copy): + """Compare mimicked result to indexing result. + """ + arr = arr.copy() + indexed_arr = arr[index] + assert_array_equal(indexed_arr, mimic_get) + # Check if we got a view, unless its a 0-sized or 0-d array. + # (then its not a view, and that does not matter) + if indexed_arr.size != 0 and indexed_arr.ndim != 0: + assert_(np.may_share_memory(indexed_arr, arr) == no_copy) + # Check reference count of the original array + if no_copy: + # refcount increases by one: + assert_equal(sys.getrefcount(arr), 3) + else: + assert_equal(sys.getrefcount(arr), 2) + + # Test non-broadcast setitem: + b = arr.copy() + b[index] = mimic_get + 1000 + if b.size == 0: + return # nothing to compare here... + if no_copy and indexed_arr.ndim != 0: + # change indexed_arr in-place to manipulate original: + indexed_arr += 1000 + assert_array_equal(arr, b) + return + # Use the fact that the array is originally an arange: + arr.flat[indexed_arr.ravel()] += 1000 + assert_array_equal(arr, b) + + + def test_boolean(self): + a = np.array(5) + assert_equal(a[np.array(True)], 5) + a[np.array(True)] = 1 + assert_equal(a, 1) + # NOTE: This is different from normal broadcasting, as + # arr[boolean_array] works like in a multi index. Which means + # it is aligned to the left. This is probably correct for + # consistency with arr[boolean_array,] also no broadcasting + # is done at all + self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool),)) + self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + + + def test_multidim(self): + # Automatically test combinations with complex indexes on 2nd (or 1st) + # spot and the simple ones in one other spot. + with warnings.catch_warnings(): + # This is so that np.array(True) is not accepted in a full integer + # index, when running the file separately. 
+ warnings.filterwarnings('error', '', DeprecationWarning) + for simple_pos in [0, 2, 3]: + tocheck = [self.fill_indices, self.complex_indices, + self.fill_indices, self.fill_indices] + tocheck[simple_pos] = self.simple_indices + for index in product(*tocheck): + index = tuple(i for i in index if i != 'skip') + self._check_multi_index(self.a, index) + self._check_multi_index(self.b, index) + + # Check very simple item getting: + self._check_multi_index(self.a, (0, 0, 0, 0)) + self._check_multi_index(self.b, (0, 0, 0, 0)) + # Also check (simple cases of) too many indices: + assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + + + def test_1d(self): + a = np.arange(10) + with warnings.catch_warnings(): + warnings.filterwarnings('error', '', DeprecationWarning) + for index in self.complex_indices: + self._check_single_index(a, index) + + +class TestCApiAccess(TestCase): + def test_getitem(self): + subscript = functools.partial(array_indexing, 0) + + # 0-d arrays don't work: + assert_raises(IndexError, subscript, np.ones(()), 0) + # Out of bound values: + assert_raises(IndexError, subscript, np.ones(10), 11) + assert_raises(IndexError, subscript, np.ones(10), -11) + assert_raises(IndexError, subscript, np.ones((10, 10)), 11) + assert_raises(IndexError, subscript, np.ones((10, 10)), -11) + + a = np.arange(10) + assert_array_equal(a[4], subscript(a, 4)) + a = a.reshape(5, 2) + assert_array_equal(a[-4], subscript(a, -4)) + + def test_setitem(self): + assign = functools.partial(array_indexing, 1) + + # Deletion is impossible: + assert_raises(ValueError, assign, np.ones(10), 0) + # 0-d arrays don't work: + assert_raises(IndexError, assign, np.ones(()), 0, 0) + # Out of bound values: + assert_raises(IndexError, assign, np.ones(10), 11, 0) + assert_raises(IndexError, assign, np.ones(10), -11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) + + a = np.arange(10) + assign(a, 4, 10) + assert_(a[4] == 10) + + a = a.reshape(5, 2) + assign(a, 4, 10) + assert_array_equal(a[-1], [10, 10]) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_item_selection.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_item_selection.py new file mode 100644 index 0000000000000..d8e9e6fd0faf5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_item_selection.py @@ -0,0 +1,70 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import * +import sys, warnings + + +class TestTake(TestCase): + def test_simple(self): + a = [[1, 2], [3, 4]] + a_str = [[b'1', b'2'], [b'3', b'4']] + modes = ['raise', 'wrap', 'clip'] + indices = [-1, 4] + index_arrays = [np.empty(0, dtype=np.intp), + np.empty(tuple(), dtype=np.intp), + np.empty((1, 1), dtype=np.intp)] + real_indices = {} + real_indices['raise'] = {-1:1, 4:IndexError} + real_indices['wrap'] = {-1:1, 4:0} + real_indices['clip'] = {-1:0, 4:1} + # Currently all types but object, use the same function generation. + # So it should not be necessary to test all. However test also a non + # refcounted struct on top of object. 
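+        # For reference: taking along axis=1 of the (2, 2) arrays built
+        # below, the out-of-range index 4 wraps to 4 % 2 == 0 with
+        # mode='wrap', clips to the last valid index 1 with mode='clip', and
+        # raises IndexError with mode='raise'; the negative index -1 stays
+        # valid except under 'clip', where it clips to 0. This is what the
+        # real_indices table above encodes.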
+        types = np.int, np.object, np.dtype([('', 'i', 2)])
+        for t in types:
+            # ta works, even if the array may be odd if the buffer interface is used
+            ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
+            tresult = list(ta.T.copy())
+            for index_array in index_arrays:
+                if index_array.size != 0:
+                    tresult[0].shape = (2,) + index_array.shape
+                    tresult[1].shape = (2,) + index_array.shape
+                for mode in modes:
+                    for index in indices:
+                        real_index = real_indices[mode][index]
+                        if real_index is IndexError and index_array.size != 0:
+                            index_array.put(0, index)
+                            assert_raises(IndexError, ta.take, index_array,
+                                          mode=mode, axis=1)
+                        elif index_array.size != 0:
+                            index_array.put(0, index)
+                            res = ta.take(index_array, mode=mode, axis=1)
+                            assert_array_equal(res, tresult[real_index])
+                        else:
+                            res = ta.take(index_array, mode=mode, axis=1)
+                            assert_(res.shape == (2,) + index_array.shape)
+
+
+    def test_refcounting(self):
+        objects = [object() for i in range(10)]
+        for mode in ('raise', 'clip', 'wrap'):
+            a = np.array(objects)
+            b = np.array([2, 2, 4, 5, 3, 5])
+            a.take(b, out=a[:6])
+            del a
+            assert_(all(sys.getrefcount(o) == 3 for o in objects))
+            # not contiguous, example:
+            a = np.array(objects * 2)[::2]
+            a.take(b, out=a[:6])
+            del a
+            assert_(all(sys.getrefcount(o) == 3 for o in objects))
+
+    def test_unicode_mode(self):
+        d = np.arange(10)
+        k = b'\xc3\xa4'.decode("UTF8")
+        assert_raises(ValueError, d.take, 5, mode=k)
+
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_machar.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_machar.py
new file mode 100644
index 0000000000000..8d858c28b83bf
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_machar.py
@@ -0,0 +1,30 @@
+from __future__ import division, absolute_import, print_function
+
+from numpy.testing import *
+
+from numpy.core.machar import MachAr
+import numpy.core.numerictypes as ntypes
+from numpy import errstate, array
+
+class TestMachAr(TestCase):
+    def _run_machar_highprec(self):
+        # Instantiate a MachAr instance with high enough precision to cause
+        # underflow
+        try:
+            hiprec = ntypes.float96
+            machar = MachAr(lambda v: array([v], hiprec))
+        except AttributeError:
+            "Skipping test: no ntypes.float96 available on this platform."
+
+    def test_underlow(self):
+        """Regression test for #759: instantiating MachAr for dtype =
+        np.float96 raises a spurious warning."""
+        with errstate(all='raise'):
+            try:
+                self._run_machar_highprec()
+            except FloatingPointError as e:
+                self.fail("Caught %s exception, should not have been raised."
% e) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_memmap.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_memmap.py new file mode 100644 index 0000000000000..b364f5eb990f4 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_memmap.py @@ -0,0 +1,127 @@ +from __future__ import division, absolute_import, print_function + +import sys +from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp +import os +import shutil + +from numpy import memmap +from numpy import arange, allclose, asarray +from numpy.testing import * + +class TestMemmap(TestCase): + def setUp(self): + self.tmpfp = NamedTemporaryFile(prefix='mmap') + self.tempdir = mkdtemp() + self.shape = (3, 4) + self.dtype = 'float32' + self.data = arange(12, dtype=self.dtype) + self.data.resize(self.shape) + + def tearDown(self): + self.tmpfp.close() + shutil.rmtree(self.tempdir) + + def test_roundtrip(self): + # Write data to file + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp # Test __del__ machinery, which handles cleanup + + # Read data back from file + newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', + shape=self.shape) + assert_(allclose(self.data, newfp)) + assert_array_equal(self.data, newfp) + + def test_open_with_filename(self): + tmpname = mktemp('', 'mmap', dir=self.tempdir) + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp + + def test_unnamed_file(self): + with TemporaryFile() as f: + fp = memmap(f, dtype=self.dtype, shape=self.shape) + del fp + + def test_attributes(self): + offset = 1 + mode = "w+" + fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, + shape=self.shape, offset=offset) + self.assertEqual(offset, fp.offset) + self.assertEqual(mode, fp.mode) + del fp + + def test_filename(self): + tmpname = mktemp('', 'mmap', dir=self.tempdir) + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + abspath = os.path.abspath(tmpname) + fp[:] = self.data[:] + self.assertEqual(abspath, fp.filename) + b = fp[:1] + self.assertEqual(abspath, b.filename) + del b + del fp + + def test_filename_fileobj(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", + shape=self.shape) + self.assertEqual(fp.filename, self.tmpfp.name) + + @dec.knownfailureif(sys.platform=='gnu0', "This test is known to fail on hurd") + def test_flush(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + assert_equal(fp[0], self.data[0]) + fp.flush() + + def test_del(self): + # Make sure a view does not delete the underlying mmap + fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp_base[0] = 5 + fp_view = fp_base[0:1] + assert_equal(fp_view[0], 5) + del fp_view + # Should still be able to access and assign values after + # deleting the view + assert_equal(fp_base[0], 5) + fp_base[0] = 6 + assert_equal(fp_base[0], 6) + + def test_arithmetic_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = (fp + 10) + if isinstance(tmp, memmap): + assert tmp._mmap is not fp._mmap + + def test_indexing_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = fp[[(1, 2), (2, 3)]] + if isinstance(tmp, memmap): + assert tmp._mmap is not fp._mmap + + def test_slicing_keeps_references(self): + fp = memmap(self.tmpfp, 
dtype=self.dtype, mode='w+', + shape=self.shape) + assert fp[:2, :2]._mmap is fp._mmap + + def test_view(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + new1 = fp.view() + new2 = new1.view() + assert(new1.base is fp) + assert(new2.base is fp) + new_array = asarray(fp) + assert(new_array.base is fp) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray.py new file mode 100644 index 0000000000000..68e1c11a0711d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray.py @@ -0,0 +1,4482 @@ +from __future__ import division, absolute_import, print_function + +import tempfile +import sys +import os +import shutil +import warnings +import operator +import io +if sys.version_info[0] >= 3: + import builtins +else: + import __builtin__ as builtins +from decimal import Decimal + + +import numpy as np +from nose import SkipTest +from numpy.core import * +from numpy.compat import asbytes, getexception, strchar, sixu +from test_print import in_foreign_locale +from numpy.core.multiarray_tests import ( + test_neighborhood_iterator, test_neighborhood_iterator_oob, + test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end, + test_inplace_increment, get_buffer_info + ) +from numpy.testing import ( + TestCase, run_module_suite, assert_, assert_raises, + assert_equal, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose, + assert_array_less, runstring, dec + ) + +# Need to test an object that does not fully implement math interface +from datetime import timedelta + + +if sys.version_info[:2] > (3, 2): + # In Python 3.3 the representation of empty shape, strides and suboffsets + # is an empty tuple instead of None. 
+ # http://docs.python.org/dev/whatsnew/3.3.html#api-changes + EMPTY = () +else: + EMPTY = None + + +class TestFlags(TestCase): + def setUp(self): + self.a = arange(10) + + def test_writeable(self): + mydict = locals() + self.a.flags.writeable = False + self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict) + self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) + self.a.flags.writeable = True + self.a[0] = 5 + self.a[0] = 0 + + def test_otherflags(self): + assert_equal(self.a.flags.carray, True) + assert_equal(self.a.flags.farray, False) + assert_equal(self.a.flags.behaved, True) + assert_equal(self.a.flags.fnc, False) + assert_equal(self.a.flags.forc, True) + assert_equal(self.a.flags.owndata, True) + assert_equal(self.a.flags.writeable, True) + assert_equal(self.a.flags.aligned, True) + assert_equal(self.a.flags.updateifcopy, False) + +class TestHash(TestCase): + # see #3793 + def test_int(self): + for st, ut, s in [(np.int8, np.uint8, 8), + (np.int16, np.uint16, 16), + (np.int32, np.uint32, 32), + (np.int64, np.uint64, 64)]: + for i in range(1, s): + assert_equal(hash(st(-2**i)), hash(-2**i), + err_msg="%r: -2**%d" % (st, i)) + assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (st, i - 1)) + assert_equal(hash(st(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (st, i)) + + i = max(i - 1, 1) + assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (ut, i - 1)) + assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (ut, i)) + +class TestAttributes(TestCase): + def setUp(self): + self.one = arange(10) + self.two = arange(20).reshape(4, 5) + self.three = arange(60, dtype=float64).reshape(2, 5, 6) + + def test_attributes(self): + assert_equal(self.one.shape, (10,)) + assert_equal(self.two.shape, (4, 5)) + assert_equal(self.three.shape, (2, 5, 6)) + self.three.shape = (10, 3, 2) + assert_equal(self.three.shape, (10, 3, 2)) + self.three.shape = (2, 5, 6) + assert_equal(self.one.strides, (self.one.itemsize,)) + num = self.two.itemsize + assert_equal(self.two.strides, (5*num, num)) + num = self.three.itemsize + assert_equal(self.three.strides, (30*num, 6*num, num)) + assert_equal(self.one.ndim, 1) + assert_equal(self.two.ndim, 2) + assert_equal(self.three.ndim, 3) + num = self.two.itemsize + assert_equal(self.two.size, 20) + assert_equal(self.two.nbytes, 20*num) + assert_equal(self.two.itemsize, self.two.dtype.itemsize) + assert_equal(self.two.base, arange(20)) + + def test_dtypeattr(self): + assert_equal(self.one.dtype, dtype(int_)) + assert_equal(self.three.dtype, dtype(float_)) + assert_equal(self.one.dtype.char, 'l') + assert_equal(self.three.dtype.char, 'd') + self.assertTrue(self.three.dtype.str[0] in '<>') + assert_equal(self.one.dtype.str[1], 'i') + assert_equal(self.three.dtype.str[1], 'f') + + def test_int_subclassing(self): + # Regression test for https://github.com/numpy/numpy/pull/3526 + + numpy_int = np.int_(0) + + if sys.version_info[0] >= 3: + # On Py3k int_ should not inherit from int, because it's not fixed-width anymore + assert_equal(isinstance(numpy_int, int), False) + else: + # Otherwise, it should inherit from int... + assert_equal(isinstance(numpy_int, int), True) + + # ... 
and fast-path checks on C-API level should also work + from numpy.core.multiarray_tests import test_int_subclass + assert_equal(test_int_subclass(numpy_int), True) + + def test_stridesattr(self): + x = self.one + def make_array(size, offset, strides): + return ndarray(size, buffer=x, dtype=int, + offset=offset*x.itemsize, + strides=strides*x.itemsize) + assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) + self.assertRaises(ValueError, make_array, 4, 4, -2) + self.assertRaises(ValueError, make_array, 4, 2, -1) + self.assertRaises(ValueError, make_array, 8, 3, 1) + assert_equal(make_array(8, 3, 0), np.array([3]*8)) + # Check behavior reported in gh-2503: + self.assertRaises(ValueError, make_array, (2, 3), 5, array([-2, -3])) + make_array(0, 0, 10) + + def test_set_stridesattr(self): + x = self.one + def make_array(size, offset, strides): + try: + r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) + except: + raise RuntimeError(getexception()) + r.strides = strides=strides*x.itemsize + return r + assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) + assert_equal(make_array(7, 3, 1), array([3, 4, 5, 6, 7, 8, 9])) + self.assertRaises(ValueError, make_array, 4, 4, -2) + self.assertRaises(ValueError, make_array, 4, 2, -1) + self.assertRaises(RuntimeError, make_array, 8, 3, 1) + # Check that the true extent of the array is used. + # Test relies on as_strided base not exposing a buffer. + x = np.lib.stride_tricks.as_strided(arange(1), (10, 10), (0, 0)) + def set_strides(arr, strides): + arr.strides = strides + self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) + + # Test for offset calculations: + x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + shape=(10,), strides=(-1,)) + self.assertRaises(ValueError, set_strides, x[::-1], -1) + a = x[::-1] + a.strides = 1 + a[::2].strides = 2 + + def test_fill(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = empty((3, 2, 1), t) + y = empty((3, 2, 1), t) + x.fill(1) + y[...] = 1 + assert_equal(x, y) + + def test_fill_struct_array(self): + # Filling from a scalar + x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8') + x.fill(x[0]) + assert_equal(x['f1'][1], x['f1'][0]) + # Filling from a tuple that can be converted + # to a scalar + x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) + x.fill((3.5, -2)) + assert_array_equal(x['a'], [3.5, 3.5]) + assert_array_equal(x['b'], [-2, -2]) + + +class TestArrayConstruction(TestCase): + def test_array(self): + d = np.ones(6) + r = np.array([d, d]) + assert_equal(r, np.ones((2, 6))) + + d = np.ones(6) + tgt = np.ones((2, 6)) + r = np.array([d, d]) + assert_equal(r, tgt) + tgt[1] = 2 + r = np.array([d, d + 1]) + assert_equal(r, tgt) + + d = np.ones(6) + r = np.array([[d, d]]) + assert_equal(r, np.ones((1, 2, 6))) + + d = np.ones(6) + r = np.array([[d, d], [d, d]]) + assert_equal(r, np.ones((2, 2, 6))) + + d = np.ones((6, 6)) + r = np.array([d, d]) + assert_equal(r, np.ones((2, 6, 6))) + + d = np.ones((6, )) + r = np.array([[d, d + 1], d + 2]) + assert_equal(len(r), 2) + assert_equal(r[0], [d, d + 1]) + assert_equal(r[1], d + 2) + + tgt = np.ones((2, 3), dtype=np.bool) + tgt[0, 2] = False + tgt[1, 0:2] = False + r = np.array([[True, True, False], [False, False, True]]) + assert_equal(r, tgt) + r = np.array([[True, False], [True, False], [False, True]]) + assert_equal(r, tgt.T) + + +class TestAssignment(TestCase): + def test_assignment_broadcasting(self): + a = np.arange(6).reshape(2, 3) + + # Broadcasting the input to the output + a[...] 
= np.arange(3) + assert_equal(a, [[0, 1, 2], [0, 1, 2]]) + a[...] = np.arange(2).reshape(2, 1) + assert_equal(a, [[0, 0, 0], [1, 1, 1]]) + + # For compatibility with <= 1.5, a limited version of broadcasting + # the output to the input. + # + # This behavior is inconsistent with NumPy broadcasting + # in general, because it only uses one of the two broadcasting + # rules (adding a new "1" dimension to the left of the shape), + # applied to the output instead of an input. In NumPy 2.0, this kind + # of broadcasting assignment will likely be disallowed. + a[...] = np.arange(6)[::-1].reshape(1, 2, 3) + assert_equal(a, [[5, 4, 3], [2, 1, 0]]) + # The other type of broadcasting would require a reduction operation. + def assign(a, b): + a[...] = b + assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) + + def test_assignment_errors(self): + # Address issue #2276 + class C: + pass + a = np.zeros(1) + def assign(v): + a[0] = v + assert_raises((AttributeError, TypeError), assign, C()) + assert_raises(ValueError, assign, [1]) + +class TestDtypedescr(TestCase): + def test_construction(self): + d1 = dtype('i4') + assert_equal(d1, dtype(int32)) + d2 = dtype('f8') + assert_equal(d2, dtype(float64)) + +class TestZeroRank(TestCase): + def setUp(self): + self.d = array(0), array('x', object) + + def test_ellipsis_subscript(self): + a, b = self.d + self.assertEqual(a[...], 0) + self.assertEqual(b[...], 'x') + self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9. + self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9. + + def test_empty_subscript(self): + a, b = self.d + self.assertEqual(a[()], 0) + self.assertEqual(b[()], 'x') + self.assertTrue(type(a[()]) is a.dtype.type) + self.assertTrue(type(b[()]) is str) + + def test_invalid_subscript(self): + a, b = self.d + self.assertRaises(IndexError, lambda x: x[0], a) + self.assertRaises(IndexError, lambda x: x[0], b) + self.assertRaises(IndexError, lambda x: x[array([], int)], a) + self.assertRaises(IndexError, lambda x: x[array([], int)], b) + + def test_ellipsis_subscript_assignment(self): + a, b = self.d + a[...] = 42 + self.assertEqual(a, 42) + b[...] 
= '' + self.assertEqual(b.item(), '') + + def test_empty_subscript_assignment(self): + a, b = self.d + a[()] = 42 + self.assertEqual(a, 42) + b[()] = '' + self.assertEqual(b.item(), '') + + def test_invalid_subscript_assignment(self): + a, b = self.d + def assign(x, i, v): + x[i] = v + self.assertRaises(IndexError, assign, a, 0, 42) + self.assertRaises(IndexError, assign, b, 0, '') + self.assertRaises(ValueError, assign, a, (), '') + + def test_newaxis(self): + a, b = self.d + self.assertEqual(a[newaxis].shape, (1,)) + self.assertEqual(a[..., newaxis].shape, (1,)) + self.assertEqual(a[newaxis, ...].shape, (1,)) + self.assertEqual(a[..., newaxis].shape, (1,)) + self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1)) + self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1)) + self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1)) + self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) + + def test_invalid_newaxis(self): + a, b = self.d + def subscript(x, i): x[i] + self.assertRaises(IndexError, subscript, a, (newaxis, 0)) + self.assertRaises(IndexError, subscript, a, (newaxis,)*50) + + def test_constructor(self): + x = ndarray(()) + x[()] = 5 + self.assertEqual(x[()], 5) + y = ndarray((), buffer=x) + y[()] = 6 + self.assertEqual(x[()], 6) + + def test_output(self): + x = array(2) + self.assertRaises(ValueError, add, x, [1], x) + + +class TestScalarIndexing(TestCase): + def setUp(self): + self.d = array([0, 1])[0] + + def test_ellipsis_subscript(self): + a = self.d + self.assertEqual(a[...], 0) + self.assertEqual(a[...].shape, ()) + + def test_empty_subscript(self): + a = self.d + self.assertEqual(a[()], 0) + self.assertEqual(a[()].shape, ()) + + def test_invalid_subscript(self): + a = self.d + self.assertRaises(IndexError, lambda x: x[0], a) + self.assertRaises(IndexError, lambda x: x[array([], int)], a) + + def test_invalid_subscript_assignment(self): + a = self.d + def assign(x, i, v): + x[i] = v + self.assertRaises(TypeError, assign, a, 0, 42) + + def test_newaxis(self): + a = self.d + self.assertEqual(a[newaxis].shape, (1,)) + self.assertEqual(a[..., newaxis].shape, (1,)) + self.assertEqual(a[newaxis, ...].shape, (1,)) + self.assertEqual(a[..., newaxis].shape, (1,)) + self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1)) + self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1)) + self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1)) + self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) + + def test_invalid_newaxis(self): + a = self.d + def subscript(x, i): x[i] + self.assertRaises(IndexError, subscript, a, (newaxis, 0)) + self.assertRaises(IndexError, subscript, a, (newaxis,)*50) + + def test_overlapping_assignment(self): + # With positive strides + a = np.arange(4) + a[:-1] = a[1:] + assert_equal(a, [1, 2, 3, 3]) + + a = np.arange(4) + a[1:] = a[:-1] + assert_equal(a, [0, 0, 1, 2]) + + # With positive and negative strides + a = np.arange(4) + a[:] = a[::-1] + assert_equal(a, [3, 2, 1, 0]) + + a = np.arange(6).reshape(2, 3) + a[::-1,:] = a[:, ::-1] + assert_equal(a, [[5, 4, 3], [2, 1, 0]]) + + a = np.arange(6).reshape(2, 3) + a[::-1, ::-1] = a[:, ::-1] + assert_equal(a, [[3, 4, 5], [0, 1, 2]]) + + # With just one element overlapping + a = np.arange(5) + a[:3] = a[2:] + assert_equal(a, [2, 3, 4, 3, 4]) + + a = np.arange(5) + a[2:] = a[:3] + assert_equal(a, [0, 1, 0, 1, 2]) + + a = np.arange(5) + a[2::-1] = a[2:] + assert_equal(a, [4, 3, 2, 3, 4]) + + a = np.arange(5) + a[2:] = a[2::-1] + assert_equal(a, [0, 1, 2, 1, 0]) + + a = np.arange(5) + a[2::-1] = a[:1:-1] + assert_equal(a, [2, 
3, 4, 3, 4])
+
+        a = np.arange(5)
+        a[:1:-1] = a[2::-1]
+        assert_equal(a, [0, 1, 0, 1, 2])
+
+class TestCreation(TestCase):
+    def test_from_attribute(self):
+        class x(object):
+            def __array__(self, dtype=None):
+                pass
+        self.assertRaises(ValueError, array, x())
+
+    def test_from_string(self):
+        types = np.typecodes['AllInteger'] + np.typecodes['Float']
+        nstr = ['123', '123']
+        result = array([123, 123], dtype=int)
+        for type in types:
+            msg = 'String conversion for %s' % type
+            assert_equal(array(nstr, dtype=type), result, err_msg=msg)
+
+    def test_void(self):
+        arr = np.array([], dtype='V')
+        assert_equal(arr.dtype.kind, 'V')
+
+    def test_zeros(self):
+        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        for dt in types:
+            d = np.zeros((13,), dtype=dt)
+            assert_equal(np.count_nonzero(d), 0)
+            # true for ieee floats
+            assert_equal(d.sum(), 0)
+            assert_(not d.any())
+
+        d = np.zeros(2, dtype='(2,4)i4')
+        assert_equal(np.count_nonzero(d), 0)
+        assert_equal(d.sum(), 0)
+        assert_(not d.any())
+
+        d = np.zeros(2, dtype='4i4')
+        assert_equal(np.count_nonzero(d), 0)
+        assert_equal(d.sum(), 0)
+        assert_(not d.any())
+
+        d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
+        assert_equal(np.count_nonzero(d), 0)
+
+    @dec.slow
+    def test_zeros_big(self):
+        # test big arrays as they might be allocated differently by the system
+        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        for dt in types:
+            d = np.zeros((30 * 1024**2,), dtype=dt)
+            assert_(not d.any())
+
+    def test_zeros_obj(self):
+        # test initialization from PyLong(0)
+        d = np.zeros((13,), dtype=object)
+        assert_array_equal(d, [0] * 13)
+        assert_equal(np.count_nonzero(d), 0)
+
+    def test_zeros_obj_obj(self):
+        d = zeros(10, dtype=[('k', object, 2)])
+        assert_array_equal(d['k'], 0)
+
+    def test_zeros_like_like_zeros(self):
+        # test zeros_like returns the same as zeros
+        for c in np.typecodes['All']:
+            if c == 'V':
+                continue
+            d = zeros((3,3), dtype=c)
+            assert_array_equal(zeros_like(d), d)
+            assert_equal(zeros_like(d).dtype, d.dtype)
+        # explicitly check some special cases
+        d = zeros((3,3), dtype='S5')
+        assert_array_equal(zeros_like(d), d)
+        assert_equal(zeros_like(d).dtype, d.dtype)
+        d = zeros((3,3), dtype='U5')
+        assert_array_equal(zeros_like(d), d)
+        assert_equal(zeros_like(d).dtype, d.dtype)
+
+        d = zeros((3,3), dtype='<i4')
+        assert_array_equal(zeros_like(d), d)
+        assert_equal(zeros_like(d).dtype, d.dtype)
+        d = zeros((3,3), dtype='>i4')
+        assert_array_equal(zeros_like(d), d)
+        assert_equal(zeros_like(d).dtype, d.dtype)
+
+    @dec.skipif(sys.version_info[0] >= 3)
+    def test_sequence_long(self):
+        assert_equal(np.array([long(4), long(4)]).dtype, np.long)
+        assert_equal(np.array([long(4), 2**80]).dtype, np.object)
+        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
+        assert_equal(np.array([2**80, long(4)]).dtype, np.object)
+
+    def test_non_sequence_sequence(self):
+        """Should not segfault.
+
+        Class Fail breaks the sequence protocol for new style classes, i.e.,
+        those derived from object. Class Map is a mapping type indicated by
+        raising a KeyError. At some point we may raise a warning instead
+        of an error in the Fail case.
+
+        """
+        class Fail(object):
+            def __len__(self):
+                return 1
+
+            def __getitem__(self, index):
+                raise ValueError()
+
+        class Map(object):
+            def __len__(self):
+                return 1
+
+            def __getitem__(self, index):
+                raise KeyError()
+
+        a = np.array([Map()])
+        assert_(a.shape == (1,))
+        assert_(a.dtype == np.dtype(object))
+        assert_raises(ValueError, np.array, [Fail()])
+
+
+class TestStructured(TestCase):
+    def test_subarray_field_access(self):
+        a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
+        a['a'] = np.arange(60).reshape(3, 5, 2, 2)
+
+        # Since the subarray is always in C-order, a transpose
+        # does not swap the subarray:
+        assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
+
+        # In Fortran order, the subarray gets appended
+        # like in all other cases, not prepended as a special case
+        b = a.copy(order='F')
+        assert_equal(a['a'].shape, b['a'].shape)
+        assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
+
+    def test_subarray_comparison(self):
+        # Check that comparisons between record arrays with
+        # multi-dimensional field types work properly
+        a = np.rec.fromrecords(
+            [([1, 2, 3], 'a', [[1, 2], [3, 4]]),
+             ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
+            dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
+        b = a.copy()
+        assert_equal(a == b, [True, True])
+        assert_equal(a != b, [False, False])
+        b[1].b = 'c'
+        assert_equal(a == b, [True, False])
+        assert_equal(a != b, [False, True])
+        for i in range(3):
+            b[0].a = a[0].a
+            b[0].a[i] = 5
+            assert_equal(a == b, [False, False])
+            assert_equal(a != b, [True, True])
+        for i in range(2):
+            for j in range(2):
+                b = a.copy()
+                b[0].c[i, j] = 10
+                assert_equal(a == b, [False, True])
+                assert_equal(a != b, [True, False])
+
+        # Check that broadcasting with a subarray works
+        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
+        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
+        assert_equal(a == b, [[True, True, False], [False, False, True]])
+        assert_equal(b == a, [[True, True, False], [False, False, True]])
+        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
+        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
+        assert_equal(a == b, [[True, True, False], [False, False, True]])
+        assert_equal(b == a, [[True, True, False], [False, False, True]])
+        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
+        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)],
+                     dtype=[('a', 'f8', (2,))])
+        assert_equal(a == b, [[True, False, False], [False, False, True]])
+        assert_equal(b == a, [[True, False, False], [False, False, True]])
+
+        # Check that broadcasting Fortran-style arrays with a subarray works
+        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))],
+                     order='F')
+        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)],
+                     dtype=[('a', 'f8', (2,))])
+        assert_equal(a == b, [[True, False, False], [False, False, True]])
+        assert_equal(b == a, [[True, False, False], [False, False, True]])
+
+        # Check that incompatible sub-array shapes don't result in broadcasting
+        x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
+        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
+        assert_equal(x == y, False)
+
+        x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
+        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
+        assert_equal(x == y, False)
+
+        # Check that structured arrays that differ only in
+        # byte-order work
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
+        assert_equal(a == b, [False, True])
+
+
+class TestBool(TestCase):
+    def test_test_interning(self):
+        a0 = bool_(0)
+        b0 =
bool_(False) + self.assertTrue(a0 is b0) + a1 = bool_(1) + b1 = bool_(True) + self.assertTrue(a1 is b1) + self.assertTrue(array([True])[0] is a1) + self.assertTrue(array(True)[()] is a1) + + def test_sum(self): + d = np.ones(101, dtype=np.bool); + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + d = np.frombuffer(b'\xff\xff' * 100, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + def check_count_nonzero(self, power, length): + powers = [2 ** i for i in range(length)] + for i in range(2**power): + l = [(i & x) != 0 for x in powers] + a = np.array(l, dtype=np.bool) + c = builtins.sum(l) + self.assertEqual(np.count_nonzero(a), c) + av = a.view(np.uint8) + av *= 3 + self.assertEqual(np.count_nonzero(a), c) + av *= 4 + self.assertEqual(np.count_nonzero(a), c) + av[av != 0] = 0xFF + self.assertEqual(np.count_nonzero(a), c) + + def test_count_nonzero(self): + # check all 12 bit combinations in a length 17 array + # covers most cases of the 16 byte unrolled code + self.check_count_nonzero(12, 17) + + @dec.slow + def test_count_nonzero_all(self): + # check all combinations in a length 17 array + # covers all cases of the 16 byte unrolled code + self.check_count_nonzero(17, 17) + + def test_count_nonzero_unaligned(self): + # prevent mistakes as e.g. gh-4060 + for o in range(7): + a = np.zeros((18,), dtype=np.bool)[o+1:] + a[:o] = True + self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) + a = np.ones((18,), dtype=np.bool)[o+1:] + a[:o] = False + self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) + +class TestMethods(TestCase): + def test_test_round(self): + assert_equal(array([1.2, 1.5]).round(), [1, 2]) + assert_equal(array(1.5).round(), 2) + assert_equal(array([12.2, 15.5]).round(-1), [10, 20]) + assert_equal(array([12.15, 15.51]).round(1), [12.2, 15.5]) + + def test_transpose(self): + a = array([[1, 2], [3, 4]]) + assert_equal(a.transpose(), [[1, 3], [2, 4]]) + self.assertRaises(ValueError, lambda: a.transpose(0)) + self.assertRaises(ValueError, lambda: a.transpose(0, 0)) + self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2)) + + def test_sort(self): + # test ordering for floats and complex containing nans. It is only + # necessary to check the lessthan comparison, so sorts that + # only follow the insertion sort path are sufficient. We only + # test doubles and complex doubles as the logic is the same. + + # check doubles + msg = "Test real sort order with nans" + a = np.array([np.nan, 1, 0]) + b = sort(a) + assert_equal(b, a[::-1], msg) + # check complex + msg = "Test complex sort order with nans" + a = np.zeros(9, dtype=np.complex128) + a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] + a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] + b = sort(a) + assert_equal(b, a[::-1], msg) + + # all c scalar sorts use the same code with different types + # so it suffices to run a quick check with one type. The number + # of sorted items must be greater than ~50 to check the actual + # algorithm because quick and merge sort fall over to insertion + # sort for small arrays. + a = np.arange(101) + b = a[::-1].copy() + for kind in ['q', 'm', 'h'] : + msg = "scalar sort, kind=%s" % kind + c = a.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test complex sorts. These use the same code as the scalars + # but the compare fuction differs. 
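+        # (A note on the expected ordering, for readers of this test: NumPy
+        # compares complex values lexicographically, first by real part and
+        # then by imaginary part, with nans sorted to the end.  A minimal
+        # sketch of that rule:
+        #     np.sort(np.array([2+1j, 1+2j, 1+1j]))
+        #     -> array([ 1.+1.j,  1.+2.j,  2.+1.j])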
+ ai = a*1j + 1 + bi = b*1j + 1 + for kind in ['q', 'm', 'h'] : + msg = "complex sort, real part == 1, kind=%s" % kind + c = ai.copy(); + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy(); + c.sort(kind=kind) + assert_equal(c, ai, msg) + ai = a + 1j + bi = b + 1j + for kind in ['q', 'm', 'h'] : + msg = "complex sort, imag part == 1, kind=%s" % kind + c = ai.copy(); + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy(); + c.sort(kind=kind) + assert_equal(c, ai, msg) + + # test string sorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)]) + b = a[::-1].copy() + for kind in ['q', 'm', 'h'] : + msg = "string sort, kind=%s" % kind + c = a.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test unicode sorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) + b = a[::-1].copy() + for kind in ['q', 'm', 'h'] : + msg = "unicode sort, kind=%s" % kind + c = a.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test object array sorts. + a = np.empty((101,), dtype=np.object) + a[:] = list(range(101)) + b = a[::-1] + for kind in ['q', 'h', 'm'] : + msg = "object sort, kind=%s" % kind + c = a.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test record array sorts. + dt = np.dtype([('f', float), ('i', int)]) + a = array([(i, i) for i in range(101)], dtype = dt) + b = a[::-1] + for kind in ['q', 'h', 'm'] : + msg = "object sort, kind=%s" % kind + c = a.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test datetime64 sorts. + a = np.arange(0, 101, dtype='datetime64[D]') + b = a[::-1] + for kind in ['q', 'h', 'm'] : + msg = "datetime64 sort, kind=%s" % kind + c = a.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + + # test timedelta64 sorts. + a = np.arange(0, 101, dtype='timedelta64[D]') + b = a[::-1] + for kind in ['q', 'h', 'm'] : + msg = "timedelta64 sort, kind=%s" % kind + c = a.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy(); + c.sort(kind=kind) + assert_equal(c, a, msg) + + # check axis handling. This should be the same for all type + # specific sorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 0], [3, 2]]) + c = np.array([[2, 3], [0, 1]]) + d = a.copy() + d.sort(axis=0) + assert_equal(d, b, "test sort with axis=0") + d = a.copy() + d.sort(axis=1) + assert_equal(d, c, "test sort with axis=1") + d = a.copy() + d.sort() + assert_equal(d, c, "test sort with default axis") + + def test_copy(self): + def assert_fortran(arr): + assert_(arr.flags.fortran) + assert_(arr.flags.f_contiguous) + assert_(not arr.flags.c_contiguous) + + def assert_c(arr): + assert_(not arr.flags.fortran) + assert_(not arr.flags.f_contiguous) + assert_(arr.flags.c_contiguous) + + a = np.empty((2, 2), order='F') + # Test copying a Fortran array + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_fortran(a.copy('A')) + + # Now test starting with a C array. 
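+        # (order='A' means "match the source layout": above, copy('A') came
+        # out Fortran-ordered because `a` was Fortran-ordered; for the
+        # C-ordered array below it should come out C-ordered instead.  A
+        # minimal sketch of the rule:
+        #     np.empty((2, 2), order='F').copy('A').flags.f_contiguous  # True
+        #     np.empty((2, 2), order='C').copy('A').flags.c_contiguous  # True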
+        a = np.empty((2, 2), order='C')
+        assert_c(a.copy())
+        assert_c(a.copy('C'))
+        assert_fortran(a.copy('F'))
+        assert_c(a.copy('A'))
+
+    def test_sort_order(self):
+        # Test sorting an array with fields
+        x1 = np.array([21, 32, 14])
+        x2 = np.array(['my', 'first', 'name'])
+        x3 = np.array([3.1, 4.5, 6.2])
+        r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
+
+        r.sort(order=['id'])
+        assert_equal(r.id, array([14, 21, 32]))
+        assert_equal(r.word, array(['name', 'my', 'first']))
+        assert_equal(r.number, array([6.2, 3.1, 4.5]))
+
+        r.sort(order=['word'])
+        assert_equal(r.id, array([32, 21, 14]))
+        assert_equal(r.word, array(['first', 'my', 'name']))
+        assert_equal(r.number, array([4.5, 3.1, 6.2]))
+
+        r.sort(order=['number'])
+        assert_equal(r.id, array([21, 32, 14]))
+        assert_equal(r.word, array(['my', 'first', 'name']))
+        assert_equal(r.number, array([3.1, 4.5, 6.2]))
+
+        if sys.byteorder == 'little':
+            strtype = '>i2'
+        else:
+            strtype = '<i2'
+                at((p[:, i + 1:] > p[:, i]).all(),
+                   msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
+                aae(p, d1[np.arange(d1.shape[0])[:, None],
+                          np.argpartition(d1, i, axis=1, kind=k)])
+
+                p = np.partition(d0, i, axis=0, kind=k)
+                aae(p[i,:], np.array([i] * d1.shape[0],
+                                     dtype=dt))
+                # array_less does not seem to work right
+                at((p[:i,:] <= p[i,:]).all(),
+                   msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
+                at((p[i + 1:,:] > p[i,:]).all(),
+                   msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
+                aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
+                          np.arange(d0.shape[1])[None,:]])
+
+                # check inplace
+                dc = d.copy()
+                dc.partition(i, kind=k)
+                assert_equal(dc, np.partition(d, i, kind=k))
+                dc = d0.copy()
+                dc.partition(i, axis=0, kind=k)
+                assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
+                dc = d1.copy()
+                dc.partition(i, axis=1, kind=k)
+                assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
+
+    def assert_partitioned(self, d, kth):
+        prev = 0
+        for k in np.sort(kth):
+            assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
+            assert_((d[k:] >= d[k]).all(),
+                    msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
+            prev = k + 1
+
+    def test_partition_iterative(self):
+        d = np.arange(17)
+        kth = (0, 1, 2, 429, 231)
+        assert_raises(ValueError, d.partition, kth)
+        assert_raises(ValueError, d.argpartition, kth)
+        d = np.arange(10).reshape((2, 5))
+        assert_raises(ValueError, d.partition, kth, axis=0)
+        assert_raises(ValueError, d.partition, kth, axis=1)
+        assert_raises(ValueError, np.partition, d, kth, axis=1)
+        assert_raises(ValueError, np.partition, d, kth, axis=None)
+
+        d = np.array([3, 4, 2, 1])
+        p = np.partition(d, (0, 3))
+        self.assert_partitioned(p, (0, 3))
+        self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
+
+        assert_array_equal(p, np.partition(d, (-3, -1)))
+        assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
+
+        d = np.arange(17)
+        np.random.shuffle(d)
+        d.partition(range(d.size))
+        assert_array_equal(np.arange(17), d)
+        np.random.shuffle(d)
+        assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
+
+        # test unsorted kth
+        d = np.arange(17)
+        np.random.shuffle(d)
+        keys = np.array([1, 3, 8, -2])
+        np.random.shuffle(d)
+        p = np.partition(d, keys)
+        self.assert_partitioned(p, keys)
+        p = d[np.argpartition(d, keys)]
+        self.assert_partitioned(p, keys)
+        np.random.shuffle(keys)
+        assert_array_equal(np.partition(d, keys), p)
+        assert_array_equal(d[np.argpartition(d, keys)], p)
+
+        # equal kth
+        d = np.arange(20)[::-1]
+        self.assert_partitioned(np.partition(d, [5]*4), [5])
+        self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
+                                [5]*4 + [6, 13])
+        self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
+        self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
+                                [5]*4 + [6, 13])
+
+        d = np.arange(12)
+        np.random.shuffle(d)
+        d1 = np.tile(np.arange(12), (4, 1))
+        map(np.random.shuffle, d1)
+        d0 = np.transpose(d1)
+
+        kth = (1, 6, 7, -1)
+        p = np.partition(d1, kth, axis=1)
+        pa = d1[np.arange(d1.shape[0])[:, None],
+                d1.argpartition(kth, axis=1)]
+        assert_array_equal(p, pa)
+        for i in range(d1.shape[0]):
+            self.assert_partitioned(p[i,:], kth)
+        p = np.partition(d0, kth, axis=0)
+        pa = d0[np.argpartition(d0, kth, axis=0),
+                np.arange(d0.shape[1])[None,:]]
+        assert_array_equal(p, pa)
+        for i in range(d0.shape[1]):
+            self.assert_partitioned(p[:, i], kth)
+
+    def test_partition_cdtype(self):
+        d = array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+                   ('Lancelot', 1.9, 38)],
+                  dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+        assert_equal(arr > obj, "nope")
+        assert_equal(arr < obj, "yep")
+        assert_equal(np.multiply(arr, obj), "ufunc")
+        arr *= obj
+        assert_equal(arr, 321)
+
+        assert_equal(obj2 * arr, 123)
+        assert_equal(arr * obj2, 321)
+        assert_equal(arr > obj2, "nope")
+        assert_equal(arr < obj2, "yep")
+        assert_equal(np.multiply(arr, obj2), "ufunc")
+        arr *= obj2
+        assert_equal(arr, 321)
+
+        obj2 += 33
+        assert_equal(obj2[0], 42)
+        assert_equal(obj2.sum(), 42)
+        assert_(isinstance(obj2, SomeClass2))
+
+
+class TestSubscripting(TestCase):
+    def test_test_zero_rank(self):
+        x = array([1, 2, 3])
+        self.assertTrue(isinstance(x[0], np.int_))
+        if sys.version_info[0] < 3:
+            self.assertTrue(isinstance(x[0], int))
+        self.assertTrue(type(x[0, ...]) is ndarray)
+
+
+class TestPickling(TestCase):
+    def test_roundtrip(self):
+        import pickle
+        carray = array([[2, 9], [7, 0], [3, 8]])
+        DATA = [
+            carray,
+            transpose(carray),
+            array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
+                                            ('c', float)])
+        ]
+
+        for a in DATA:
+            assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
+
+    def _loads(self, obj):
+        if sys.version_info[0] >= 3:
+            return loads(obj, encoding='latin1')
+        else:
+            return loads(obj)
+
+    # version 0 pickles, using protocol=2 to pickle
+    # version 0 doesn't have a version field
+    def test_version0_int8(self):
+        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
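+        # (The blob above is a protocol-2 pickle of a version-0 array
+        # header: U\x02i1 is the int8 dtype, U\x01| its byte order ('|',
+        # i.e. not applicable), and U\x04\x01\x02\x03\x04 the four raw data
+        # bytes.  _loads() above decodes with latin1 on Python 3 so these
+        # bytes survive the str-to-bytes round trip.)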
+ a = array([1, 2, 3, 4], dtype=int8) + p = self._loads(asbytes(s)) + assert_equal(a, p) + + def test_version0_float32(self): + s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + def test_mixed(self): + g1 = array(["spam", "spa", "spammer", "and eggs"]) + g2 = "spam" + assert_array_equal(g1 == g2, [x == g2 for x in g1]) + assert_array_equal(g1 != g2, [x != g2 for x in g1]) + assert_array_equal(g1 < g2, [x < g2 for x in g1]) + assert_array_equal(g1 > g2, [x > g2 for x in g1]) + assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) + assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + + + def test_unicode(self): + g1 = array([sixu("This"), sixu("is"), sixu("example")]) + g2 = array([sixu("This"), sixu("was"), sixu("example")]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + +class TestArgmax(TestCase): + + nan_arr = [ + ([0, 1, 2, 3, np.nan], 4), + ([0, 1, 2, np.nan, 3], 3), + ([np.nan, 0, 1, 2, 3], 0), + ([np.nan, 0, np.nan, 2, 3], 0), + ([0, 1, 2, 3, complex(0, np.nan)], 4), + ([0, 1, 2, 3, complex(np.nan, 0)], 4), + ([0, 1, 2, complex(np.nan, 0), 3], 3), + ([0, 1, 2, complex(0, np.nan), 3], 3), + ([complex(0, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), + + ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), + ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), + ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), + + ([np.datetime64('1923-04-14T12:43:12'), + np.datetime64('1994-06-21T14:43:15'), + np.datetime64('2001-10-15T04:10:32'), + np.datetime64('1995-11-25T16:02:16'), + np.datetime64('2005-01-04T03:14:12'), + np.datetime64('2041-12-03T14:05:03')], 5), + ([np.datetime64('1935-09-14T04:40:11'), + np.datetime64('1949-10-12T12:32:11'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('2015-11-20T12:20:59'), + np.datetime64('1932-09-23T10:10:13'), + np.datetime64('2014-10-10T03:50:30')], 3), + ([np.datetime64('2059-03-14T12:43:12'), + np.datetime64('1996-09-21T14:43:15'), + np.datetime64('2001-10-15T04:10:32'), + np.datetime64('2022-12-25T16:02:16'), + np.datetime64('1963-10-04T03:14:12'), + np.datetime64('2013-05-08T18:15:23')], 0), + + ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), + timedelta(days=-1, seconds=23)], 0), + ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), + timedelta(days=5, seconds=14)], 1), + ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), + timedelta(days=10, seconds=43)], 2), + + ([False, False, False, False, True], 4), + ([False, False, False, True, False], 3), + ([True, False, False, False, False], 0), + ([True, False, True, False, False], 0), + + # Can't reduce a "flexible type" + #(['a', 'z', 'aa', 'zz'], 3), + #(['zz', 'a', 
'aa', 'a'], 0), + #(['aa', 'z', 'zz', 'a'], 2), + ] + + def test_all(self): + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + for i in range(a.ndim): + amax = a.max(i) + aargmax = a.argmax(i) + axes = list(range(a.ndim)) + axes.remove(i) + assert_(all(amax == aargmax.choose(*a.transpose(i,*axes)))) + + def test_combinations(self): + for arr, pos in self.nan_arr: + assert_equal(np.argmax(arr), pos, err_msg="%r"%arr) + assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr) + + def test_output_shape(self): + # see also gh-616 + a = np.ones((10, 5)) + # Check some simple shape mismatches + out = np.ones(11, dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, out) + + out = np.ones((2, 5), dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, out) + + # these could be relaxed possibly (used to allow even the previous) + out = np.ones((1, 10), dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, np.ones((1, 10))) + + out = np.ones(10, dtype=np.int_) + a.argmax(-1, out=out) + assert_equal(out, a.argmax(-1)) + + +class TestArgmin(TestCase): + + nan_arr = [ + ([0, 1, 2, 3, np.nan], 4), + ([0, 1, 2, np.nan, 3], 3), + ([np.nan, 0, 1, 2, 3], 0), + ([np.nan, 0, np.nan, 2, 3], 0), + ([0, 1, 2, 3, complex(0, np.nan)], 4), + ([0, 1, 2, 3, complex(np.nan, 0)], 4), + ([0, 1, 2, complex(np.nan, 0), 3], 3), + ([0, 1, 2, complex(0, np.nan), 3], 3), + ([complex(0, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), + + ([complex(0, 0), complex(0, 2), complex(0, 1)], 0), + ([complex(1, 0), complex(0, 2), complex(0, 1)], 2), + ([complex(1, 0), complex(0, 2), complex(1, 1)], 1), + + ([np.datetime64('1923-04-14T12:43:12'), + np.datetime64('1994-06-21T14:43:15'), + np.datetime64('2001-10-15T04:10:32'), + np.datetime64('1995-11-25T16:02:16'), + np.datetime64('2005-01-04T03:14:12'), + np.datetime64('2041-12-03T14:05:03')], 0), + ([np.datetime64('1935-09-14T04:40:11'), + np.datetime64('1949-10-12T12:32:11'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('2014-11-20T12:20:59'), + np.datetime64('2015-09-23T10:10:13'), + np.datetime64('1932-10-10T03:50:30')], 5), + ([np.datetime64('2059-03-14T12:43:12'), + np.datetime64('1996-09-21T14:43:15'), + np.datetime64('2001-10-15T04:10:32'), + np.datetime64('2022-12-25T16:02:16'), + np.datetime64('1963-10-04T03:14:12'), + np.datetime64('2013-05-08T18:15:23')], 4), + + ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), + timedelta(days=-1, seconds=23)], 2), + ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), + timedelta(days=5, seconds=14)], 0), + ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), + timedelta(days=10, seconds=43)], 1), + + ([True, True, True, True, False], 4), + ([True, True, True, False, True], 3), + ([False, True, True, True, True], 0), + ([False, True, False, True, True], 0), + + # Can't reduce a "flexible type" + #(['a', 'z', 'aa', 'zz'], 0), + #(['zz', 'a', 'aa', 'a'], 1), + #(['aa', 'z', 'zz', 'a'], 3), + ] + + def test_all(self): + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + for i in range(a.ndim): + amin = a.min(i) + aargmin = a.argmin(i) + axes = list(range(a.ndim)) + axes.remove(i) + assert_(all(amin == aargmin.choose(*a.transpose(i,*axes)))) + + def test_combinations(self): + for arr, pos in self.nan_arr: + assert_equal(np.argmin(arr), pos, 
err_msg="%r"%arr)
+            assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr)
+
+    def test_minimum_signed_integers(self):
+
+        a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
+        assert_equal(np.argmin(a), 1)
+
+        a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
+        assert_equal(np.argmin(a), 1)
+
+        a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
+        assert_equal(np.argmin(a), 1)
+
+        a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
+        assert_equal(np.argmin(a), 1)
+
+    def test_output_shape(self):
+        # see also gh-616
+        a = np.ones((10, 5))
+        # Check some simple shape mismatches
+        out = np.ones(11, dtype=np.int_)
+        assert_raises(ValueError, a.argmin, -1, out)
+
+        out = np.ones((2, 5), dtype=np.int_)
+        assert_raises(ValueError, a.argmin, -1, out)
+
+        # these could be relaxed possibly (used to allow even the previous)
+        out = np.ones((1, 10), dtype=np.int_)
+        assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
+
+        out = np.ones(10, dtype=np.int_)
+        a.argmin(-1, out=out)
+        assert_equal(out, a.argmin(-1))
+
+
+class TestMinMax(TestCase):
+    def test_scalar(self):
+        assert_raises(ValueError, np.amax, 1, 1)
+        assert_raises(ValueError, np.amin, 1, 1)
+
+        assert_equal(np.amax(1, axis=0), 1)
+        assert_equal(np.amin(1, axis=0), 1)
+        assert_equal(np.amax(1, axis=None), 1)
+        assert_equal(np.amin(1, axis=None), 1)
+
+    def test_axis(self):
+        assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
+        assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
+
+class TestNewaxis(TestCase):
+    def test_basic(self):
+        sk = array([0, -0.1, 0.1])
+        res = 250*sk[:, newaxis]
+        assert_almost_equal(res.ravel(), 250*sk)
+
+
+class TestClip(TestCase):
+    def _check_range(self, x, cmin, cmax):
+        assert_(np.all(x >= cmin))
+        assert_(np.all(x <= cmax))
+
+    def _clip_type(self, type_group, array_max,
+                   clip_min, clip_max, inplace=False,
+                   expected_min=None, expected_max=None):
+        if expected_min is None:
+            expected_min = clip_min
+        if expected_max is None:
+            expected_max = clip_max
+
+        for T in np.sctypes[type_group]:
+            if sys.byteorder == 'little':
+                byte_orders = ['=', '>']
+            else:
+                byte_orders = ['<', '=']
+
+            for byteorder in byte_orders:
+                dtype = np.dtype(T).newbyteorder(byteorder)
+
+                x = (np.random.random(1000) * array_max).astype(dtype)
+                if inplace:
+                    x.clip(clip_min, clip_max, x)
+                else:
+                    x = x.clip(clip_min, clip_max)
+                    byteorder = '='
+
+                if x.dtype.byteorder == '|': byteorder = '|'
+                assert_equal(x.dtype.byteorder, byteorder)
+                self._check_range(x, expected_min, expected_max)
+        return x
+
+    def test_basic(self):
+        for inplace in [False, True]:
+            self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace)
+            self._clip_type('float', 1024, 0, 0, inplace=inplace)
+
+            self._clip_type('int', 1024, -120, 100.5, inplace=inplace)
+            self._clip_type('int', 1024, 0, 0, inplace=inplace)
+
+            x = self._clip_type('uint', 1024, -120, 100, expected_min=0,
+                                inplace=inplace)
+            x = self._clip_type('uint', 1024, 0, 0, inplace=inplace)
+
+    def test_record_array(self):
+        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
+        y = rec['x'].clip(-0.3, 0.5)
+        self._check_range(y, -0.3, 0.5)
+        assert_array_equal(y, [-0.3, 0.5])
+
+    def test_max_or_min(self):
+        val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
+        x = val.clip(3)
+        assert_(np.all(x >= 3))
+        x = val.clip(min=3)
+        assert_(np.all(x >= 3))
+        x = val.clip(max=4)
+        assert_(np.all(x <= 4))
+
+
+class TestPutmask(object):
+    def tst_basic(self, x, T, mask, val):
+        np.putmask(x, mask, val)
+        assert_(np.all(x[mask] == T(val)))
+        assert_(x.dtype == T)
+
+    def test_ip_types(self):
+        unchecked_types = [str, unicode, np.void, object]
+
+        x = np.random.random(1000)*100
+        mask = x < 40
+
+        for val in [-100, 0, 15]:
+            for types in np.sctypes.values():
+                for T in types:
+                    if T not in
unchecked_types: + yield self.tst_basic, x.copy().astype(T), T, mask, val + + def test_mask_size(self): + assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) + + def tst_byteorder(self, dtype): + x = np.array([1, 2, 3], dtype) + np.putmask(x, [True, False, True], -1) + assert_array_equal(x, [-1, 2, -1]) + + def test_ip_byteorder(self): + for dtype in ('>i4', 'f8'), ('z', 'i4', 'f8'), ('z', ' 1 minute on mechanical hard drive + def test_big_binary(self): + """Test workarounds for 32-bit limited fwrite, fseek, and ftell + calls in windows. These normally would hang doing something like this. + See http://projects.scipy.org/numpy/ticket/1660""" + if sys.platform != 'win32': + return + try: + # before workarounds, only up to 2**32-1 worked + fourgbplus = 2**32 + 2**16 + testbytes = np.arange(8, dtype=np.int8) + n = len(testbytes) + flike = tempfile.NamedTemporaryFile() + f = flike.file + np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) + flike.seek(0) + a = np.fromfile(f, dtype=np.int8) + flike.close() + assert_(len(a) == fourgbplus) + # check only start and end for speed: + assert_((a[:n] == testbytes).all()) + assert_((a[-n:] == testbytes).all()) + except (MemoryError, ValueError): + pass + + def test_string(self): + self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',') + + def test_counted_string(self): + self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') + self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',') + self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') + + def test_string_with_ws(self): + self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') + + def test_counted_string_with_ws(self): + self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int, + sep=' ') + + def test_ascii(self): + self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') + self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') + + def test_malformed(self): + self._check_from('1.234 1,234', [1.234, 1.], sep=' ') + + def test_long_sep(self): + self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') + + def test_dtype(self): + v = np.array([1, 2, 3, 4], dtype=np.int_) + self._check_from('1,2,3,4', v, sep=',', dtype=np.int_) + + def test_dtype_bool(self): + # can't use _check_from because fromstring can't handle True/False + v = np.array([True, False, True, False], dtype=np.bool_) + s = '1,0,-2.3,0' + f = open(self.filename, 'wb') + f.write(asbytes(s)) + f.close() + y = np.fromfile(self.filename, sep=',', dtype=np.bool_) + assert_(y.dtype == '?') + assert_array_equal(y, v) + + def test_tofile_sep(self): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + f = open(self.filename, 'w') + x.tofile(f, sep=',') + f.close() + f = open(self.filename, 'r') + s = f.read() + f.close() + assert_equal(s, '1.51,2.0,3.51,4.0') + + def test_tofile_format(self): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + f = open(self.filename, 'w') + x.tofile(f, sep=',', format='%.2f') + f.close() + f = open(self.filename, 'r') + s = f.read() + f.close() + assert_equal(s, '1.51,2.00,3.51,4.00') + + def test_locale(self): + in_foreign_locale(self.test_numbers)() + in_foreign_locale(self.test_nan)() + in_foreign_locale(self.test_inf)() + in_foreign_locale(self.test_counted_string)() + in_foreign_locale(self.test_ascii)() + in_foreign_locale(self.test_malformed)() + in_foreign_locale(self.test_tofile_sep)() + in_foreign_locale(self.test_tofile_format)() + + +class TestFromBuffer(object): + def tst_basic(self, buffer, expected, kwargs): + 
assert_array_equal(np.frombuffer(buffer,**kwargs), expected) + + def test_ip_basic(self): + for byteorder in ['<', '>']: + for dtype in [float, int, np.complex]: + dt = np.dtype(dtype).newbyteorder(byteorder) + x = (np.random.random((4, 7))*5).astype(dt) + buf = x.tobytes() + yield self.tst_basic, buf, x.flat, {'dtype':dt} + + def test_empty(self): + yield self.tst_basic, asbytes(''), np.array([]), {} + + +class TestFlat(TestCase): + def setUp(self): + a0 = arange(20.0) + a = a0.reshape(4, 5) + a0.shape = (4, 5) + a.flags.writeable = False + self.a = a + self.b = a[::2, ::2] + self.a0 = a0 + self.b0 = a0[::2, ::2] + + def test_contiguous(self): + testpassed = False + try: + self.a.flat[12] = 100.0 + except ValueError: + testpassed = True + assert testpassed + assert self.a.flat[12] == 12.0 + + def test_discontiguous(self): + testpassed = False + try: + self.b.flat[4] = 100.0 + except ValueError: + testpassed = True + assert testpassed + assert self.b.flat[4] == 12.0 + + def test___array__(self): + c = self.a.flat.__array__() + d = self.b.flat.__array__() + e = self.a0.flat.__array__() + f = self.b0.flat.__array__() + + assert c.flags.writeable is False + assert d.flags.writeable is False + assert e.flags.writeable is True + assert f.flags.writeable is True + + assert c.flags.updateifcopy is False + assert d.flags.updateifcopy is False + assert e.flags.updateifcopy is False + assert f.flags.updateifcopy is True + assert f.base is self.b0 + +class TestResize(TestCase): + def test_basic(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + x.resize((5, 5)) + assert_array_equal(x.flat[:9], + np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) + assert_array_equal(x[9:].flat, 0) + + def test_check_reference(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + y = x + self.assertRaises(ValueError, x.resize, (5, 1)) + + def test_int_shape(self): + x = np.eye(3) + x.resize(3) + assert_array_equal(x, np.eye(3)[0,:]) + + def test_none_shape(self): + x = np.eye(3) + x.resize(None) + assert_array_equal(x, np.eye(3)) + x.resize() + assert_array_equal(x, np.eye(3)) + + def test_invalid_arguements(self): + self.assertRaises(TypeError, np.eye(3).resize, 'hi') + self.assertRaises(ValueError, np.eye(3).resize, -1) + self.assertRaises(TypeError, np.eye(3).resize, order=1) + self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi') + + def test_freeform_shape(self): + x = np.eye(3) + x.resize(3, 2, 1) + assert_(x.shape == (3, 2, 1)) + + def test_zeros_appended(self): + x = np.eye(3) + x.resize(2, 3, 3) + assert_array_equal(x[0], np.eye(3)) + assert_array_equal(x[1], np.zeros((3, 3))) + + def test_obj_obj(self): + # check memory is initialized on resize, gh-4857 + a = ones(10, dtype=[('k', object, 2)]) + a.resize(15,) + assert_equal(a.shape, (15,)) + assert_array_equal(a['k'][-5:], 0) + assert_array_equal(a['k'][:-5], 1) + + +class TestRecord(TestCase): + def test_field_rename(self): + dt = np.dtype([('f', float), ('i', int)]) + dt.names = ['p', 'q'] + assert_equal(dt.names, ['p', 'q']) + + if sys.version_info[0] >= 3: + def test_bytes_fields(self): + # Bytes are not allowed in field names and not recognized in titles + # on Py3 + assert_raises(TypeError, np.dtype, [(asbytes('a'), int)]) + assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)]) + + dt = np.dtype([((asbytes('a'), 'b'), int)]) + assert_raises(ValueError, dt.__getitem__, asbytes('a')) + + x = np.array([(1,), (2,), (3,)], dtype=dt) + assert_raises(ValueError, x.__getitem__, asbytes('a')) + + y = x[0] + 
assert_raises(IndexError, y.__getitem__, asbytes('a')) + else: + def test_unicode_field_titles(self): + # Unicode field titles are added to field dict on Py2 + title = unicode('b') + dt = np.dtype([((title, 'a'), int)]) + dt[title] + dt['a'] + x = np.array([(1,), (2,), (3,)], dtype=dt) + x[title] + x['a'] + y = x[0] + y[title] + y['a'] + + def test_unicode_field_names(self): + # Unicode field names are not allowed on Py2 + title = unicode('b') + assert_raises(TypeError, np.dtype, [(title, int)]) + assert_raises(TypeError, np.dtype, [(('a', title), int)]) + + def test_field_names(self): + # Test unicode and 8-bit / byte strings can be used + a = np.zeros((1,), dtype=[('f1', 'i4'), + ('f2', 'i4'), + ('f3', [('sf1', 'i4')])]) + is_py3 = sys.version_info[0] >= 3 + if is_py3: + funcs = (str,) + # byte string indexing fails gracefully + assert_raises(ValueError, a.__setitem__, asbytes('f1'), 1) + assert_raises(ValueError, a.__getitem__, asbytes('f1')) + assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1) + assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1')) + else: + funcs = (str, unicode) + for func in funcs: + b = a.copy() + fn1 = func('f1') + b[fn1] = 1 + assert_equal(b[fn1], 1) + fnn = func('not at all') + assert_raises(ValueError, b.__setitem__, fnn, 1) + assert_raises(ValueError, b.__getitem__, fnn) + b[0][fn1] = 2 + assert_equal(b[fn1], 2) + # Subfield + assert_raises(IndexError, b[0].__setitem__, fnn, 1) + assert_raises(IndexError, b[0].__getitem__, fnn) + # Subfield + fn3 = func('f3') + sfn1 = func('sf1') + b[fn3][sfn1] = 1 + assert_equal(b[fn3][sfn1], 1) + assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) + assert_raises(ValueError, b[fn3].__getitem__, fnn) + # multiple Subfields + fn2 = func('f2') + b[fn2] = 3 + assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) + assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) + # view of subfield view/copy + assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2)) + view_dtype=[('f1', 'i4'), ('f3', [('', 'i4')])] + assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,))) + # non-ascii unicode field indexing is well behaved + if not is_py3: + raise SkipTest('non ascii unicode field indexing skipped; ' + 'raises segfault on python 2.x') + else: + assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1) + assert_raises(ValueError, a.__getitem__, sixu('\u03e0')) + + def test_field_names_deprecation(self): + + def collect_warning_types(f, *args, **kwargs): + with warnings.catch_warnings(record=True) as log: + warnings.simplefilter("always") + f(*args, **kwargs) + return [w.category for w in log] + + a = np.zeros((1,), dtype=[('f1', 'i4'), + ('f2', 'i4'), + ('f3', [('sf1', 'i4')])]) + a['f1'][0] = 1 + a['f2'][0] = 2 + a['f3'][0] = (3,) + b = np.zeros((1,), dtype=[('f1', 'i4'), + ('f2', 'i4'), + ('f3', [('sf1', 'i4')])]) + b['f1'][0] = 1 + b['f2'][0] = 2 + b['f3'][0] = (3,) + + # All the different functions raise a warning, but not an error, and + # 'a' is not modified: + assert_equal(collect_warning_types(a[['f1', 'f2']].__setitem__, 0, (10, 20)), + [FutureWarning]) + assert_equal(a, b) + # Views also warn + subset = a[['f1', 'f2']] + subset_view = subset.view() + assert_equal(collect_warning_types(subset_view['f1'].__setitem__, 0, 10), + [FutureWarning]) + # But the write goes through: + assert_equal(subset['f1'][0], 10) + # Only one warning per multiple field indexing, though 
(even if there are + # multiple views involved): + assert_equal(collect_warning_types(subset['f1'].__setitem__, 0, 10), + []) + + def test_record_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + a.flags.writeable = False + b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) + b.flags.writeable = False + c = np.array([(1, 2), (3, 4)], dtype='i1,i2') + c.flags.writeable = False + self.assertTrue(hash(a[0]) == hash(a[1])) + self.assertTrue(hash(a[0]) == hash(b[0])) + self.assertTrue(hash(a[0]) != hash(b[1])) + self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0]) + + def test_record_no_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + self.assertRaises(TypeError, hash, a[0]) + +class TestView(TestCase): + def test_basic(self): + x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8), + ('b', np.int8), ('a', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype=' 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + A = np.zeros((0, 3)) + for f in self.funcs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(A, axis=axis)).all()) + assert_(len(w) > 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(A, axis=axis), np.zeros([])) + + def test_mean_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * mat.shape[axis] + assert_almost_equal(res, tgt) + for axis in [None]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * np.prod(mat.shape) + assert_almost_equal(res, tgt) + + def test_var_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_std_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + tgt = np.sqrt(_var(mat, axis=axis)) + res = _std(mat, axis=axis) + assert_almost_equal(res, tgt) + + + def test_subclass(self): + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + def __array_finalize__(self, obj): + self.info = getattr(obj, "info", '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + res = dat.mean(1) + assert_(res.info == dat.info) + res = dat.std(1) + assert_(res.info == dat.info) + res = dat.var(1) + assert_(res.info == dat.info) + +class TestDot(TestCase): + def test_dot_2args(self): + from numpy.core.multiarray import dot + + a = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([[1, 0], [1, 1]], dtype=float) + c = np.array([[3, 2], [7, 4]], dtype=float) + + d = dot(a, b) + assert_allclose(c, d) + + def test_dot_3args(self): + from numpy.core.multiarray import dot + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 32)) + for i in range(12): + dot(f, v, r) + assert_equal(sys.getrefcount(r), 2) + r2 = dot(f, v, out=None) + assert_array_equal(r2, r) + assert_(r is dot(f, v, out=r)) + + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = dot(f, v) + assert_(r is dot(f, v, 
r)) + assert_array_equal(r2, r) + + def test_dot_3args_errors(self): + from numpy.core.multiarray import dot + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 31)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32, 1024)) + assert_raises(ValueError, dot, f, v, r) + assert_raises(ValueError, dot, f, v, r.T) + + r = np.empty((1024, 64)) + assert_raises(ValueError, dot, f, v, r[:, ::2]) + assert_raises(ValueError, dot, f, v, r[:, :32]) + + r = np.empty((1024, 32), dtype=np.float32) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024, 32), dtype=int) + assert_raises(ValueError, dot, f, v, r) + + def test_dot_scalar_and_matrix_of_objects(self): + # Ticket #2469 + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +class TestInner(TestCase): + + def test_inner_scalar_and_matrix_of_objects(self): + # Ticket #4482 + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +class TestSummarization(TestCase): + def test_1d(self): + A = np.arange(1001) + strA = '[ 0 1 2 ..., 998 999 1000]' + assert_(str(A) == strA) + + reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' + assert_(repr(A) == reprA) + + def test_2d(self): + A = np.arange(1002).reshape(2, 501) + strA = '[[ 0 1 2 ..., 498 499 500]\n' \ + ' [ 501 502 503 ..., 999 1000 1001]]' + assert_(str(A) == strA) + + reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ + ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + assert_(repr(A) == reprA) + + +class TestChoose(TestCase): + def setUp(self): + self.x = 2*ones((3,), dtype=int) + self.y = 3*ones((3,), dtype=int) + self.x2 = 2*ones((2, 3), dtype=int) + self.y2 = 3*ones((2, 3), dtype=int) + self.ind = [0, 0, 1] + + def test_basic(self): + A = np.choose(self.ind, (self.x, self.y)) + assert_equal(A, [2, 2, 3]) + + def test_broadcast1(self): + A = np.choose(self.ind, (self.x2, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + def test_broadcast2(self): + A = np.choose(self.ind, (self.x, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + +# TODO: test for multidimensional +NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} +class TestNeighborhoodIter(TestCase): + # Simple, 2d tests + def _test_simple2d(self, dt): + # Test zero and one padding for simple data type + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), + np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), + np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), + np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] + l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], + NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), + np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] + l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], + NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), + np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), + np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = test_neighborhood_iterator(x, 
[-1, 0, -1, 1], 4, + NEIGH_MODE['constant']) + assert_array_equal(l, r) + + def test_simple2d(self): + self._test_simple2d(np.float) + + def test_simple2d_object(self): + self._test_simple2d(Decimal) + + def _test_mirror2d(self, dt): + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), + np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] + l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], + NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + def test_mirror2d(self): + self._test_mirror2d(np.float) + + def test_mirror2d_object(self): + self._test_mirror2d(Decimal) + + # Simple, 1d tests + def _test_simple(self, dt): + # Test padding with constant values + x = np.linspace(1, 5, 5).astype(dt) + r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] + l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] + l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] + l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant']) + assert_array_equal(l, r) + + def test_simple_float(self): + self._test_simple(np.float) + + def test_simple_object(self): + self._test_simple(Decimal) + + # Test mirror modes + def _test_mirror(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) + l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror']) + self.assertTrue([i.dtype == dt for i in l]) + assert_array_equal(l, r) + + def test_mirror(self): + self._test_mirror(np.float) + + def test_mirror_object(self): + self._test_mirror(Decimal) + + # Circular mode + def _test_circular(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) + l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + def test_circular(self): + self._test_circular(np.float) + + def test_circular_object(self): + self._test_circular(Decimal) + +# Test stacking neighborhood iterators +class TestStackedNeighborhoodIter(TestCase): + # Simple, 1d test: stacking 2 constant-padded neigh iterators + def test_simple_const(self): + dt = np.float64 + # Test zero and one padding for simple data type + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0], dtype=dt), + np.array([0], dtype=dt), + np.array([1], dtype=dt), + np.array([2], dtype=dt), + np.array([3], dtype=dt), + np.array([0], dtype=dt), + np.array([0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'], + [0, 0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([1, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 1], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-1, 1], NEIGH_MODE['one']) + assert_array_equal(l, r) + + # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # mirror padding + def test_simple_mirror(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 1], dtype=dt), + np.array([1, 1, 2], dtype=dt), 
+ np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 3], dtype=dt), + np.array([3, 3, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'], + [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-2, 0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 3], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [0, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 3], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-2, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # circular padding + def test_simple_circular(self): + dt = np.float64 + # Stacking zero on top of circular + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 3, 1], dtype=dt), + np.array([3, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 1], dtype=dt), + np.array([3, 1, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'], + [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking circular on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-2, 0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking circular on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [0, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking circular on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 1], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], + [-2, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator + # being strictly within the array + def test_simple_strict_within(self): + dt = np.float64 + # Stacking zero on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 0], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], + [-1, 2], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on
top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 3], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], + [-1, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking circular on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 1], dtype=dt)] + l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], + [-1, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + +class TestWarnings(object): + + def test_complex_warning(self): + x = np.array([1, 2]) + y = np.array([1-2j, 1+2j]) + + with warnings.catch_warnings(): + warnings.simplefilter("error", np.ComplexWarning) + assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) + assert_equal(x, [1, 2]) + +class TestMinScalarType(object): + + def test_unsigned_shortshort(self): + dt = np.min_scalar_type(2**8-1) + wanted = np.dtype('uint8') + assert_equal(wanted, dt) + + def test_unsigned_short(self): + dt = np.min_scalar_type(2**16-1) + wanted = np.dtype('uint16') + assert_equal(wanted, dt) + + def test_unsigned_int(self): + dt = np.min_scalar_type(2**32-1) + wanted = np.dtype('uint32') + assert_equal(wanted, dt) + + def test_unsigned_longlong(self): + dt = np.min_scalar_type(2**63-1) + wanted = np.dtype('uint64') + assert_equal(wanted, dt) + + def test_object(self): + dt = np.min_scalar_type(2**64) + wanted = np.dtype('O') + assert_equal(wanted, dt) + + +if sys.version_info[:2] == (2, 6): + from numpy.core.multiarray import memorysimpleview as memoryview + +from numpy.core._internal import _dtype_from_pep3118 + +class TestPEP3118Dtype(object): + def _check(self, spec, wanted): + dt = np.dtype(wanted) + if isinstance(wanted, list) and isinstance(wanted[-1], tuple): + if wanted[-1][0] == '': + names = list(dt.names) + names[-1] = '' + dt.names = tuple(names) + assert_equal(_dtype_from_pep3118(spec), dt, + err_msg="spec %r != dtype %r" % (spec, wanted)) + + def test_native_padding(self): + align = np.dtype('i').alignment + for j in range(8): + if j == 0: + s = 'bi' + else: + s = 'b%dxi' % j + self._check('@'+s, {'f0': ('i1', 0), + 'f1': ('i', align*(1 + j//align))}) + self._check('='+s, {'f0': ('i1', 0), + 'f1': ('i', 1+j)}) + + def test_native_padding_2(self): + # Native padding should work also for structs and sub-arrays + self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) + self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) + + def test_trailing_padding(self): + # Trailing padding should be included, *and*, the item size + # should match the alignment if in aligned mode + align = np.dtype('i').alignment + def VV(n): + return 'V%d' % (align*(1 + (n-1)//align)) + + self._check('ix', [('f0', 'i'), ('', VV(1))]) + self._check('ixx', [('f0', 'i'), ('', VV(2))]) + self._check('ixxx', [('f0', 'i'), ('', VV(3))]) + self._check('ixxxx', [('f0', 'i'), ('', VV(4))]) + self._check('i7x', [('f0', 'i'), ('', VV(7))]) + + self._check('^ix', [('f0', 'i'), ('', 'V1')]) + self._check('^ixx', [('f0', 'i'), ('', 'V2')]) + self._check('^ixxx', [('f0', 'i'), ('', 'V3')]) + self._check('^ixxxx', [('f0', 'i'), ('', 'V4')]) + self._check('^i7x', [('f0', 'i'), ('', 'V7')]) + + def test_native_padding_3(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), + ('sub', np.dtype('b,i')), ('c', 'i')], + align=True) + self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) + + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), + ('e', 'b'),
('sub', np.dtype('b,i', align=True))]) + self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) + + def test_padding_with_array_inside_struct(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), + ('d', 'i')], + align=True) + self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) + + def test_byteorder_inside_struct(self): + # The byte order after @T{=i} should be '=', not '@'. + # Check this by noting the absence of native alignment. + self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), + 'f1': ('i', 5)}) + + def test_intra_padding(self): + # Natively aligned sub-arrays may require some internal padding + align = np.dtype('i').alignment + def VV(n): + return 'V%d' % (align*(1 + (n-1)//align)) + + self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,))) + +class TestNewBufferProtocol(object): + def _check_roundtrip(self, obj): + obj = np.asarray(obj) + x = memoryview(obj) + y = np.asarray(x) + y2 = np.array(x) + assert_(not y.flags.owndata) + assert_(y2.flags.owndata) + + assert_equal(y.dtype, obj.dtype) + assert_equal(y.shape, obj.shape) + assert_array_equal(obj, y) + + assert_equal(y2.dtype, obj.dtype) + assert_equal(y2.shape, obj.shape) + assert_array_equal(obj, y2) + + def test_roundtrip(self): + x = np.array([1, 2, 3, 4, 5], dtype='i4') + self._check_roundtrip(x) + + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + self._check_roundtrip(x) + + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + self._check_roundtrip(x) + + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)], + dtype=dt) + self._check_roundtrip(x) + + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='>i2') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='<i2') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='>i4') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='<i4') + self._check_roundtrip(x) + + # check long long can be represented as non-native + x = np.array([1, 2, 3], dtype='>q') + self._check_roundtrip(x) + + def test_roundtrip_single_types(self): + for typ in np.typeDict.values(): + dtype = np.dtype(typ) + + if dtype.char in 'Mm': + # datetimes cannot be used in buffers + continue + if dtype.char == 'V': + # skipping V, it has variable size + continue + + x = np.zeros(4, dtype=dtype) + self._check_roundtrip(x) + + if dtype.char not in 'qQgG': + dt = dtype.newbyteorder('<') + x = np.zeros(4, dtype=dt) + self._check_roundtrip(x) + + dt = dtype.newbyteorder('>') + x = np.zeros(4, dtype=dt) + self._check_roundtrip(x) + + def test_roundtrip_scalar(self): + # Issue #4015.
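+ # A Python scalar round-trips as a 0-d array: np.asarray(0) exports an
+ # empty shape through the buffer protocol and must come back unchanged
+ # from np.asarray(memoryview(...)).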
+ self._check_roundtrip(0) + + def test_export_simple_1d(self): + x = np.array([1, 2, 3, 4, 5], dtype='i') + y = memoryview(x) + assert_equal(y.format, 'i') + assert_equal(y.shape, (5,)) + assert_equal(y.ndim, 1) + assert_equal(y.strides, (4,)) + assert_equal(y.suboffsets, EMPTY) + assert_equal(y.itemsize, 4) + + def test_export_simple_nd(self): + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + y = memoryview(x) + assert_equal(y.format, 'd') + assert_equal(y.shape, (2, 2)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (16, 8)) + assert_equal(y.suboffsets, EMPTY) + assert_equal(y.itemsize, 8) + + def test_export_discontiguous(self): + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + y = memoryview(x) + assert_equal(y.format, 'f') + assert_equal(y.shape, (3, 3)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (36, 4)) + assert_equal(y.suboffsets, EMPTY) + assert_equal(y.itemsize, 4) + + def test_export_record(self): + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)], + dtype=dt) + y = memoryview(x) + assert_equal(y.shape, (1,)) + assert_equal(y.ndim, 1) + assert_equal(y.suboffsets, EMPTY) + + sz = sum([dtype(b).itemsize for a, b in dt]) + if dtype('l').itemsize == 4: + assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + else: + assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides + if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): + assert_equal(y.strides, (sz,)) + assert_equal(y.itemsize, sz) + + def test_export_subarray(self): + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) + y = memoryview(x) + assert_equal(y.format, 'T{(2,2)i:a:}') + assert_equal(y.shape, EMPTY) + assert_equal(y.ndim, 0) + assert_equal(y.strides, EMPTY) + assert_equal(y.suboffsets, EMPTY) + assert_equal(y.itemsize, 16) + + def test_export_endian(self): + x = np.array([1, 2, 3], dtype='>i') + y = memoryview(x) + if sys.byteorder == 'little': + assert_equal(y.format, '>i') + else: + assert_equal(y.format, 'i') + + x = np.array([1, 2, 3], dtype='<i') + y = memoryview(x) + if sys.byteorder == 'little': + assert_equal(y.format, 'i') + else: + assert_equal(y.format, '<i') + + +class PriorityNdarray(object): + __array_priority__ = 1000 + + def __init__(self, array): + self.array = array + + def __lt__(self, array): + if isinstance(array, PriorityNdarray): + array = array.array + return PriorityNdarray(self.array < array) + + def __gt__(self, array): + if isinstance(array, PriorityNdarray): + array = array.array + return PriorityNdarray(self.array > array) + + def __le__(self, array): + if isinstance(array, PriorityNdarray): + array = array.array + return PriorityNdarray(self.array <= array) + + def __ge__(self, array): + if isinstance(array, PriorityNdarray): + array = array.array + return PriorityNdarray(self.array >= array) + + def __eq__(self, array): + if isinstance(array, PriorityNdarray): + array = array.array + return PriorityNdarray(self.array == array) + + def __ne__(self, array): + if isinstance(array, PriorityNdarray): + array = array.array + return PriorityNdarray(self.array != array) + + +class TestArrayPriority(TestCase): + def test_lt(self): + l = np.asarray([0., -1., 1.], dtype=dtype) + r = np.asarray([0., 1., -1.], dtype=dtype) + lp = PriorityNdarray(l) + rp = PriorityNdarray(r) + res1 = l < r + res2 = l < rp + res3 = lp < r + res4 = lp < rp + + assert_array_equal(res1, res2.array) + assert_array_equal(res1,
res3.array) + assert_array_equal(res1, res4.array) + assert_(isinstance(res1, np.ndarray)) + assert_(isinstance(res2, PriorityNdarray)) + assert_(isinstance(res3, PriorityNdarray)) + assert_(isinstance(res4, PriorityNdarray)) + + def test_gt(self): + l = np.asarray([0., -1., 1.], dtype=dtype) + r = np.asarray([0., 1., -1.], dtype=dtype) + lp = PriorityNdarray(l) + rp = PriorityNdarray(r) + res1 = l > r + res2 = l > rp + res3 = lp > r + res4 = lp > rp + + assert_array_equal(res1, res2.array) + assert_array_equal(res1, res3.array) + assert_array_equal(res1, res4.array) + assert_(isinstance(res1, np.ndarray)) + assert_(isinstance(res2, PriorityNdarray)) + assert_(isinstance(res3, PriorityNdarray)) + assert_(isinstance(res4, PriorityNdarray)) + + def test_le(self): + l = np.asarray([0., -1., 1.], dtype=dtype) + r = np.asarray([0., 1., -1.], dtype=dtype) + lp = PriorityNdarray(l) + rp = PriorityNdarray(r) + res1 = l <= r + res2 = l <= rp + res3 = lp <= r + res4 = lp <= rp + + assert_array_equal(res1, res2.array) + assert_array_equal(res1, res3.array) + assert_array_equal(res1, res4.array) + assert_(isinstance(res1, np.ndarray)) + assert_(isinstance(res2, PriorityNdarray)) + assert_(isinstance(res3, PriorityNdarray)) + assert_(isinstance(res4, PriorityNdarray)) + + def test_ge(self): + l = np.asarray([0., -1., 1.], dtype=dtype) + r = np.asarray([0., 1., -1.], dtype=dtype) + lp = PriorityNdarray(l) + rp = PriorityNdarray(r) + res1 = l >= r + res2 = l >= rp + res3 = lp >= r + res4 = lp >= rp + + assert_array_equal(res1, res2.array) + assert_array_equal(res1, res3.array) + assert_array_equal(res1, res4.array) + assert_(isinstance(res1, np.ndarray)) + assert_(isinstance(res2, PriorityNdarray)) + assert_(isinstance(res3, PriorityNdarray)) + assert_(isinstance(res4, PriorityNdarray)) + + def test_eq(self): + l = np.asarray([0., -1., 1.], dtype=dtype) + r = np.asarray([0., 1., -1.], dtype=dtype) + lp = PriorityNdarray(l) + rp = PriorityNdarray(r) + res1 = l == r + res2 = l == rp + res3 = lp == r + res4 = lp == rp + + assert_array_equal(res1, res2.array) + assert_array_equal(res1, res3.array) + assert_array_equal(res1, res4.array) + assert_(isinstance(res1, np.ndarray)) + assert_(isinstance(res2, PriorityNdarray)) + assert_(isinstance(res3, PriorityNdarray)) + assert_(isinstance(res4, PriorityNdarray)) + + def test_ne(self): + l = np.asarray([0., -1., 1.], dtype=dtype) + r = np.asarray([0., 1., -1.], dtype=dtype) + lp = PriorityNdarray(l) + rp = PriorityNdarray(r) + res1 = l != r + res2 = l != rp + res3 = lp != r + res4 = lp != rp + + assert_array_equal(res1, res2.array) + assert_array_equal(res1, res3.array) + assert_array_equal(res1, res4.array) + assert_(isinstance(res1, np.ndarray)) + assert_(isinstance(res2, PriorityNdarray)) + assert_(isinstance(res3, PriorityNdarray)) + assert_(isinstance(res4, PriorityNdarray)) + + +class TestConversion(TestCase): + def test_array_scalar_relational_operation(self): + #All integer + for dt1 in np.typecodes['AllInteger']: + assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + #Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % 
(dt1,)) + assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + + #unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + #Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + +class TestWhere(TestCase): + def test_basic(self): + dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128, + np.longdouble, np.clongdouble] + for dt in dts: + c = np.ones(53, dtype=np.bool) + assert_equal(np.where( c, dt(0), dt(1)), dt(0)) + assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) + assert_equal(np.where(True, dt(0), dt(1)), dt(0)) + assert_equal(np.where(False, dt(0), dt(1)), dt(1)) + d = np.ones_like(c).astype(dt) + e = np.zeros_like(d) + r = d.astype(dt) + c[7] = False + r[7] = e[7] + assert_equal(np.where(c, e, e), e) + assert_equal(np.where(c, d, e), r) + assert_equal(np.where(c, d, e[0]), r) + assert_equal(np.where(c, d[0], e), r) + assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) + assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) + assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) + assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) + assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) + assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) + assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) + + def test_exotic(self): + # object + assert_array_equal(np.where(True, None, None), np.array(None)) + # zero sized + m = np.array([], dtype=bool).reshape(0, 3) + b = np.array([], dtype=np.float64).reshape(0, 3) + assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) + + # object cast + d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, + 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, + 1.267, 0.229, -1.39, 0.487]) + nan = float('NaN') + e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, + 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], + dtype=object); + m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool) + + r = e[:] + r[np.where(m)] = d[np.where(m)] + assert_array_equal(np.where(m, d, e), r) + + r = e[:] + r[np.where(~m)] = d[np.where(~m)] + assert_array_equal(np.where(m, e, d), r) + + assert_array_equal(np.where(m, e, e), e) + + # minimal dtype result with NaN scalar (e.g required by pandas) + d = np.array([1., 2.], dtype=np.float32) + e = float('NaN') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('-Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + # also check upcast + e = float(1e150) + assert_equal(np.where(True, d, e).dtype, np.float64) + + def test_ndim(self): + 
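# The condition may have fewer dimensions than the operands: lifting the
+ # length-2 condition to shape (2, 1) with np.newaxis broadcasts it against
+ # the (2, 25) operands, selecting a whole row from a or b at a time.
+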
c = [True, False] + a = np.zeros((2, 25)) + b = np.ones((2, 25)) + r = np.where(np.array(c)[:,np.newaxis], a, b) + assert_array_equal(r[0], a[0]) + assert_array_equal(r[1], b[0]) + + a = a.T + b = b.T + r = np.where(c, a, b) + assert_array_equal(r[:,0], a[:,0]) + assert_array_equal(r[:,1], b[:,0]) + + def test_dtype_mix(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + a = np.uint32(1) + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + a = a.astype(np.float32) + b = b.astype(np.int64) + assert_equal(np.where(c, a, b), r) + + # non bool mask + c = c.astype(np.int) + c[c != 0] = 34242324 + assert_equal(np.where(c, a, b), r) + # invert + tmpmask = c != 0 + c[c == 0] = 41247212 + c[tmpmask] = 0 + assert_equal(np.where(c, b, a), r) + + def test_foreign(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + a = np.ones(1, dtype='>i4') + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + b = b.astype('>f8') + assert_equal(np.where(c, a, b), r) + + a = a.astype('i4') + assert_equal(np.where(c, a, b), r) + + def test_error(self): + c = [True, True] + a = np.ones((4, 5)) + b = np.ones((5, 5)) + assert_raises(ValueError, np.where, c, a, a) + assert_raises(ValueError, np.where, c[0], a, b) + + def test_string(self): + # gh-4778 check strings are properly filled with nulls + a = np.array("abc") + b = np.array("x" * 753) + assert_equal(np.where(True, a, b), "abc") + assert_equal(np.where(False, b, a), "abc") + + # check native datatype sized strings + a = np.array("abcd") + b = np.array("x" * 8) + assert_equal(np.where(True, a, b), "abcd") + assert_equal(np.where(False, b, a), "abcd") + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray_assignment.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray_assignment.py new file mode 100644 index 0000000000000..65a09086bc2a5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray_assignment.py @@ -0,0 +1,80 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import TestCase + +ndims = 2 +size = 10 +shape = tuple([size] * ndims) + + +def _indices_for_nelems(nelems): + """Returns slices of length nelems, from start onwards, in direction sign.""" + + if nelems == 0: + return [size // 2] # int index + + res = [] + for step in (1, 2): + for sign in (-1, 1): + start = size // 2 - nelems * step * sign // 2 + stop = start + nelems * step * sign + res.append(slice(start, stop, step * sign)) + + return res + + +def _indices_for_axis(): + """Returns (src, dst) pairs of indices.""" + + res = [] + for nelems in (0, 2, 3): + ind = _indices_for_nelems(nelems) + + # no itertools.product available in Py2.4 + res.extend([(a, b) for a in ind for b in ind]) # all assignments of size "nelems" + + return res + + +def _indices(ndims): + """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... 
) index pairs.""" + + ind = _indices_for_axis() + + # no itertools.product available in Py2.4 + + res = [[]] + for i in range(ndims): + newres = [] + for elem in ind: + for others in res: + newres.append([elem] + others) + res = newres + + return res + + +def _check_assignment(srcidx, dstidx): + """Check assignment arr[dstidx] = arr[srcidx] works.""" + + arr = np.arange(np.product(shape)).reshape(shape) + + cpy = arr.copy() + + cpy[dstidx] = arr[srcidx] + arr[dstidx] = arr[srcidx] + + assert np.all(arr == cpy), 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx) + + +def test_overlapping_assignments(): + """Test automatically generated assignments which overlap in memory.""" + + inds = _indices(ndims) + + for ind in inds: + srcidx = tuple([a[0] for a in ind]) + dstidx = tuple([a[1] for a in ind]) + + yield _check_assignment, srcidx, dstidx diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_nditer.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_nditer.py new file mode 100644 index 0000000000000..0055c038b7494 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_nditer.py @@ -0,0 +1,2630 @@ +from __future__ import division, absolute_import, print_function + +import sys, warnings + +import numpy as np +from numpy import array, arange, nditer, all +from numpy.compat import asbytes, sixu +from numpy.testing import * +from numpy.core.multiarray_tests import test_nditer_too_large + + +def iter_multi_index(i): + ret = [] + while not i.finished: + ret.append(i.multi_index) + i.iternext() + return ret + +def iter_indices(i): + ret = [] + while not i.finished: + ret.append(i.index) + i.iternext() + return ret + +def iter_iterindices(i): + ret = [] + while not i.finished: + ret.append(i.iterindex) + i.iternext() + return ret + +def test_iter_refcount(): + # Make sure the iterator doesn't leak + + # Basic + a = arange(6) + dt = np.dtype('f4').newbyteorder() + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + it = nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='unsafe', + op_dtypes=[dt]) + assert_(not it.iterationneedsapi) + assert_(sys.getrefcount(a) > rc_a) + assert_(sys.getrefcount(dt) > rc_dt) + it = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + # With a copy + a = arange(6, dtype='f4') + dt = np.dtype('f4') + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + it = nditer(a, [], + [['readwrite']], + op_dtypes=[dt]) + rc2_a = sys.getrefcount(a) + rc2_dt = sys.getrefcount(dt) + it2 = it.copy() + assert_(sys.getrefcount(a) > rc2_a) + assert_(sys.getrefcount(dt) > rc2_dt) + it = None + assert_equal(sys.getrefcount(a), rc2_a) + assert_equal(sys.getrefcount(dt), rc2_dt) + it2 = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + +def test_iter_best_order(): + # The iterator should always find the iteration order + # with increasing memory addresses + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit)&dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, [], [['readonly']]) + assert_equal([x for x in i], a) + # Fortran-order + i = nditer(aview.T, [], [['readonly']]) + 
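# aview.T is just another view of the same buffer, and with no explicit
+ # order= the iterator is free to walk addresses in increasing order, so
+ # both views are visited in the original arange order.
+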
assert_equal([x for x in i], a) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) + assert_equal([x for x in i], a) + +def test_iter_c_order(): + # Test forcing C order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit)&dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='C') + assert_equal([x for x in i], aview.ravel(order='C')) + # Fortran-order + i = nditer(aview.T, order='C') + assert_equal([x for x in i], aview.T.ravel(order='C')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='C') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='C')) + +def test_iter_f_order(): + # Test forcing F order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit)&dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='F') + assert_equal([x for x in i], aview.ravel(order='F')) + # Fortran-order + i = nditer(aview.T, order='F') + assert_equal([x for x in i], aview.T.ravel(order='F')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='F') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='F')) + +def test_iter_c_or_f_order(): + # Test forcing any contiguous (C or F) order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit)&dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='A') + assert_equal([x for x in i], aview.ravel(order='A')) + # Fortran-order + i = nditer(aview.T, order='A') + assert_equal([x for x in i], aview.T.ravel(order='A')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='A') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='A')) + +def test_iter_best_order_multi_index_1d(): + # The multi-indices should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) + # 1D reversed order + i = nditer(a[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) + +def test_iter_best_order_multi_index_2d(): + # The multi-indices should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], 
[['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) + +def test_iter_best_order_multi_index_3d(): + # The multi-indices should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), + (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), + (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), + (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), + (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), + (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), + (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + +def test_iter_best_order_c_index_1d(): + # The C index should be correct with any reordering + + a = arange(4) + # 1D order + i = 
nditer(a, ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_c_index_2d(): + # The C index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) + +def test_iter_best_order_c_index_3d(): + # The C index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + +def test_iter_best_order_f_index_1d(): + # The Fortran index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_f_index_2d(): + # The Fortran index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['f_index'], [['readonly']]) + 
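# 'f_index' reports each element's flat position in Fortran order; the
+ # F-contiguous copy is visited in exactly that order, so the indices
+ # count straight up from 0 to 5.
+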
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + +def test_iter_best_order_f_index_3d(): + # The Fortran index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + +def test_iter_no_inner_full_coalesce(): + # Check no_inner iterators which coalesce into a single inner loop + + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + size = np.prod(shape) + a = arange(size) + # Test each combination of forward and backwards indexing + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit)&dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Fortran-order + i = nditer(aview.T, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), + ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + +def test_iter_no_inner_dim_coalescing(): + # Check no_inner iterators whose dimensions may not coalesce completely + + # Skipping the last element in a dimension prevents coalescing + # with the next-bigger dimension + a = arange(24).reshape(2, 3, 4)[:,:, :-1] + i = nditer(a, 
['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (3,)) + a = arange(24).reshape(2, 3, 4)[:, :-1,:] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (8,)) + a = arange(24).reshape(2, 3, 4)[:-1,:,:] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (12,)) + + # Even with lots of 1-sized dimensions, should still coalesce + a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (24,)) + +def test_iter_dim_coalescing(): + # Check that the correct number of dimensions are coalesced + + # Tracking a multi-index disables coalescing + a = arange(24).reshape(2, 3, 4) + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # A tracked index can allow coalescing if it's compatible with the array + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['f_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # When C or F order is forced, coalescing may still occur + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, order='C') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='C') + assert_equal(i.ndim, 3) + i = nditer(a3d, order='F') + assert_equal(i.ndim, 3) + i = nditer(a3d.T, order='F') + assert_equal(i.ndim, 1) + i = nditer(a3d, order='A') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='A') + assert_equal(i.ndim, 1) + +def test_iter_broadcasting(): + # Standard NumPy broadcasting rules + + # 1D with scalar + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (6,)) + + # 2D with scalar + i = nditer([arange(6).reshape(2, 3), np.int32(2)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 1D + i = nditer([arange(6).reshape(2, 3), arange(3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 2D + i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + + # 3D with scalar + i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 1D + i = nditer([arange(3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 2D + i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + 
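# Broadcasting (2, 1) against (4, 2, 3) stretches the length-1 axis to 3
+ # and prepends the leading 4, e.g. np.broadcast(np.empty((2, 1)),
+ # np.empty((4, 2, 3))).shape == (4, 2, 3), giving 4*2*3 == 24 iterations.
+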
assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 3D + i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), + arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']]*3) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + +def test_iter_itershape(): + # Check that allocated outputs work with a specified shape + a = np.arange(6, dtype='i2').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (2, 3, 4)) + assert_equal(i.operands[1].strides, (24, 8, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (8, 24, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + order='F', + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (2, 6, 12)) + + # If we specify 1 in the itershape, it shouldn't allow broadcasting + # of that dimension to a bigger value + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, 1, 4)) + # Test bug that for no op_axes but itershape, they are NULLed correctly + i = np.nditer([np.ones(2), None, None], itershape=(2,)) + +def test_iter_broadcasting_errors(): + # Check that errors are thrown for bad broadcasting shapes + + # 1D with 1D + assert_raises(ValueError, nditer, [arange(2), arange(3)], + [], [['readonly']]*2) + # 2D with 1D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(2)], + [], [['readonly']]*2) + # 2D with 2D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], + [], [['readonly']]*2) + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], + [], [['readonly']]*2) + # 3D with 3D + assert_raises(ValueError, nditer, + [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], + [], [['readonly']]*2) + assert_raises(ValueError, nditer, + [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], + [], [['readonly']]*2) + + # Verify that the error message mentions the right shapes + try: + i = nditer([arange(2).reshape(1, 2, 1), + arange(3).reshape(1, 3), + arange(6).reshape(2, 3)], + [], + [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) + assert_(False, 'Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the 3rd operand + assert_(msg.find('(2,3)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) + # The message should contain the broadcast shape + assert_(msg.find('(1,2,3)') >= 0, + 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) + + try: + i = nditer([arange(6).reshape(2, 3), arange(2)], [], 
+ [['readonly'], ['readonly']], + op_axes=[[0, 1], [0, np.newaxis]], + itershape=(4, 3)) + assert_(False, 'Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain "shape->remappedshape" for each operand + assert_(msg.find('(2,3)->(2,3)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) + assert_(msg.find('(2,)->(2,newaxis)') >= 0, + ('Message "%s" doesn\'t contain remapped operand shape' + + '(2,)->(2,newaxis)') % msg) + # The message should contain the itershape parameter + assert_(msg.find('(4,3)') >= 0, + 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) + + try: + i = nditer([np.zeros((2, 1, 1)), np.zeros((2,))], + [], + [['writeonly', 'no_broadcast'], ['readonly']]) + assert_(False, 'Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the bad operand + assert_(msg.find('(2,1,1)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) + # The message should contain the broadcast shape + assert_(msg.find('(2,1,2)') >= 0, + 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) + +def test_iter_flags_errors(): + # Check that bad combinations of flags produce errors + + a = arange(6) + + # Not enough operands + assert_raises(ValueError, nditer, [], [], []) + # Too many operands + assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) + # Bad global flag + assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) + # Bad op flag + assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) + # Bad order parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') + # Bad casting parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') + # op_flags must match ops + assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) + # Cannot track both a C and an F index + assert_raises(ValueError, nditer, a, + ['c_index', 'f_index'], [['readonly']]) + # Inner iteration and multi-indices/indices are incompatible + assert_raises(ValueError, nditer, a, + ['external_loop', 'multi_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'c_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'f_index'], [['readonly']]) + # Must specify exactly one of readwrite/readonly/writeonly per operand + assert_raises(ValueError, nditer, a, [], [[]]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, + [], [['readonly', 'writeonly', 'readwrite']]) + # Python scalars are always readonly + assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) + assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) + # Array scalars are always readonly + assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) + assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) + # Check readonly array + a.flags.writeable = False + assert_raises(ValueError, nditer, a, [], [['writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readwrite']]) + a.flags.writeable = True + # Multi-indices available only with the multi_index flag + i = nditer(arange(6), [], [['readonly']]) + assert_raises(ValueError, lambda i:i.multi_index, i) + # Index available only with an index flag + 
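# (the same iterator was created above without 'c_index' or 'f_index',
+ # so reading .index must raise just like .multi_index did)
+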
assert_raises(ValueError, lambda i:i.index, i) + # GotoCoords and GotoIndex incompatible with buffering or no_inner + def assign_multi_index(i): + i.multi_index = (0,) + def assign_index(i): + i.index = 0 + def assign_iterindex(i): + i.iterindex = 0; + def assign_iterrange(i): + i.iterrange = (0, 1); + i = nditer(arange(6), ['external_loop']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterindex, i) + assert_raises(ValueError, assign_iterrange, i) + i = nditer(arange(6), ['buffered']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterrange, i) + # Can't iterate if size is zero + assert_raises(ValueError, nditer, np.array([])) + +def test_iter_slice(): + a, b, c = np.arange(3), np.arange(3), np.arange(3.) + i = nditer([a, b, c], [], ['readwrite']) + i[0:2] = (3, 3) + assert_equal(a, [3, 1, 2]) + assert_equal(b, [3, 1, 2]) + assert_equal(c, [0, 1, 2]) + i[1] = 12 + assert_equal(i[0:2], [3, 12]) + +def test_iter_nbo_align_contig(): + # Check that byte order, alignment, and contig changes work + + # Byte order change by requesting a specific dtype + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + assert_(a.dtype.byteorder != au.dtype.byteorder) + i = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 2 + i = None + assert_equal(au, [2]*6) + + # Byte order change by requesting NBO + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + assert_(a.dtype.byteorder != au.dtype.byteorder) + i = nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], casting='equiv') + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 2 + i = None + assert_equal(au, [2]*6) + + # Unaligned input + a = np.zeros((6*4+1,), dtype='i1')[1:] + a.dtype = 'f4' + a[:] = np.arange(6, dtype='f4') + assert_(not a.flags.aligned) + # Without 'aligned', shouldn't copy + i = nditer(a, [], [['readonly']]) + assert_(not i.operands[0].flags.aligned) + assert_equal(i.operands[0], a); + # With 'aligned', should make a copy + i = nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) + assert_(i.operands[0].flags.aligned) + assert_equal(i.operands[0], a); + i.operands[0][:] = 3 + i = None + assert_equal(a, [3]*6) + + # Discontiguous input + a = arange(12) + # If it is contiguous, shouldn't copy + i = nditer(a[:6], [], [['readonly']]) + assert_(i.operands[0].flags.contiguous) + assert_equal(i.operands[0], a[:6]); + # If it isn't contiguous, should buffer + i = nditer(a[::2], ['buffered', 'external_loop'], + [['readonly', 'contig']], + buffersize=10) + assert_(i[0].flags.contiguous) + assert_equal(i[0], a[::2]) + +def test_iter_array_cast(): + # Check that arrays are cast as requested + + # No cast 'f4' -> 'f4' + a = np.arange(6, dtype='f4').reshape(2, 3) + i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + + # Byte-order cast '<f4' -> '>f4' + a = np.arange(6, dtype='<f4').reshape(2, 3) + i = nditer(a, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('>f4')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('>f4')) + + # Safe case 'f4' -> 'f8' + a = np.arange(24,
dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + # The memory layout of the temporary should match a (a is (48,4,16)) + # except negative strides get flipped to positive strides. + assert_equal(i.operands[0].strides, (96, 8, 32)) + a = a[::-1,:, ::-1] + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + assert_equal(i.operands[0].strides, (96, 8, 32)) + + # Same-kind cast 'f8' -> 'f4' -> 'f8' + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + i = nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + assert_equal(i.operands[0].strides, (4, 16, 48)) + # Check that UPDATEIFCOPY is activated + i.operands[0][2, 1, 1] = -12.5 + assert_(a[2, 1, 1] != -12.5) + i = None + assert_equal(a[2, 1, 1], -12.5) + + a = np.arange(6, dtype='i4')[::-2] + i = nditer(a, [], + [['writeonly', 'updateifcopy']], + casting='unsafe', + op_dtypes=[np.dtype('f4')]) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + # Even though the stride was negative in 'a', it + # becomes positive in the temporary + assert_equal(i.operands[0].strides, (4,)) + i.operands[0][:] = [1, 2, 3] + i = None + assert_equal(a, [1, 2, 3]) + +def test_iter_array_cast_errors(): + # Check that invalid casts are caught + + # Need to enable copying for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly']], op_dtypes=[np.dtype('f8')]) + # Also need to allow casting for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='equiv', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='no', + op_dtypes=[np.dtype('f4')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + # '<f4' -> '>f4' should not work with casting='no' + assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('>f4')]) + # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], + [['writeonly', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + +def test_iter_scalar_cast(): + # Check that scalars are cast as requested + + # No cast 'f4' -> 'f4' + i = nditer(np.float32(2.5), [], [['readonly']], + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Safe cast 'f4' -> 'f8' + i = nditer(np.float32(2.5), [], +
[['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.value.dtype, np.dtype('f8')) + assert_equal(i.value, 2.5) + # Same-kind cast 'f8' -> 'f4' + i = nditer(np.float64(2.5), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Unsafe cast 'f8' -> 'i4' + i = nditer(np.float64(3.0), [], + [['readonly', 'copy']], + casting='unsafe', + op_dtypes=[np.dtype('i4')]) + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.value.dtype, np.dtype('i4')) + assert_equal(i.value, 3) + # Readonly scalars may be cast even without setting COPY or BUFFERED + i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) + assert_equal(i[0].dtype, np.dtype('f8')) + assert_equal(i[0], 3.) + +def test_iter_scalar_cast_errors(): + # Check that invalid casts are caught + + # Need to allow copying/buffering for write casts of scalars to occur + assert_raises(TypeError, nditer, np.float32(2), [], + [['readwrite']], op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, 2.5, [], + [['readwrite']], op_dtypes=[np.dtype('f4')]) + # 'f8' -> 'f4' isn't a safe cast if the value would overflow + assert_raises(TypeError, nditer, np.float64(1e60), [], + [['readonly']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, np.float32(2), [], + [['readonly']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + +def test_iter_object_arrays_basic(): + # Check that object arrays work + + obj = {'a':3,'b':'d'} + a = np.array([[1, 2, 3], None, obj, None], dtype='O') + rc = sys.getrefcount(obj) + + # Need to allow references for object arrays + assert_raises(TypeError, nditer, a) + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a, ['refs_ok'], ['readonly']) + vals = [x[()] for x in i] + assert_equal(np.array(vals, dtype='O'), a) + vals, i, x = [None]*3 + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readonly'], order='C') + assert_(i.iterationneedsapi) + vals = [x[()] for x in i] + assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) + vals, i, x = [None]*3 + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readwrite'], order='C') + for x in i: + x[...] = None + vals, i, x = [None]*3 + assert_equal(sys.getrefcount(obj), rc-1) + assert_equal(a, np.array([None]*4, dtype='O')) + +def test_iter_object_arrays_conversions(): + # Conversions to/from objects + a = np.arange(6, dtype='O') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + a = np.arange(6, dtype='i4') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + # Non-contiguous object array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) + a = a['a'] + a[:] = np.arange(6) + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + for x in i: + x[...] 
+= 1 + assert_equal(a, np.arange(6)+1) + + #Non-contiguous value array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) + a = a['a'] + a[:] = np.arange(6) + 98172488 + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + ob = i[0][()] + rc = sys.getrefcount(ob) + for x in i: + x[...] += 1 + assert_equal(sys.getrefcount(ob), rc-1) + assert_equal(a, np.arange(6)+98172489) + +def test_iter_common_dtype(): + # Check that the iterator finds a common data type correctly + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')); + assert_equal(i.dtypes[1], np.dtype('f8')); + i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')); + assert_equal(i.dtypes[1], np.dtype('f8')); + i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='same_kind') + assert_equal(i.dtypes[0], np.dtype('f4')); + assert_equal(i.dtypes[1], np.dtype('f4')); + i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('u4')); + assert_equal(i.dtypes[1], np.dtype('u4')); + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')); + assert_equal(i.dtypes[1], np.dtype('i8')); + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), + array([2j], dtype='c8'), array([9], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*4, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')); + assert_equal(i.dtypes[1], np.dtype('c16')); + assert_equal(i.dtypes[2], np.dtype('c16')); + assert_equal(i.dtypes[3], np.dtype('c16')); + assert_equal(i.value, (3, -12, 2j, 9)) + + # When allocating outputs, other outputs aren't factored in + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i4')); + assert_equal(i.dtypes[1], np.dtype('i4')); + assert_equal(i.dtypes[2], np.dtype('c16')); + # But, if common data types are requested, they are + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], + ['common_dtype'], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')); + assert_equal(i.dtypes[1], np.dtype('c16')); + assert_equal(i.dtypes[2], np.dtype('c16')); + +def test_iter_op_axes(): + # Check that custom axes work + + # Reverse the axes + a = arange(6).reshape(2, 3) + i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) + assert_(all([x==y for (x, y) in i])) + a = arange(24).reshape(2, 3, 4) + i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) + assert_(all([x==y for (x, y) in i])) + + # Broadcast 1D to any dimension + a = arange(1, 31).reshape(2, 3, 5) + b = arange(1, 3) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) + b = arange(1, 4) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) + b = arange(1, 6) + i = nditer([a, 
b], [], [['readonly']]*2, + op_axes=[None, [np.newaxis, np.newaxis, 0]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) + + # Inner product-style broadcasting + a = arange(24).reshape(2, 3, 4) + b = arange(40).reshape(5, 2, 4) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) + assert_equal(i.shape, (2, 3, 5, 2)) + + # Matrix product-style broadcasting + a = arange(12).reshape(3, 4) + b = arange(20).reshape(4, 5) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, -1], [-1, 1]]) + assert_equal(i.shape, (3, 5)) + +def test_iter_op_axes_errors(): + # Check that custom axes throws errors for bad inputs + + # Wrong number of items in op_axes + a = arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0], [1], [0]]) + # Out of bounds items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[2, 1], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [2, -1]]) + # Duplicate items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 0], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [1, 1]]) + + # Different sized arrays in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [0, 1, 0]]) + + # Non-broadcastable dimensions in the result + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [1, 0]]) + +def test_iter_copy(): + # Check that copying the iterator works correctly + a = arange(24).reshape(2, 3, 4) + + # Simple iterator + i = nditer(a) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Buffered iterator + i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (3, 9) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (2, 18) + next(i) + next(i) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Casting iterator + i = nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='f8', buffersize=5) + j = i.copy() + i = None + assert_equal([x[()] for x in j], a.ravel(order='F')) + + a = arange(24, dtype='<i4').reshape(2, 3, 4).T + i = nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='>f4', buffersize=5) + j = i.copy() + i = None + assert_equal([x[()] for x in j], a.ravel(order='F')) + +def test_iter_buffered_cast_byteswapped(): + # Test that buffering can handle a cast which requires swap->cast->swap + + a = np.arange(10, dtype='f4').newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8').newbyteorder()], + buffersize=3) + for v in i: + v[...] *= 2 + + assert_equal(a, 2*np.arange(10, dtype='f4')) + + try: + warnings.simplefilter("ignore", np.ComplexWarning) + + a = np.arange(10, dtype='f8').newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='unsafe', + op_dtypes=[np.dtype('c8').newbyteorder()], + buffersize=3) + for v in i: + v[...]
*= 2 + + assert_equal(a, 2*np.arange(10, dtype='f8')) + finally: + warnings.simplefilter("default", np.ComplexWarning) + +def test_iter_buffered_cast_byteswapped_complex(): + # Test that buffering can handle a cast which requires swap->cast->copy + + a = np.arange(10, dtype='c8').newbyteorder().byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype='c8') + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16').newbyteorder()], + buffersize=3) + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) + + a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f4')], + buffersize=7) + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) + +def test_iter_buffered_cast_structured_type(): + # Tests buffering of structured types + + # simple -> struct type (duplicates the value) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.arange(3, dtype='f4') + 0.5 + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [np.array(x) for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + + # object -> struct type + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.zeros((3,), dtype='O') + a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) + a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) + a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) + rc = sys.getrefcount(a[0]) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [x.copy() for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + vals, i, x = [None]*3 + assert_equal(sys.getrefcount(a[0]), rc) + + # struct type -> simple (takes the first value) + sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4') + assert_equal([x[()] for x in i], [5, 8]) + + # struct type -> struct type (field-wise copy) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, 
['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + assert_equal([np.array(x) for x in i], + [np.array((3, 1, 2), dtype=sdt2), + np.array((6, 4, 5), dtype=sdt2)]) + + # struct type -> struct type (field gets discarded) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + vals = [] + for x in i: + vals.append(np.array(x)) + x['a'] = x['b']+3 + assert_equal(vals, [np.array((2, 1), dtype=sdt2), + np.array((5, 4), dtype=sdt2)]) + assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1)) + + # struct type -> struct type (structured field gets discarded) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + vals = [] + for x in i: + vals.append(np.array(x)) + x['a'] = x['b']+3 + assert_equal(vals, [np.array((2, 1), dtype=sdt2), + np.array((5, 4), dtype=sdt2)]) + assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1)) + + # struct type -> struct type (structured field w/ ref gets discarded) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + vals = [] + for x in i: + vals.append(np.array(x)) + x['a'] = x['b']+3 + assert_equal(vals, [np.array((2, 1), dtype=sdt2), + np.array((5, 4), dtype=sdt2)]) + assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))], dtype=sdt1)) + + # struct type -> struct type back (structured field w/ ref gets discarded) + sdt1 = [('b', 'O'), ('a', 'f8')] + sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])] + a = np.array([(1, 2), (4, 5)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + vals = [] + for x in i: + vals.append(np.array(x)) + assert_equal(x['d'], np.array((0, None), dtype=[('a', 'i2'), ('b', 'O')])) + x['a'] = x['b']+3 + assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2), + np.array((5, 4, (0, None)), dtype=sdt2)]) + assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1)) + +def test_iter_buffered_cast_subarray(): + # Tests buffering of subarrays + + # one element -> many (copies it to all) + sdt1 = [('a', 'f4')] + sdt2 = [('a', 'f8', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + for x, count in zip(i, list(range(6))): + assert_(np.all(x['a'] == count)) + + # one element -> many -> back (copies it to all) + sdt1 = [('a', 'O', (1, 1))] + sdt2 = [('a', 'O', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_(np.all(x['a'] == count)) + x['a'][0] += 2 + count += 1 + 
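These subarray cases all follow one pattern: op_dtypes names a structured dtype whose field carries a subarray shape, and buffering broadcasts, truncates, or zero-pads each record on the fly. A minimal standalone sketch of the simplest "one element -> many" direction (illustrative names, not part of the vendored file):

import numpy as np

src = np.zeros(4, dtype=[('a', 'f4')])
src['a'] = np.arange(4)
it = np.nditer(src, ['buffered', 'refs_ok'], [['readonly']],
               casting='unsafe',
               op_dtypes=[np.dtype([('a', 'f8', (3, 2, 2))])])
for rec in it:
    # each scalar source value is broadcast across the whole subarray
    assert np.all(rec['a'] == rec['a'][0, 0, 0])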
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + x['a'] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'f8', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> one element (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> matching shape (straightforward copy) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a']) + count += 1 + + # vector -> smaller vector (truncates) + sdt1 = [('a', 'f8', (6,))] + sdt2 = [('a', 'f4', (2,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*6).reshape(6, 6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a'][:2]) + count += 1 + + # vector -> bigger vector (pads with zeros) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (6,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2], a[count]['a']) + assert_equal(x['a'][2:], [0, 0, 0, 0]) + count += 1 + + # vector -> matrix (broadcasts) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][0], a[count]['a']) + assert_equal(x['a'][1], a[count]['a']) + count += 1 + + # vector -> matrix (broadcasts and zero-pads) + sdt1 = [('a', 'f8', (2, 1))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2, 1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) + assert_equal(x['a'][2,:], [0, 0]) + count += 1 + + # matrix -> matrix (truncates and zero-pads) + sdt1 = 
[('a', 'f8', (2, 3))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2*3).reshape(6, 2, 3) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) + assert_equal(x['a'][2,:], [0, 0]) + count += 1 + +def test_iter_buffering_badwriteback(): + # Writing back from a buffer cannot combine elements + + # a needs write buffering, but had a broadcast dimension + a = np.arange(6).reshape(2, 3, 1) + b = np.arange(12).reshape(2, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + + # But if a is readonly, it's fine + i = nditer([a, b], ['buffered', 'external_loop'], + [['readonly'], ['writeonly']], + order='C') + + # If a has just one element, it's fine too (constant 0 stride, a reduction) + a = np.arange(1).reshape(1, 1, 1) + i = nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['writeonly']], + order='C') + + # check that it fails on other dimensions too + a = np.arange(6).reshape(1, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + a = np.arange(4).reshape(2, 1, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + +def test_iter_buffering_string(): + # Safe casting disallows shrinking strings + a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) + assert_equal(a.dtype, np.dtype('S4')); + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='S2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') + assert_equal(i[0], asbytes('abc')) + assert_equal(i[0].dtype, np.dtype('S6')) + + a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode) + assert_equal(a.dtype, np.dtype('U4')); + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='U2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') + assert_equal(i[0], sixu('abc')) + assert_equal(i[0].dtype, np.dtype('U6')) + +def test_iter_buffering_growinner(): + # Test that the inner loop grows when no buffering is needed + a = np.arange(30) + i = nditer(a, ['buffered', 'growinner', 'external_loop'], + buffersize=5) + # Should end up with just one inner loop here + assert_equal(i[0].size, a.size) + + +@dec.slow +def test_iter_buffered_reduce_reuse(): + # large enough array for all views, including negative strides. + a = np.arange(2*3**5)[3**5:3**5+1] + flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] + op_flags = [('readonly',), ('readwrite', 'allocate')] + op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] + # wrong dtype to force buffering + op_dtypes = [np.float, a.dtype] + + def get_params(): + for xs in range(-3**2, 3**2 + 1): + for ys in range(xs, 3**2 + 1): + for op_axes in op_axes_list: + # last stride is reduced and because of that not + # important for this test, as it is the inner stride. 
+ strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) + arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) + + for skip in [0, 1]: + yield arr, op_axes, skip + + for arr, op_axes, skip in get_params(): + nditer2 = np.nditer([arr.copy(), None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + op_dtypes=op_dtypes) + nditer2.operands[-1][...] = 0 + nditer2.reset() + nditer2.iterindex = skip + + for (a2_in, b2_in) in nditer2: + b2_in += a2_in.astype(np.int_) + + comp_res = nditer2.operands[-1] + + for bufsize in range(0, 3**3): + nditer1 = np.nditer([arr, None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + buffersize=bufsize, op_dtypes=op_dtypes) + nditer1.operands[-1][...] = 0 + nditer1.reset() + nditer1.iterindex = skip + + for (a1_in, b1_in) in nditer1: + b1_in += a1_in.astype(np.int_) + + res = nditer1.operands[-1] + assert_array_equal(res, comp_res) + + +def test_iter_no_broadcast(): + # Test that the no_broadcast flag works + a = np.arange(24).reshape(2, 3, 4) + b = np.arange(6).reshape(2, 3, 1) + c = np.arange(12).reshape(3, 4) + + i = nditer([a, b, c], [], + [['readonly', 'no_broadcast'], ['readonly'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) + +def test_iter_nested_iters_basic(): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + +def test_iter_nested_iters_reorder(): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + +def test_iter_nested_iters_flip_axes(): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + 
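As the expected value lists in these tests show, np.nested_iters splits one iteration space into chained iterators: advancing the outer iterator repositions the inner one. A minimal sketch of that contract, mirroring the basic [[0], [1, 2]] split used above:

import numpy as np

a = np.arange(12).reshape(2, 3, 2)
outer, inner = np.nested_iters(a, [[0], [1, 2]])
for _ in outer:                     # step the outer axis
    print([int(y) for y in inner])  # inner sweeps axes 1 and 2
# prints [0, 1, 2, 3, 4, 5], then [6, 7, 8, 9, 10, 11]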
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + +def test_iter_nested_iters_broadcast(): + # Test nested iteration with broadcasting + a = arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) + + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + +def test_iter_nested_iters_dtype_copy(): + # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # updateifcopy + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i, j, x, y = (None,)*4 # force the updateifcopy + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + +def test_iter_nested_iters_dtype_buffered(): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + +def test_iter_reduction_error(): + + a = np.arange(6) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + + a = np.arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, None], ['external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + +def test_iter_reduction(): + # Test doing reductions with the iterator + + a = np.arange(6) + i = nditer([a, None], ['reduce_ok'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + # Need to initialize the output operand to the addition unit + i.operands[1][...] = 0 + # Do the reduction + for x, y in i: + y[...] 
+= x + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + a = np.arange(6).reshape(2, 3) + i = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + # Need to initialize the output operand to the addition unit + i.operands[1][...] = 0 + # Reduction shape/strides for the output + assert_equal(i[1].shape, (6,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + # This is a tricky reduction case for the buffering double loop + # to handle + a = np.ones((2, 3, 5)) + it1 = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]]) + it2 = nditer([a, None], ['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]], buffersize=10) + it1.operands[1].fill(0) + it2.operands[1].fill(0) + it2.reset() + for x in it1: + x[1][...] += x[0] + for x in it2: + x[1][...] += x[0] + assert_equal(it1.operands[1], it2.operands[1]) + assert_equal(it2.operands[1].sum(), a.size) + +def test_iter_buffering_reduction(): + # Test doing buffered reductions with the iterator + + a = np.arange(6) + b = np.array(0., dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0], [-1]]) + assert_equal(i[1].dtype, np.dtype('f8')) + assert_(i[1].dtype != b.dtype) + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(b, np.sum(a)) + + a = np.arange(6).reshape(2, 3) + b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0, 1], [0, -1]]) + # Reduction shape/strides for the output + assert_equal(i[1].shape, (3,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + y[...] += x + assert_equal(b, np.sum(a, axis=1)) + + # Iterator inner double loop was wrong on this one + p = np.arange(2) + 1 + it = np.nditer([p, None], + ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[-1, 0], [-1, -1]], + itershape=(2, 2)) + it.operands[1].fill(0) + it.reset() + assert_equal(it[0], [1, 2, 1, 2]) + +def test_iter_buffering_reduction_reuse_reduce_loops(): + # There was a bug triggering reuse of the reduce loop inappropriately, + # which caused processing to happen in unnecessarily small chunks + # and overran the buffer. 
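The fixture that follows pins this down; restated as a standalone snippet, the buffered external loop hands out chunks of at most buffersize elements and, because the second operand is broadcast (reduced) along axis 0, never lets a chunk cross a 7-element row boundary:

import numpy as np

a = np.zeros((2, 7))
b = np.zeros((1, 7))   # broadcast along axis 0 -> a reduce pattern
it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
               op_flags=[['readonly'], ['readwrite']], buffersize=5)
print([x.shape[0] for x, y in it])   # [5, 2, 5, 2], summing to a.size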
+ + a = np.zeros((2, 7)) + b = np.zeros((1, 7)) + it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], + op_flags=[['readonly'], ['readwrite']], + buffersize = 5) + + bufsizes = [] + for x, y in it: + bufsizes.append(x.shape[0]) + assert_equal(bufsizes, [5, 2, 5, 2]) + assert_equal(sum(bufsizes), a.size) + +def test_iter_writemasked_badinput(): + a = np.zeros((2, 3)) + b = np.zeros((3,)) + m = np.array([[True, True, False], [False, True, False]]) + m2 = np.array([True, True, False]) + m3 = np.array([0, 1, 1], dtype='u1') + mbad1 = np.array([0, 1, 1], dtype='i1') + mbad2 = np.array([0, 1, 1], dtype='f4') + + # Need an 'arraymask' if any operand is 'writemasked' + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite', 'writemasked'], ['readonly']]) + + # A 'writemasked' operand must not be readonly + assert_raises(ValueError, nditer, [a, m], [], + [['readonly', 'writemasked'], ['readonly', 'arraymask']]) + + # 'writemasked' and 'arraymask' may not be used together + assert_raises(ValueError, nditer, [a, m], [], + [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) + + # 'arraymask' may only be specified once + assert_raises(ValueError, nditer, [a, m, m2], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask'], + ['readonly', 'arraymask']]) + + # An 'arraymask' with nothing 'writemasked' also doesn't make sense + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite'], ['readonly', 'arraymask']]) + + # A writemasked reduction requires a similarly smaller mask + assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # But this should work with a smaller/equal mask to the reduction operand + np.nditer([a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # The arraymask itself cannot be a reduction + assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readwrite', 'arraymask']]) + + # A uint8 mask is ok too + np.nditer([a, m3], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # An int8 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # A float32 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + +def test_iter_writemasked(): + a = np.zeros((3,), dtype='f8') + msk = np.array([True, True, False]) + + # When buffering is unused, 'writemasked' effectively does nothing. + # It's up to the user of the iterator to obey the requested semantics. + it = np.nditer([a, msk], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + for x, m in it: + x[...] = 1 + # Because we violated the semantics, all the values became 1 + assert_equal(a, [1, 1, 1]) + + # Even if buffering is enabled, we still may be accessing the array + # directly. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + for x, m in it: + x[...] 
= 2.5 + # Because we violated the semantics, all the values became 2.5 + assert_equal(a, [2.5, 2.5, 2.5]) + + # If buffering will definitely happening, for instance because of + # a cast, only the items selected by the mask will be copied back from + # the buffer. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['i8', None], + casting='unsafe') + for x, m in it: + x[...] = 3 + # Even though we violated the semantics, only the selected values + # were copied back + assert_equal(a, [3, 3, 2.5]) + +def test_iter_non_writable_attribute_deletion(): + it = np.nditer(np.ones(2)) + attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc", + "iterationneedsapi", "has_multi_index", "has_index", "dtypes", + "ndim", "nop", "itersize", "finished"] + + if sys.version[:3] == '2.4': + error = TypeError + else: + error = AttributeError + + for s in attr: + assert_raises(error, delattr, it, s) + + +def test_iter_writable_attribute_deletion(): + it = np.nditer(np.ones(2)) + attr = [ "multi_index", "index", "iterrange", "iterindex"] + for s in attr: + assert_raises(AttributeError, delattr, it, s) + + +def test_iter_element_deletion(): + it = np.nditer(np.ones(3)) + try: + del it[1] + del it[1:2] + except TypeError: + pass + except: + raise AssertionError + +def test_iter_allocated_array_dtypes(): + # If the dtype of an allocated output has a shape, the shape gets + # tacked onto the end of the result. + it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))]) + for a, b in it: + b[0] = a - 1 + b[1] = a + 1 + assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) + + # Make sure this works for scalars too + it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))]) + for a, b, c in it: + c[0, 0] = a - b + c[0, 1] = a + b + c[1, 0] = a * b + c[1, 1] = a / b + assert_equal(it.operands[2], [[8, 12], [20, 5]]) + + +def test_0d_iter(): + # Basic test for iteration of 0-d arrays: + i = nditer([2, 3], ['multi_index'], [['readonly']]*2) + assert_equal(i.ndim, 0) + assert_equal(next(i), (2, 3)) + assert_equal(i.multi_index, ()) + assert_equal(i.iterindex, 0) + assert_raises(StopIteration, next, i) + # test reset: + i.reset() + assert_equal(next(i), (2, 3)) + assert_raises(StopIteration, next, i) + + # test forcing to 0-d + i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()]) + assert_equal(i.ndim, 0) + assert_equal(len(i), 1) + # note that itershape=(), still behaves like None due to the conversions + + # Test a more complex buffered casting case (same as another test above) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.array(0.5, dtype='f4') + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', op_dtypes=sdt) + vals = next(i) + assert_equal(vals['a'], 0.5) + assert_equal(vals['b'], 0) + assert_equal(vals['c'], [[(0.5)]*3]*2) + assert_equal(vals['d'], 0.5) + + +def test_0d_nested_iter(): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append([z for z in k]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], 
[7, 9, 11]]) + + +def test_iter_too_large(): + # The total size of the iterator must not exceed the maximum intp due + # to broadcasting. Dividing by 1024 will keep it small enough to + # give a legal array. + size = np.iinfo(np.intp).max // 1024 + arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,)) + assert_raises(ValueError, nditer, (arr, arr[:, None])) + # test the same for multiindex. That may get more interesting when + # removing 0 dimensional axis is allowed (since an iterator can grow then) + assert_raises(ValueError, nditer, + (arr, arr[:, None]), flags=['multi_index']) + + +def test_iter_too_large_with_multiindex(): + # When a multi index is being tracked, the error is delayed this + # checks the delayed error messages and getting below that by + # removing an axis. + base_size = 2**10 + num = 1 + while base_size**num < np.iinfo(np.intp).max: + num += 1 + + shape_template = [1, 1] * num + arrays = [] + for i in range(num): + shape = shape_template[:] + shape[i * 2] = 2**10 + arrays.append(np.empty(shape)) + arrays = tuple(arrays) + + # arrays are now too large to be broadcast. The different modes test + # different nditer functionality with or without GIL. + for mode in range(6): + assert_raises(ValueError, test_nditer_too_large, arrays, -1, mode) + # but if we do nothing with the nditer, it can be constructed: + test_nditer_too_large(arrays, -1, 7) + + # When an axis is removed, things should work again (half the time): + for i in range(num): + for mode in range(6): + # an axis with size 1024 is removed: + test_nditer_too_large(arrays, i*2, mode) + # an axis with size 1 is removed: + assert_raises(ValueError, test_nditer_too_large, + arrays, i*2 + 1, mode) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numeric.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numeric.py new file mode 100644 index 0000000000000..b9c05e456c62d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numeric.py @@ -0,0 +1,2091 @@ +from __future__ import division, absolute_import, print_function + +import sys +import platform +from decimal import Decimal +import warnings +import itertools +import platform + +import numpy as np +from numpy.core import * +from numpy.core import umath +from numpy.random import rand, randint, randn +from numpy.testing import * +from numpy.core.multiarray import dot as dot_ + + +class Vec(object): + def __init__(self,sequence=None): + if sequence is None: + sequence=[] + self.array=array(sequence) + def __add__(self, other): + out=Vec() + out.array=self.array+other.array + return out + def __sub__(self, other): + out=Vec() + out.array=self.array-other.array + return out + def __mul__(self, other): # with scalar + out=Vec(self.array.copy()) + out.array*=other + return out + def __rmul__(self, other): + return self*other + + +class TestDot(TestCase): + def setUp(self): + self.A = rand(10, 8) + self.b1 = rand(8, 1) + self.b2 = rand(8) + self.b3 = rand(1, 8) + self.b4 = rand(10) + self.N = 14 + + def test_matmat(self): + A = self.A + c1 = dot(A.transpose(), A) + c2 = dot_(A.transpose(), A) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_matvec(self): + A, b1 = self.A, self.b1 + c1 = dot(A, b1) + c2 = dot_(A, b1) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_matvec2(self): + A, b2 = self.A, self.b2 + c1 = dot(A, b2) + c2 = dot_(A, b2) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_vecmat(self): + A, b4 = self.A, self.b4 
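Every TestDot case reduces to the same check: the Python-level dot re-exported from numpy.core must agree with the C-level numpy.core.multiarray.dot to self.N = 14 decimals. A condensed sketch of that pattern (random operands, same imports as the vendored file):

import numpy as np
from numpy.core.multiarray import dot as dot_
from numpy.testing import assert_almost_equal

A = np.random.rand(10, 8)
b = np.random.rand(8)
assert_almost_equal(np.dot(A, b), dot_(A, b), decimal=14)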
+ c1 = dot(b4, A) + c2 = dot_(b4, A) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_vecmat2(self): + b3, A = self.b3, self.A + c1 = dot(b3, A.transpose()) + c2 = dot_(b3, A.transpose()) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_vecmat3(self): + A, b4 = self.A, self.b4 + c1 = dot(A.transpose(), b4) + c2 = dot_(A.transpose(), b4) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_vecvecouter(self): + b1, b3 = self.b1, self.b3 + c1 = dot(b1, b3) + c2 = dot_(b1, b3) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_vecvecinner(self): + b1, b3 = self.b1, self.b3 + c1 = dot(b3, b1) + c2 = dot_(b3, b1) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_columnvect1(self): + b1 = ones((3, 1)) + b2 = [5.3] + c1 = dot(b1, b2) + c2 = dot_(b1, b2) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_columnvect2(self): + b1 = ones((3, 1)).transpose() + b2 = [6.2] + c1 = dot(b2, b1) + c2 = dot_(b2, b1) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_vecscalar(self): + b1 = rand(1, 1) + b2 = rand(1, 8) + c1 = dot(b1, b2) + c2 = dot_(b1, b2) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_vecscalar2(self): + b1 = rand(8, 1) + b2 = rand(1, 1) + c1 = dot(b1, b2) + c2 = dot_(b1, b2) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_all(self): + dims = [(), (1,), (1, 1)] + for dim1 in dims: + for dim2 in dims: + arg1 = rand(*dim1) + arg2 = rand(*dim2) + c1 = dot(arg1, arg2) + c2 = dot_(arg1, arg2) + assert_(c1.shape == c2.shape) + assert_almost_equal(c1, c2, decimal=self.N) + + def test_vecobject(self): + U_non_cont = transpose([[1., 1.], [1., 2.]]) + U_cont = ascontiguousarray(U_non_cont) + x = array([Vec([1., 0.]), Vec([0., 1.])]) + zeros = array([Vec([0., 0.]), Vec([0., 0.])]) + zeros_test = dot(U_cont, x) - dot(U_non_cont, x) + assert_equal(zeros[0].array, zeros_test[0].array) + assert_equal(zeros[1].array, zeros_test[1].array) + + +class TestResize(TestCase): + def test_copies(self): + A = array([[1, 2], [3, 4]]) + Ar1 = array([[1, 2, 3, 4], [1, 2, 3, 4]]) + assert_equal(resize(A, (2, 4)), Ar1) + + Ar2 = array([[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(resize(A, (4, 2)), Ar2) + + Ar3 = array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) + assert_equal(resize(A, (4, 3)), Ar3) + + def test_zeroresize(self): + A = array([[1, 2], [3, 4]]) + Ar = resize(A, (0,)) + assert_equal(Ar, array([])) + +class TestNonarrayArgs(TestCase): + # check that non-array arguments to functions wrap them in arrays + def test_squeeze(self): + A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] + assert_(squeeze(A).shape == (3, 3)) + + def test_cumproduct(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(all(cumproduct(A) == array([1, 2, 6, 24, 120, 720]))) + + def test_size(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(size(A) == 6) + assert_(size(A, 0) == 2) + assert_(size(A, 1) == 3) + + def test_mean(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(mean(A) == 3.5) + assert_(all(mean(A, 0) == array([2.5, 3.5, 4.5]))) + assert_(all(mean(A, 1) == array([2., 5.]))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(isnan(mean([]))) + assert_(w[0].category is RuntimeWarning) + + def test_std(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(std(A), 1.707825127659933) + assert_almost_equal(std(A, 0), array([1.5, 1.5, 1.5])) + assert_almost_equal(std(A, 1), array([0.81649658, 0.81649658])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', 
RuntimeWarning) + assert_(isnan(std([]))) + assert_(w[0].category is RuntimeWarning) + + def test_var(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(var(A), 2.9166666666666665) + assert_almost_equal(var(A, 0), array([2.25, 2.25, 2.25])) + assert_almost_equal(var(A, 1), array([0.66666667, 0.66666667])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(isnan(var([]))) + assert_(w[0].category is RuntimeWarning) + + +class TestBoolScalar(TestCase): + def test_logical(self): + f = False_ + t = True_ + s = "xyz" + self.assertTrue((t and s) is s) + self.assertTrue((f and s) is f) + + def test_bitwise_or(self): + f = False_ + t = True_ + self.assertTrue((t | t) is t) + self.assertTrue((f | t) is t) + self.assertTrue((t | f) is t) + self.assertTrue((f | f) is f) + + def test_bitwise_and(self): + f = False_ + t = True_ + self.assertTrue((t & t) is t) + self.assertTrue((f & t) is f) + self.assertTrue((t & f) is f) + self.assertTrue((f & f) is f) + + def test_bitwise_xor(self): + f = False_ + t = True_ + self.assertTrue((t ^ t) is f) + self.assertTrue((f ^ t) is t) + self.assertTrue((t ^ f) is t) + self.assertTrue((f ^ f) is f) + + +class TestBoolArray(TestCase): + def setUp(self): + # offset for simd tests + self.t = array([True] * 41, dtype=np.bool)[1::] + self.f = array([False] * 41, dtype=np.bool)[1::] + self.o = array([False] * 42, dtype=np.bool)[2::] + self.nm = self.f.copy() + self.im = self.t.copy() + self.nm[3] = True + self.nm[-2] = True + self.im[3] = False + self.im[-2] = False + + def test_all_any(self): + self.assertTrue(self.t.all()) + self.assertTrue(self.t.any()) + self.assertFalse(self.f.all()) + self.assertFalse(self.f.any()) + self.assertTrue(self.nm.any()) + self.assertTrue(self.im.any()) + self.assertFalse(self.nm.all()) + self.assertFalse(self.im.all()) + # check bad element in all positions + for i in range(256 - 7): + d = array([False] * 256, dtype=np.bool)[7::] + d[i] = True + self.assertTrue(np.any(d)) + e = array([True] * 256, dtype=np.bool)[7::] + e[i] = False + self.assertFalse(np.all(e)) + assert_array_equal(e, ~d) + # big array test for blocked libc loops + for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: + d = array([False] * 100043, dtype=np.bool) + d[i] = True + self.assertTrue(np.any(d), msg="%r" % i) + e = array([True] * 100043, dtype=np.bool) + e[i] = False + self.assertFalse(np.all(e), msg="%r" % i) + + def test_logical_not_abs(self): + assert_array_equal(~self.t, self.f) + assert_array_equal(np.abs(~self.t), self.f) + assert_array_equal(np.abs(~self.f), self.t) + assert_array_equal(np.abs(self.f), self.f) + assert_array_equal(~np.abs(self.f), self.t) + assert_array_equal(~np.abs(self.t), self.f) + assert_array_equal(np.abs(~self.nm), self.im) + np.logical_not(self.t, out=self.o) + assert_array_equal(self.o, self.f) + np.abs(self.t, out=self.o) + assert_array_equal(self.o, self.t) + + def test_logical_and_or_xor(self): + assert_array_equal(self.t | self.t, self.t) + assert_array_equal(self.f | self.f, self.f) + assert_array_equal(self.t | self.f, self.t) + assert_array_equal(self.f | self.t, self.t) + np.logical_or(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t & self.t, self.t) + assert_array_equal(self.f & self.f, self.f) + assert_array_equal(self.t & self.f, self.f) + assert_array_equal(self.f & self.t, self.f) + np.logical_and(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t ^ self.t, 
self.f) + assert_array_equal(self.f ^ self.f, self.f) + assert_array_equal(self.t ^ self.f, self.t) + assert_array_equal(self.f ^ self.t, self.t) + np.logical_xor(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.f) + + assert_array_equal(self.nm & self.t, self.nm) + assert_array_equal(self.im & self.f, False) + assert_array_equal(self.nm & True, self.nm) + assert_array_equal(self.im & False, self.f) + assert_array_equal(self.nm | self.t, self.t) + assert_array_equal(self.im | self.f, self.im) + assert_array_equal(self.nm | True, self.t) + assert_array_equal(self.im | False, self.im) + assert_array_equal(self.nm ^ self.t, self.im) + assert_array_equal(self.im ^ self.f, self.im) + assert_array_equal(self.nm ^ True, self.im) + assert_array_equal(self.im ^ False, self.im) + + +class TestBoolCmp(TestCase): + def setUp(self): + self.f = ones(256, dtype=np.float32) + self.ef = ones(self.f.size, dtype=np.bool) + self.d = ones(128, dtype=np.float64) + self.ed = ones(self.d.size, dtype=np.bool) + # generate values for all permutation of 256bit simd vectors + s = 0 + for i in range(32): + self.f[s:s+8] = [i & 2**x for x in range(8)] + self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] + s += 8 + s = 0 + for i in range(16): + self.d[s:s+4] = [i & 2**x for x in range(4)] + self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)] + s += 4 + + self.nf = self.f.copy() + self.nd = self.d.copy() + self.nf[self.ef] = np.nan + self.nd[self.ed] = np.nan + + def test_float(self): + # offset for alignment test + for i in range(4): + assert_array_equal(self.f[i:] > 0, self.ef[i:]) + assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) + assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) + assert_array_equal(-self.f[i:] < 0, self.ef[i:]) + assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) + r = self.f[i:] != 0 + assert_array_equal(r, self.ef[i:]) + r2 = self.f[i:] != np.zeros_like(self.f[i:]) + r3 = 0 != self.f[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same codepath + assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) + + def test_double(self): + # offset for alignment test + for i in range(2): + assert_array_equal(self.d[i:] > 0, self.ed[i:]) + assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) + assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) + assert_array_equal(-self.d[i:] < 0, self.ed[i:]) + assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) + r = self.d[i:] != 0 + assert_array_equal(r, self.ed[i:]) + r2 = self.d[i:] != np.zeros_like(self.d[i:]) + r3 = 0 != self.d[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same codepath + assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) + + +class TestSeterr(TestCase): + def test_default(self): + err = geterr() + self.assertEqual(err, dict( + divide='warn', + invalid='warn', + over='warn', + under='ignore', + )) + + def test_set(self): + with np.errstate(): + err = seterr() + old = seterr(divide='print') + self.assertTrue(err == old) + new = seterr() + self.assertTrue(new['divide'] == 'print') + seterr(over='raise') + self.assertTrue(geterr()['over'] == 'raise') + 
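The seterr contract exercised by TestSeterr, as a standalone sketch: seterr returns the complete previous settings, so saving and restoring them round-trips exactly (the test wraps this in np.errstate() so the process-wide state is restored either way):

import numpy as np

old = np.seterr(divide='print')   # change one field, keep the rest
assert np.geterr()['divide'] == 'print'
np.seterr(**old)                  # restore the saved settings
assert np.geterr() == old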
self.assertTrue(new['divide'] == 'print') + seterr(**old) + self.assertTrue(geterr() == old) + + @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") + def test_divide_err(self): + with errstate(divide='raise'): + try: + array([1.]) / array([0.]) + except FloatingPointError: + pass + else: + self.fail() + seterr(divide='ignore') + array([1.]) / array([0.]) + + def test_errobj(self): + olderrobj = np.geterrobj() + self.called = 0 + try: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with errstate(divide='warn'): + np.seterrobj([20000, 1, None]) + array([1.]) / array([0.]) + self.assertEqual(len(w), 1) + + def log_err(*args): + self.called += 1 + extobj_err = args + assert (len(extobj_err) == 2) + assert ("divide" in extobj_err[0]) + + with errstate(divide='ignore'): + np.seterrobj([20000, 3, log_err]) + array([1.]) / array([0.]) + self.assertEqual(self.called, 1) + + np.seterrobj(olderrobj) + with errstate(divide='ignore'): + np.divide(1., 0., extobj=[20000, 3, log_err]) + self.assertEqual(self.called, 2) + finally: + np.seterrobj(olderrobj) + del self.called + + def test_errobj_noerrmask(self): + # errmask = 0 has a special code path for the default + olderrobj = np.geterrobj() + try: + # set errobj to something non default + np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, + umath.ERR_DEFAULT + 1, None]) + #call a ufunc + np.isnan(np.array([6])) + # same with the default, lots of times to get rid of possible + # pre-existing stack in the code + for i in range(10000): + np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT, + None]) + np.isnan(np.array([6])) + finally: + np.seterrobj(olderrobj) + + +class TestFloatExceptions(TestCase): + def assert_raises_fpe(self, fpeerr, flop, x, y): + ftype = type(x) + try: + flop(x, y) + assert_(False, + "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) + except FloatingPointError as exc: + assert_(str(exc).find(fpeerr) >= 0, + "Type %s raised wrong fpe error '%s'." % (ftype, exc)) + + def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): + # Check that fpe exception is raised. + # + # Given a floating operation `flop` and two scalar values, check that + # the operation raises the floating point exception specified by + #`fpeerr`. Tests all variants with 0-d array scalars as well. 
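What assert_raises_fpe boils down to, sketched standalone: under np.errstate, IEEE floating-point exceptions surface as FloatingPointError with a message naming the condition:

import numpy as np

with np.errstate(all='raise'):
    try:
        np.float64(1.0) / np.float64(0.0)
    except FloatingPointError as exc:
        # the message identifies which condition fired
        assert 'divide by zero' in str(exc)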
+class TestFloatExceptions(TestCase):
+    def assert_raises_fpe(self, fpeerr, flop, x, y):
+        ftype = type(x)
+        try:
+            flop(x, y)
+            assert_(False,
+                    "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
+        except FloatingPointError as exc:
+            assert_(str(exc).find(fpeerr) >= 0,
+                    "Type %s raised wrong fpe error '%s'." % (ftype, exc))
+
+    def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
+        # Check that the floating point exception is raised.
+        #
+        # Given a floating operation `flop` and two scalar values, check
+        # that the operation raises the floating point exception specified
+        # by `fpeerr`. Tests all variants with 0-d array scalars as well.
+
+        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
+        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
+        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
+        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
+
+    @dec.knownfailureif(True, "See ticket #2350")
+    def test_floating_exceptions(self):
+        # Test basic arithmetic function errors
+        with np.errstate(all='raise'):
+            # Test for all real and complex float types
+            for typecode in np.typecodes['AllFloat']:
+                ftype = np.obj2sctype(typecode)
+                if np.dtype(ftype).kind == 'f':
+                    # Get some extreme values for the type
+                    fi = np.finfo(ftype)
+                    ft_tiny = fi.tiny
+                    ft_max = fi.max
+                    ft_eps = fi.eps
+                    underflow = 'underflow'
+                    divbyzero = 'divide by zero'
+                else:
+                    # 'c' (complex): get the extremes from the
+                    # corresponding real dtype
+                    rtype = type(ftype(0).real)
+                    fi = np.finfo(rtype)
+                    ft_tiny = ftype(fi.tiny)
+                    ft_max = ftype(fi.max)
+                    ft_eps = ftype(fi.eps)
+                    # The complex types raise different exceptions
+                    underflow = ''
+                    divbyzero = ''
+                overflow = 'overflow'
+                invalid = 'invalid'
+
+                self.assert_raises_fpe(underflow,
+                                       lambda a, b: a/b, ft_tiny, ft_max)
+                self.assert_raises_fpe(underflow,
+                                       lambda a, b: a*b, ft_tiny, ft_tiny)
+                self.assert_raises_fpe(overflow,
+                                       lambda a, b: a*b, ft_max, ftype(2))
+                self.assert_raises_fpe(overflow,
+                                       lambda a, b: a/b, ft_max, ftype(0.5))
+                self.assert_raises_fpe(overflow,
+                                       lambda a, b: a+b, ft_max, ft_max*ft_eps)
+                self.assert_raises_fpe(overflow,
+                                       lambda a, b: a-b, -ft_max, ft_max*ft_eps)
+                self.assert_raises_fpe(overflow,
+                                       np.power, ftype(2), ftype(2**fi.nexp))
+                self.assert_raises_fpe(divbyzero,
+                                       lambda a, b: a/b, ftype(1), ftype(0))
+                self.assert_raises_fpe(invalid,
+                                       lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
+                self.assert_raises_fpe(invalid,
+                                       lambda a, b: a/b, ftype(0), ftype(0))
+                self.assert_raises_fpe(invalid,
+                                       lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
+                self.assert_raises_fpe(invalid,
+                                       lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
+                self.assert_raises_fpe(invalid,
+                                       lambda a, b: a*b, ftype(0), ftype(np.inf))
+
+    def test_warnings(self):
+        # test warning code path
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            with np.errstate(all="warn"):
+                np.divide(1, 0.)
+                self.assertEqual(len(w), 1)
+                self.assertTrue("divide by zero" in str(w[0].message))
+                np.array(1e300) * np.array(1e300)
+                self.assertEqual(len(w), 2)
+                self.assertTrue("overflow" in str(w[-1].message))
+                np.array(np.inf) - np.array(np.inf)
+                self.assertEqual(len(w), 3)
+                self.assertTrue("invalid value" in str(w[-1].message))
+                np.array(1e-300) * np.array(1e-300)
+                self.assertEqual(len(w), 4)
+                self.assertTrue("underflow" in str(w[-1].message))
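A minimal runnable sketch of the machinery these tests drive, using array operands (which raise dependably on mainstream platforms, unlike the scalar cases tracked by ticket #2350) and assuming only np.errstate and np.finfo:

    import numpy as np

    with np.errstate(over='raise'):
        big = np.array([np.finfo(np.float32).max], dtype=np.float32)
        try:
            big * np.float32(2)            # overflows float32
        except FloatingPointError as exc:
            assert 'overflow' in str(exc)

+
+
+class TestTypes(TestCase):
+    def check_promotion_cases(self, promote_func):
+        # Test that the scalars get coerced correctly.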
+ b = np.bool_(0) + i8, i16, i32, i64 = int8(0), int16(0), int32(0), int64(0) + u8, u16, u32, u64 = uint8(0), uint16(0), uint32(0), uint64(0) + f32, f64, fld = float32(0), float64(0), longdouble(0) + c64, c128, cld = complex64(0), complex128(0), clongdouble(0) + + # coercion within the same kind + assert_equal(promote_func(i8, i16), np.dtype(int16)) + assert_equal(promote_func(i32, i8), np.dtype(int32)) + assert_equal(promote_func(i16, i64), np.dtype(int64)) + assert_equal(promote_func(u8, u32), np.dtype(uint32)) + assert_equal(promote_func(f32, f64), np.dtype(float64)) + assert_equal(promote_func(fld, f32), np.dtype(longdouble)) + assert_equal(promote_func(f64, fld), np.dtype(longdouble)) + assert_equal(promote_func(c128, c64), np.dtype(complex128)) + assert_equal(promote_func(cld, c128), np.dtype(clongdouble)) + assert_equal(promote_func(c64, fld), np.dtype(clongdouble)) + + # coercion between kinds + assert_equal(promote_func(b, i32), np.dtype(int32)) + assert_equal(promote_func(b, u8), np.dtype(uint8)) + assert_equal(promote_func(i8, u8), np.dtype(int16)) + assert_equal(promote_func(u8, i32), np.dtype(int32)) + assert_equal(promote_func(i64, u32), np.dtype(int64)) + assert_equal(promote_func(u64, i32), np.dtype(float64)) + assert_equal(promote_func(i32, f32), np.dtype(float64)) + assert_equal(promote_func(i64, f32), np.dtype(float64)) + assert_equal(promote_func(f32, i16), np.dtype(float32)) + assert_equal(promote_func(f32, u32), np.dtype(float64)) + assert_equal(promote_func(f32, c64), np.dtype(complex64)) + assert_equal(promote_func(c128, f32), np.dtype(complex128)) + assert_equal(promote_func(cld, f64), np.dtype(clongdouble)) + + # coercion between scalars and 1-D arrays + assert_equal(promote_func(array([b]), i8), np.dtype(int8)) + assert_equal(promote_func(array([b]), u8), np.dtype(uint8)) + assert_equal(promote_func(array([b]), i32), np.dtype(int32)) + assert_equal(promote_func(array([b]), u32), np.dtype(uint32)) + assert_equal(promote_func(array([i8]), i64), np.dtype(int8)) + assert_equal(promote_func(u64, array([i32])), np.dtype(int32)) + assert_equal(promote_func(i64, array([u32])), np.dtype(uint32)) + assert_equal(promote_func(int32(-1), array([u64])), np.dtype(float64)) + assert_equal(promote_func(f64, array([f32])), np.dtype(float32)) + assert_equal(promote_func(fld, array([f32])), np.dtype(float32)) + assert_equal(promote_func(array([f64]), fld), np.dtype(float64)) + assert_equal(promote_func(fld, array([c64])), np.dtype(complex64)) + assert_equal(promote_func(c64, array([f64])), np.dtype(complex128)) + assert_equal(promote_func(complex64(3j), array([f64])), + np.dtype(complex128)) + + # coercion between scalars and 1-D arrays, where + # the scalar has greater kind than the array + assert_equal(promote_func(array([b]), f64), np.dtype(float64)) + assert_equal(promote_func(array([b]), i64), np.dtype(int64)) + assert_equal(promote_func(array([b]), u64), np.dtype(uint64)) + assert_equal(promote_func(array([i8]), f64), np.dtype(float64)) + assert_equal(promote_func(array([u16]), f64), np.dtype(float64)) + + # uint and int are treated as the same "kind" for + # the purposes of array-scalar promotion. + assert_equal(promote_func(array([u16]), i32), np.dtype(uint16)) + + # float and complex are treated as the same "kind" for + # the purposes of array-scalar promotion, so that you can do + # (0j + float32array) to get a complex64 array instead of + # a complex128 array. 
+        assert_equal(promote_func(array([f32]), c128), np.dtype(complex64))
+
+    def test_coercion(self):
+        def res_type(a, b):
+            return np.add(a, b).dtype
+        self.check_promotion_cases(res_type)
+
+        # Use-case: float/complex scalar * bool/int8 array
+        # shouldn't narrow the float/complex type
+        for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
+            b = 1.234 * a
+            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
+            b = np.longdouble(1.234) * a
+            assert_equal(b.dtype, np.dtype(np.longdouble),
+                         "array type %s" % a.dtype)
+            b = np.float64(1.234) * a
+            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
+            b = np.float32(1.234) * a
+            assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
+            b = np.float16(1.234) * a
+            assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
+
+            b = 1.234j * a
+            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
+            b = np.clongdouble(1.234j) * a
+            assert_equal(b.dtype, np.dtype(np.clongdouble),
+                         "array type %s" % a.dtype)
+            b = np.complex128(1.234j) * a
+            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
+            b = np.complex64(1.234j) * a
+            assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
+
+        # The following use-case is problematic, and to resolve its
+        # tricky side-effects requires more changes.
+        #
+        ## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
+        ## a float32, shouldn't promote to float64
+        #a = np.array([1.0, 1.5], dtype=np.float32)
+        #t = np.array([True, False])
+        #b = t*a
+        #assert_equal(b, [1.0, 0.0])
+        #assert_equal(b.dtype, np.dtype('f4'))
+        #b = (1-t)*a
+        #assert_equal(b, [0.0, 1.5])
+        #assert_equal(b.dtype, np.dtype('f4'))
+        ## Probably ~t (bitwise negation) is more proper to use here,
+        ## but this is arguably less intuitive to understand at a glance, and
+        ## would fail if 't' is actually an integer array instead of boolean:
+        #b = (~t)*a
+        #assert_equal(b, [0.0, 1.5])
+        #assert_equal(b.dtype, np.dtype('f4'))
+
+    def test_result_type(self):
+        self.check_promotion_cases(np.result_type)
+        assert_(np.result_type(None) == np.dtype(None))
+
+    def test_promote_types_endian(self):
+        # promote_types should always return native-endian types
+        assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
+        assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
+
+        assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
+        assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
+        assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
+        assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
+
+        assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
+        assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
+        assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
+        assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
+        assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
+        assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
+
+        assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
+        assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
+        assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
+        assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
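The endianness test above relies on promote_types normalizing byte order. A minimal sketch of that property, assuming nothing beyond np.promote_types itself:

    import numpy as np

    # both inputs big-endian, yet the result reports native ('=') byte order
    dt = np.promote_types('>i4', '>i4')
    assert dt == np.dtype('i4') and dt.byteorder == '='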
+    def test_promote_types_strings(self):
+        assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
+        assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
+        assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
+        assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
+        assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
+        assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
+        assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
+        assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
+        assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
+        assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
+        assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
+        assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
+        assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
+        assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
+        assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
+        assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
+        assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
+        assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
+        assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
+        assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
+        assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
+        assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
+        assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
+        assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
+        assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
+        assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
+        assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
+        assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
+        assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
+        assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
+        assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
+        assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
+
+    def test_can_cast(self):
+        assert_(np.can_cast(np.int32, np.int64))
+        assert_(np.can_cast(np.float64, np.complex))
+        assert_(not np.can_cast(np.complex, np.float))
+
+        assert_(np.can_cast('i8', 'f8'))
+        assert_(not np.can_cast('i8', 'f4'))
+        assert_(np.can_cast('i4', 'S11'))
+
+        assert_(np.can_cast('i8', 'i8', 'no'))
+        assert_(not np.can_cast('<i8', '>i8', 'no'))
+
+        assert_(np.can_cast('<i8', '>i8', 'equiv'))
+        assert_(not np.can_cast('<i4', '>i8', 'equiv'))
+
+        assert_(np.can_cast('<i4', '>i8', 'safe'))
+        assert_(not np.can_cast('<i8', '>i4', 'safe'))
+
+        assert_(np.can_cast('<i8', '>i4', 'same_kind'))
+        assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
+
+        assert_(np.can_cast('<i8', '>u4', 'unsafe'))
+
+        assert_(np.can_cast('bool', 'S5'))
+        assert_(not np.can_cast('bool', 'S4'))
+
+        assert_(np.can_cast('b', 'S4'))
+        assert_(not np.can_cast('b', 'S3'))
+
+        assert_(np.can_cast('u1', 'S3'))
+        assert_(not np.can_cast('u1', 'S2'))
+        assert_(np.can_cast('u2', 'S5'))
+        assert_(not np.can_cast('u2', 'S4'))
+        assert_(np.can_cast('u4', 'S10'))
+        assert_(not np.can_cast('u4', 'S9'))
+        assert_(np.can_cast('u8', 'S20'))
+        assert_(not np.can_cast('u8', 'S19'))
+
+        assert_(np.can_cast('i1', 'S4'))
+        assert_(not np.can_cast('i1', 'S3'))
+        assert_(np.can_cast('i2', 'S6'))
+        assert_(not np.can_cast('i2', 'S5'))
+        assert_(np.can_cast('i4', 'S11'))
+        assert_(not np.can_cast('i4', 'S10'))
+        assert_(np.can_cast('i8', 'S21'))
+        assert_(not np.can_cast('i8', 'S20'))
+
+        assert_(np.can_cast('bool', 'U5'))
+        assert_(not np.can_cast('bool', 'U4'))
+
+        assert_(np.can_cast('b', 'U4'))
+        assert_(not np.can_cast('b', 'U3'))
+
+        assert_(np.can_cast('u1', 'U3'))
+        assert_(not np.can_cast('u1', 'U2'))
+        assert_(np.can_cast('u2', 'U5'))
+        assert_(not np.can_cast('u2', 'U4'))
+        assert_(np.can_cast('u4', 'U10'))
+        assert_(not np.can_cast('u4', 'U9'))
+        assert_(np.can_cast('u8', 'U20'))
+        assert_(not np.can_cast('u8', 'U19'))
+
+        assert_(np.can_cast('i1', 'U4'))
+        assert_(not np.can_cast('i1', 'U3'))
+        assert_(np.can_cast('i2', 'U6'))
+        assert_(not np.can_cast('i2', 'U5'))
+        assert_(np.can_cast('i4', 'U11'))
+        assert_(not np.can_cast('i4', 'U10'))
+        assert_(np.can_cast('i8', 'U21'))
+        assert_(not np.can_cast('i8', 'U20'))
+
+        assert_raises(TypeError, np.can_cast, 'i4', None)
+        assert_raises(TypeError, np.can_cast, None, 'i4')
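The fixed widths expected by the two tests above (for example 'S21' for int64, 'S11' for int32) all follow one rule. A small sketch verifying it, assuming only np.dtype; max_str_len is an illustrative helper, not NumPy API:

    import numpy as np

    def max_str_len(code):
        # digits needed for 2**(8*itemsize), plus a '-' sign for signed ints
        dt = np.dtype(code)
        digits = len(str(2 ** (8 * dt.itemsize)))
        return digits + (1 if dt.kind == 'i' else 0)

    for code, width in [('i1', 4), ('i2', 6), ('i4', 11), ('i8', 21),
                        ('u1', 3), ('u2', 5), ('u4', 10), ('u8', 20)]:
        assert max_str_len(code) == width

+
+
+# Custom exception class to test exception propagation in fromiter
+class 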
NIterError(Exception): pass + + +class TestFromiter(TestCase): + def makegen(self): + for x in range(24): + yield x**2 + + def test_types(self): + ai32 = fromiter(self.makegen(), int32) + ai64 = fromiter(self.makegen(), int64) + af = fromiter(self.makegen(), float) + self.assertTrue(ai32.dtype == dtype(int32)) + self.assertTrue(ai64.dtype == dtype(int64)) + self.assertTrue(af.dtype == dtype(float)) + + def test_lengths(self): + expected = array(list(self.makegen())) + a = fromiter(self.makegen(), int) + a20 = fromiter(self.makegen(), int, 20) + self.assertTrue(len(a) == len(expected)) + self.assertTrue(len(a20) == 20) + self.assertRaises(ValueError, fromiter, + self.makegen(), int, len(expected) + 10) + + def test_values(self): + expected = array(list(self.makegen())) + a = fromiter(self.makegen(), int) + a20 = fromiter(self.makegen(), int, 20) + self.assertTrue(alltrue(a == expected, axis=0)) + self.assertTrue(alltrue(a20 == expected[:20], axis=0)) + + def load_data(self, n, eindex): + # Utility method for the issue 2592 tests. + # Raise an exception at the desired index in the iterator. + for e in range(n): + if e == eindex: + raise NIterError('error at index %s' % eindex) + yield e + + def test_2592(self): + # Test iteration exceptions are correctly raised. + count, eindex = 10, 5 + self.assertRaises(NIterError, np.fromiter, + self.load_data(count, eindex), dtype=int, count=count) + + def test_2592_edge(self): + # Test iter. exceptions, edge case (exception at end of iterator). + count = 10 + eindex = count-1 + self.assertRaises(NIterError, np.fromiter, + self.load_data(count, eindex), dtype=int, count=count) + + +class TestNonzero(TestCase): + def test_nonzero_trivial(self): + assert_equal(np.count_nonzero(array([])), 0) + assert_equal(np.count_nonzero(array([], dtype='?')), 0) + assert_equal(np.nonzero(array([])), ([],)) + + assert_equal(np.count_nonzero(array(0)), 0) + assert_equal(np.count_nonzero(array(0, dtype='?')), 0) + assert_equal(np.nonzero(array(0)), ([],)) + assert_equal(np.count_nonzero(array(1)), 1) + assert_equal(np.count_nonzero(array(1, dtype='?')), 1) + assert_equal(np.nonzero(array(1)), ([0],)) + + def test_nonzero_onedim(self): + x = array([1, 0, 2, -1, 0, 0, 8]) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) + + x = array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + dtype=[('a', 'i4'), ('b', 'i2')]) + assert_equal(np.count_nonzero(x['a']), 3) + assert_equal(np.count_nonzero(x['b']), 4) + assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) + assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) + + def test_nonzero_twodim(self): + x = array([[0, 1, 0], [2, 0, 3]]) + assert_equal(np.count_nonzero(x), 3) + assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) + + x = np.eye(3) + assert_equal(np.count_nonzero(x), 3) + assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) + + x = array([[(0, 1), (0, 0), (1, 11)], + [(1, 1), (1, 0), (0, 0)], + [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) + assert_equal(np.count_nonzero(x['a']), 4) + assert_equal(np.count_nonzero(x['b']), 5) + assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) + assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) + + assert_(not x['a'].T.flags.aligned) + assert_equal(np.count_nonzero(x['a'].T), 4) + assert_equal(np.count_nonzero(x['b'].T), 5) + assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) + assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) + + def 
test_sparse(self): + # test special sparse condition boolean code path + for i in range(20): + c = np.zeros(200, dtype=np.bool) + c[i::20] = True + assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) + + c = np.zeros(400, dtype=np.bool) + c[10 + i:20 + i] = True + c[20 + i*2] = True + assert_equal(np.nonzero(c)[0], + np.concatenate((np.arange(10 +i, 20 + i), [20 +i*2]))) + + +class TestIndex(TestCase): + def test_boolean(self): + a = rand(3, 5, 8) + V = rand(5, 8) + g1 = randint(0, 5, size=15) + g2 = randint(0, 8, size=15) + V[g1, g2] = -V[g1, g2] + assert_((array([a[0][V>0], a[1][V>0], a[2][V>0]]) == a[:, V>0]).all()) + + def test_boolean_edgecase(self): + a = np.array([], dtype='int32') + b = np.array([], dtype='bool') + c = a[b] + assert_equal(c, []) + assert_equal(c.dtype, np.dtype('int32')) + + +class TestBinaryRepr(TestCase): + def test_zero(self): + assert_equal(binary_repr(0), '0') + + def test_large(self): + assert_equal(binary_repr(10736848), '101000111101010011010000') + + def test_negative(self): + assert_equal(binary_repr(-1), '-1') + assert_equal(binary_repr(-1, width=8), '11111111') + +class TestBaseRepr(TestCase): + def test_base3(self): + assert_equal(base_repr(3**5, 3), '100000') + + def test_positive(self): + assert_equal(base_repr(12, 10), '12') + assert_equal(base_repr(12, 10, 4), '000012') + assert_equal(base_repr(12, 4), '30') + assert_equal(base_repr(3731624803700888, 36), '10QR0ROFCEW') + + def test_negative(self): + assert_equal(base_repr(-12, 10), '-12') + assert_equal(base_repr(-12, 10, 4), '-000012') + assert_equal(base_repr(-12, 4), '-30') + +class TestArrayComparisons(TestCase): + def test_array_equal(self): + res = array_equal(array([1, 2]), array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = array_equal(array([1, 2]), array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = array_equal(array([1, 2]), array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = array_equal(array([1, 2]), array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + res = array_equal(array(['a'], dtype='S1'), array(['a'], dtype='S1')) + assert_(res) + assert_(type(res) is bool) + res = array_equal(array([('a', 1)], dtype='S1,u4'), array([('a', 1)], dtype='S1,u4')) + assert_(res) + assert_(type(res) is bool) + + def test_array_equiv(self): + res = array_equiv(array([1, 2]), array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = array_equiv(array([1, 2]), array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = array_equiv(array([1, 2]), array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = array_equiv(array([1, 2]), array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + + res = array_equiv(array([1, 1]), array([1])) + assert_(res) + assert_(type(res) is bool) + res = array_equiv(array([1, 1]), array([[1], [1]])) + assert_(res) + assert_(type(res) is bool) + res = array_equiv(array([1, 2]), array([2])) + assert_(not res) + assert_(type(res) is bool) + res = array_equiv(array([1, 2]), array([[1], [2]])) + assert_(not res) + assert_(type(res) is bool) + res = array_equiv(array([1, 2]), array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + assert_(not res) + assert_(type(res) is bool) + + +def assert_array_strict_equal(x, y): + assert_array_equal(x, y) + # Check flags, 32 bit arches typically don't provide 16 byte alignment + if ((x.dtype.alignment <= 8 or + np.intp().dtype.itemsize != 4) and + sys.platform != 'win32'): + assert_(x.flags == y.flags) + else: + assert_(x.flags.owndata == 
y.flags.owndata) + assert_(x.flags.writeable == y.flags.writeable) + assert_(x.flags.c_contiguous == y.flags.c_contiguous) + assert_(x.flags.f_contiguous == y.flags.f_contiguous) + assert_(x.flags.updateifcopy == y.flags.updateifcopy) + # check endianness + assert_(x.dtype.isnative == y.dtype.isnative) + + +class TestClip(TestCase): + def setUp(self): + self.nr = 5 + self.nc = 3 + + def fastclip(self, a, m, M, out=None): + if out is None: + return a.clip(m, M) + else: + return a.clip(m, M, out) + + def clip(self, a, m, M, out=None): + # use slow-clip + selector = less(a, m)+2*greater(a, M) + return selector.choose((a, m, M), out=out) + + # Handy functions + def _generate_data(self, n, m): + return randn(n, m) + + def _generate_data_complex(self, n, m): + return randn(n, m) + 1.j *rand(n, m) + + def _generate_flt_data(self, n, m): + return (randn(n, m)).astype(float32) + + def _neg_byteorder(self, a): + a = asarray(a) + if sys.byteorder == 'little': + a = a.astype(a.dtype.newbyteorder('>')) + else: + a = a.astype(a.dtype.newbyteorder('<')) + return a + + def _generate_non_native_data(self, n, m): + data = randn(n, m) + data = self._neg_byteorder(data) + assert_(not data.dtype.isnative) + return data + + def _generate_int_data(self, n, m): + return (10 * rand(n, m)).astype(int64) + + def _generate_int32_data(self, n, m): + return (10 * rand(n, m)).astype(int32) + + # Now the real test cases + def test_simple_double(self): + #Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.1 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_int(self): + #Test native int input with scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(int) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_array_double(self): + #Test native double input with array min/max. + a = self._generate_data(self.nr, self.nc) + m = zeros(a.shape) + M = m + 0.5 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_nonnative(self): + #Test non native double input with scalar min/max. + #Test native double input with non native double scalar min/max. + a = self._generate_non_native_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + #Test native double input with non native double scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = self._neg_byteorder(0.6) + assert_(not M.dtype.isnative) + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + def test_simple_complex(self): + #Test native complex input with native double scalar min/max. + #Test native input with complex double scalar min/max. + a = 3 * self._generate_data_complex(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + #Test native input with complex double scalar min/max. + a = 3 * self._generate_data(self.nr, self.nc) + m = -0.5 + 1.j + M = 1. + 2.j + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_clip_non_contig(self): + #Test clip for non contiguous native input and native scalar min/max. 
+ a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = self.fastclip(a, -1.6, 1.7) + act = self.clip(a, -1.6, 1.7) + assert_array_strict_equal(ac, act) + + def test_simple_out(self): + #Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = zeros(a.shape) + act = zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int32_inout(self): + #Test native int32 input with double min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = float64(0) + M = float64(2) + ac = zeros(a.shape, dtype = int32) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_out(self): + #Test native int32 input with int32 scalar min/max and int64 out. + a = self._generate_int32_data(self.nr, self.nc) + m = int32(-1) + M = int32(1) + ac = zeros(a.shape, dtype = int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_inout(self): + #Test native int32 input with double array min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = zeros(a.shape, float64) + M = float64(1) + ac = zeros(a.shape, dtype = int32) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int32_out(self): + #Test native double input with scalar min/max and int out. + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = zeros(a.shape, dtype = int32) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_inplace_01(self): + #Test native double input with array min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_simple_inplace_02(self): + #Test native double input with scalar min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_noncontig_inplace(self): + #Test non contiguous double input with double scalar min/max in-place. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_equal(a, ac) + + def test_type_cast_01(self): + #Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_02(self): + #Test native int32 input with int32 scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(int32) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_03(self): + #Test native int32 input with float64 scalar min/max. 
+ a = self._generate_int32_data(self.nr, self.nc) + m = -2 + M = 4 + ac = self.fastclip(a, float64(m), float64(M)) + act = self.clip(a, float64(m), float64(M)) + assert_array_strict_equal(ac, act) + + def test_type_cast_04(self): + #Test native int32 input with float32 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = float32(-2) + M = float32(4) + act = self.fastclip(a, m, M) + ac = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_05(self): + #Test native int32 with double arrays min/max. + a = self._generate_int_data(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m * zeros(a.shape), M) + act = self.clip(a, m * zeros(a.shape), M) + assert_array_strict_equal(ac, act) + + def test_type_cast_06(self): + #Test native with NON native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.5 + m_s = self._neg_byteorder(m) + M = 1. + act = self.clip(a, m_s, M) + ac = self.fastclip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_07(self): + #Test NON native with native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * ones(a.shape) + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + act = a_s.clip(m, M) + ac = self.fastclip(a_s, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_08(self): + #Test NON native with native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + ac = self.fastclip(a_s, m, M) + act = a_s.clip(m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_09(self): + #Test native with NON native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * ones(a.shape) + M = 1. + m_s = self._neg_byteorder(m) + assert_(not m_s.dtype.isnative) + ac = self.fastclip(a, m_s, M) + act = self.clip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_10(self): + #Test native int32 with float min/max and float out for output argument. + a = self._generate_int_data(self.nr, self.nc) + b = zeros(a.shape, dtype = float32) + m = float32(-0.5) + M = float32(1) + act = self.clip(a, m, M, out = b) + ac = self.fastclip(a, m, M, out = b) + assert_array_strict_equal(ac, act) + + def test_type_cast_11(self): + #Test non native with native scalar, min/max, out non native + a = self._generate_non_native_data(self.nr, self.nc) + b = a.copy() + b = b.astype(b.dtype.newbyteorder('>')) + bt = b.copy() + m = -0.5 + M = 1. 
+ self.fastclip(a, m, M, out = b) + self.clip(a, m, M, out = bt) + assert_array_strict_equal(b, bt) + + def test_type_cast_12(self): + #Test native int32 input and min/max and float out + a = self._generate_int_data(self.nr, self.nc) + b = zeros(a.shape, dtype = float32) + m = int32(0) + M = int32(1) + act = self.clip(a, m, M, out = b) + ac = self.fastclip(a, m, M, out = b) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple(self): + #Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = zeros(a.shape) + act = zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple2(self): + #Test native int32 input with double min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = float64(0) + M = float64(2) + ac = zeros(a.shape, dtype = int32) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple_int32(self): + #Test native int32 input with int32 scalar min/max and int64 out + a = self._generate_int32_data(self.nr, self.nc) + m = int32(-1) + M = int32(1) + ac = zeros(a.shape, dtype = int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_int32(self): + #Test native int32 input with double array min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = zeros(a.shape, float64) + M = float64(1) + ac = zeros(a.shape, dtype = int32) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_outint32(self): + #Test native double input with scalar min/max and int out + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = zeros(a.shape, dtype = int32) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_inplace_array(self): + #Test native double input with array min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_inplace_simple(self): + #Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_func_takes_out(self): + # Ensure that the clip() function takes an out= argument. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + a2 = clip(a, m, M, out=a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a2, ac) + self.assertTrue(a2 is a) + + +class TestAllclose(object): + rtol = 1e-5 + atol = 1e-8 + + def setUp(self): + self.olderr = np.seterr(invalid='ignore') + + def tearDown(self): + np.seterr(**self.olderr) + + def tst_allclose(self, x, y): + assert_(allclose(x, y), "%s and %s not close" % (x, y)) + + def tst_not_allclose(self, x, y): + assert_(not allclose(x, y), "%s and %s shouldn't be close" % (x, y)) + + def test_ip_allclose(self): + #Parametric test factory. 
+ arr = array([100, 1000]) + aran = arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1+rtol+atol]), + (arr, arr + arr*rtol), + (arr, arr + arr*rtol + atol*2), + (aran, aran + aran*rtol), + (inf, inf), + (inf, [inf])] + + for (x, y) in data: + yield (self.tst_allclose, x, y) + + def test_ip_not_allclose(self): + #Parametric test factory. + aran = arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([inf, 0], [1, inf]), + ([inf, 0], [1, 0]), + ([inf, inf], [1, inf]), + ([inf, inf], [1, 0]), + ([-inf, 0], [inf, 0]), + ([nan, 0], [nan, 0]), + ([atol*2], [0]), + ([1], [1+rtol+atol*2]), + (aran, aran + aran*atol + atol*2), + (array([inf, 1]), array([0, inf]))] + + for (x, y) in data: + yield (self.tst_not_allclose, x, y) + + def test_no_parameter_modification(self): + x = array([inf, 1]) + y = array([0, inf]) + allclose(x, y) + assert_array_equal(x, array([inf, 1])) + assert_array_equal(y, array([0, inf])) + + + def test_min_int(self): + # Could make problems because of abs(min_int) == min_int + min_int = np.iinfo(np.int_).min + a = np.array([min_int], dtype=np.int_) + assert_(allclose(a, a)) + + +class TestIsclose(object): + rtol = 1e-5 + atol = 1e-8 + + def setup(self): + atol = self.atol + rtol = self.rtol + arr = array([100, 1000]) + aran = arange(125).reshape((5, 5, 5)) + + self.all_close_tests = [ + ([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr*rtol), + (arr, arr + arr*rtol + atol), + (aran, aran + aran*rtol), + (inf, inf), + (inf, [inf]), + ([inf, -inf], [inf, -inf]), + ] + self.none_close_tests = [ + ([inf, 0], [1, inf]), + ([inf, -inf], [1, 0]), + ([inf, inf], [1, -inf]), + ([inf, inf], [1, 0]), + ([nan, 0], [nan, -inf]), + ([atol*2], [0]), + ([1], [1 + rtol + atol*2]), + (aran, aran + rtol*1.1*aran + atol*1.1), + (array([inf, 1]), array([0, inf])), + ] + self.some_close_tests = [ + ([inf, 0], [inf, atol*2]), + ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, nan, 1e6]), + (arange(3), [0, 1, 2.1]), + (nan, [nan, nan, nan]), + ([0], [atol, inf, -inf, nan]), + (0, [atol, inf, -inf, nan]), + ] + self.some_close_results = [ + [True, False], + [True, False, False], + [True, True, False], + [False, False, False], + [True, False, False, False], + [True, False, False, False], + ] + + def test_ip_isclose(self): + self.setup() + tests = self.some_close_tests + results = self.some_close_results + for (x, y), result in zip(tests, results): + yield (assert_array_equal, isclose(x, y), result) + + def tst_all_isclose(self, x, y): + assert_(all(isclose(x, y)), "%s and %s not close" % (x, y)) + + def tst_none_isclose(self, x, y): + msg = "%s and %s shouldn't be close" + assert_(not any(isclose(x, y)), msg % (x, y)) + + def tst_isclose_allclose(self, x, y): + msg = "isclose.all() and allclose aren't same for %s and %s" + assert_array_equal(isclose(x, y).all(), allclose(x, y), msg % (x, y)) + + def test_ip_all_isclose(self): + self.setup() + for (x, y) in self.all_close_tests: + yield (self.tst_all_isclose, x, y) + + def test_ip_none_isclose(self): + self.setup() + for (x, y) in self.none_close_tests: + yield (self.tst_none_isclose, x, y) + + def test_ip_isclose_allclose(self): + self.setup() + tests = (self.all_close_tests + self.none_close_tests + + self.some_close_tests) + for (x, y) in tests: + yield (self.tst_isclose_allclose, x, y) + + def test_equal_nan(self): + assert_array_equal(isclose(nan, nan, equal_nan=True), [True]) + arr = array([1.0, nan]) + 
+        assert_array_equal(isclose(arr, arr, equal_nan=True), [True, True])
+
+    def test_masked_arrays(self):
+        x = np.ma.masked_where([True, True, False], np.arange(3))
+        assert_(type(x) is type(isclose(2, x)))
+
+        x = np.ma.masked_where([True, True, False], [nan, inf, nan])
+        assert_(type(x) is type(isclose(inf, x)))
+
+        x = np.ma.masked_where([True, True, False], [nan, nan, nan])
+        y = isclose(nan, x, equal_nan=True)
+        assert_(type(x) is type(y))
+        # Ensure that the mask isn't modified...
+        assert_array_equal([True, True, False], y.mask)
+
+        x = np.ma.masked_where([True, True, False], [nan, nan, nan])
+        y = isclose(x, x, equal_nan=True)
+        assert_(type(x) is type(y))
+        # Ensure that the mask isn't modified...
+        assert_array_equal([True, True, False], y.mask)
+
+    def test_scalar_return(self):
+        assert_(isscalar(isclose(1, 1)))
+
+    def test_no_parameter_modification(self):
+        x = array([inf, 1])
+        y = array([0, inf])
+        isclose(x, y)
+        assert_array_equal(x, array([inf, 1]))
+        assert_array_equal(y, array([0, inf]))
+
+
+class TestStdVar(TestCase):
+    def setUp(self):
+        self.A = array([1, -1, 1, -1])
+        self.real_var = 1
+
+    def test_basic(self):
+        assert_almost_equal(var(self.A), self.real_var)
+        assert_almost_equal(std(self.A)**2, self.real_var)
+
+    def test_scalars(self):
+        assert_equal(var(1), 0)
+        assert_equal(std(1), 0)
+
+    def test_ddof1(self):
+        assert_almost_equal(var(self.A, ddof=1),
+                            self.real_var*len(self.A)/float(len(self.A)-1))
+        assert_almost_equal(std(self.A, ddof=1)**2,
+                            self.real_var*len(self.A)/float(len(self.A)-1))
+
+    def test_ddof2(self):
+        assert_almost_equal(var(self.A, ddof=2),
+                            self.real_var*len(self.A)/float(len(self.A)-2))
+        assert_almost_equal(std(self.A, ddof=2)**2,
+                            self.real_var*len(self.A)/float(len(self.A)-2))
+
+
+class TestStdVarComplex(TestCase):
+    def test_basic(self):
+        A = array([1, 1.j, -1, -1.j])
+        real_var = 1
+        assert_almost_equal(var(A), real_var)
+        assert_almost_equal(std(A)**2, real_var)
+
+    def test_scalars(self):
+        assert_equal(var(1j), 0)
+        assert_equal(std(1j), 0)
+
+
+class TestCreationFuncs(TestCase):
+    # Test ones, zeros, empty and full.
+
+    def setUp(self):
+        self.dtypes = ('b', 'i', 'u', 'f', 'c', 'S', 'a', 'U', 'V')
+        self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
+        self.ndims = 10
+
+    def check_function(self, func, fill_value=None):
+        par = (
+            (0, 1, 2),
+            range(self.ndims),
+            self.orders,
+            self.dtypes,
+            2**np.arange(9)
+        )
+        fill_kwarg = {}
+        if fill_value is not None:
+            fill_kwarg = {'fill_value': fill_value}
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore', DeprecationWarning)
+            for size, ndims, order, type, bytes in itertools.product(*par):
+                shape = ndims * [size]
+                try:
+                    dtype = np.dtype('{0}{1}'.format(type, bytes))
+                except TypeError:  # dtype combination does not exist
+                    continue
+                else:
+                    # do not fill void type
+                    if fill_value is not None and type in 'V':
+                        continue
+
+                    arr = func(shape, order=order, dtype=dtype,
+                               **fill_kwarg)
+
+                    assert_(arr.dtype == dtype)
+                    assert_(getattr(arr.flags, self.orders[order]))
+
+                    if fill_value is not None:
+                        if dtype.str.startswith('|S'):
+                            val = str(fill_value)
+                        else:
+                            val = fill_value
+                        assert_equal(arr, dtype.type(val))
+
+    def test_zeros(self):
+        self.check_function(np.zeros)
+
+    def test_ones(self):
+        self.check_function(np.ones)
+
+    def test_empty(self):
+        self.check_function(np.empty)
+
+    def test_filled(self):
+        self.check_function(np.full, 0)
+        self.check_function(np.full, 1)
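check_function above silently skips dtype spellings that do not exist, which is why the TypeError guard is needed. A tiny sketch of that behaviour, assuming only np.dtype's "kind-char plus byte-count" spelling:

    import numpy as np

    np.dtype('f4')        # a 4-byte float exists
    try:
        np.dtype('f1')    # ...but a 1-byte float does not
    except TypeError:
        pass

+
+    def test_for_reference_leak(self):
+        # Make sure we have an object for reference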
+ dim = 1 + beg = sys.getrefcount(dim) + np.zeros([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.ones([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.empty([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.full([dim]*10, 0) + assert_(sys.getrefcount(dim) == beg) + + + +class TestLikeFuncs(TestCase): + '''Test ones_like, zeros_like, empty_like and full_like''' + + def setUp(self): + self.data = [ + # Array scalars + (array(3.), None), + (array(3), 'f8'), + # 1D arrays + (arange(6, dtype='f4'), None), + (arange(6), 'c16'), + # 2D C-layout arrays + (arange(6).reshape(2, 3), None), + (arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (arange(6).reshape((2, 3), order='F'), None), + (arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (arange(24).reshape(2, 3, 4), None), + (arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (arange(24).reshape((2, 3, 4), order='F'), None), + (arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + + def compare_array_value(self, dz, value, fill_value): + if value is not None: + if fill_value: + try: + z = dz.dtype.type(value) + except OverflowError: + pass + else: + assert_(all(dz == z)) + else: + assert_(all(dz == value)) + + def check_like_function(self, like_function, value, fill_value=False): + if fill_value: + fill_kwarg = {'fill_value': value} + else: + fill_kwarg = {} + for d, dtype in self.data: + # default (K) order, dtype + dz = like_function(d, dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_equal(array(dz.strides)*d.dtype.itemsize, + array(d.strides)*dz.dtype.itemsize) + assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) + assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # C order, default dtype + dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # F order, default dtype + dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # A order + dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + if d.flags.f_contiguous: + assert_(dz.flags.f_contiguous) + else: + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # Test the 'subok' parameter + a = np.matrix([[1, 2], [3, 4]]) + + b = like_function(a, **fill_kwarg) + assert_(type(b) is np.matrix) + + b = like_function(a, subok=False, **fill_kwarg) + assert_(type(b) is not np.matrix) + + def test_ones_like(self): + self.check_like_function(np.ones_like, 1) + + def test_zeros_like(self): + self.check_like_function(np.zeros_like, 0) + + def test_empty_like(self): + self.check_like_function(np.empty_like, None) + + def test_filled_like(self): + self.check_like_function(np.full_like, 0, True) + 
self.check_like_function(np.full_like, 1, True) + self.check_like_function(np.full_like, 1000, True) + self.check_like_function(np.full_like, 123.456, True) + self.check_like_function(np.full_like, np.inf, True) + +class _TestCorrelate(TestCase): + def _setup(self, dt): + self.x = np.array([1, 2, 3, 4, 5], dtype=dt) + self.y = np.array([-1, -2, -3], dtype=dt) + self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt) + self.z2 = np.array([ -5., -14., -26., -20., -14., -8., -3.], dtype=dt) + + def test_float(self): + self._setup(np.float) + z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z2) + + def test_object(self): + self._setup(Decimal) + z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, self.z2) + +class TestCorrelate(_TestCorrelate): + old_behavior = True + def _setup(self, dt): + # correlate uses an unconventional definition so that correlate(a, b) + # == correlate(b, a), so force the corresponding outputs to be the same + # as well + _TestCorrelate._setup(self, dt) + self.z2 = self.z1 + + @dec.deprecated() + def test_complex(self): + x = np.array([1, 2, 3, 4+1j], dtype=np.complex) + y = np.array([-1, -2j, 3+1j], dtype=np.complex) + r_z = np.array([3+1j, 6, 8-1j, 9+1j, -1-8j, -4-1j], dtype=np.complex) + z = np.correlate(x, y, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, r_z) + + @dec.deprecated() + def test_float(self): + _TestCorrelate.test_float(self) + + @dec.deprecated() + def test_object(self): + _TestCorrelate.test_object(self) + +class TestCorrelateNew(_TestCorrelate): + old_behavior = False + def test_complex(self): + x = np.array([1, 2, 3, 4+1j], dtype=np.complex) + y = np.array([-1, -2j, 3+1j], dtype=np.complex) + r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex) + #z = np.acorrelate(x, y, 'full') + #assert_array_almost_equal(z, r_z) + + r_z = r_z[::-1].conjugate() + z = np.correlate(y, x, 'full', old_behavior=self.old_behavior) + assert_array_almost_equal(z, r_z) + +class TestArgwhere(object): + def test_2D(self): + x = np.arange(6).reshape((2, 3)) + assert_array_equal(np.argwhere(x > 1), + [[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + def test_list(self): + assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) + +class TestStringFunction(object): + def test_set_string_function(self): + a = np.array([1]) + np.set_string_function(lambda x: "FOO", repr=True) + assert_equal(repr(a), "FOO") + np.set_string_function(None, repr=True) + assert_equal(repr(a), "array([1])") + + np.set_string_function(lambda x: "FOO", repr=False) + assert_equal(str(a), "FOO") + np.set_string_function(None, repr=False) + assert_equal(str(a), "[1]") + +class TestRoll(TestCase): + def test_roll1d(self): + x = np.arange(10) + xr = np.roll(x, 2) + assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) + + def test_roll2d(self): + x2 = np.reshape(np.arange(10), (2, 5)) + x2r = np.roll(x2, 1) + assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) + + x2r = np.roll(x2, 1, axis=0) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, 1, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + def test_roll_empty(self): + x = 
np.array([]) + assert_equal(np.roll(x, 1), np.array([])) + +class TestCross(TestCase): + def test_2x2(self): + u = [1, 2] + v = [3, 4] + z = -2 + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + def test_2x3(self): + u = [1, 2] + v = [3, 4, 5] + z = np.array([10, -5, -2]) + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + def test_3x3(self): + u = [1, 2, 3] + v = [4, 5, 6] + z = np.array([-3, 6, -3]) + cp = cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + def test_broadcasting(self): + # Ticket #2624 (Trac #2032) + u = np.tile([1, 2], (11, 1)) + v = np.tile([3, 4], (11, 1)) + z = -2 + assert_equal(np.cross(u, v), z) + assert_equal(np.cross(v, u), -z) + assert_equal(np.cross(u, u), 0) + + u = np.tile([1, 2], (11, 1)).T + v = np.tile([3, 4, 5], (11, 1)) + z = np.tile([10, -5, -2], (11, 1)) + assert_equal(np.cross(u, v, axisa=0), z) + assert_equal(np.cross(v, u.T), -z) + assert_equal(np.cross(v, v), 0) + + u = np.tile([1, 2, 3], (11, 1)).T + v = np.tile([3, 4], (11, 1)).T + z = np.tile([-12, 9, -2], (11, 1)) + assert_equal(np.cross(u, v, axisa=0, axisb=0), z) + assert_equal(np.cross(v.T, u.T), -z) + assert_equal(np.cross(u.T, u.T), 0) + + u = np.tile([1, 2, 3], (5, 1)) + v = np.tile([4, 5, 6], (5, 1)).T + z = np.tile([-3, 6, -3], (5, 1)) + assert_equal(np.cross(u, v, axisb=0), z) + assert_equal(np.cross(v.T, u), -z) + assert_equal(np.cross(u, u), 0) + + def test_broadcasting_shapes(self): + u = np.ones((2, 1, 3)) + v = np.ones((5, 3)) + assert_equal(np.cross(u, v).shape, (2, 5, 3)) + u = np.ones((10, 3, 5)) + v = np.ones((2, 5)) + assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) + assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=2) + assert_raises(ValueError, np.cross, u, v, axisa=3, axisb=0) + u = np.ones((10, 3, 5, 7)) + v = np.ones((5, 7, 2)) + assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) + assert_raises(ValueError, np.cross, u, v, axisa=-5, axisb=2) + assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=-4) + +def test_outer_out_param(): + arr1 = np.ones((5,)) + arr2 = np.ones((2,)) + arr3 = np.linspace(-2, 2, 5) + out1 = np.ndarray(shape=(5,5)) + out2 = np.ndarray(shape=(2, 5)) + res1 = np.outer(arr1, arr3, out1) + assert_equal(res1, out1) + assert_equal(np.outer(arr2, arr3, out2), out2) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numerictypes.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numerictypes.py new file mode 100644 index 0000000000000..ef8db0f334781 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numerictypes.py @@ -0,0 +1,377 @@ +from __future__ import division, absolute_import, print_function + +import sys +import warnings +from numpy.testing import * +from numpy.compat import asbytes, asunicode +import numpy as np + +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | 
+-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., (asbytes('nn'), [6j, 4j], [6., 4.], [1, 2]), asbytes('NN'), True), asbytes('cc'), (asunicode('NN'), 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., (asbytes('oo'), [7j, 5j], [7., 5.], [2, 1]), asbytes('OO'), False), asbytes('dd'), (asunicode('OO'), 7j), [[7., 5.], [7., 5.]], 9), + ] + + +byteorder = {'little':'<', 'big':'>'}[sys.byteorder] + +def normalize_descr(descr): + "Normalize a description adding the platform byteorder." + + out = [] + for item in descr: + dtype = item[1] + if isinstance(dtype, str): + if dtype[0] not in ['|', '<', '>']: + onebyte = dtype[1:] == "1" + if onebyte or dtype[0] in ['S', 'V', 'b']: + dtype = "|" + dtype + else: + dtype = byteorder + dtype + if len(item) > 2 and np.prod(item[2]) > 1: + nitem = (item[0], dtype, item[2]) + else: + nitem = (item[0], dtype) + out.append(nitem) + elif isinstance(item[1], list): + l = [] + for j in normalize_descr(item[1]): + l.append(j) + out.append((item[0], l)) + else: + raise ValueError("Expected a str or list and got %s" % \ + (type(item))) + return out + + +############################################################ +# Creation tests +############################################################ + +class create_zeros(object): + """Check the creation of heterogeneous arrays zero-valued""" + + def test_zeros0D(self): + """Check creation of 0-dimensional objects""" + h = np.zeros((), dtype=self._descr) + self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + self.assertTrue(h.dtype.fields['x'][0].name[:4] == 'void') + self.assertTrue(h.dtype.fields['x'][0].char == 'V') + self.assertTrue(h.dtype.fields['x'][0].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((), dtype='u1')) + + def test_zerosSD(self): + """Check creation of single-dimensional objects""" + h = np.zeros((2,), dtype=self._descr) + self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + self.assertTrue(h.dtype['y'].name[:4] == 'void') + self.assertTrue(h.dtype['y'].char == 'V') + self.assertTrue(h.dtype['y'].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2,), dtype='u1')) + + def test_zerosMD(self): + """Check creation of multi-dimensional objects""" + h = np.zeros((2, 3), dtype=self._descr) + self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + self.assertTrue(h.dtype['z'].name == 'uint8') + self.assertTrue(h.dtype['z'].char == 'B') + self.assertTrue(h.dtype['z'].type == np.uint8) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) + + +class test_create_zeros_plain(create_zeros, TestCase): + """Check the creation of heterogeneous arrays zero-valued (plain)""" + _descr = Pdescr + +class test_create_zeros_nested(create_zeros, TestCase): + """Check the creation of heterogeneous arrays 
zero-valued (nested)""" + _descr = Ndescr + + +class create_values(object): + """Check the creation of heterogeneous arrays with values""" + + def test_tuple(self): + """Check creation from tuples""" + h = np.array(self._buffer, dtype=self._descr) + self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + self.assertTrue(h.shape == (2,)) + else: + self.assertTrue(h.shape == ()) + + def test_list_of_tuple(self): + """Check creation from list of tuples""" + h = np.array([self._buffer], dtype=self._descr) + self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + self.assertTrue(h.shape == (1, 2)) + else: + self.assertTrue(h.shape == (1,)) + + def test_list_of_list_of_tuple(self): + """Check creation from list of list of tuples""" + h = np.array([[self._buffer]], dtype=self._descr) + self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + self.assertTrue(h.shape == (1, 1, 2)) + else: + self.assertTrue(h.shape == (1, 1)) + + +class test_create_values_plain_single(create_values, TestCase): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class test_create_values_plain_multiple(create_values, TestCase): + """Check the creation of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class test_create_values_nested_single(create_values, TestCase): + """Check the creation of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = 0 + _buffer = NbufferT[0] + +class test_create_values_nested_multiple(create_values, TestCase): + """Check the creation of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = 1 + _buffer = NbufferT + + +############################################################ +# Reading tests +############################################################ + +class read_values_plain(object): + """Check the reading of values in heterogeneous arrays (plain)""" + + def test_access_fields(self): + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + self.assertTrue(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) + else: + self.assertTrue(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][1], + self._buffer[1][1]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][2], + self._buffer[1][2]], dtype='u1')) + + +class test_read_values_plain_single(read_values_plain, TestCase): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class test_read_values_plain_multiple(read_values_plain, TestCase): + """Check the values of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class read_values_nested(object): + """Check the reading of values in heterogeneous arrays (nested)""" + + + def test_access_top_fields(self): + """Check reading the top fields of a nested array""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + self.assertTrue(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[4], 
+                                 dtype='f8'))
+            assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
+        else:
+            self.assertTrue(len(h) == 2)
+            assert_equal(h['x'], np.array([self._buffer[0][0],
+                                           self._buffer[1][0]], dtype='i4'))
+            assert_equal(h['y'], np.array([self._buffer[0][4],
+                                           self._buffer[1][4]], dtype='f8'))
+            assert_equal(h['z'], np.array([self._buffer[0][5],
+                                           self._buffer[1][5]], dtype='u1'))
+
+
+    def test_nested1_accessors(self):
+        """Check reading the nested fields of a nested array (1st level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_equal(h['Info']['value'],
+                         np.array(self._buffer[1][0], dtype='c16'))
+            assert_equal(h['Info']['y2'],
+                         np.array(self._buffer[1][1], dtype='f8'))
+            assert_equal(h['info']['Name'],
+                         np.array(self._buffer[3][0], dtype='U2'))
+            assert_equal(h['info']['Value'],
+                         np.array(self._buffer[3][1], dtype='c16'))
+        else:
+            assert_equal(h['Info']['value'],
+                         np.array([self._buffer[0][1][0],
+                                   self._buffer[1][1][0]],
+                                  dtype='c16'))
+            assert_equal(h['Info']['y2'],
+                         np.array([self._buffer[0][1][1],
+                                   self._buffer[1][1][1]],
+                                  dtype='f8'))
+            assert_equal(h['info']['Name'],
+                         np.array([self._buffer[0][3][0],
+                                   self._buffer[1][3][0]],
+                                  dtype='U2'))
+            assert_equal(h['info']['Value'],
+                         np.array([self._buffer[0][3][1],
+                                   self._buffer[1][3][1]],
+                                  dtype='c16'))
+
+    def test_nested2_accessors(self):
+        """Check reading the nested fields of a nested array (2nd level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_equal(h['Info']['Info2']['value'],
+                         np.array(self._buffer[1][2][1], dtype='c16'))
+            assert_equal(h['Info']['Info2']['z3'],
+                         np.array(self._buffer[1][2][3], dtype='u4'))
+        else:
+            assert_equal(h['Info']['Info2']['value'],
+                         np.array([self._buffer[0][1][2][1],
+                                   self._buffer[1][1][2][1]],
+                                  dtype='c16'))
+            assert_equal(h['Info']['Info2']['z3'],
+                         np.array([self._buffer[0][1][2][3],
+                                   self._buffer[1][1][2][3]],
+                                  dtype='u4'))
+
+    def test_nested1_descriptor(self):
+        """Check access to nested descriptors of a nested array (1st level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        self.assertTrue(h.dtype['Info']['value'].name == 'complex128')
+        self.assertTrue(h.dtype['Info']['y2'].name == 'float64')
+        if sys.version_info[0] >= 3:
+            self.assertTrue(h.dtype['info']['Name'].name == 'str256')
+        else:
+            self.assertTrue(h.dtype['info']['Name'].name == 'unicode256')
+        self.assertTrue(h.dtype['info']['Value'].name == 'complex128')
+
+    def test_nested2_descriptor(self):
+        """Check access to nested descriptors of a nested array (2nd level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        self.assertTrue(h.dtype['Info']['Info2']['value'].name == 'void256')
+        self.assertTrue(h.dtype['Info']['Info2']['z3'].name == 'void64')
+
+
+class test_read_values_nested_single(read_values_nested, TestCase):
+    """Check the values of heterogeneous arrays (nested, single row)"""
+    _descr = Ndescr
+    multiple_rows = False
+    _buffer = NbufferT[0]
+
+class test_read_values_nested_multiple(read_values_nested, TestCase):
+    """Check the values of heterogeneous arrays (nested, multiple rows)"""
+    _descr = Ndescr
+    multiple_rows = True
+    _buffer = NbufferT
+
+class TestEmptyField(TestCase):
+    def test_assign(self):
+        a = np.arange(10, dtype=np.float32)
+        a.dtype = [("int", "<0i4"), ("float", "<2f4")]
+        assert_(a['int'].shape == (5, 0))
+        assert_(a['float'].shape == (5, 2))
+
+class TestCommonType(TestCase):
+    def test_scalar_loses1(self):
+        res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
+        assert_(res == 'f4')
+    def
test_scalar_loses2(self): + res = np.find_common_type(['f4', 'f4'], ['i8']) + assert_(res == 'f4') + def test_scalar_wins(self): + res = np.find_common_type(['f4', 'f4', 'i2'], ['c8']) + assert_(res == 'c8') + def test_scalar_wins2(self): + res = np.find_common_type(['u4', 'i4', 'i4'], ['f4']) + assert_(res == 'f8') + def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose + res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) + assert_(res == 'f8') + +class TestMultipleFields(TestCase): + def setUp(self): + self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + def _bad_call(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', DeprecationWarning) + return self.ary['f0', 'f1'] + def test_no_tuple(self): + self.assertRaises(IndexError, self._bad_call) + def test_return(self): + res = self.ary[['f0', 'f2']].tolist() + assert_(res == [(1, 3), (5, 7)]) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_print.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_print.py new file mode 100644 index 0000000000000..487b5de7d15bd --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_print.py @@ -0,0 +1,245 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import * +import nose + +import locale +import sys + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} + + +def check_float_type(tp): + for x in [0, 1, -1, 1e20] : + assert_equal(str(tp(x)), str(float(x)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e10).itemsize > 4: + assert_equal(str(tp(1e10)), str(float('1e10')), + err_msg='Failed str formatting for type %s' % tp) + else: + ref = '1e+10' + assert_equal(str(tp(1e10)), ref, + err_msg='Failed str formatting for type %s' % tp) + +def test_float_types(): + """ Check formatting. + + This is only for the str function, and only for simple types. + The precision of np.float and np.longdouble aren't the same as the + python float precision. + + """ + for t in [np.float32, np.double, np.longdouble] : + yield check_float_type, t + +def check_nan_inf_float(tp): + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg='Failed str formatting for type %s' % tp) + +def test_nan_inf_float(): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float and np.longdouble aren't the same as the + python float precision. + + """ + for t in [np.float32, np.double, np.longdouble] : + yield check_nan_inf_float, t + +def check_complex_type(tp): + for x in [0, 1, -1, 1e20] : + assert_equal(str(tp(x)), str(complex(x)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x*1j)), str(complex(x*1j)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e10).itemsize > 8: + assert_equal(str(tp(1e10)), str(complex(1e10)), + err_msg='Failed str formatting for type %s' % tp) + else: + ref = '(1e+10+0j)' + assert_equal(str(tp(1e10)), ref, + err_msg='Failed str formatting for type %s' % tp) + +def test_complex_types(): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. 
+ The precision of np.float and np.longdouble aren't the same as the + python float precision. + + """ + for t in [np.complex64, np.cdouble, np.clongdouble] : + yield check_complex_type, t + +def test_complex_inf_nan(): + """Check inf/nan formatting of complex types.""" + TESTS = { + complex(np.inf, 0): "(inf+0j)", + complex(0, np.inf): "inf*j", + complex(-np.inf, 0): "(-inf+0j)", + complex(0, -np.inf): "-inf*j", + complex(np.inf, 1): "(inf+1j)", + complex(1, np.inf): "(1+inf*j)", + complex(-np.inf, 1): "(-inf+1j)", + complex(1, -np.inf): "(1-inf*j)", + complex(np.nan, 0): "(nan+0j)", + complex(0, np.nan): "nan*j", + complex(-np.nan, 0): "(nan+0j)", + complex(0, -np.nan): "nan*j", + complex(np.nan, 1): "(nan+1j)", + complex(1, np.nan): "(1+nan*j)", + complex(-np.nan, 1): "(nan+1j)", + complex(1, -np.nan): "(1+nan*j)", + } + for tp in [np.complex64, np.cdouble, np.clongdouble]: + for c, s in TESTS.items(): + yield _check_complex_inf_nan, c, s, tp + +def _check_complex_inf_nan(c, s, dtype): + assert_equal(str(dtype(c)), s) + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print(tp(x)) + sys.stdout = file + if ref: + print(ref) + else: + print(x) + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg='print failed for type%s' % tp) + +def check_float_type_print(tp): + for x in [0, 1, -1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e10).itemsize > 4: + _test_redirected_print(float(1e10), tp) + else: + ref = '1e+10' + _test_redirected_print(float(1e10), tp, ref) + +def check_complex_type_print(tp): + # We do not create complex with inf/nan directly because the feature is + # missing in python < 2.6 + for x in [0, 1, -1, 1e20]: + _test_redirected_print(complex(x), tp) + + if tp(1e10).itemsize > 8: + _test_redirected_print(complex(1e10), tp) + else: + ref = '(1e+10+0j)' + _test_redirected_print(complex(1e10), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + +def test_float_type_print(): + """Check formatting when using print """ + for t in [np.float32, np.double, np.longdouble] : + yield check_float_type_print, t + +def test_complex_type_print(): + """Check formatting when using print """ + for t in [np.complex64, np.cdouble, np.clongdouble] : + yield check_complex_type_print, t + +def test_scalar_format(): + """Test the str.format method with NumPy scalar types""" + tests = [('{0}', True, np.bool_), + ('{0}', False, np.bool_), + ('{0:d}', 130, np.uint8), + ('{0:d}', 50000, np.uint16), + ('{0:d}', 3000000000, np.uint32), + ('{0:d}', 15000000000000000000, np.uint64), + ('{0:d}', -120, np.int8), + ('{0:d}', -30000, np.int16), + ('{0:d}', -2000000000, np.int32), + ('{0:d}', -7000000000000000000, np.int64), + ('{0:g}', 1.5, np.float16), + ('{0:g}', 1.5, np.float32), + ('{0:g}', 1.5, np.float64), + ('{0:g}', 1.5, np.longdouble)] + # Python 2.6 doesn't implement complex.__format__ + if sys.version_info[:2] > (2, 6): + tests += [('{0:g}', 1.5+0.5j, np.complex64), + ('{0:g}', 1.5+0.5j, np.complex128), + ('{0:g}', 1.5+0.5j, np.clongdouble)] + + for (fmat, val, valtype) in tests: + try: + assert_equal(fmat.format(val), fmat.format(valtype(val)), + "failed with val %s, type %s" % (val, valtype)) + except 
ValueError as e: + assert_(False, + "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % + (fmat, repr(val), repr(valtype), str(e))) + + +# Locale tests: scalar types formatting should be independent of the locale +def in_foreign_locale(func): + """ + Swap LC_NUMERIC locale to one in which the decimal point is ',' and not '.' + If not possible, raise nose.SkipTest + + """ + if sys.platform == 'win32': + locales = ['FRENCH'] + else: + locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] + + def wrapper(*args, **kwargs): + curloc = locale.getlocale(locale.LC_NUMERIC) + try: + for loc in locales: + try: + locale.setlocale(locale.LC_NUMERIC, loc) + break + except locale.Error: + pass + else: + raise nose.SkipTest("Skipping locale test, because " + "French locale not found") + return func(*args, **kwargs) + finally: + locale.setlocale(locale.LC_NUMERIC, locale=curloc) + return nose.tools.make_decorator(func)(wrapper) + +@in_foreign_locale +def test_locale_single(): + assert_equal(str(np.float32(1.2)), str(float(1.2))) + +@in_foreign_locale +def test_locale_double(): + assert_equal(str(np.double(1.2)), str(float(1.2))) + +@in_foreign_locale +def test_locale_longdouble(): + assert_equal(str(np.longdouble(1.2)), str(float(1.2))) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_records.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_records.py new file mode 100644 index 0000000000000..8c9ce5c708a47 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_records.py @@ -0,0 +1,176 @@ +from __future__ import division, absolute_import, print_function + +from os import path +import numpy as np +from numpy.testing import * +from numpy.compat import asbytes, asunicode + +import warnings +import collections +import pickle + + +class TestFromrecords(TestCase): + def test_fromrecords(self): + r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], + names='col1,col2,col3') + assert_equal(r[0].item(), (456, 'dbe', 1.2)) + + def test_method_array(self): + r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big') + assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924)) + + def test_method_array2(self): + r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), + (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') + assert_equal(r[1].item(), (2, 22.0, asbytes('b'))) + + def test_recarray_slices(self): + r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), + (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') + assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d'))) + + def test_recarray_fromarrays(self): + x1 = np.array([1, 2, 3, 4]) + x2 = np.array(['a', 'dd', 'xyz', '12']) + x3 = np.array([1.1, 2, 3, 4]) + r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') + assert_equal(r[1].item(), (2, 'dd', 2.0)) + x1[1] = 34 + assert_equal(r.a, np.array([1, 2, 3, 4])) + + def test_recarray_fromfile(self): + data_dir = path.join(path.dirname(__file__), 'data') + filename = path.join(data_dir, 'recarray_from_file.fits') + fd = open(filename, 'rb') + fd.seek(2880 * 2) + r = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big') + fd.seek(2880 * 2) + r = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big') + fd.close() + + def test_recarray_from_obj(self): + count = 10 + a = np.zeros(count, dtype='O') + b = np.zeros(count, dtype='f8') + c = np.zeros(count, dtype='f8') + for i in 
range(len(a)): + a[i] = list(range(1, 10)) + + mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') + for i in range(len(a)): + assert_((mine.date[i] == list(range(1, 10)))) + assert_((mine.data1[i] == 0.0)) + assert_((mine.data2[i] == 0.0)) + + def test_recarray_from_repr(self): + x = np.rec.array([ (1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + y = eval("np." + repr(x)) + assert_(isinstance(y, np.recarray)) + assert_equal(y, x) + + def test_recarray_from_names(self): + ra = np.rec.array([ + (1, 'abc', 3.7000002861022949, 0), + (2, 'xy', 6.6999998092651367, 1), + (0, ' ', 0.40000000596046448, 0)], + names='c1, c2, c3, c4') + pa = np.rec.fromrecords([ + (1, 'abc', 3.7000002861022949, 0), + (2, 'xy', 6.6999998092651367, 1), + (0, ' ', 0.40000000596046448, 0)], + names='c1, c2, c3, c4') + assert_(ra.dtype == pa.dtype) + assert_(ra.shape == pa.shape) + for k in range(len(ra)): + assert_(ra[k].item() == pa[k].item()) + + def test_recarray_conflict_fields(self): + ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2), + (3, 'wrs', 1.3)], + names='field, shape, mean') + ra.mean = [1.1, 2.2, 3.3] + assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3]) + assert_(type(ra.mean) is type(ra.var)) + ra.shape = (1, 3) + assert_(ra.shape == (1, 3)) + ra.shape = ['A', 'B', 'C'] + assert_array_equal(ra['shape'], [['A', 'B', 'C']]) + ra.field = 5 + assert_array_equal(ra['field'], [[5, 5, 5]]) + assert_(isinstance(ra.field, collections.Callable)) + + def test_fromrecords_with_explicit_dtype(self): + a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], + dtype=[('a', int), ('b', np.object)]) + assert_equal(a.a, [1, 2]) + assert_equal(a[0].a, 1) + assert_equal(a.b, ['a', 'bbb']) + assert_equal(a[-1].b, 'bbb') + # + ndtype = np.dtype([('a', int), ('b', np.object)]) + a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype) + assert_equal(a.a, [1, 2]) + assert_equal(a[0].a, 1) + assert_equal(a.b, ['a', 'bbb']) + assert_equal(a[-1].b, 'bbb') + + +class TestRecord(TestCase): + def setUp(self): + self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], + dtype=[("col1", "= 3) or + (sys.platform == "win32" and + platform.architecture()[0] == "64bit"), + "numpy.intp('0xff', 16) not supported on Py3, " + "as it does not inherit from Python int") + def test_intp(self,level=rlevel): + """Ticket #99""" + i_width = np.int_(0).nbytes*2 - 1 + np.intp('0x' + 'f'*i_width, 16) + self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16) + self.assertRaises(ValueError, np.intp, '0x1', 32) + assert_equal(255, np.intp('0xFF', 16)) + assert_equal(1024, np.intp(1024)) + + def test_endian_bool_indexing(self,level=rlevel): + """Ticket #105""" + a = np.arange(10., dtype='>f8') + b = np.arange(10., dtype='2) & (a<6)) + xb = np.where((b>2) & (b<6)) + ya = ((a>2) & (a<6)) + yb = ((b>2) & (b<6)) + assert_array_almost_equal(xa, ya.nonzero()) + assert_array_almost_equal(xb, yb.nonzero()) + assert_(np.all(a[ya] > 0.5)) + assert_(np.all(b[yb] > 0.5)) + + def test_endian_where(self,level=rlevel): + """GitHub issue #369""" + net = np.zeros(3, dtype='>f4') + net[1] = 0.00458849 + net[2] = 0.605202 + max_net = net.max() + test = np.where(net <= 0., max_net, net) + correct = np.array([ 0.60520202, 0.00458849, 0.60520202]) + assert_array_almost_equal(test, correct) + + def test_endian_recarray(self,level=rlevel): + """Ticket #2185""" + dt = np.dtype([ + ('head', '>u4'), + ('data', '>u4', 2), + ]) + buf = np.recarray(1, dtype=dt) + buf[0]['head'] = 1 + buf[0]['data'][:] = [1, 1] + + h = buf[0]['head'] + d = buf[0]['data'][0] + 
buf[0]['head'] = h + buf[0]['data'][0] = d + assert_(buf[0]['head'] == 1) + + def test_mem_dot(self,level=rlevel): + """Ticket #106""" + x = np.random.randn(0, 1) + y = np.random.randn(10, 1) + # Dummy array to detect bad memory access: + _z = np.ones(10) + _dummy = np.empty((0, 10)) + z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) + np.dot(x, np.transpose(y), out=z) + assert_equal(_z, np.ones(10)) + # Do the same for the built-in dot: + np.core.multiarray.dot(x, np.transpose(y), out=z) + assert_equal(_z, np.ones(10)) + + def test_arange_endian(self,level=rlevel): + """Ticket #111""" + ref = np.arange(10) + x = np.arange(10, dtype=' 8: +# a = np.exp(np.array([1000],dtype=np.longfloat)) +# assert_(str(a)[1:9] == str(a[0])[:8]) + + def test_argmax(self,level=rlevel): + """Ticket #119""" + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + for i in range(a.ndim): + aargmax = a.argmax(i) + + def test_mem_divmod(self,level=rlevel): + """Ticket #126""" + for i in range(10): + divmod(np.array([i])[0], 10) + + + def test_hstack_invalid_dims(self,level=rlevel): + """Ticket #128""" + x = np.arange(9).reshape((3, 3)) + y = np.array([0, 0, 0]) + self.assertRaises(ValueError, np.hstack, (x, y)) + + def test_squeeze_type(self,level=rlevel): + """Ticket #133""" + a = np.array([3]) + b = np.array(3) + assert_(type(a.squeeze()) is np.ndarray) + assert_(type(b.squeeze()) is np.ndarray) + + def test_add_identity(self,level=rlevel): + """Ticket #143""" + assert_equal(0, np.add.identity) + + def test_numpy_float_python_long_addition(self): + # Check that numpy float and python longs can be added correctly. + a = np.float_(23.) + 2**135 + assert_equal(a, 23. + 2**135) + + def test_binary_repr_0(self,level=rlevel): + """Ticket #151""" + assert_equal('0', np.binary_repr(0)) + + def test_rec_iterate(self,level=rlevel): + """Ticket #160""" + descr = np.dtype([('i', int), ('f', float), ('s', '|S3')]) + x = np.rec.array([(1, 1.1, '1.0'), + (2, 2.2, '2.0')], dtype=descr) + x[0].tolist() + [i for i in x[0]] + + def test_unicode_string_comparison(self,level=rlevel): + """Ticket #190""" + a = np.array('hello', np.unicode_) + b = np.array('world') + a == b + + def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel): + """Fix in r2836""" + # Create non-contiguous Fortran ordered array + x = np.array(np.random.rand(3, 3), order='F')[:, :2] + assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes())) + + def test_flat_assignment(self,level=rlevel): + """Correct behaviour of ticket #194""" + x = np.empty((3, 1)) + x.flat = np.arange(3) + assert_array_almost_equal(x, [[0], [1], [2]]) + x.flat = np.arange(3, dtype=float) + assert_array_almost_equal(x, [[0], [1], [2]]) + + def test_broadcast_flat_assignment(self,level=rlevel): + """Ticket #194""" + x = np.empty((3, 1)) + def bfa(): x[:] = np.arange(3) + def bfb(): x[:] = np.arange(3, dtype=float) + self.assertRaises(ValueError, bfa) + self.assertRaises(ValueError, bfb) + + def test_nonarray_assignment(self): + # See also Issue gh-2870, test for non-array assignment + # and equivalent unsafe casted array assignment + a = np.arange(10) + b = np.ones(10, dtype=bool) + r = np.arange(10) + def assign(a, b, c): + a[b] = c + assert_raises(ValueError, assign, a, b, np.nan) + a[b] = np.array(np.nan) # but not this. 
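+        # Editor's sketch (not part of the upstream test): the distinction
+        # exercised just above is that a bare Python float goes through the
+        # safe-casting check, while a zero-dimensional ndarray takes the
+        # array-assignment path, which casts unsafely:
+        #
+        #     a = np.arange(10)
+        #     a[a > 5] = np.nan            # ValueError: cannot cast safely
+        #     a[a > 5] = np.array(np.nan)  # accepted (unsafe cast of an array)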
+ assert_raises(ValueError, assign, a, r, np.nan) + a[r] = np.array(np.nan) + + def test_unpickle_dtype_with_object(self,level=rlevel): + """Implemented in r2840""" + dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')]) + f = BytesIO() + pickle.dump(dt, f) + f.seek(0) + dt_ = pickle.load(f) + f.close() + assert_equal(dt, dt_) + + def test_mem_array_creation_invalid_specification(self,level=rlevel): + """Ticket #196""" + dt = np.dtype([('x', int), ('y', np.object_)]) + # Wrong way + self.assertRaises(ValueError, np.array, [1, 'object'], dt) + # Correct way + np.array([(1, 'object')], dt) + + def test_recarray_single_element(self,level=rlevel): + """Ticket #202""" + a = np.array([1, 2, 3], dtype=np.int32) + b = a.copy() + r = np.rec.array(a, shape=1, formats=['3i4'], names=['d']) + assert_array_equal(a, b) + assert_equal(a, r[0][0]) + + def test_zero_sized_array_indexing(self,level=rlevel): + """Ticket #205""" + tmp = np.array([]) + def index_tmp(): tmp[np.array(10)] + self.assertRaises(IndexError, index_tmp) + + def test_chararray_rstrip(self,level=rlevel): + """Ticket #222""" + x = np.chararray((1,), 5) + x[0] = asbytes('a ') + x = x.rstrip() + assert_equal(x[0], asbytes('a')) + + def test_object_array_shape(self,level=rlevel): + """Ticket #239""" + assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,)) + assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2)) + assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2)) + assert_equal(np.array([], dtype=object).shape, (0,)) + assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0)) + assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,)) + + def test_mem_around(self,level=rlevel): + """Ticket #243""" + x = np.zeros((1,)) + y = [0] + decimal = 6 + np.around(abs(x-y), decimal) <= 10.0**(-decimal) + + def test_character_array_strip(self,level=rlevel): + """Ticket #246""" + x = np.char.array(("x", "x ", "x ")) + for c in x: assert_equal(c, "x") + + def test_lexsort(self,level=rlevel): + """Lexsort memory error""" + v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + assert_equal(np.lexsort(v), 0) + + def test_lexsort_invalid_sequence(self): + # Issue gh-4123 + class BuggySequence(object): + def __len__(self): + return 4 + def __getitem__(self, key): + raise KeyError + + assert_raises(KeyError, np.lexsort, BuggySequence()) + + def test_pickle_py2_bytes_encoding(self): + # Check that arrays and scalars pickled on Py2 are + # unpickleable on Py3 using encoding='bytes' + + test_data = [ + # (original, py2_pickle) + (np.unicode_('\u6f2c'), + asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" + "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n" + "I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")), + + (np.array([9e123], dtype=np.float64), + asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n" + "p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n" + "p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n" + "I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")), + + (np.array([(9e123,)], dtype=[('name', float)]), + asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n" + "(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n" + "(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n" + "(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n" + "I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n" + "bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")), + ] + + if 
sys.version_info[:2] >= (3, 4): + # encoding='bytes' was added in Py3.4 + for original, data in test_data: + result = pickle.loads(data, encoding='bytes') + assert_equal(result, original) + + if isinstance(result, np.ndarray) and result.dtype.names: + for name in result.dtype.names: + assert_(isinstance(name, str)) + + def test_pickle_dtype(self,level=rlevel): + """Ticket #251""" + pickle.dumps(np.float) + + def test_swap_real(self, level=rlevel): + """Ticket #265""" + assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0) + assert_equal(np.arange(4, dtype=' 1 and x['two'] > 2) + + def test_method_args(self, level=rlevel): + # Make sure methods and functions have same default axis + # keyword and arguments + funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'), + ('sometrue', 'any'), + ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'), + 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', + 'round', 'min', 'max', 'argsort', 'sort'] + funcs2 = ['compress', 'take', 'repeat'] + + for func in funcs1: + arr = np.random.rand(8, 7) + arr2 = arr.copy() + if isinstance(func, tuple): + func_meth = func[1] + func = func[0] + else: + func_meth = func + res1 = getattr(arr, func_meth)() + res2 = getattr(np, func)(arr2) + if res1 is None: + res1 = arr + + if res1.dtype.kind in 'uib': + assert_((res1 == res2).all(), func) + else: + assert_(abs(res1-res2).max() < 1e-8, func) + + for func in funcs2: + arr1 = np.random.rand(8, 7) + arr2 = np.random.rand(8, 7) + res1 = None + if func == 'compress': + arr1 = arr1.ravel() + res1 = getattr(arr2, func)(arr1) + else: + arr2 = (15*arr2).astype(int).ravel() + if res1 is None: + res1 = getattr(arr1, func)(arr2) + res2 = getattr(np, func)(arr1, arr2) + assert_(abs(res1-res2).max() < 1e-8, func) + + def test_mem_lexsort_strings(self, level=rlevel): + """Ticket #298""" + lst = ['abc', 'cde', 'fgh'] + np.lexsort((lst,)) + + def test_fancy_index(self, level=rlevel): + """Ticket #302""" + x = np.array([1, 2])[np.array([0])] + assert_equal(x.shape, (1,)) + + def test_recarray_copy(self, level=rlevel): + """Ticket #312""" + dt = [('x', np.int16), ('y', np.float64)] + ra = np.array([(1, 2.3)], dtype=dt) + rb = np.rec.array(ra, dtype=dt) + rb['x'] = 2. 
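+        # Editor's note (not upstream): np.rec.array(ra, dtype=dt) makes a
+        # copy of the source data, so writing to the recarray must leave the
+        # original structured array untouched, as the assert below verifies.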
+ assert_(ra['x'] != rb['x']) + + def test_rec_fromarray(self, level=rlevel): + """Ticket #322""" + x1 = np.array([[1, 2], [3, 4], [5, 6]]) + x2 = np.array(['a', 'dd', 'xyz']) + x3 = np.array([1.1, 2, 3]) + np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") + + def test_object_array_assign(self, level=rlevel): + x = np.empty((2, 2), object) + x.flat[2] = (1, 2, 3) + assert_equal(x.flat[2], (1, 2, 3)) + + def test_ndmin_float64(self, level=rlevel): + """Ticket #324""" + x = np.array([1, 2, 3], dtype=np.float64) + assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) + assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) + + def test_ndmin_order(self, level=rlevel): + """Issue #465 and related checks""" + assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) + + def test_mem_axis_minimization(self, level=rlevel): + """Ticket #327""" + data = np.arange(5) + data = np.add.outer(data, data) + + def test_mem_float_imag(self, level=rlevel): + """Ticket #330""" + np.float64(1.0).imag + + def test_dtype_tuple(self, level=rlevel): + """Ticket #334""" + assert_(np.dtype('i4') == np.dtype(('i4', ()))) + + def test_dtype_posttuple(self, level=rlevel): + """Ticket #335""" + np.dtype([('col1', '()i4')]) + + def test_numeric_carray_compare(self, level=rlevel): + """Ticket #341""" + assert_equal(np.array(['X'], 'c'), asbytes('X')) + + def test_string_array_size(self, level=rlevel): + """Ticket #342""" + self.assertRaises(ValueError, + np.array, [['X'], ['X', 'X', 'X']], '|S1') + + def test_dtype_repr(self, level=rlevel): + """Ticket #344""" + dt1=np.dtype(('uint32', 2)) + dt2=np.dtype(('uint32', (2,))) + assert_equal(dt1.__repr__(), dt2.__repr__()) + + def test_reshape_order(self, level=rlevel): + """Make sure reshape order works.""" + a = np.arange(6).reshape(2, 3, order='F') + assert_equal(a, [[0, 2, 4], [1, 3, 5]]) + a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + b = a[:, 1] + assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) + + def test_reshape_zero_strides(self, level=rlevel): + """Issue #380, test reshaping of zero strided arrays""" + a = np.ones(1) + a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + assert_(a.reshape(5, 1).strides[0] == 0) + + def test_reshape_zero_size(self, level=rlevel): + """GitHub Issue #2700, setting shape failed for 0-sized arrays""" + a = np.ones((0, 2)) + a.shape = (-1, 2) + + # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. + # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. 
+ @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max) + def test_reshape_trailing_ones_strides(self): + # GitHub issue gh-2949, bad strides for trailing ones of new shape + a = np.zeros(12, dtype=np.int32)[::2] # not contiguous + strides_c = (16, 8, 8, 8) + strides_f = (8, 24, 48, 48) + assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) + assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) + assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) + + def test_repeat_discont(self, level=rlevel): + """Ticket #352""" + a = np.arange(12).reshape(4, 3)[:, 2] + assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) + + def test_array_index(self, level=rlevel): + """Make sure optimization is not called in this case.""" + a = np.array([1, 2, 3]) + a2 = np.array([[1, 2, 3]]) + assert_equal(a[np.where(a==3)], a2[np.where(a2==3)]) + + def test_object_argmax(self, level=rlevel): + a = np.array([1, 2, 3], dtype=object) + assert_(a.argmax() == 2) + + def test_recarray_fields(self, level=rlevel): + """Ticket #372""" + dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) + dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) + for a in [np.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)]), + np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), + np.rec.fromarrays([(1, 2), (3, 4)])]: + assert_(a.dtype in [dt0, dt1]) + + def test_random_shuffle(self, level=rlevel): + """Ticket #374""" + a = np.arange(5).reshape((5, 1)) + b = a.copy() + np.random.shuffle(b) + assert_equal(np.sort(b, axis=0), a) + + def test_refcount_vdot(self, level=rlevel): + """Changeset #3443""" + _assert_valid_refcount(np.vdot) + + def test_startswith(self, level=rlevel): + ca = np.char.array(['Hi', 'There']) + assert_equal(ca.startswith('H'), [True, False]) + + def test_noncommutative_reduce_accumulate(self, level=rlevel): + """Ticket #413""" + tosubtract = np.arange(5) + todivide = np.array([2.0, 0.5, 0.25]) + assert_equal(np.subtract.reduce(tosubtract), -10) + assert_equal(np.divide.reduce(todivide), 16.0) + assert_array_equal(np.subtract.accumulate(tosubtract), + np.array([0, -1, -3, -6, -10])) + assert_array_equal(np.divide.accumulate(todivide), + np.array([2., 4., 16.])) + + def test_convolve_empty(self, level=rlevel): + """Convolve should raise an error for empty input array.""" + self.assertRaises(ValueError, np.convolve, [], [1]) + self.assertRaises(ValueError, np.convolve, [1], []) + + def test_multidim_byteswap(self, level=rlevel): + """Ticket #449""" + r=np.array([(1, (0, 1, 2))], dtype="i2,3i2") + assert_array_equal(r.byteswap(), + np.array([(256, (0, 256, 512))], r.dtype)) + + def test_string_NULL(self, level=rlevel): + """Changeset 3557""" + assert_equal(np.array("a\x00\x0b\x0c\x00").item(), + 'a\x00\x0b\x0c') + + def test_junk_in_string_fields_of_recarray(self, level=rlevel): + """Ticket #483""" + r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')]) + assert_(asbytes(r['var1'][0][0]) == asbytes('abc')) + + def test_take_output(self, level=rlevel): + """Ensure that 'take' honours output parameter.""" + x = np.arange(12).reshape((3, 4)) + a = np.take(x, [0, 2], axis=1) + b = np.zeros_like(a) + np.take(x, [0, 2], axis=1, out=b) + assert_array_equal(a, b) + + def test_take_object_fail(self): + # Issue gh-3001 + d = 123. 
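+        # Editor's note (not upstream): take() with an out-of-bounds index
+        # must raise IndexError without leaking a reference to the object
+        # element, so the refcount of d is compared before and after below.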
+ a = np.array([d, 1], dtype=object) + ref_d = sys.getrefcount(d) + try: + a.take([0, 100]) + except IndexError: + pass + assert_(ref_d == sys.getrefcount(d)) + + def test_array_str_64bit(self, level=rlevel): + """Ticket #501""" + s = np.array([1, np.nan], dtype=np.float64) + with np.errstate(all='raise'): + sstr = np.array_str(s) + + def test_frompyfunc_endian(self, level=rlevel): + """Ticket #503""" + from math import radians + uradians = np.frompyfunc(radians, 1, 1) + big_endian = np.array([83.4, 83.5], dtype='>f8') + little_endian = np.array([83.4, 83.5], dtype=' object + # casting succeeds + def rs(): + x = np.ones([484, 286]) + y = np.zeros([484, 286]) + x |= y + self.assertRaises(TypeError, rs) + + def test_unicode_scalar(self, level=rlevel): + """Ticket #600""" + x = np.array(["DROND", "DROND1"], dtype="U6") + el = x[1] + new = pickle.loads(pickle.dumps(el)) + assert_equal(new, el) + + def test_arange_non_native_dtype(self, level=rlevel): + """Ticket #616""" + for T in ('>f4', '0)]=v + # After removing deprecation, the following are ValueErrors. + # This might seem odd as compared to the value error below. This + # is due to the fact that the new code always uses "nonzero" logic + # and the boolean special case is not taken. + self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float)) + self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float)) + # Old special case (different code path): + self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) + + def test_mem_scalar_indexing(self, level=rlevel): + """Ticket #603""" + x = np.array([0], dtype=float) + index = np.array(0, dtype=np.int32) + x[index] + + def test_binary_repr_0_width(self, level=rlevel): + assert_equal(np.binary_repr(0, width=3), '000') + + def test_fromstring(self, level=rlevel): + assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), + [12, 9, 9]) + + def test_searchsorted_variable_length(self, level=rlevel): + x = np.array(['a', 'aa', 'b']) + y = np.array(['d', 'e']) + assert_equal(x.searchsorted(y), [3, 3]) + + def test_string_argsort_with_zeros(self, level=rlevel): + """Check argsort for strings containing zeros.""" + x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") + assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) + assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) + + def test_string_sort_with_zeros(self, level=rlevel): + """Check sort for strings containing zeros.""" + x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") + y = np.fromstring("\x00\x01\x00\x02", dtype="|S2") + assert_array_equal(np.sort(x, kind="q"), y) + + def test_copy_detection_zero_dim(self, level=rlevel): + """Ticket #658""" + np.indices((0, 3, 4)).T.reshape(-1, 3) + + def test_flat_byteorder(self, level=rlevel): + """Ticket #657""" + x = np.arange(10) + assert_array_equal(x.astype('>i4'), x.astype('i4').flat[:], x.astype('i4')): + x = np.array([-1, 0, 1], dtype=dt) + assert_equal(x.flat[0].dtype, x[0].dtype) + + def test_copy_detection_corner_case(self, level=rlevel): + """Ticket #658""" + np.indices((0, 3, 4)).T.reshape(-1, 3) + + # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. + # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous, + # 0-sized reshape itself is tested elsewhere. 
+ @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max) + def test_copy_detection_corner_case2(self, level=rlevel): + """Ticket #771: strides are not set correctly when reshaping 0-sized + arrays""" + b = np.indices((0, 3, 4)).T.reshape(-1, 3) + assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) + + def test_object_array_refcounting(self, level=rlevel): + """Ticket #633""" + if not hasattr(sys, 'getrefcount'): + return + + # NB. this is probably CPython-specific + + cnt = sys.getrefcount + + a = object() + b = object() + c = object() + + cnt0_a = cnt(a) + cnt0_b = cnt(b) + cnt0_c = cnt(c) + + # -- 0d -> 1-d broadcast slice assignment + + arr = np.zeros(5, dtype=np.object_) + + arr[:] = a + assert_equal(cnt(a), cnt0_a + 5) + + arr[:] = b + assert_equal(cnt(a), cnt0_a) + assert_equal(cnt(b), cnt0_b + 5) + + arr[:2] = c + assert_equal(cnt(b), cnt0_b + 3) + assert_equal(cnt(c), cnt0_c + 2) + + del arr + + # -- 1-d -> 2-d broadcast slice assignment + + arr = np.zeros((5, 2), dtype=np.object_) + arr0 = np.zeros(2, dtype=np.object_) + + arr0[0] = a + assert_(cnt(a) == cnt0_a + 1) + arr0[1] = b + assert_(cnt(b) == cnt0_b + 1) + + arr[:,:] = arr0 + assert_(cnt(a) == cnt0_a + 6) + assert_(cnt(b) == cnt0_b + 6) + + arr[:, 0] = None + assert_(cnt(a) == cnt0_a + 1) + + del arr, arr0 + + # -- 2-d copying + flattening + + arr = np.zeros((5, 2), dtype=np.object_) + + arr[:, 0] = a + arr[:, 1] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + arr2 = arr[:, 0].copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.flatten() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + del arr, arr2 + + # -- concatenate, repeat, take, choose + + arr1 = np.zeros((5, 1), dtype=np.object_) + arr2 = np.zeros((5, 1), dtype=np.object_) + + arr1[...] = a + arr2[...] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + arr3 = np.concatenate((arr1, arr2)) + assert_(cnt(a) == cnt0_a + 5 + 5) + assert_(cnt(b) == cnt0_b + 5 + 5) + + arr3 = arr1.repeat(3, axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3*5) + + arr3 = arr1.take([1, 2, 3], axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3) + + x = np.array([[0], [1], [0], [1], [1]], int) + arr3 = x.choose(arr1, arr2) + assert_(cnt(a) == cnt0_a + 5 + 2) + assert_(cnt(b) == cnt0_b + 5 + 3) + + def test_mem_custom_float_to_array(self, level=rlevel): + """Ticket 702""" + class MyFloat(object): + def __float__(self): + return 1.0 + + tmp = np.atleast_1d([MyFloat()]) + tmp2 = tmp.astype(float) + + def test_object_array_refcount_self_assign(self, level=rlevel): + """Ticket #711""" + class VictimObject(object): + deleted = False + def __del__(self): + self.deleted = True + d = VictimObject() + arr = np.zeros(5, dtype=np.object_) + arr[:] = d + del d + arr[:] = arr # refcount of 'd' might hit zero here + assert_(not arr[0].deleted) + arr[:] = arr # trying to induce a segfault by doing it again... 
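+        # Editor's note (not upstream): the self-assignment would only crash
+        # if the slice assignment dropped its reference to each element
+        # before re-acquiring it; surviving both statements and the assert
+        # below is the entire point of the test.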
+ assert_(not arr[0].deleted) + + def test_mem_fromiter_invalid_dtype_string(self, level=rlevel): + x = [1, 2, 3] + self.assertRaises(ValueError, + np.fromiter, [xi for xi in x], dtype='S') + + def test_reduce_big_object_array(self, level=rlevel): + """Ticket #713""" + oldsize = np.setbufsize(10*16) + a = np.array([None]*161, object) + assert_(not np.any(a)) + np.setbufsize(oldsize) + + def test_mem_0d_array_index(self, level=rlevel): + """Ticket #714""" + np.zeros(10)[np.array(0)] + + def test_floats_from_string(self, level=rlevel): + """Ticket #640, floats from string""" + fsingle = np.single('1.234') + fdouble = np.double('1.234') + flongdouble = np.longdouble('1.234') + assert_almost_equal(fsingle, 1.234) + assert_almost_equal(fdouble, 1.234) + assert_almost_equal(flongdouble, 1.234) + + def test_nonnative_endian_fill(self, level=rlevel): + """ Non-native endian arrays were incorrectly filled with scalars before + r5034. + """ + if sys.byteorder == 'little': + dtype = np.dtype('>i4') + else: + dtype = np.dtype('= 3: + f = open(filename, 'rb') + xp = pickle.load(f, encoding='latin1') + f.close() + else: + f = open(filename) + xp = pickle.load(f) + f.close() + xpd = xp.astype(np.float64) + assert_((xp.__array_interface__['data'][0] != + xpd.__array_interface__['data'][0])) + + def test_compress_small_type(self, level=rlevel): + """Ticket #789, changeset 5217. + """ + # compress with out argument segfaulted if cannot cast safely + import numpy as np + a = np.array([[1, 2], [3, 4]]) + b = np.zeros((2, 1), dtype = np.single) + try: + a.compress([True, False], axis = 1, out = b) + raise AssertionError("compress with an out which cannot be " + "safely casted should not return " + "successfully") + except TypeError: + pass + + def test_attributes(self, level=rlevel): + """Ticket #791 + """ + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + assert_(dat.info == 'jubba') + dat.resize((4, 2)) + assert_(dat.info == 'jubba') + dat.sort() + assert_(dat.info == 'jubba') + dat.fill(2) + assert_(dat.info == 'jubba') + dat.put([2, 3, 4], [6, 3, 4]) + assert_(dat.info == 'jubba') + dat.setfield(4, np.int32, 0) + assert_(dat.info == 'jubba') + dat.setflags() + assert_(dat.info == 'jubba') + assert_(dat.all(1).info == 'jubba') + assert_(dat.any(1).info == 'jubba') + assert_(dat.argmax(1).info == 'jubba') + assert_(dat.argmin(1).info == 'jubba') + assert_(dat.argsort(1).info == 'jubba') + assert_(dat.astype(TestArray).info == 'jubba') + assert_(dat.byteswap().info == 'jubba') + assert_(dat.clip(2, 7).info == 'jubba') + assert_(dat.compress([0, 1, 1]).info == 'jubba') + assert_(dat.conj().info == 'jubba') + assert_(dat.conjugate().info == 'jubba') + assert_(dat.copy().info == 'jubba') + dat2 = TestArray([2, 3, 1, 0], 'jubba') + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + assert_(dat2.choose(choices).info == 'jubba') + assert_(dat.cumprod(1).info == 'jubba') + assert_(dat.cumsum(1).info == 'jubba') + assert_(dat.diagonal().info == 'jubba') + assert_(dat.flatten().info == 'jubba') + assert_(dat.getfield(np.int32, 0).info == 'jubba') + assert_(dat.imag.info == 'jubba') + assert_(dat.max(1).info == 'jubba') + assert_(dat.mean(1).info == 'jubba') + assert_(dat.min(1).info == 'jubba') + assert_(dat.newbyteorder().info == 'jubba') + 
assert_(dat.nonzero()[0].info == 'jubba') + assert_(dat.nonzero()[1].info == 'jubba') + assert_(dat.prod(1).info == 'jubba') + assert_(dat.ptp(1).info == 'jubba') + assert_(dat.ravel().info == 'jubba') + assert_(dat.real.info == 'jubba') + assert_(dat.repeat(2).info == 'jubba') + assert_(dat.reshape((2, 4)).info == 'jubba') + assert_(dat.round().info == 'jubba') + assert_(dat.squeeze().info == 'jubba') + assert_(dat.std(1).info == 'jubba') + assert_(dat.sum(1).info == 'jubba') + assert_(dat.swapaxes(0, 1).info == 'jubba') + assert_(dat.take([2, 3, 5]).info == 'jubba') + assert_(dat.transpose().info == 'jubba') + assert_(dat.T.info == 'jubba') + assert_(dat.var(1).info == 'jubba') + assert_(dat.view(TestArray).info == 'jubba') + + def test_recarray_tolist(self, level=rlevel): + """Ticket #793, changeset r5215 + """ + # Comparisons fail for NaN, so we can't use random memory + # for the test. + buf = np.zeros(40, dtype=np.int8) + a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf) + b = a.tolist() + assert_( a[0].tolist() == b[0]) + assert_( a[1].tolist() == b[1]) + + def test_nonscalar_item_method(self): + # Make sure that .item() fails graciously when it should + a = np.arange(5) + assert_raises(ValueError, a.item) + + def test_char_array_creation(self, level=rlevel): + a = np.array('123', dtype='c') + b = np.array(asbytes_nested(['1', '2', '3'])) + assert_equal(a, b) + + def test_unaligned_unicode_access(self, level=rlevel) : + """Ticket #825""" + for i in range(1, 9) : + msg = 'unicode offset: %d chars'%i + t = np.dtype([('a', 'S%d'%i), ('b', 'U2')]) + x = np.array([(asbytes('a'), sixu('b'))], dtype=t) + if sys.version_info[0] >= 3: + assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) + else: + assert_equal(str(x), "[('a', u'b')]", err_msg=msg) + + def test_sign_for_complex_nan(self, level=rlevel): + """Ticket 794.""" + with np.errstate(invalid='ignore'): + C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) + have = np.sign(C) + want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) + assert_equal(have, want) + + def test_for_equal_names(self, level=rlevel): + """Ticket #674""" + dt = np.dtype([('foo', float), ('bar', float)]) + a = np.zeros(10, dt) + b = list(a.dtype.names) + b[0] = "notfoo" + a.dtype.names = b + assert_(a.dtype.names[0] == "notfoo") + assert_(a.dtype.names[1] == "bar") + + def test_for_object_scalar_creation(self, level=rlevel): + """Ticket #816""" + a = np.object_() + b = np.object_(3) + b2 = np.object_(3.0) + c = np.object_([4, 5]) + d = np.object_([None, {}, []]) + assert_(a is None) + assert_(type(b) is int) + assert_(type(b2) is float) + assert_(type(c) is np.ndarray) + assert_(c.dtype == object) + assert_(d.dtype == object) + + def test_array_resize_method_system_error(self): + """Ticket #840 - order should be an invalid keyword.""" + x = np.array([[0, 1], [2, 3]]) + self.assertRaises(TypeError, x.resize, (2, 2), order='C') + + def test_for_zero_length_in_choose(self, level=rlevel): + "Ticket #882" + a = np.array(1) + self.assertRaises(ValueError, lambda x: x.choose([]), a) + + def test_array_ndmin_overflow(self): + "Ticket #947." 
+        self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
+
+    def test_errobj_reference_leak(self, level=rlevel):
+        """Ticket #955"""
+        with np.errstate(all="ignore"):
+            z = int(0)
+            p = np.int32(-1)
+
+            gc.collect()
+            n_before = len(gc.get_objects())
+            z**p  # this shouldn't leak a reference to errobj
+            gc.collect()
+            n_after = len(gc.get_objects())
+            assert_(n_before >= n_after, (n_before, n_after))
+
+    def test_void_scalar_with_titles(self, level=rlevel):
+        """No ticket"""
+        data = [('john', 4), ('mary', 5)]
+        dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
+        arr = np.array(data, dtype=dtype1)
+        assert_(arr[0][0] == 'john')
+        assert_(arr[0][1] == 4)
+
+    def test_void_scalar_constructor(self):
+        # Issue #1550
+
+        # Create test string data, construct void scalar from data and assert
+        # that void scalar contains original data.
+        test_string = np.array("test")
+        test_string_void_scalar = np.core.multiarray.scalar(
+            np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
+
+        assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
+
+        # Create record scalar, construct from data and assert that
+        # reconstructed scalar is correct.
+        test_record = np.ones((), "i,i")
+        test_record_void_scalar = np.core.multiarray.scalar(
+            test_record.dtype, test_record.tobytes())
+
+        assert_(test_record_void_scalar == test_record)
+
+        # Test pickle and unpickle of void and record scalars
+        assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
+        assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
+
+    def test_blasdot_uninitialized_memory(self):
+        """Ticket #950"""
+        for m in [0, 1, 2]:
+            for n in [0, 1, 2]:
+                for k in range(3):
+                    # Try to ensure that x->data contains non-zero floats
+                    x = np.array([123456789e199], dtype=np.float64)
+                    x.resize((m, 0))
+                    y = np.array([123456789e199], dtype=np.float64)
+                    y.resize((0, n))
+
+                    # `dot` should just return zero (m, n) matrix
+                    z = np.dot(x, y)
+                    assert_(np.all(z == 0))
+                    assert_(z.shape == (m, n))
+
+    def test_zeros(self):
+        """Regression test for #1061."""
+        # Set a size which cannot fit into a 64-bit signed integer
+        sz = 2 ** 64
+        good = 'Maximum allowed dimension exceeded'
+        try:
+            np.empty(sz)
+        except ValueError as e:
+            if not str(e) == good:
+                self.fail("Got msg '%s', expected '%s'" % (e, good))
+        except Exception as e:
+            self.fail("Got exception of type %s instead of ValueError" % type(e))
+
+    def test_huge_arange(self):
+        """Regression test for #1062."""
+        # Set a size which cannot fit into a 64-bit signed integer
+        sz = 2 ** 64
+        good = 'Maximum allowed size exceeded'
+        try:
+            a = np.arange(sz)
+            self.assertTrue(a.size == sz)
+        except ValueError as e:
+            if not str(e) == good:
+                self.fail("Got msg '%s', expected '%s'" % (e, good))
+        except Exception as e:
+            self.fail("Got exception of type %s instead of ValueError" % type(e))
+
+    def test_fromiter_bytes(self):
+        """Ticket #1058"""
+        a = np.fromiter(list(range(10)), dtype='b')
+        b = np.fromiter(list(range(10)), dtype='B')
+        assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+        assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+    def test_array_from_sequence_scalar_array(self):
+        """Ticket #1078: segfaults when creating an array with a sequence of 0d
+        arrays."""
+        a = np.array((np.ones(2), np.array(2)))
+        assert_equal(a.shape, (2,))
+        assert_equal(a.dtype, np.dtype(object))
+        assert_equal(a[0], np.ones(2))
+        assert_equal(a[1], np.array(2))
+
+        a = np.array(((1,), np.array(1)))
+        assert_equal(a.shape, (2,))
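+        # Editor's note (not upstream): mixing a tuple with a 0-d array
+        # likewise cannot form a rectangular array, so the constructor falls
+        # back to a 1-d object array holding both operands, checked below.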
+ assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], (1,)) + assert_equal(a[1], np.array(1)) + + def test_array_from_sequence_scalar_array2(self): + """Ticket #1081: weird array with strange input...""" + t = np.array([np.array([]), np.array(0, object)]) + assert_equal(t.shape, (2,)) + assert_equal(t.dtype, np.dtype(object)) + + def test_array_too_big(self): + """Ticket #1080.""" + assert_raises(ValueError, np.zeros, [975]*7, np.int8) + assert_raises(ValueError, np.zeros, [26244]*5, np.int8) + + def test_dtype_keyerrors_(self): + """Ticket #1106.""" + dt = np.dtype([('f1', np.uint)]) + assert_raises(KeyError, dt.__getitem__, "f2") + assert_raises(IndexError, dt.__getitem__, 1) + assert_raises(ValueError, dt.__getitem__, 0.0) + + def test_lexsort_buffer_length(self): + """Ticket #1217, don't segfault.""" + a = np.ones(100, dtype=np.int8) + b = np.ones(100, dtype=np.int32) + i = np.lexsort((a[::-1], b)) + assert_equal(i, np.arange(100, dtype=np.int)) + + def test_object_array_to_fixed_string(self): + """Ticket #1235.""" + a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) + b = np.array(a, dtype=(np.str_, 8)) + assert_equal(a, b) + c = np.array(a, dtype=(np.str_, 5)) + assert_equal(c, np.array(['abcde', 'ijklm'])) + d = np.array(a, dtype=(np.str_, 12)) + assert_equal(a, d) + e = np.empty((2, ), dtype=(np.str_, 8)) + e[:] = a[:] + assert_equal(a, e) + + def test_unicode_to_string_cast(self): + """Ticket #1240.""" + a = np.array( + [ [sixu('abc'), sixu('\u03a3')], + [sixu('asdf'), sixu('erw')] + ], dtype='U') + def fail(): + b = np.array(a, 'S4') + self.assertRaises(UnicodeEncodeError, fail) + + def test_mixed_string_unicode_array_creation(self): + a = np.array(['1234', sixu('123')]) + assert_(a.itemsize == 16) + a = np.array([sixu('123'), '1234']) + assert_(a.itemsize == 16) + a = np.array(['1234', sixu('123'), '12345']) + assert_(a.itemsize == 20) + a = np.array([sixu('123'), '1234', sixu('12345')]) + assert_(a.itemsize == 20) + a = np.array([sixu('123'), '1234', sixu('1234')]) + assert_(a.itemsize == 16) + + def test_misaligned_objects_segfault(self): + """Ticket #1198 and #1267""" + a1 = np.zeros((10,), dtype='O,c') + a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') + a1['f0'] = a2 + r = repr(a1) + np.argmax(a1['f0']) + a1['f0'][1] = "FOO" + a1['f0'] = "FOO" + a3 = np.array(a1['f0'], dtype='S') + np.nonzero(a1['f0']) + a1.sort() + a4 = copy.deepcopy(a1) + + def test_misaligned_scalars_segfault(self): + """Ticket #1267""" + s1 = np.array(('a', 'Foo'), dtype='c,O') + s2 = np.array(('b', 'Bar'), dtype='c,O') + s1['f1'] = s2['f1'] + s1['f1'] = 'Baz' + + def test_misaligned_dot_product_objects(self): + """Ticket #1267""" + # This didn't require a fix, but it's worth testing anyway, because + # it may fail if .dot stops enforcing the arrays to be BEHAVED + a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') + b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') + np.dot(a['f0'], b['f0']) + + def test_byteswap_complex_scalar(self): + """Ticket #1259 and gh-441""" + for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]: + z = np.array([2.2-1.1j], dtype) + x = z[0] # always native-endian + y = x.byteswap() + if x.dtype.byteorder == z.dtype.byteorder: + # little-endian machine + assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder())) + else: + # big-endian machine + assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype)) + # double check real and imaginary parts: + assert_equal(x.real, 
y.real.byteswap()) + assert_equal(x.imag, y.imag.byteswap()) + + def test_structured_arrays_with_objects1(self): + """Ticket #1299""" + stra = 'aaaa' + strb = 'bbbb' + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(x[0, 1] == x[0, 0]) + + def test_structured_arrays_with_objects2(self): + """Ticket #1299 second test""" + stra = 'aaaa' + strb = 'bbbb' + numb = sys.getrefcount(strb) + numa = sys.getrefcount(stra) + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(sys.getrefcount(strb) == numb) + assert_(sys.getrefcount(stra) == numa + 2) + + def test_duplicate_title_and_name(self): + """Ticket #1254""" + def func(): + x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')]) + self.assertRaises(ValueError, func) + + def test_signed_integer_division_overflow(self): + """Ticket #1317.""" + def test_type(t): + min = np.array([np.iinfo(t).min]) + min //= -1 + + with np.errstate(divide="ignore"): + for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long): + test_type(t) + + def test_buffer_hashlib(self): + try: + from hashlib import md5 + except ImportError: + from md5 import new as md5 + + x = np.array([1, 2, 3], dtype=np.dtype('c') + + def test_log1p_compiler_shenanigans(self): + # Check if log1p is behaving on 32 bit intel systems. + assert_(np.isfinite(np.log1p(np.exp2(-53)))) + + def test_fromiter_comparison(self, level=rlevel): + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_fromstring_crash(self): + # Ticket #1345: the following should not cause a crash + np.fromstring(asbytes('aa, aa, 1.0'), sep=',') + + def test_ticket_1539(self): + dtypes = [x for x in np.typeDict.values() + if (issubclass(x, np.number) + and not issubclass(x, np.timedelta64))] + a = np.array([], dtypes[0]) + failures = [] + # ignore complex warnings + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.ComplexWarning) + for x in dtypes: + b = a.astype(x) + for y in dtypes: + c = a.astype(y) + try: + np.dot(b, c) + except TypeError as e: + failures.append((x, y)) + if failures: + raise AssertionError("Failures: %r" % failures) + + def test_ticket_1538(self): + x = np.finfo(np.float32) + for name in 'eps epsneg max min resolution tiny'.split(): + assert_equal(type(getattr(x, name)), np.float32, + err_msg=name) + + def test_ticket_1434(self): + # Check that the out= argument in var and std has an effect + data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) + out = np.zeros((3,)) + + ret = data.var(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.var(axis=1)) + + ret = data.std(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.std(axis=1)) + + def test_complex_nan_maximum(self): + cnan = complex(0, np.nan) + assert_equal(np.maximum(1, cnan), cnan) + + def test_subclass_int_tuple_assignment(self): + # ticket #1563 + class Subclass(np.ndarray): + def __new__(cls, i): + return np.ones((i,)).view(cls) + x = Subclass(5) + x[(0,)] = 2 # shouldn't raise an exception + assert_equal(x[0], 2) + + def test_ufunc_no_unnecessary_views(self): + # ticket #1548 + class Subclass(np.ndarray): + pass + x = np.array([1, 2, 3]).view(Subclass) + y = np.add(x, x, x) + assert_equal(id(x), id(y)) + + def test_take_refcount(self): + # ticket #939 + a = np.arange(16, dtype=np.float) + a.shape = (4, 4) + lut = np.ones((5 + 
3, 4), np.float) + rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) + c1 = sys.getrefcount(rgba) + try: + lut.take(a, axis=0, mode='clip', out=rgba) + except TypeError: + pass + c2 = sys.getrefcount(rgba) + assert_equal(c1, c2) + + def test_fromfile_tofile_seeks(self): + # On Python 3, tofile/fromfile used to get (#1610) the Python + # file handle out of sync + f0 = tempfile.NamedTemporaryFile() + f = f0.file + f.write(np.arange(255, dtype='u1').tobytes()) + + f.seek(20) + ret = np.fromfile(f, count=4, dtype='u1') + assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) + assert_equal(f.tell(), 24) + + f.seek(40) + np.array([1, 2, 3], dtype='u1').tofile(f) + assert_equal(f.tell(), 43) + + f.seek(40) + data = f.read(3) + assert_equal(data, asbytes("\x01\x02\x03")) + + f.seek(80) + f.read(4) + data = np.fromfile(f, dtype='u1', count=4) + assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) + + f.close() + + def test_complex_scalar_warning(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + assert_warns(np.ComplexWarning, float, x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + assert_equal(float(x), float(x.real)) + + def test_complex_scalar_complex_cast(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + assert_equal(complex(x), 1+2j) + + def test_complex_boolean_cast(self): + """Ticket #2218""" + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) + assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) + assert_(np.any(x)) + assert_(np.all(x[1:])) + + def test_uint_int_conversion(self): + x = 2**64 - 1 + assert_equal(int(np.uint64(x)), x) + + def test_duplicate_field_names_assign(self): + ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') + ra.dtype.names = ('f1', 'f2') + rep = repr(ra) # should not cause a segmentation fault + assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) + + def test_eq_string_and_object_array(self): + # From e-mail thread "__eq__ with str and object" (Keith Goodman) + a1 = np.array(['a', 'b'], dtype=object) + a2 = np.array(['a', 'c']) + assert_array_equal(a1 == a2, [True, False]) + assert_array_equal(a2 == a1, [True, False]) + + def test_nonzero_byteswap(self): + a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) + a.dtype = np.float32 + assert_equal(a.nonzero()[0], [1]) + a = a.byteswap().newbyteorder() + assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap + + def test_find_common_type_boolean(self): + # Ticket #1695 + assert_(np.find_common_type([], ['?', '?']) == '?') + + def test_empty_mul(self): + a = np.array([1.]) + a[1:1] *= 2 + assert_equal(a, [1.]) + + def test_array_side_effect(self): + assert_equal(np.dtype('S10').itemsize, 10) + + A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_) + + # This was throwing an exception because in ctors.c, + # discover_itemsize was calling PyObject_Length without checking + # the return code. This failed to get the length of the number 2, + # and the exception hung around until something checked + # PyErr_Occurred() and returned an error. 
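+        # Editor's note (not upstream): re-checking the itemsize after the
+        # mixed-type construction confirms that the failed length probe did
+        # not leave a stale error state behind.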
+ assert_equal(np.dtype('S10').itemsize, 10) + + def test_any_float(self): + # all and any for floats + a = np.array([0.1, 0.9]) + assert_(np.any(a)) + assert_(np.all(a)) + + def test_large_float_sum(self): + a = np.arange(10000, dtype='f') + assert_equal(a.sum(dtype='d'), a.astype('d').sum()) + + def test_ufunc_casting_out(self): + a = np.array(1.0, dtype=np.float32) + b = np.array(1.0, dtype=np.float64) + c = np.array(1.0, dtype=np.float32) + np.add(a, b, out=c) + assert_equal(c, 2.0) + + def test_array_scalar_contiguous(self): + # Array scalars are both C and Fortran contiguous + assert_(np.array(1.0).flags.c_contiguous) + assert_(np.array(1.0).flags.f_contiguous) + assert_(np.array(np.float32(1.0)).flags.c_contiguous) + assert_(np.array(np.float32(1.0)).flags.f_contiguous) + + def test_squeeze_contiguous(self): + """Similar to GitHub issue #387""" + a = np.zeros((1, 2)).squeeze() + b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze() + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.f_contiguous) + + def test_reduce_contiguous(self): + """GitHub issue #387""" + a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) + b = np.add.reduce(np.zeros((2, 1, 2)), 1) + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.c_contiguous) + + def test_object_array_self_reference(self): + # Object arrays with references to themselves can cause problems + a = np.array(0, dtype=object) + a[()] = a + assert_raises(TypeError, int, a) + assert_raises(TypeError, long, a) + assert_raises(TypeError, float, a) + assert_raises(TypeError, oct, a) + assert_raises(TypeError, hex, a) + + # Test the same for a circular reference. + b = np.array(a, dtype=object) + a[()] = b + assert_raises(TypeError, int, a) + # Numpy has no tp_traverse currently, so circular references + # cannot be detected. So resolve it: + a[()] = 0 + + # This was causing a to become like the above + a = np.array(0, dtype=object) + a[...] += 1 + assert_equal(a, 1) + + def test_object_array_self_copy(self): + # An object array being copied into itself DECREF'ed before INCREF'ing + # causing segmentation faults (gh-3787) + a = np.array(object(), dtype=object) + np.copyto(a, a) + assert_equal(sys.getrefcount(a[()]), 2) + a[()].__class__ # will segfault if object was deleted + + def test_zerosize_accumulate(self): + "Ticket #1733" + x = np.array([[42, 0]], dtype=np.uint32) + assert_equal(np.add.accumulate(x[:-1, 0]), []) + + def test_objectarray_setfield(self): + # Setfield directly manipulates the raw array data, + # so is invalid for object arrays. 
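+ # Illustrative note (added): setfield writes raw bytes, which on an object + # array would overwrite PyObject pointers without the INCREF/DECREF + # bookkeeping refcounting requires, hence the RuntimeError below.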
+ x = np.array([1, 2, 3], dtype=object) + assert_raises(RuntimeError, x.setfield, 4, np.int32, 0) + + def test_setting_rank0_string(self): + "Ticket #1736" + s1 = asbytes("hello1") + s2 = asbytes("hello2") + a = np.zeros((), dtype="S10") + a[()] = s1 + assert_equal(a, np.array(s1)) + a[()] = np.array(s2) + assert_equal(a, np.array(s2)) + + a = np.zeros((), dtype='f4') + a[()] = 3 + assert_equal(a, np.array(3)) + a[()] = np.array(4) + assert_equal(a, np.array(4)) + + def test_string_astype(self): + "Ticket #1748" + s1 = asbytes('black') + s2 = asbytes('white') + s3 = asbytes('other') + a = np.array([[s1], [s2], [s3]]) + assert_equal(a.dtype, np.dtype('S5')) + b = a.astype(np.dtype('S0')) + assert_equal(b.dtype, np.dtype('S5')) + + def test_ticket_1756(self): + """Ticket #1756 """ + s = asbytes('0123456789abcdef') + a = np.array([s]*5) + for i in range(1, 17): + a1 = np.array(a, "|S%d"%i) + a2 = np.array([s[:i]]*5) + assert_equal(a1, a2) + + def test_fields_strides(self): + "Ticket #1760" + r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') + assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) + assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) + assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) + assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) + + def test_alignment_update(self): + """Check that alignment flag is updated on stride setting""" + a = np.arange(10) + assert_(a.flags.aligned) + a.strides = 3 + assert_(not a.flags.aligned) + + def test_ticket_1770(self): + "Should not segfault on python 3k" + import numpy as np + try: + a = np.zeros((1,), dtype=[('f1', 'f')]) + a['f1'] = 1 + a['f2'] = 1 + except ValueError: + pass + except: + raise AssertionError + + def test_ticket_1608(self): + "x.flat shouldn't modify data" + x = np.array([[1, 2], [3, 4]]).T + y = np.array(x.flat) + assert_equal(x, [[1, 3], [2, 4]]) + + def test_pickle_string_overwrite(self): + import re + + data = np.array([1], dtype='b') + blob = pickle.dumps(data, protocol=1) + data = pickle.loads(blob) + + # Check that loads does not clobber interned strings + s = re.sub("a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + data[0] = 0xbb + s = re.sub("a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + + def test_pickle_bytes_overwrite(self): + if sys.version_info[0] >= 3: + data = np.array([1], dtype='b') + data = pickle.loads(pickle.dumps(data)) + data[0] = 0xdd + bytestring = "\x01 ".encode('ascii') + assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + + def test_pickle_py2_array_latin1_hack(self): + # Check that unpickling hacks in Py3 that support + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) + data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" + "tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" + "I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" + "p13\ntp14\nb.") + if sys.version_info[0] >= 3: + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129], dtype='b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + + def test_pickle_py2_scalar_latin1_hack(self): + # Check that scalar unpickling hack in Py3 that supports + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(...) 
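+ # Illustrative background (added): latin-1 maps byte values 0-255 one-to-one + # onto U+0000..U+00FF, so encoding='latin1' can round-trip arbitrary Python 2 + # str bytes; encodings without that property, such as koi8-r, cannot, which + # is what the koi8r_validity cases below exercise.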
+ datas = [ + # (original, python2_pickle, koi8r_validity) + (np.unicode_('\u6bd2'), + asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" + "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" + "tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.float64(9e123), + asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" + "p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" + "bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1 + asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" + "I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" + "tp8\nRp9\n."), + 'different'), + ] + if sys.version_info[0] >= 3: + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under non-latin1 encoding (e.g.) KOI8-R can + # produce bad results, but should not segfault. + if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting to silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so results + # to an encoding exception + assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') + else: + raise ValueError(koi8r_validity) + + def test_structured_type_to_object(self): + a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') + a_obj = np.empty((2,), dtype=object) + a_obj[0] = (0, 1) + a_obj[1] = (3, 2) + # astype records -> object + assert_equal(a_rec.astype(object), a_obj) + # '=' records -> object + b = np.empty_like(a_obj) + b[...] = a_rec + assert_equal(b, a_obj) + # '=' object -> records + b = np.empty_like(a_rec) + b[...] = a_obj + assert_equal(b, a_rec) + + def test_assign_obj_listoflists(self): + # Ticket # 1870 + # The inner list should get assigned to the object elements + a = np.zeros(4, dtype=object) + b = a.copy() + a[0] = [1] + a[1] = [2] + a[2] = [3] + a[3] = [4] + b[...] = [[1], [2], [3], [4]] + assert_equal(a, b) + # The first dimension should get broadcast + a = np.zeros((2, 2), dtype=object) + a[...] = [[1, 2]] + assert_equal(a, [[1, 2], [1, 2]]) + + def test_memoryleak(self): + # Ticket #1917 - ensure that array data doesn't leak + for i in range(1000): + # 100MB times 1000 would give 100GB of memory usage if it leaks + a = np.empty((100000000,), dtype='i1') + del a + + def test_ufunc_reduce_memoryleak(self): + a = np.arange(6) + acnt = sys.getrefcount(a) + res = np.add.reduce(a) + assert_equal(sys.getrefcount(a), acnt) + + def test_search_sorted_invalid_arguments(self): + # Ticket #2021, should not segfault. 
+ x = np.arange(0, 4, dtype='datetime64[D]') + assert_raises(TypeError, x.searchsorted, 1) + + def test_string_truncation(self): + # Ticket #1990 - Data can be truncated in creation of an array from a + # mixed sequence of numeric values and strings + for val in [True, 1234, 123.4, complex(1, 234)]: + for tostr in [asunicode, asbytes]: + b = np.array([val, tostr('xx')]) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xx'), val]) + assert_equal(tostr(b[1]), tostr(val)) + + # test also with longer strings + b = np.array([val, tostr('xxxxxxxxxx')]) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xxxxxxxxxx'), val]) + assert_equal(tostr(b[1]), tostr(val)) + + def test_string_truncation_ucs2(self): + # Ticket #2081. Python compiled with two byte unicode + # can lead to truncation if itemsize is not properly + # adjusted for Numpy's four byte unicode. + if sys.version_info[0] >= 3: + a = np.array(['abcd']) + else: + a = np.array([sixu('abcd')]) + assert_equal(a.dtype.itemsize, 16) + + def test_unique_stable(self): + # Ticket #2063 must always choose stable sort for argsort to + # get consistent results + v = np.array(([0]*5 + [1]*6 + [2]*6)*4) + res = np.unique(v, return_index=True) + tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) + assert_equal(res, tgt) + + def test_unicode_alloc_dealloc_match(self): + # Ticket #1578, the mismatch only showed up when running + # python-debug for python versions >= 2.7, and then as + # a core dump and error message. + a = np.array(['abc'], dtype=np.unicode)[0] + del a + + def test_refcount_error_in_clip(self): + # Ticket #1588 + a = np.zeros((2,), dtype='>i2').clip(min=0) + x = a + a + # This used to segfault: + y = str(x) + # Check the final string: + assert_(y == "[0 0]") + + def test_searchsorted_wrong_dtype(self): + # Ticket #2189, it used to segfault, so we check that it raises the + # proper exception. + a = np.array([('a', 1)], dtype='S1, int') + assert_raises(TypeError, np.searchsorted, a, 1.2) + # Ticket #2066, similar problem: + dtype = np.format_parser(['i4', 'i4'], [], []) + a = np.recarray((2, ), dtype) + assert_raises(TypeError, np.searchsorted, a, 1) + + def test_complex64_alignment(self): + # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment + dtt = np.complex64 + arr = np.arange(10, dtype=dtt) + # 2D array + arr2 = np.reshape(arr, (2, 5)) + # Fortran write followed by (C or F) read caused bus error + data_str = arr2.tobytes('F') + data_back = np.ndarray(arr2.shape, + arr2.dtype, + buffer=data_str, + order='F') + assert_array_equal(arr2, data_back) + + def test_structured_count_nonzero(self): + arr = np.array([0, 1]).astype('i4, (2)i4')[:1] + count = np.count_nonzero(arr) + assert_equal(count, 0) + + def test_copymodule_preserves_f_contiguity(self): + a = np.empty((2, 2), order='F') + b = copy.copy(a) + c = copy.deepcopy(a) + assert_(b.flags.fortran) + assert_(b.flags.f_contiguous) + assert_(c.flags.fortran) + assert_(c.flags.f_contiguous) + + def test_fortran_order_buffer(self): + import numpy as np + a = np.array([['Hello', 'Foob']], dtype='U5', order='F') + arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) + arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')], + [sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]]) + assert_array_equal(arr, arr2) + + def test_assign_from_sequence_error(self): + # Ticket #4024. 
arr = np.array([1, 2, 3]) + assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) + arr.__setitem__(slice(None), [9]) + assert_equal(arr, [9, 9, 9]) + + def test_format_on_flex_array_element(self): + # Ticket #4369. + dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')]) + arr = np.array([('2000-01-01', 1)], dt) + formatted = '{0}'.format(arr[0]) + assert_equal(formatted, str(arr[0])) + + def test_richcompare_crash(self): + # gh-4613 + import operator as op + + # dummy class where __array__ throws exception + class Foo(object): + __array_priority__ = 1002 + + def __array__(self, *args, **kwargs): + raise Exception() + + rhs = Foo() + lhs = np.array(1) + for f in [op.lt, op.le, op.gt, op.ge]: + if sys.version_info[0] >= 3: + assert_raises(TypeError, f, lhs, rhs) + else: + f(lhs, rhs) + assert_(not op.eq(lhs, rhs)) + assert_(op.ne(lhs, rhs)) + + def test_richcompare_scalar_and_subclass(self): + # gh-4709 + class Foo(np.ndarray): + def __eq__(self, other): + return "OK" + x = np.array([1,2,3]).view(Foo) + assert_equal(10 == x, "OK") + assert_equal(np.int32(10) == x, "OK") + assert_equal(np.array([10]) == x, "OK") + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarinherit.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarinherit.py new file mode 100644 index 0000000000000..6f394196c2060 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarinherit.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +""" Test printing of scalar types. + +""" + +import numpy as np +from numpy.testing import TestCase, run_module_suite + + +class A(object): pass +class B(A, np.float64): pass + +class C(B): pass +class D(C, B): pass + +class B0(np.float64, A): pass +class C0(B0): pass + +class TestInherit(TestCase): + def test_init(self): + x = B(1.0) + assert str(x) == '1.0' + y = C(2.0) + assert str(y) == '2.0' + z = D(3.0) + assert str(z) == '3.0' + def test_init2(self): + x = B0(1.0) + assert str(x) == '1.0' + y = C0(2.0) + assert str(y) == '2.0' + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarmath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarmath.py new file mode 100644 index 0000000000000..afdc06c03d8e1 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarmath.py @@ -0,0 +1,275 @@ +from __future__ import division, absolute_import, print_function + +import sys +import platform +from numpy.testing import * +from numpy.testing.utils import _gen_alignment_data +import numpy as np + +types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, + np.int_, np.uint, np.longlong, np.ulonglong, + np.single, np.double, np.longdouble, np.csingle, + np.cdouble, np.clongdouble] + +# This compares scalarmath against ufuncs. + +class TestTypes(TestCase): + def test_types(self, level=1): + for atype in types: + a = atype(1) + assert_(a == 1, "error with %r: got %r" % (atype, a)) + + def test_type_add(self, level=1): + # list of types + for k, atype in enumerate(types): + a_scalar = atype(3) + a_array = np.array([3], dtype=atype) + for l, btype in enumerate(types): + b_scalar = btype(1) + b_array = np.array([1], dtype=btype) + c_scalar = a_scalar + b_scalar + c_array = a_array + b_array + # It was comparing the type numbers, but the new ufunc + # function-finding mechanism finds the lowest function + # to which both inputs can be cast - which produces 'l' + # when you do 'q' + 'b'. The old function finding mechanism + # skipped ahead based on the first argument, but that + # does not produce properly symmetric results...
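+ # Concrete illustration (added; not part of the original test): with + # atype = np.longlong ('q') and btype = np.byte ('b'), both the scalar and + # the array additions pick the 'll->l' loop on typical 64-bit builds, so + # c_scalar.dtype == c_array.dtype == np.dtype('l').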
+ assert_equal(c_scalar.dtype, c_array.dtype, + "error with types (%d/'%c' + %d/'%c')" % + (k, np.dtype(atype).char, l, np.dtype(btype).char)) + + def test_type_create(self, level=1): + for k, atype in enumerate(types): + a = np.array([1, 2, 3], atype) + b = atype([1, 2, 3]) + assert_equal(a, b) + + def test_leak(self): + # test leak of scalar objects + # a leak would show up in valgrind as still-reachable of ~2.6MB + for i in range(200000): + np.add(1, 1) + + +class TestBaseMath(TestCase): + def test_blocked(self): + # test alignments offsets for simd instructions + # alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 7)]: + for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt, + type='binary', + max_size=sz): + exp1 = np.ones_like(inp1) + inp1[...] = np.ones_like(inp1) + inp2[...] = np.zeros_like(inp2) + assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg) + assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg) + assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg) + + np.add(inp1, inp2, out=out) + assert_almost_equal(out, exp1, err_msg=msg) + + inp2[...] += np.arange(inp2.size, dtype=dt) + 1 + assert_almost_equal(np.square(inp2), + np.multiply(inp2, inp2), err_msg=msg) + assert_almost_equal(np.reciprocal(inp2), + np.divide(1, inp2), err_msg=msg) + + inp1[...] = np.ones_like(inp1) + inp2[...] = np.zeros_like(inp2) + np.add(inp1, 1, out=out) + assert_almost_equal(out, exp1 + 1, err_msg=msg) + np.add(1, inp2, out=out) + assert_almost_equal(out, exp1, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_almost_equal(d + d, d * 2) + np.add(d, d, out=o) + np.add(np.ones_like(d), d, out=o) + np.add(d, np.ones_like(d), out=o) + np.add(np.ones_like(d), d) + np.add(d, np.ones_like(d)) + + +class TestPower(TestCase): + def test_small_types(self): + for t in [np.int8, np.int16, np.float16]: + a = t(3) + b = a ** 4 + assert_(b == 81, "error with %r: got %r" % (t, b)) + + def test_large_types(self): + for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: + a = t(51) + b = a ** 4 + msg = "error with %r: got %r" % (t, b) + if np.issubdtype(t, np.integer): + assert_(b == 6765201, msg) + else: + assert_almost_equal(b, 6765201, err_msg=msg) + def test_mixed_types(self): + typelist = [np.int8, np.int16, np.float16, + np.float32, np.float64, np.int8, + np.int16, np.int32, np.int64] + for t1 in typelist: + for t2 in typelist: + a = t1(3) + b = t2(2) + result = a**b + msg = ("error with %r and %r:" + "got %r, expected %r") % (t1, t2, result, 9) + if np.issubdtype(np.dtype(result), np.integer): + assert_(result == 9, msg) + else: + assert_almost_equal(result, 9, err_msg=msg) + +class TestComplexDivision(TestCase): + def test_zero_division(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + a = t(0.0) + b = t(1.0) + assert_(np.isinf(b/a)) + b = t(complex(np.inf, np.inf)) + assert_(np.isinf(b/a)) + b = t(complex(np.inf, np.nan)) + assert_(np.isinf(b/a)) + b = t(complex(np.nan, np.inf)) + assert_(np.isinf(b/a)) + b = t(complex(np.nan, np.nan)) + assert_(np.isnan(b/a)) + b = t(0.) 
+ assert_(np.isnan(b/a)) + + +class TestConversion(TestCase): + def test_int_from_long(self): + l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] + li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] + for T in [None, np.float64, np.int64]: + a = np.array(l, dtype=T) + assert_equal([int(_m) for _m in a], li) + + a = np.array(l[:3], dtype=np.uint64) + assert_equal([int(_m) for _m in a], li[:3]) + + def test_iinfo_long_values(self): + for code in 'bBhH': + res = np.array(np.iinfo(code).max + 1, dtype=code) + tgt = np.iinfo(code).min + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.array(np.iinfo(code).max, dtype=code) + tgt = np.iinfo(code).max + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.typeDict[code](np.iinfo(code).max) + tgt = np.iinfo(code).max + assert_(res == tgt) + + def test_int_raise_behaviour(self): + def Overflow_error_func(dtype): + res = np.typeDict[dtype](np.iinfo(dtype).max + 1) + + for code in 'lLqQ': + assert_raises(OverflowError, Overflow_error_func, code) + + def test_longdouble_int(self): + # gh-627 + x = np.longdouble(np.inf) + assert_raises(OverflowError, x.__int__) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, x.__int__) + + def test_numpy_scalar_relational_operators(self): + #All integer + for dt1 in np.typecodes['AllInteger']: + assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + #Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + + #unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + #Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + +#class TestRepr(TestCase): +# def test_repr(self): +# for t in types: +# val = t(1197346475.0137341) +# val_repr = repr(val) +# val2 = eval(val_repr) +# assert_equal( val, val2 ) + + +class TestRepr(object): + def _test_type_repr(self, t): + finfo=np.finfo(t) + last_fraction_bit_idx = finfo.nexp + finfo.nmant + last_exponent_bit_idx = finfo.nexp + storage_bytes = np.dtype(t).itemsize*8 + # could add some more types to the list below + for which in ['small denorm', 'small norm']: + # Values 
from http://en.wikipedia.org/wiki/IEEE_754 + constr = np.array([0x00]*storage_bytes, dtype=np.uint8) + if which == 'small denorm': + byte = last_fraction_bit_idx // 8 + bytebit = 7-(last_fraction_bit_idx % 8) + constr[byte] = 1 << bytebit + n 1 absolute nums + O complex -> real + n 1 negative nums + O + n 1 sign nums + O -> int + n 1 invert bool + ints + O flts raise an error + n 1 degrees real + M cmplx raise an error + n 1 radians real + M cmplx raise an error + n 1 arccos flts + M + n 1 arccosh flts + M + n 1 arcsin flts + M + n 1 arcsinh flts + M + n 1 arctan flts + M + n 1 arctanh flts + M + n 1 cos flts + M + n 1 sin flts + M + n 1 tan flts + M + n 1 cosh flts + M + n 1 sinh flts + M + n 1 tanh flts + M + n 1 exp flts + M + n 1 expm1 flts + M + n 1 log flts + M + n 1 log10 flts + M + n 1 log1p flts + M + n 1 sqrt flts + M real x < 0 raises error + n 1 ceil real + M + n 1 trunc real + M + n 1 floor real + M + n 1 fabs real + M + n 1 rint flts + M + n 1 isnan flts -> bool + n 1 isinf flts -> bool + n 1 isfinite flts -> bool + n 1 signbit real -> bool + n 1 modf real -> (frac, int) + n 1 logical_not bool + nums + M -> bool + n 2 left_shift ints + O flts raise an error + n 2 right_shift ints + O flts raise an error + n 2 add bool + nums + O boolean + is || + n 2 subtract bool + nums + O boolean - is ^ + n 2 multiply bool + nums + O boolean * is & + n 2 divide nums + O + n 2 floor_divide nums + O + n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d + n 2 fmod nums + M + n 2 power nums + O + n 2 greater bool + nums + O -> bool + n 2 greater_equal bool + nums + O -> bool + n 2 less bool + nums + O -> bool + n 2 less_equal bool + nums + O -> bool + n 2 equal bool + nums + O -> bool + n 2 not_equal bool + nums + O -> bool + n 2 logical_and bool + nums + M -> bool + n 2 logical_or bool + nums + M -> bool + n 2 logical_xor bool + nums + M -> bool + n 2 maximum bool + nums + O + n 2 minimum bool + nums + O + n 2 bitwise_and bool + ints + O flts raise an error + n 2 bitwise_or bool + ints + O flts raise an error + n 2 bitwise_xor bool + ints + O flts raise an error + n 2 arctan2 real + M + n 2 remainder ints + real + O + n 2 hypot real + M + ===== ==== ============= =============== ======================== + + Types other than those listed will be accepted, but they are cast to + the smallest compatible type for which the function is defined. The + casting rules are: + + bool -> int8 -> float32 + ints -> double + + """ + pass + + + def test_signature(self): + # the arguments to test_signature are: nin, nout, core_signature + # pass + assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1) + + # pass. empty core signature; treat as plain ufunc (with trivial core) + assert_equal(umt.test_signature(2, 1, "(),()->()"), 0) + + # in the following calls, a ValueError should be raised because + # of error in core signature + # error: extra parenthesis + msg = "core_sig: extra parenthesis" + try: + ret = umt.test_signature(2, 1, "((i)),(i)->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: None + # error: parenthesis matching + msg = "core_sig: parenthesis matching" + try: + ret = umt.test_signature(2, 1, "(i),)i(->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: None + # error: incomplete signature. letters outside of parenthesis are ignored + msg = "core_sig: incomplete signature" + try: + ret = umt.test_signature(2, 1, "(i),->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: None + # error: incomplete signature.
2 output arguments are specified + msg = "core_sig: incomplete signature" + try: + ret = umt.test_signature(2, 2, "(i),(i)->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: None + + # more complicated names for variables + assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1) + + def test_get_signature(self): + assert_equal(umt.inner1d.signature, "(i),(i)->()") + + def test_forced_sig(self): + a = 0.5*np.arange(3, dtype='f8') + assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) + assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), + casting='unsafe'), [0, 0, 1]) + + b = np.zeros((3,), dtype='f8') + np.add(a, 0.5, out=b) + assert_equal(b, [0.5, 1, 1.5]) + b[:] = 0 + np.add(a, 0.5, sig='i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + + def test_sum_stability(self): + a = np.ones(500, dtype=np.float32) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) + + a = np.ones(500, dtype=np.float64) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) + + def test_sum(self): + for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) + d = np.arange(1, v + 1, dtype=dt) + assert_almost_equal(np.sum(d), tgt) + assert_almost_equal(np.sum(d[::-1]), tgt) + + d = np.ones(500, dtype=dt) + assert_almost_equal(np.sum(d[::2]), 250.) + assert_almost_equal(np.sum(d[1::2]), 250.) + assert_almost_equal(np.sum(d[::3]), 167.) + assert_almost_equal(np.sum(d[1::3]), 167.) + assert_almost_equal(np.sum(d[::-2]), 250.) + assert_almost_equal(np.sum(d[-1::-2]), 250.) + assert_almost_equal(np.sum(d[::-3]), 167.) + assert_almost_equal(np.sum(d[-1::-3]), 167.) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + d += d + assert_almost_equal(d, 2.) + + def test_sum_complex(self): + for dt in (np.complex64, np.complex128, np.clongdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) *1j) + d = np.empty(v, dtype=dt) + d.real = np.arange(1, v + 1) + d.imag = -np.arange(1, v + 1) + assert_almost_equal(np.sum(d), tgt) + assert_almost_equal(np.sum(d[::-1]), tgt) + + d = np.ones(500, dtype=dt) + 1j + assert_almost_equal(np.sum(d[::2]), 250. + 250j) + assert_almost_equal(np.sum(d[1::2]), 250. + 250j) + assert_almost_equal(np.sum(d[::3]), 167. + 167j) + assert_almost_equal(np.sum(d[1::3]), 167. + 167j) + assert_almost_equal(np.sum(d[::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[::-3]), 167. + 167j) + assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + 1j + d += d + assert_almost_equal(d, 2. 
+ 2j) + + def test_inner1d(self): + a = np.arange(6).reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1)) + a = np.arange(6) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a)) + + def test_broadcast(self): + msg = "broadcast" + a = np.arange(4).reshape((2, 1, 2)) + b = np.arange(4).reshape((1, 2, 2)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + msg = "extend & broadcast loop dimensions" + b = np.arange(4).reshape((2, 2)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + msg = "broadcast in core dimensions" + a = np.arange(8).reshape((4, 2)) + b = np.arange(4).reshape((4, 1)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + msg = "extend & broadcast core and loop dimensions" + a = np.arange(8).reshape((4, 2)) + b = np.array(7) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + msg = "broadcast should fail" + a = np.arange(2).reshape((2, 1, 1)) + b = np.arange(3).reshape((3, 1, 1)) + try: + ret = umt.inner1d(a, b) + assert_equal(ret, None, err_msg=msg) + except ValueError: None + + def test_type_cast(self): + msg = "type cast" + a = np.arange(6, dtype='short').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) + msg = "type cast on one argument" + a = np.arange(6).reshape((2, 3)) + b = a+0.1 + assert_array_almost_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), + err_msg=msg) + + def test_endian(self): + msg = "big endian" + a = np.arange(6, dtype='>i4').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) + msg = "little endian" + a = np.arange(6, dtype='<i4').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) + + def test_reduce_zero_axis(self): + # If we have a n x m array and do a reduction with axis=1, then we are + # doing n reductions, and each reduction takes an m-element array. For + # a reduction with axis=0, we are doing m reductions, each on an + # n-element array. + # + # n > 0, m > 0: fine + # n = 0, m > 0: fine, doing 0 reductions of m-element arrays + # n > 0, m = 0: can't reduce a 0-element array, ValueError + # n = 0, m = 0: can't reduce a 0-element array, ValueError (for + # consistency with the above case) + # This test doesn't actually look at return values, it just checks to + # make sure that we get an error in exactly those cases where we + # expect one, and assumes the calculations themselves are done + # correctly. + def ok(f, *args, **kwargs): + f(*args, **kwargs) + def err(f, *args, **kwargs): + assert_raises(ValueError, f, *args, **kwargs) + def t(expect, func, n, m): + expect(func, np.zeros((n, m)), axis=1) + expect(func, np.zeros((m, n)), axis=0) + expect(func, np.zeros((n // 2, n // 2, m)), axis=2) + expect(func, np.zeros((n // 2, m, n // 2)), axis=1) + expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) + expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) + expect(func, np.zeros((m // 3, m // 3, m // 3, + n // 2, n //2)), + axis=(0, 1, 2)) + # Check what happens if the inner (resp.
outer) dimensions are a + # mix of zero and non-zero: + expect(func, np.zeros((10, m, n)), axis=(0, 1)) + expect(func, np.zeros((10, n, m)), axis=(0, 2)) + expect(func, np.zeros((m, 10, n)), axis=0) + expect(func, np.zeros((10, m, n)), axis=1) + expect(func, np.zeros((10, n, m)), axis=2) + # np.maximum is just an arbitrary ufunc with no reduction identity + assert_equal(np.maximum.identity, None) + t(ok, np.maximum.reduce, 30, 30) + t(ok, np.maximum.reduce, 0, 30) + t(err, np.maximum.reduce, 30, 0) + t(err, np.maximum.reduce, 0, 0) + err(np.maximum.reduce, []) + np.maximum.reduce(np.zeros((0, 0)), axis=()) + + # all of the combinations are fine for a reduction that has an + # identity + t(ok, np.add.reduce, 30, 30) + t(ok, np.add.reduce, 0, 30) + t(ok, np.add.reduce, 30, 0) + t(ok, np.add.reduce, 0, 0) + np.add.reduce([]) + np.add.reduce(np.zeros((0, 0)), axis=()) + + # OTOH, accumulate always makes sense for any combination of n and m, + # because it maps an m-element array to an m-element array. These + # tests are simpler because accumulate doesn't accept multiple axes. + for uf in (np.maximum, np.add): + uf.accumulate(np.zeros((30, 0)), axis=0) + uf.accumulate(np.zeros((0, 30)), axis=0) + uf.accumulate(np.zeros((30, 30)), axis=0) + uf.accumulate(np.zeros((0, 0)), axis=0) + + def test_safe_casting(self): + # In old versions of numpy, in-place operations used the 'unsafe' + # casting rules. In some future version, 'same_kind' will become the + # default. + a = np.array([1, 2, 3], dtype=int) + # Non-in-place addition is fine + assert_array_equal(assert_no_warnings(np.add, a, 1.1), + [2.1, 3.1, 4.1]) + assert_warns(DeprecationWarning, np.add, a, 1.1, out=a) + assert_array_equal(a, [2, 3, 4]) + def add_inplace(a, b): + a += b + assert_warns(DeprecationWarning, add_inplace, a, 1.1) + assert_array_equal(a, [3, 4, 5]) + # Make sure that explicitly overriding the warning is allowed: + assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") + assert_array_equal(a, [4, 5, 6]) + + # There's no way to propagate exceptions from the place where we issue + # this deprecation warning, so we must throw the exception away + # entirely rather than cause it to be raised at some other point, or + # trigger some other unsuspecting if (PyErr_Occurred()) { ...} at some + # other location entirely. 
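+ # Illustrative note (added): the in-place additions below are flagged + # because np.can_cast(np.float64, np.int64, casting='same_kind') is False + # (float and int are different kinds), so a += 1.1 on an int array is only + # permitted under the old 'unsafe' rules.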
+ import warnings + import sys + if sys.version_info[0] >= 3: + from io import StringIO + else: + from StringIO import StringIO + with warnings.catch_warnings(): + warnings.simplefilter("error") + old_stderr = sys.stderr + try: + sys.stderr = StringIO() + # No error, but dumps to stderr + a += 1.1 + # No error on the next bit of code executed either + 1 + 1 + assert_("Implicitly casting" in sys.stderr.getvalue()) + finally: + sys.stderr = old_stderr + + def test_ufunc_custom_out(self): + # Test ufunc with built in input types and custom output type + + a = np.array([0, 1, 2], dtype='i8') + b = np.array([0, 1, 2], dtype='i8') + c = np.empty(3, dtype=rational) + + # Output must be specified so numpy knows what + # ufunc signature to look for + result = test_add(a, b, c) + assert_equal(result, np.array([0, 2, 4], dtype=rational)) + + # no output type should raise TypeError + assert_raises(TypeError, test_add, a, b) + + def test_operand_flags(self): + a = np.arange(16, dtype='l').reshape(4, 4) + b = np.arange(9, dtype='l').reshape(3, 3) + opflag_tests.inplace_add(a[:-1, :-1], b) + assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], + [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) + + a = np.array(0) + opflag_tests.inplace_add(a, 3) + assert_equal(a, 3) + opflag_tests.inplace_add(a, [3, 4]) + assert_equal(a, 10) + + def test_struct_ufunc(self): + import numpy.core.struct_ufunc_test as struct_ufunc + + a = np.array([(1, 2, 3)], dtype='u8,u8,u8') + b = np.array([(1, 2, 3)], dtype='u8,u8,u8') + + result = struct_ufunc.add_triplet(a, b) + assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) + + def test_custom_ufunc(self): + a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)], + dtype=rational); + b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)], + dtype=rational); + + result = test_add_rationals(a, b) + expected = np.array([rational(1), rational(2, 3), rational(1, 2)], + dtype=rational); + assert_equal(result, expected); + + def test_custom_array_like(self): + class MyThing(object): + __array_priority__ = 1000 + + rmul_count = 0 + getitem_count = 0 + + def __init__(self, shape): + self.shape = shape + + def __len__(self): + return self.shape[0] + + def __getitem__(self, i): + MyThing.getitem_count += 1 + if not isinstance(i, tuple): + i = (i,) + if len(i) > len(self.shape): + raise IndexError("boo") + + return MyThing(self.shape[len(i):]) + + def __rmul__(self, other): + MyThing.rmul_count += 1 + return self + + np.float64(5)*MyThing((3, 3)) + assert_(MyThing.rmul_count == 1, MyThing.rmul_count) + assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + + def test_inplace_fancy_indexing(self): + + a = np.arange(10) + np.add.at(a, [2, 5, 2], 1) + assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) + + a = np.arange(10) + b = np.array([100, 100, 100]) + np.add.at(a, [2, 5, 2], b) + assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, (slice(None), [1, 2, 1]), b) + assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) + assert_equal(a, + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + 
np.add.at(a, ([1, 2, 1], slice(None)), b) + assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) + assert_equal(a, + [[[0, 1, 2 ], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11 ], + [212, 413, 614], + [115, 216, 317]], + + [[18, 19, 20 ], + [221, 422, 623], + [124, 225, 326]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (0, [1, 2, 1]), b) + assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, ([1, 2, 1], 0, slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [3, 4, 5], + [6, 7, 8]], + + [[209, 410, 611], + [12, 13, 14], + [15, 16, 17]], + + [[118, 219, 320], + [21, 22, 23], + [24, 25, 26]]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), slice(None)), b) + assert_equal(a, + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]]) + + a = np.arange(10) + np.negative.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9]) + + # Test 0-dim array + a = np.array(0) + np.add.at(a, (), 1) + assert_equal(a, 1) + + assert_raises(IndexError, np.add.at, a, 0, 1) + assert_raises(IndexError, np.add.at, a, [], 1) + + # Test mixed dtypes + a = np.arange(10) + np.power.at(a, [1, 2, 3, 2], 3.5) + assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) + + # Test boolean indexing and boolean ufuncs + a = np.arange(10) + index = a % 2 == 0 + np.equal.at(a, index, [0, 2, 4, 6, 8]) + assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) + + # Test unary operator + a = np.arange(10, dtype='u4') + np.invert.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) + + # Test empty subspace + orig = np.arange(4) + a = orig[:, None][:, 0:0] + np.add.at(a, [0, 1], 3) + assert_array_equal(orig, np.arange(4)) + + # Test with swapped byte order + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) + np.add.at(values, index, 3) + assert_array_equal(values, [1, 8, 6, 4]) + + # Test exception thrown + values = np.array(['a', 1], dtype=np.object) + self.assertRaises(TypeError, np.add.at, values, [0, 1], 1) + assert_array_equal(values, np.array(['a', 1], dtype=np.object)) + + def test_reduce_arguments(self): + f = np.add.reduce + d = np.ones((5,2), dtype=int) + o = np.ones((2,), dtype=d.dtype) + r = o * 5 + assert_equal(f(d), r) + # a, axis=0, dtype=None, out=None, keepdims=False + assert_equal(f(d, axis=0), r) + assert_equal(f(d, 0), r) + assert_equal(f(d, 0, dtype=None), r) + assert_equal(f(d, 0, dtype='i'), r) + assert_equal(f(d, 0, 'i'), r) + assert_equal(f(d, 0, None), r) + assert_equal(f(d, 0, None, out=None), r) + assert_equal(f(d, 0, None, out=o), r) + assert_equal(f(d, 0, None, o), r) + assert_equal(f(d, 0, None, None), r) + assert_equal(f(d, 0, None, None, keepdims=False), r) + assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) + # multiple keywords + assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False), r) + + # too little + assert_raises(TypeError, f) + # too much + assert_raises(TypeError, f, d, 0, None, 
None, False, 1) + # invalid axis + assert_raises(TypeError, f, d, "invalid") + assert_raises(TypeError, f, d, axis="invalid") + assert_raises(TypeError, f, d, axis="invalid", dtype=None, + keepdims=True) + # invalid dtype + assert_raises(TypeError, f, d, 0, "invalid") + assert_raises(TypeError, f, d, dtype="invalid") + assert_raises(TypeError, f, d, dtype="invalid", out=None) + # invalid out + assert_raises(TypeError, f, d, 0, None, "invalid") + assert_raises(TypeError, f, d, out="invalid") + assert_raises(TypeError, f, d, out="invalid", dtype=None) + # keepdims boolean, no invalid value + # assert_raises(TypeError, f, d, 0, None, None, "invalid") + # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) + # invalid mix + assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", + out=None) + + # invalid keyord + assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", + out=None) + assert_raises(TypeError, f, d, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath.py new file mode 100644 index 0000000000000..7451af8f08e59 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath.py @@ -0,0 +1,1665 @@ +from __future__ import division, absolute_import, print_function + +import sys +import platform +import warnings + +from numpy.testing import * +from numpy.testing.utils import _gen_alignment_data +import numpy.core.umath as ncu +import numpy as np + + +def on_powerpc(): + """ True if we are running on a Power PC platform.""" + return platform.processor() == 'powerpc' or \ + platform.machine().startswith('ppc') + + +class _FilterInvalids(object): + def setUp(self): + self.olderr = np.seterr(invalid='ignore') + + def tearDown(self): + np.seterr(**self.olderr) + + +class TestConstants(TestCase): + def test_pi(self): + assert_allclose(ncu.pi, 3.141592653589793, 1e-15) + + + def test_e(self): + assert_allclose(ncu.e, 2.718281828459045, 1e-15) + + + def test_euler_gamma(self): + assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) + + +class TestDivision(TestCase): + def test_division_int(self): + # int division should follow Python + x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) + if 5 / 10 == 0.5: + assert_equal(x / 100, [0.05, 0.1, 0.9, 1, + -0.05, -0.1, -0.9, -1, -1.2]) + else: + assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) + + def test_division_complex(self): + # check that implementation is correct + msg = "Complex division implementation check" + x = np.array([1. + 1.*1j, 1. + .5*1j, 1. 
+ 2.*1j], dtype=np.complex128) + assert_almost_equal(x**2/x, x, err_msg=msg) + # check overflow, underflow + msg = "Complex division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = x**2/x + assert_almost_equal(y/x, [1, 1], err_msg=msg) + + def test_zero_division_complex(self): + with np.errstate(invalid="ignore", divide="ignore"): + x = np.array([0.0], dtype=np.complex128) + y = 1.0/x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.nan)/x + assert_(np.isinf(y)[0]) + y = complex(np.nan, np.inf)/x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.inf)/x + assert_(np.isinf(y)[0]) + y = 0.0/x + assert_(np.isnan(y)[0]) + + def test_floor_division_complex(self): + # check that implementation is correct + msg = "Complex floor division implementation check" + x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) + y = np.array([0., -1., 0., 0.], dtype=np.complex128) + assert_equal(np.floor_divide(x**2, x), y, err_msg=msg) + # check overflow, underflow + msg = "Complex floor division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = np.floor_divide(x**2, x) + assert_equal(y, [1.e+110, 0], err_msg=msg) + + +class TestPower(TestCase): + def test_power_float(self): + x = np.array([1., 2., 3.]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_equal(x**2, [1., 4., 9.]) + y = x.copy() + y **= 2 + assert_equal(y, [1., 4., 9.]) + assert_almost_equal(x**(-1), [1., 0.5, 1./3]) + assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) + + for out, inp, msg in _gen_alignment_data(dtype=np.float32, + type='unary', + max_size=11): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + for out, inp, msg in _gen_alignment_data(dtype=np.float64, + type='unary', + max_size=7): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + + def test_power_complex(self): + x = np.array([1+2j, 2+3j, 3+4j]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) + assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) + assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) + assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) + assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) + assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, + (-117-44j)/15625]) + assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), + ncu.sqrt(3+4j)]) + norm = 1./((x**14)[0]) + assert_almost_equal(x**14 * norm, + [i * norm for i in [-76443+16124j, 23161315+58317492j, + 5583548873 + 2465133864j]]) + + # Ticket #836 + def assert_complex_equal(x, y): + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + for z in [complex(0, np.inf), complex(1, np.inf)]: + z = np.array([z], dtype=np.complex_) + with np.errstate(invalid="ignore"): + assert_complex_equal(z**1, z) + assert_complex_equal(z**2, z*z) + assert_complex_equal(z**3, z*z*z) + + def test_power_zero(self): + # ticket #1271 + zero = np.array([0j]) + one = np.array([1+0j]) + cinf = np.array([complex(np.inf, 0)]) + cnan = np.array([complex(np.nan, np.nan)]) + + def assert_complex_equal(x, y): + x, y = np.asarray(x), np.asarray(y) + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + # positive powers + for p in [0.33, 0.5, 1, 
1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, p), zero) + + # zero power + assert_complex_equal(np.power(zero, 0), one) + with np.errstate(invalid="ignore"): + assert_complex_equal(np.power(zero, 0+1j), cnan) + + # negative power + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, -p), cnan) + assert_complex_equal(np.power(zero, -1+0.2j), cnan) + + def test_fast_power(self): + x = np.array([1, 2, 3], np.int16) + assert_((x**2.00001).dtype is (x**2.0).dtype) + + # Check that the fast path ignores 1-element not 0-d arrays + res = x ** np.array([[[2]]]) + assert_equal(res.shape, (1, 1, 3)) + + +class TestLog2(TestCase): + def test_log2_values(self) : + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g'] : + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.log2(xf), yf) + + def test_log2_ints(self): + # a good log2 implementation should provide this, + # might fail on OS with bad libm + for i in range(1, 65): + v = np.log2(2.**i) + assert_equal(v, float(i), err_msg='at exponent %d' % i) + + def test_log2_special(self): + assert_equal(np.log2(1.), 0.) + assert_equal(np.log2(np.inf), np.inf) + assert_(np.isnan(np.log2(np.nan))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.log2(-1.))) + assert_(np.isnan(np.log2(-np.inf))) + assert_equal(np.log2(0.), -np.inf) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + assert_(w[2].category is RuntimeWarning) + + +class TestExp2(TestCase): + def test_exp2_values(self) : + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g'] : + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.exp2(yf), xf) + + +class TestLogAddExp2(_FilterInvalids): + # Need test for intermediate precisions + def test_logaddexp2_values(self) : + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec in zip(['f', 'd', 'g'], [6, 15, 15]) : + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) + + def test_logaddexp2_range(self) : + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_inf(self) : + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='ignore'): + for dt in ['f', 'd', 'g'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, 0))) + assert_(np.isnan(np.logaddexp2(0, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) + + +class TestLog(TestCase): + def test_log_values(self) : + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g'] : + log2_ = 
0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.log(xf), yf) + + +class TestExp(TestCase): + def test_exp_values(self) : + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g'] : + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.exp(yf), xf) + + +class TestLogAddExp(_FilterInvalids): + def test_logaddexp_values(self) : + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec in zip(['f', 'd', 'g'], [6, 15, 15]) : + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) + + def test_logaddexp_range(self) : + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + + def test_inf(self) : + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='ignore'): + for dt in ['f', 'd', 'g'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, 0))) + assert_(np.isnan(np.logaddexp(0, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, np.nan))) + + +class TestLog1p(TestCase): + def test_log1p(self): + assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) + + def test_special(self): + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(ncu.log1p(np.nan), np.nan) + assert_equal(ncu.log1p(np.inf), np.inf) + assert_equal(ncu.log1p(-1.), -np.inf) + assert_equal(ncu.log1p(-2.), np.nan) + assert_equal(ncu.log1p(-np.inf), np.nan) + + +class TestExpm1(TestCase): + def test_expm1(self): + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) + + def test_special(self): + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(0.), 0.) + assert_equal(ncu.expm1(-0.), -0.) + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(-np.inf), -1.) 
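The expm1 and log1p checks above rest on a standard precision argument: for |x| far below machine epsilon, 1.0 + x rounds to exactly 1.0, so the naive exp(x) - 1 and log(1 + x) return 0.0, while expm1 and log1p evaluate the same quantities without ever forming 1 + x. A minimal standalone sketch of the effect (added for illustration; not part of the patch):

    import numpy as np

    x = 1e-16  # well below double-precision epsilon (~2.22e-16)
    print(np.log(1.0 + x))  # 0.0 -- x is lost when 1.0 + x rounds to 1.0
    print(np.log1p(x))      # ~1e-16, correct to full precision
    print(np.exp(x) - 1.0)  # 0.0, for the same rounding reason
    print(np.expm1(x))      # ~1e-16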
+ + +class TestHypot(TestCase, object): + def test_simple(self): + assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) + assert_almost_equal(ncu.hypot(0, 0), 0) + + +def assert_hypot_isnan(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isnan(ncu.hypot(x, y)), + "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) + + +def assert_hypot_isinf(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isinf(ncu.hypot(x, y)), + "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) + + +class TestHypotSpecialValues(TestCase): + def test_nan_outputs(self): + assert_hypot_isnan(np.nan, np.nan) + assert_hypot_isnan(np.nan, 1) + + def test_nan_outputs2(self): + assert_hypot_isinf(np.nan, np.inf) + assert_hypot_isinf(np.inf, np.nan) + assert_hypot_isinf(np.inf, 0) + assert_hypot_isinf(0, np.inf) + assert_hypot_isinf(np.inf, np.inf) + assert_hypot_isinf(np.inf, 23.0) + + def test_no_fpe(self): + assert_no_warnings(ncu.hypot, np.inf, 0) + + +def assert_arctan2_isnan(x, y): + assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_ispinf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_isninf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_ispzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_isnzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) + + +class TestArctan2SpecialValues(TestCase): + def test_one_one(self): + # atan2(1, 1) returns pi/4. + assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) + assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) + + def test_zero_nzero(self): + # atan2(+-0, -0) returns +-pi. + assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) + assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) + + def test_zero_pzero(self): + # atan2(+-0, +0) returns +-0. + assert_arctan2_ispzero(np.PZERO, np.PZERO) + assert_arctan2_isnzero(np.NZERO, np.PZERO) + + def test_zero_negative(self): + # atan2(+-0, x) returns +-pi for x < 0. + assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) + assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) + + def test_zero_positive(self): + # atan2(+-0, x) returns +-0 for x > 0. + assert_arctan2_ispzero(np.PZERO, 1) + assert_arctan2_isnzero(np.NZERO, 1) + + def test_positive_zero(self): + # atan2(y, +-0) returns +pi/2 for y > 0. + assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) + + def test_negative_zero(self): + # atan2(y, +-0) returns -pi/2 for y < 0. + assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) + + def test_any_ninf(self): + # atan2(+-y, -infinity) returns +-pi for finite y > 0. + assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) + assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) + + def test_any_pinf(self): + # atan2(+-y, +infinity) returns +-0 for finite y > 0. 
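+ # Illustrative note (added): a -0.0 result compares equal to 0.0, so the + # sign of the returned zero is only observable through np.signbit, which + # is exactly what assert_arctan2_ispzero/_isnzero check above.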
+ assert_arctan2_ispzero(1, np.inf) + assert_arctan2_isnzero(-1, np.inf) + + def test_inf_any(self): + # atan2(+-infinity, x) returns +-pi/2 for finite x. + assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) + + def test_inf_ninf(self): + # atan2(+-infinity, -infinity) returns +-3*pi/4. + assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) + + def test_inf_pinf(self): + # atan2(+-infinity, +infinity) returns +-pi/4. + assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) + + def test_nan_any(self): + # atan2(nan, x) returns nan for any x, including inf + assert_arctan2_isnan(np.nan, np.inf) + assert_arctan2_isnan(np.inf, np.nan) + assert_arctan2_isnan(np.nan, np.nan) + + +class TestLdexp(TestCase): + def _check_ldexp(self, tp): + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), + np.array(3, tp)), 16.) + + def test_ldexp(self): + # The default Python int type should work + assert_almost_equal(ncu.ldexp(2., 3), 16.) + # The following int types should all be accepted + self._check_ldexp(np.int8) + self._check_ldexp(np.int16) + self._check_ldexp(np.int32) + self._check_ldexp('i') + self._check_ldexp('l') + + def test_ldexp_overflow(self): + # silence warning emitted on overflow + with np.errstate(over="ignore"): + imax = np.iinfo(np.dtype('l')).max + imin = np.iinfo(np.dtype('l')).min + assert_equal(ncu.ldexp(2., imax), np.inf) + assert_equal(ncu.ldexp(2., imin), 0) + + +class TestMaximum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.maximum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.maximum.reduce([1, 2j]), 1) + assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.maximum(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : + arg1 = np.array([0, cnan, cnan], dtype=np.complex) + arg2 = np.array([cnan, 0, cnan], dtype=np.complex) + out = np.array([nan, nan, nan], dtype=np.complex) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=np.object) + arg2 = arg1 + 1 + assert_equal(np.maximum(arg1, arg2), arg2) + + +class TestMinimum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.minimum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + 
assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.minimum.reduce([1, 2j]), 2j) + assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.minimum(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : + arg1 = np.array([0, cnan, cnan], dtype=np.complex) + arg2 = np.array([cnan, 0, cnan], dtype=np.complex) + out = np.array([nan, nan, nan], dtype=np.complex) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=np.object) + arg2 = arg1 + 1 + assert_equal(np.minimum(arg1, arg2), arg1) + + +class TestFmax(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmax.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 9) + assert_equal(func(tmp2), 9) + + def test_reduce_complex(self): + assert_equal(np.fmax.reduce([1, 2j]), 1) + assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmax(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : + arg1 = np.array([0, cnan, cnan], dtype=np.complex) + arg2 = np.array([cnan, 0, cnan], dtype=np.complex) + out = np.array([0, 0, nan], dtype=np.complex) + assert_equal(np.fmax(arg1, arg2), out) + + +class TestFmin(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmin.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 1) + assert_equal(func(tmp2), 1) + + def test_reduce_complex(self): + assert_equal(np.fmin.reduce([1, 2j]), 2j) + assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmin(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : + arg1 = np.array([0, cnan, cnan], dtype=np.complex) + arg2 = np.array([cnan, 0, cnan], dtype=np.complex) + out = np.array([0, 0, nan], dtype=np.complex) + assert_equal(np.fmin(arg1, arg2), out) + + +class TestBool(TestCase): + def test_truth_table(self): + arg1 = [False, False, True, True] + arg2 = [False, True, False, True] + # OR + out = [False, True, True, True] + for func in (np.logical_or, np.bitwise_or, np.maximum): + 
assert_equal(func(arg1, arg2), out) + # AND + out = [False, False, False, True] + for func in (np.logical_and, np.bitwise_and, np.minimum): + assert_equal(func(arg1, arg2), out) + # XOR + out = [False, True, True, False] + for func in (np.logical_xor, np.bitwise_xor, np.not_equal): + assert_equal(func(arg1, arg2), out) + + +class TestFloatingPoint(TestCase): + def test_floating_point(self): + assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) + + +class TestDegrees(TestCase): + def test_degrees(self): + assert_almost_equal(ncu.degrees(np.pi), 180.0) + assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) + + +class TestRadians(TestCase): + def test_radians(self): + assert_almost_equal(ncu.radians(180.0), np.pi) + assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) + + +class TestSign(TestCase): + def test_sign(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape) + tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) + + with np.errstate(invalid='ignore'): + res = ncu.sign(a) + assert_equal(res, tgt) + res = ncu.sign(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + +class TestMinMax(TestCase): + def test_minmax_blocked(self): + # simd tests on max/min, test all alignments, slow but important + # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once) + for dt, sz in [(np.float32, 15), (np.float64, 7)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + for i in range(inp.size): + inp[:] = np.arange(inp.size, dtype=dt) + inp[i] = np.nan + emsg = lambda: '%r\n%s' % (inp, msg) + assert_(np.isnan(inp.max()), msg=emsg) + assert_(np.isnan(inp.min()), msg=emsg) + + inp[i] = 1e10 + assert_equal(inp.max(), 1e10, err_msg=msg) + inp[i] = -1e10 + assert_equal(inp.min(), -1e10, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(d.max(), d[0]) + assert_equal(d.min(), d[0]) + + +class TestAbsoluteNegative(TestCase): + def test_abs_neg_blocked(self): + # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 5)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + tgt = [ncu.absolute(i) for i in inp] + np.absolute(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + self.assertTrue((out >= 0).all()) + + tgt = [-1*(i) for i in inp] + np.negative(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + + # will throw invalid flag depending on compiler optimizations + with np.errstate(invalid='ignore'): + for v in [np.nan, -np.inf, np.inf]: + for i in range(inp.size): + d = np.arange(inp.size, dtype=dt) + inp[:] = -d + inp[i] = v + d[i] = -v if v == -np.inf else v + assert_array_equal(np.abs(inp), d, err_msg=msg) + np.abs(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + assert_array_equal(-inp, -1*inp, err_msg=msg) + np.negative(inp, out=out) + assert_array_equal(out, -1*inp, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(np.abs(d), d) + assert_equal(np.negative(d), -d) + np.negative(d, out=d) + np.negative(np.ones_like(d), out=d) + np.abs(d, out=d) + np.abs(np.ones_like(d), out=d) + + +class TestSpecialMethods(TestCase): + def test_wrap(self): + class with_wrap(object): + def __array__(self): + return np.zeros(1) + 
+            def __array_wrap__(self, arr, context):
+                r = with_wrap()
+                r.arr = arr
+                r.context = context
+                return r
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x.arr, np.zeros(1))
+        func, args, i = x.context
+        self.assertTrue(func is ncu.minimum)
+        self.assertEqual(len(args), 2)
+        assert_equal(args[0], a)
+        assert_equal(args[1], a)
+        self.assertEqual(i, 0)
+
+    def test_wrap_with_iterable(self):
+        # test fix for bug #1026:
+        class with_wrap(np.ndarray):
+            __array_priority__ = 10
+            def __new__(cls):
+                return np.asarray(1).view(cls).copy()
+            def __array_wrap__(self, arr, context):
+                return arr.view(type(self))
+        a = with_wrap()
+        x = ncu.multiply(a, (1, 2, 3))
+        self.assertTrue(isinstance(x, with_wrap))
+        assert_array_equal(x, np.array((1, 2, 3)))
+
+    def test_priority_with_scalar(self):
+        # test fix for bug #826:
+        class A(np.ndarray):
+            __array_priority__ = 10
+            def __new__(cls):
+                return np.asarray(1.0, 'float64').view(cls).copy()
+        a = A()
+        x = np.float64(1)*a
+        self.assertTrue(isinstance(x, A))
+        assert_array_equal(x, np.array(1))
+
+    def test_old_wrap(self):
+        class with_wrap(object):
+            def __array__(self):
+                return np.zeros(1)
+            def __array_wrap__(self, arr):
+                r = with_wrap()
+                r.arr = arr
+                return r
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x.arr, np.zeros(1))
+
+    def test_priority(self):
+        class A(object):
+            def __array__(self):
+                return np.zeros(1)
+            def __array_wrap__(self, arr, context):
+                r = type(self)()
+                r.arr = arr
+                r.context = context
+                return r
+        class B(A):
+            __array_priority__ = 20.
+        class C(A):
+            __array_priority__ = 40.
+        x = np.zeros(1)
+        a = A()
+        b = B()
+        c = C()
+        f = ncu.minimum
+        self.assertTrue(type(f(x, x)) is np.ndarray)
+        self.assertTrue(type(f(x, a)) is A)
+        self.assertTrue(type(f(x, b)) is B)
+        self.assertTrue(type(f(x, c)) is C)
+        self.assertTrue(type(f(a, x)) is A)
+        self.assertTrue(type(f(b, x)) is B)
+        self.assertTrue(type(f(c, x)) is C)
+
+        self.assertTrue(type(f(a, a)) is A)
+        self.assertTrue(type(f(a, b)) is B)
+        self.assertTrue(type(f(b, a)) is B)
+        self.assertTrue(type(f(b, b)) is B)
+        self.assertTrue(type(f(b, c)) is C)
+        self.assertTrue(type(f(c, b)) is C)
+        self.assertTrue(type(f(c, c)) is C)
+
+        self.assertTrue(type(ncu.exp(a)) is A)
+        self.assertTrue(type(ncu.exp(b)) is B)
+        self.assertTrue(type(ncu.exp(c)) is C)
+
+    def test_failing_wrap(self):
+        class A(object):
+            def __array__(self):
+                return np.zeros(1)
+            def __array_wrap__(self, arr, context):
+                raise RuntimeError
+        a = A()
+        self.assertRaises(RuntimeError, ncu.maximum, a, a)
+
+    def test_default_prepare(self):
+        class with_wrap(object):
+            __array_priority__ = 10
+            def __array__(self):
+                return np.zeros(1)
+            def __array_wrap__(self, arr, context):
+                return arr
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x, np.zeros(1))
+        assert_equal(type(x), np.ndarray)
+
+    def test_prepare(self):
+        class with_prepare(np.ndarray):
+            __array_priority__ = 10
+            def __array_prepare__(self, arr, context):
+                # make sure we can return a new array
+                return np.array(arr).view(type=with_prepare)
+        a = np.array(1).view(type=with_prepare)
+        x = np.add(a, a)
+        assert_equal(x, np.array(2))
+        assert_equal(type(x), with_prepare)
+
+    def test_failing_prepare(self):
+        class A(object):
+            def __array__(self):
+                return np.zeros(1)
+            def __array_prepare__(self, arr, context=None):
+                raise RuntimeError
+        a = A()
+        self.assertRaises(RuntimeError, ncu.maximum, a, a)
+
+    def test_array_with_context(self):
+        class A(object):
+            def __array__(self, dtype=None, context=None):
+                func, args, i = context
self.func = func + self.args = args + self.i = i + return np.zeros(1) + class B(object): + def __array__(self, dtype=None): + return np.zeros(1, dtype) + class C(object): + def __array__(self): + return np.zeros(1) + a = A() + ncu.maximum(np.zeros(1), a) + self.assertTrue(a.func is ncu.maximum) + assert_equal(a.args[0], 0) + self.assertTrue(a.args[1] is a) + self.assertTrue(a.i == 1) + assert_equal(ncu.maximum(a, B()), 0) + assert_equal(ncu.maximum(a, C()), 0) + + @dec.skipif(True) # ufunc override disabled for 1.9 + def test_ufunc_override(self): + class A(object): + def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): + return self, func, method, pos, inputs, kwargs + + a = A() + + b = np.matrix([1]) + c = np.array([1]) + res0 = np.multiply(a, b) + res1 = np.dot(a, b) + + # self + assert_equal(res0[0], a) + assert_equal(res1[0], a) + assert_equal(res0[1], np.multiply) + assert_equal(res1[1], np.dot) + assert_equal(res0[2], '__call__') + assert_equal(res1[2], '__call__') + assert_equal(res0[3], 0) + assert_equal(res1[3], 0) + assert_equal(res0[4], (a, b)) + assert_equal(res1[4], (a, b)) + assert_equal(res0[5], {}) + assert_equal(res1[5], {}) + + @dec.skipif(True) # ufunc override disabled for 1.9 + def test_ufunc_override_mro(self): + + # Some multi arg functions for testing. + def tres_mul(a, b, c): + return a * b * c + + def quatro_mul(a, b, c, d): + return a * b * c * d + + # Make these into ufuncs. + three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) + four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) + + class A(object): + def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): + return "A" + + class ASub(A): + def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): + return "ASub" + + class B(object): + def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): + return "B" + + class C(object): + def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): + return NotImplemented + + class CSub(object): + def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): + return NotImplemented + + + + a = A() + a_sub = ASub() + b = B() + c = C() + c_sub = CSub() + + # Standard + res = np.multiply(a, a_sub) + assert_equal(res, "ASub") + res = np.multiply(a_sub, b) + assert_equal(res, "ASub") + + # With 1 NotImplemented + res = np.multiply(c, a) + assert_equal(res, "A") + + # Both NotImplemented. + assert_raises(TypeError, np.multiply, c, c_sub) + assert_raises(TypeError, np.multiply, c_sub, c) + assert_raises(TypeError, np.multiply, 2, c) + + # Ternary testing. + assert_equal(three_mul_ufunc(a, 1, 2), "A") + assert_equal(three_mul_ufunc(1, a, 2), "A") + assert_equal(three_mul_ufunc(1, 2, a), "A") + + assert_equal(three_mul_ufunc(a, a, 6), "A") + assert_equal(three_mul_ufunc(a, 2, a), "A") + assert_equal(three_mul_ufunc(a, 2, b), "A") + assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") + assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") + assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") + assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") + + assert_equal(three_mul_ufunc(a, b, c), "A") + assert_equal(three_mul_ufunc(a, b, c_sub), "A") + assert_equal(three_mul_ufunc(1, 2, b), "B") + + assert_raises(TypeError, three_mul_ufunc, 1, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) + + # Quaternary testing. 
+ assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, 3), "A") + assert_equal(four_mul_ufunc(1, 1, a, 3), "A") + assert_equal(four_mul_ufunc(1, 1, 2, a), "A") + + assert_equal(four_mul_ufunc(a, b, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, b), "A") + assert_equal(four_mul_ufunc(b, 1, a, 3), "B") + assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") + assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") + + assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) + assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) + assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c) + + @dec.skipif(True) # ufunc override disabled for 1.9 + def test_ufunc_override_methods(self): + class A(object): + def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): + return self, ufunc, method, pos, inputs, kwargs + + # __call__ + a = A() + res = np.multiply.__call__(1, a, foo='bar', answer=42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], '__call__') + assert_equal(res[3], 1) + assert_equal(res[4], (1, a)) + assert_equal(res[5], {'foo': 'bar', 'answer': 42}) + + # reduce, positional args + res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], 0) + assert_equal(res[4], (a,)) + assert_equal(res[5], {'dtype':'dtype0', + 'out': 'out0', + 'keepdims': 'keep0', + 'axis': 'axis0'}) + + # reduce, kwargs + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', + keepdims='keep0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], 0) + assert_equal(res[4], (a,)) + assert_equal(res[5], {'dtype':'dtype0', + 'out': 'out0', + 'keepdims': 'keep0', + 'axis': 'axis0'}) + + # accumulate, pos args + res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], 0) + assert_equal(res[4], (a,)) + assert_equal(res[5], {'dtype':'dtype0', + 'out': 'out0', + 'axis': 'axis0'}) + + # accumulate, kwargs + res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], 0) + assert_equal(res[4], (a,)) + assert_equal(res[5], {'dtype':'dtype0', + 'out': 'out0', + 'axis': 'axis0'}) + + # reduceat, pos args + res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], 0) + assert_equal(res[4], (a, [4, 2])) + assert_equal(res[5], {'dtype':'dtype0', + 'out': 'out0', + 'axis': 'axis0'}) + + # reduceat, kwargs + res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], 0) + assert_equal(res[4], (a, [4, 2])) + assert_equal(res[5], {'dtype':'dtype0', + 'out': 'out0', + 'axis': 'axis0'}) + + # outer + res = np.multiply.outer(a, 42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'outer') + assert_equal(res[3], 0) + assert_equal(res[4], (a, 42)) + assert_equal(res[5], {}) + + # at + res = np.multiply.at(a, [4, 2], 'b0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'at') 
+ assert_equal(res[3], 0) + assert_equal(res[4], (a, [4, 2], 'b0')) + + @dec.skipif(True) # ufunc override disabled for 1.9 + def test_ufunc_override_out(self): + class A(object): + def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): + return kwargs + + + class B(object): + def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): + return kwargs + + a = A() + b = B() + res0 = np.multiply(a, b, 'out_arg') + res1 = np.multiply(a, b, out='out_arg') + res2 = np.multiply(2, b, 'out_arg') + res3 = np.multiply(3, b, out='out_arg') + res4 = np.multiply(a, 4, 'out_arg') + res5 = np.multiply(a, 5, out='out_arg') + + assert_equal(res0['out'], 'out_arg') + assert_equal(res1['out'], 'out_arg') + assert_equal(res2['out'], 'out_arg') + assert_equal(res3['out'], 'out_arg') + assert_equal(res4['out'], 'out_arg') + assert_equal(res5['out'], 'out_arg') + + # ufuncs with multiple output modf and frexp. + res6 = np.modf(a, 'out0', 'out1') + res7 = np.frexp(a, 'out0', 'out1') + assert_equal(res6['out'][0], 'out0') + assert_equal(res6['out'][1], 'out1') + assert_equal(res7['out'][0], 'out0') + assert_equal(res7['out'][1], 'out1') + + @dec.skipif(True) # ufunc override disabled for 1.9 + def test_ufunc_override_exception(self): + class A(object): + def __numpy_ufunc__(self, *a, **kwargs): + raise ValueError("oops") + a = A() + for func in [np.divide, np.dot]: + assert_raises(ValueError, func, a, a) + +class TestChoose(TestCase): + def test_mixed(self): + c = np.array([True, True]) + a = np.array([True, True]) + assert_equal(np.choose(c, (a, 1)), np.array([1, 1])) + + +def is_longdouble_finfo_bogus(): + info = np.finfo(np.longcomplex) + return not np.isfinite(np.log10(info.tiny/info.eps)) + + +class TestComplexFunctions(object): + funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, + np.arctanh, np.sin, np.cos, np.tan, np.exp, + np.exp2, np.log, np.sqrt, np.log10, np.log2, + np.log1p] + + def test_it(self): + for f in self.funcs: + if f is np.arccosh : + x = 1.5 + else : + x = .5 + fr = f(x) + fz = f(np.complex(x)) + assert_almost_equal(fz.real, fr, err_msg='real part %s'%f) + assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f) + + def test_precisions_consistent(self) : + z = 1 + 1j + for f in self.funcs : + fcf = f(np.csingle(z)) + fcd = f(np.cdouble(z)) + fcl = f(np.clongdouble(z)) + assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f) + assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f) + + def test_branch_cuts(self): + # check branch cuts and continuity on them + yield _check_branch_cut, np.log, -0.5, 1j, 1, -1 + yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1 + yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1 + yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1 + yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1 + + yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1 + yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1 + yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1 + + yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1 + yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1 + yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1 + + # check against bogus branch cuts: assert continuity between quadrants + yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1 + yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1 + yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1 + + yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1 + 
yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1 + yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1 + + @dec.knownfailureif(True, "These branch cuts are known to fail") + def test_branch_cuts_failing(self): + # XXX: signed zero not OK with ICC on 64-bit platform for log, see + # http://permalink.gmane.org/gmane.comp.python.numeric.general/25335 + yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True + yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True + yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True + yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True + # XXX: signed zeros are not OK for sqrt or for the arc* functions + yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True + yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True + yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True + yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True + yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True + yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True + yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True + + def test_against_cmath(self): + import cmath, sys + + points = [-1-1j, -1+1j, +1-1j, +1+1j] + name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', + 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} + atol = 4*np.finfo(np.complex).eps + for func in self.funcs: + fname = func.__name__.split('.')[-1] + cname = name_map.get(fname, fname) + try: + cfunc = getattr(cmath, cname) + except AttributeError: + continue + for p in points: + a = complex(func(np.complex_(p))) + b = cfunc(p) + assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname, p, a, b)) + + def check_loss_of_precision(self, dtype): + """Check loss of precision in complex arc* functions""" + + # Check against known-good functions + + info = np.finfo(dtype) + real_dtype = dtype(0.).real.dtype + eps = info.eps + + def check(x, rtol): + x = x.astype(real_dtype) + + z = x.astype(dtype) + d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsinh')) + + z = (1j*x).astype(dtype) + d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsin')) + + z = x.astype(dtype) + d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctanh')) + + z = (1j*x).astype(dtype) + d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctan')) + + # The switchover was chosen as 1e-3; hence there can be up to + # ~eps/1e-3 of relative cancellation error before it + + x_series = np.logspace(-20, -3.001, 200) + x_basic = np.logspace(-2.999, 0, 10, endpoint=False) + + if dtype is np.longcomplex: + # It's not guaranteed that the system-provided arc functions + # are accurate down to a few epsilons. (Eg. 
on Linux 64-bit) + # So, give more leeway for long complex tests here: + check(x_series, 50*eps) + else: + check(x_series, 2*eps) + check(x_basic, 2*eps/1e-3) + + # Check a few points + + z = np.array([1e-5*(1+1j)], dtype=dtype) + p = 9.999999999333333333e-6 + 1.000000000066666666e-5j + d = np.absolute(1-np.arctanh(z)/p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j + d = np.absolute(1-np.arcsinh(z)/p) + assert_(np.all(d < 1e-15)) + + p = 9.999999999333333333e-6j + 1.000000000066666666e-5 + d = np.absolute(1-np.arctan(z)/p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 + d = np.absolute(1-np.arcsin(z)/p) + assert_(np.all(d < 1e-15)) + + # Check continuity across switchover points + + def check(func, z0, d=1): + z0 = np.asarray(z0, dtype=dtype) + zp = z0 + abs(z0) * d * eps * 2 + zm = z0 - abs(z0) * d * eps * 2 + assert_(np.all(zp != zm), (zp, zm)) + + # NB: the cancellation error at the switchover is at least eps + good = (abs(func(zp) - func(zm)) < 2*eps) + assert_(np.all(good), (func, z0[~good])) + + for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): + pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3) + if rp != 0 or ip != 0] + check(func, pts, 1) + check(func, pts, 1j) + check(func, pts, 1+1j) + + def test_loss_of_precision(self): + for dtype in [np.complex64, np.complex_]: + yield self.check_loss_of_precision, dtype + + @dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo") + def test_loss_of_precision_longcomplex(self): + self.check_loss_of_precision(np.longcomplex) + + +class TestAttributes(TestCase): + def test_attributes(self): + add = ncu.add + assert_equal(add.__name__, 'add') + assert_(add.__doc__.startswith('add(x1, x2[, out])\n\n')) + self.assertTrue(add.ntypes >= 18) # don't fail if types added + self.assertTrue('ii->i' in add.types) + assert_equal(add.nin, 2) + assert_equal(add.nout, 1) + assert_equal(add.identity, 0) + + +class TestSubclass(TestCase): + def test_subclass_op(self): + class simple(np.ndarray): + def __new__(subtype, shape): + self = np.ndarray.__new__(subtype, shape, dtype=object) + self.fill(0) + return self + a = simple((3, 4)) + assert_equal(a+a, a) + +def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, + dtype=np.complex): + """ + Check for a branch cut in a function. + + Assert that `x0` lies on a branch cut of function `f` and `f` is + continuous from the direction `dx`. 
+ + Parameters + ---------- + f : func + Function to check + x0 : array-like + Point on branch cut + dx : array-like + Direction to check continuity in + re_sign, im_sign : {1, -1} + Change of sign of the real or imaginary part expected + sig_zero_ok : bool + Whether to check if the branch cut respects signed zero (if applicable) + dtype : dtype + Dtype to check (should be complex) + + """ + x0 = np.atleast_1d(x0).astype(dtype) + dx = np.atleast_1d(dx).astype(dtype) + + scale = np.finfo(dtype).eps * 1e3 + atol = 1e-4 + + y0 = f(x0) + yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) + ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) + + assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) + + if sig_zero_ok: + # check that signed zeros also work as a displacement + jr = (x0.real == 0) & (dx.real != 0) + ji = (x0.imag == 0) & (dx.imag != 0) + + x = -x0 + x.real[jr] = 0.*dx.real + x.imag[ji] = 0.*dx.imag + x = -x + ym = f(x) + ym = ym[jr | ji] + y0 = y0[jr | ji] + assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) + +def test_copysign(): + assert_(np.copysign(1, -1) == -1) + with np.errstate(divide="ignore"): + assert_(1 / np.copysign(0, -1) < 0) + assert_(1 / np.copysign(0, 1) > 0) + assert_(np.signbit(np.copysign(np.nan, -1))) + assert_(not np.signbit(np.copysign(np.nan, 1))) + +def _test_nextafter(t): + one = t(1) + two = t(2) + zero = t(0) + eps = np.finfo(t).eps + assert_(np.nextafter(one, two) - one == eps) + assert_(np.nextafter(one, zero) - one < 0) + assert_(np.isnan(np.nextafter(np.nan, one))) + assert_(np.isnan(np.nextafter(one, np.nan))) + assert_(np.nextafter(one, one) == one) + +def test_nextafter(): + return _test_nextafter(np.float64) + +def test_nextafterf(): + return _test_nextafter(np.float32) + +@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(), + "Long double support buggy on win32 and PPC, ticket 1664.") +def test_nextafterl(): + return _test_nextafter(np.longdouble) + +def _test_spacing(t): + one = t(1) + eps = np.finfo(t).eps + nan = t(np.nan) + inf = t(np.inf) + with np.errstate(invalid='ignore'): + assert_(np.spacing(one) == eps) + assert_(np.isnan(np.spacing(nan))) + assert_(np.isnan(np.spacing(inf))) + assert_(np.isnan(np.spacing(-inf))) + assert_(np.spacing(t(1e30)) != 0) + +def test_spacing(): + return _test_spacing(np.float64) + +def test_spacingf(): + return _test_spacing(np.float32) + +@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(), + "Long double support buggy on win32 and PPC, ticket 1664.") +def test_spacingl(): + return _test_spacing(np.longdouble) + +def test_spacing_gfortran(): + # Reference from this fortran file, built with gfortran 4.3.3 on linux + # 32bits: + # PROGRAM test_spacing + # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) + # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) + # + # WRITE(*,*) spacing(0.00001_DBL) + # WRITE(*,*) spacing(1.0_DBL) + # WRITE(*,*) spacing(1000._DBL) + # WRITE(*,*) spacing(10500._DBL) + # + # WRITE(*,*) spacing(0.00001_SGL) + # WRITE(*,*) spacing(1.0_SGL) + # WRITE(*,*) spacing(1000._SGL) + # WRITE(*,*) spacing(10500._SGL) + # END PROGRAM + ref = {} + ref[np.float64] = [1.69406589450860068E-021, + 2.22044604925031308E-016, + 
1.13686837721616030E-013, + 1.81898940354585648E-012] + ref[np.float32] = [ + 9.09494702E-13, + 1.19209290E-07, + 6.10351563E-05, + 9.76562500E-04] + + for dt, dec in zip([np.float32, np.float64], (10, 20)): + x = np.array([1e-5, 1, 1000, 10500], dtype=dt) + assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec) + +def test_nextafter_vs_spacing(): + # XXX: spacing does not handle long double yet + for t in [np.float32, np.float64]: + for _f in [1, 1e-5, 1000]: + f = t(_f) + f1 = t(_f + 1) + assert_(np.nextafter(f, f1) - f == np.spacing(f)) + +def test_pos_nan(): + """Check np.nan is a positive nan.""" + assert_(np.signbit(np.nan) == 0) + +def test_reduceat(): + """Test bug in reduceat when structured arrays are not copied.""" + db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) + a = np.empty([100], dtype=db) + a['name'] = 'Simple' + a['time'] = 10 + a['value'] = 100 + indx = [0, 7, 15, 25] + + h2 = [] + val1 = indx[0] + for val2 in indx[1:]: + h2.append(np.add.reduce(a['value'][val1:val2])) + val1 = val2 + h2.append(np.add.reduce(a['value'][val1:])) + h2 = np.array(h2) + + # test buffered -- this should work + h1 = np.add.reduceat(a['value'], indx) + assert_array_almost_equal(h1, h2) + + # This is when the error occurs. + # test no buffer + res = np.setbufsize(32) + h1 = np.add.reduceat(a['value'], indx) + np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) + assert_array_almost_equal(h1, h2) + +def test_reduceat_empty(): + """Reduceat should work with empty arrays""" + indices = np.array([], 'i4') + x = np.array([], 'f8') + result = np.add.reduceat(x, indices) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0,)) + # Another case with a slightly different zero-sized shape + x = np.ones((5, 2)) + result = np.add.reduceat(x, [], axis=0) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0, 2)) + result = np.add.reduceat(x, [], axis=1) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (5, 0)) + +def test_complex_nan_comparisons(): + nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] + fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), + complex(1, 1), complex(-1, -1), complex(0, 0)] + + with np.errstate(invalid='ignore'): + for x in nans + fins: + x = np.array([x]) + for y in nans + fins: + y = np.array([y]) + + if np.isfinite(x) and np.isfinite(y): + continue + + assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) + assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) + assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) + assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) + assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath_complex.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath_complex.py new file mode 100644 index 0000000000000..4f3da4397acfd --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath_complex.py @@ -0,0 +1,537 @@ +from __future__ import division, absolute_import, print_function + +import sys +import platform + +from numpy.testing import * +import numpy.core.umath as ncu +import numpy as np + +# TODO: branch cuts (use Pauli code) +# TODO: conj 'symmetry' +# TODO: FPU exceptions + +# At least on Windows the results of many complex functions are not conforming +# to the C99 standard. See ticket 1574. +# Ditto for Solaris (ticket 1642) and OS X on PowerPC. 
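+# The probe below exercises two cheap C99 canaries before anything else runs:
+# Annex G requires cexp(inf + 0i) to have a zero imaginary part and
+# clog(-0 + 0i) to have imaginary part pi, so a deviation in either is taken
+# as evidence that the platform libm is non-conforming and the special-value
+# tests should be skipped.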
+with np.errstate(all='ignore'):
+    functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
+                            or (np.log(complex(np.NZERO, 0)).imag != np.pi))
+# TODO: replace with a check on whether platform-provided C99 funcs are used
+skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+
+def platform_skip(func):
+    return dec.skipif(skip_complex_tests,
+        "Numpy is using complex functions (e.g. sqrt) provided by your "
+        "platform's C library. However, they do not seem to behave according "
+        "to C99 -- so C99 tests are skipped.")(func)
+
+
+class TestCexp(object):
+    def test_simple(self):
+        check = check_complex_value
+        f = np.exp
+
+        yield check, f, 1, 0, np.exp(1), 0, False
+        yield check, f, 0, 1, np.cos(1), np.sin(1), False
+
+        ref = np.exp(1) * np.complex(np.cos(1), np.sin(1))
+        yield check, f, 1, 1, ref.real, ref.imag, False
+
+    @platform_skip
+    def test_special_values(self):
+        # C99: Section G 6.3.1
+
+        check = check_complex_value
+        f = np.exp
+
+        # cexp(+-0 + 0i) is 1 + 0i
+        yield check, f, np.PZERO, 0, 1, 0, False
+        yield check, f, np.NZERO, 0, 1, 0, False
+
+        # cexp(x + infi) is nan + nani for finite x and raises the 'invalid'
+        # FPU exception
+        yield check, f, 1, np.inf, np.nan, np.nan
+        yield check, f, -1, np.inf, np.nan, np.nan
+        yield check, f, 0, np.inf, np.nan, np.nan
+
+        # cexp(inf + 0i) is inf + 0i
+        yield check, f, np.inf, 0, np.inf, 0
+
+        # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
+        ref = np.complex(np.cos(1.), np.sin(1.))
+        yield check, f, -np.inf, 1, np.PZERO, np.PZERO
+
+        ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
+        yield check, f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO
+
+        # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
+        ref = np.complex(np.cos(1.), np.sin(1.))
+        yield check, f, np.inf, 1, np.inf, np.inf
+
+        ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
+        yield check, f, np.inf, 0.75 * np.pi, -np.inf, np.inf
+
+        # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
+        def _check_ninf_inf(dummy):
+            msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(np.complex(-np.inf, np.inf)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        yield _check_ninf_inf, None
+
+        # cexp(inf + inf i) is +-inf + NaNi and raises the 'invalid' FPU
+        # exception.
+        def _check_inf_inf(dummy):
+            msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(np.complex(np.inf, np.inf)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        yield _check_inf_inf, None
+
+        # cexp(-inf + nan i) is +-0 +- 0i
+        def _check_ninf_nan(dummy):
+            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(np.complex(-np.inf, np.nan)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        yield _check_ninf_nan, None
+
+        # cexp(inf + nan i) is +-inf + nan
+        def _check_inf_nan(dummy):
+            msgform = "cexp(inf, nan) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(np.complex(np.inf, np.nan)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        yield _check_inf_nan, None
+
+        # cexp(nan + yi) is nan + nani for y != 0 (optional: raises the
+        # 'invalid' FPU exception)
+        yield check, f, np.nan, 1, np.nan, np.nan
+        yield check, f, np.nan, -1, np.nan, np.nan
+
+        yield check, f, np.nan, np.inf, np.nan, np.nan
+        yield check, f, np.nan, -np.inf, np.nan, np.nan
+
+        # cexp(nan + nani) is nan + nani
+        yield check, f, np.nan, np.nan, np.nan, np.nan
+
+    @dec.knownfailureif(True, "cexp(nan + 0I) is wrong on most implementations")
+    def test_special_values2(self):
+        # XXX: most implementations get it wrong here (including glibc <= 2.10)
+        # cexp(nan + 0i) is nan + 0i
+        check = check_complex_value
+        f = np.exp
+        yield check, f, np.nan, 0, np.nan, 0
+
+class TestClog(TestCase):
+    def test_simple(self):
+        x = np.array([1+0j, 1+2j])
+        y_r = np.log(np.abs(x)) + 1j * np.angle(x)
+        y = np.log(x)
+        for i in range(len(x)):
+            assert_almost_equal(y[i], y_r[i])
+
+    @platform_skip
+    @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
+    def test_special_values(self):
+        xl = []
+        yl = []
+
+        # From C99 std (Sec 6.3.2)
+        # XXX: check exceptions raised
+        # --- raise for invalid fails.
+
+        # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
+        # floating-point exception.
+        with np.errstate(divide='raise'):
+            x = np.array([np.NZERO], dtype=np.complex)
+            y = np.complex(-np.inf, np.pi)
+            self.assertRaises(FloatingPointError, np.log, x)
+            with np.errstate(divide='ignore'):
+                assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
+        # floating-point exception.
+        with np.errstate(divide='raise'):
+            x = np.array([0], dtype=np.complex)
+            y = np.complex(-np.inf, 0)
+            self.assertRaises(FloatingPointError, np.log, x)
+            with np.errstate(divide='ignore'):
+                assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(x + i inf) returns +inf + i pi/2, for finite x.
+        x = np.array([complex(1, np.inf)], dtype=np.complex)
+        y = np.complex(np.inf, 0.5 * np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        x = np.array([complex(-1, np.inf)], dtype=np.complex)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(x + iNaN) returns NaN + iNaN and optionally raises the
+        # 'invalid' floating-point exception, for finite x.
+ with np.errstate(invalid='raise'): + x = np.array([complex(1., np.nan)], dtype=np.complex) + y = np.complex(np.nan, np.nan) + #self.assertRaises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + with np.errstate(invalid='raise'): + x = np.array([np.inf + 1j * np.nan], dtype=np.complex) + #self.assertRaises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. + x = np.array([-np.inf + 1j], dtype=np.complex) + y = np.complex(np.inf, np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. + x = np.array([np.inf + 1j], dtype=np.complex) + y = np.complex(np.inf, 0) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(- inf + i inf) returns +inf + i3pi /4. + x = np.array([complex(-np.inf, np.inf)], dtype=np.complex) + y = np.complex(np.inf, 0.75 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + i inf) returns +inf + ipi /4. + x = np.array([complex(np.inf, np.inf)], dtype=np.complex) + y = np.complex(np.inf, 0.25 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+/- inf + iNaN) returns +inf + iNaN. + x = np.array([complex(np.inf, np.nan)], dtype=np.complex) + y = np.complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-np.inf, np.nan)], dtype=np.complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iy) returns NaN + iNaN and optionally raises the + # 'invalid' floating-point exception, for finite y. + x = np.array([complex(np.nan, 1)], dtype=np.complex) + y = np.complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + i inf) returns +inf + iNaN. + x = np.array([complex(np.nan, np.inf)], dtype=np.complex) + y = np.complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iNaN) returns NaN + iNaN. + x = np.array([complex(np.nan, np.nan)], dtype=np.complex) + y = np.complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(conj(z)) = conj(clog(z)). 
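+        # (C99 Annex G's conjugate rule; verified below over all the
+        # special-value inputs accumulated in xl above.)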
+ xa = np.array(xl, dtype=np.complex) + ya = np.array(yl, dtype=np.complex) + with np.errstate(divide='ignore'): + for i in range(len(xa)): + assert_almost_equal(np.log(np.conj(xa[i])), np.conj(np.log(xa[i]))) + +class TestCsqrt(object): + + def test_simple(self): + # sqrt(1) + yield check_complex_value, np.sqrt, 1, 0, 1, 0 + + # sqrt(1i) + yield check_complex_value, np.sqrt, 0, 1, 0.5*np.sqrt(2), 0.5*np.sqrt(2), False + + # sqrt(-1) + yield check_complex_value, np.sqrt, -1, 0, 0, 1 + + def test_simple_conjugate(self): + ref = np.conj(np.sqrt(np.complex(1, 1))) + def f(z): + return np.sqrt(np.conj(z)) + yield check_complex_value, f, 1, 1, ref.real, ref.imag, False + + #def test_branch_cut(self): + # _check_branch_cut(f, -1, 0, 1, -1) + + @platform_skip + def test_special_values(self): + check = check_complex_value + f = np.sqrt + + # C99: Sec G 6.4.2 + x, y = [], [] + + # csqrt(+-0 + 0i) is 0 + 0i + yield check, f, np.PZERO, 0, 0, 0 + yield check, f, np.NZERO, 0, 0, 0 + + # csqrt(x + infi) is inf + infi for any x (including NaN) + yield check, f, 1, np.inf, np.inf, np.inf + yield check, f, -1, np.inf, np.inf, np.inf + + yield check, f, np.PZERO, np.inf, np.inf, np.inf + yield check, f, np.NZERO, np.inf, np.inf, np.inf + yield check, f, np.inf, np.inf, np.inf, np.inf + yield check, f, -np.inf, np.inf, np.inf, np.inf + yield check, f, -np.nan, np.inf, np.inf, np.inf + + # csqrt(x + nani) is nan + nani for any finite x + yield check, f, 1, np.nan, np.nan, np.nan + yield check, f, -1, np.nan, np.nan, np.nan + yield check, f, 0, np.nan, np.nan, np.nan + + # csqrt(-inf + yi) is +0 + infi for any finite y > 0 + yield check, f, -np.inf, 1, np.PZERO, np.inf + + # csqrt(inf + yi) is +inf + 0i for any finite y > 0 + yield check, f, np.inf, 1, np.inf, np.PZERO + + # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) + def _check_ninf_nan(dummy): + msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" + z = np.sqrt(np.array(np.complex(-np.inf, np.nan))) + #Fixme: ugly workaround for isinf bug. 
+ with np.errstate(invalid='ignore'): + if not (np.isnan(z.real) and np.isinf(z.imag)): + raise AssertionError(msgform % (z.real, z.imag)) + + yield _check_ninf_nan, None + + # csqrt(+inf + nani) is inf + nani + yield check, f, np.inf, np.nan, np.inf, np.nan + + # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x + # + nani) + yield check, f, np.nan, 0, np.nan, np.nan + yield check, f, np.nan, 1, np.nan, np.nan + yield check, f, np.nan, np.nan, np.nan, np.nan + + # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch + # cuts first) + +class TestCpow(TestCase): + def setUp(self): + self.olderr = np.seterr(invalid='ignore') + + def tearDown(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + y_r = x ** 2 + y = np.power(x, 2) + for i in range(len(x)): + assert_almost_equal(y[i], y_r[i]) + + def test_scalar(self): + x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + lx = list(range(len(x))) + # Compute the values for complex type in python + p_r = [complex(x[i]) ** complex(y[i]) for i in lx] + # Substitute a result allowed by C99 standard + p_r[4] = complex(np.inf, np.nan) + # Do the same with numpy complex scalars + n_r = [x[i] ** y[i] for i in lx] + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + + def test_array(self): + x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + lx = list(range(len(x))) + # Compute the values for complex type in python + p_r = [complex(x[i]) ** complex(y[i]) for i in lx] + # Substitute a result allowed by C99 standard + p_r[4] = complex(np.inf, np.nan) + # Do the same with numpy arrays + n_r = x ** y + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + +class TestCabs(object): + def setUp(self): + self.olderr = np.seterr(invalid='ignore') + + def tearDown(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) + y = np.abs(x) + for i in range(len(x)): + assert_almost_equal(y[i], y_r[i]) + + def test_fabs(self): + # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) + x = np.array([1+0j], dtype=np.complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(1, np.NZERO)], dtype=np.complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex) + assert_array_equal(np.abs(x), np.real(x)) + + def test_cabs_inf_nan(self): + x, y = [], [] + + # cabs(+-nan + nani) returns nan + x.append(np.nan) + y.append(np.nan) + yield check_real_value, np.abs, np.nan, np.nan, np.nan + + x.append(np.nan) + y.append(-np.nan) + yield check_real_value, np.abs, -np.nan, np.nan, np.nan + + # According to C99 standard, if exactly one of the real/part is inf and + # the other nan, then cabs should return inf + x.append(np.inf) + y.append(np.nan) + yield check_real_value, np.abs, np.inf, np.nan, np.inf + + x.append(-np.inf) + y.append(np.nan) + yield check_real_value, np.abs, -np.inf, np.nan, np.inf + + # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) + def f(a): + return np.abs(np.conj(a)) + def g(a, b): + return np.abs(np.complex(a, b)) + + xa = np.array(x, dtype=np.complex) + for i in range(len(xa)): + ref = g(x[i], y[i]) + 
+            yield check_real_value, f, x[i], y[i], ref
+
+class TestCarg(object):
+    def test_simple(self):
+        check_real_value(ncu._arg, 1, 0, 0, False)
+        check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
+
+        check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
+        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+
+    @dec.knownfailureif(True,
+        "Complex arithmetic with signed zero is buggy on most implementations")
+    def test_zero(self):
+        # carg(-0 +- 0i) returns +- pi
+        yield check_real_value, ncu._arg, np.NZERO, np.PZERO, np.pi, False
+        yield check_real_value, ncu._arg, np.NZERO, np.NZERO, -np.pi, False
+
+        # carg(+0 +- 0i) returns +- 0
+        yield check_real_value, ncu._arg, np.PZERO, np.PZERO, np.PZERO
+        yield check_real_value, ncu._arg, np.PZERO, np.NZERO, np.NZERO
+
+        # carg(x +- 0i) returns +- 0 for x > 0
+        yield check_real_value, ncu._arg, 1, np.PZERO, np.PZERO, False
+        yield check_real_value, ncu._arg, 1, np.NZERO, np.NZERO, False
+
+        # carg(x +- 0i) returns +- pi for x < 0
+        yield check_real_value, ncu._arg, -1, np.PZERO, np.pi, False
+        yield check_real_value, ncu._arg, -1, np.NZERO, -np.pi, False
+
+        # carg(+- 0 + yi) returns pi/2 for y > 0
+        yield check_real_value, ncu._arg, np.PZERO, 1, 0.5 * np.pi, False
+        yield check_real_value, ncu._arg, np.NZERO, 1, 0.5 * np.pi, False
+
+        # carg(+- 0 + yi) returns -pi/2 for y < 0
+        yield check_real_value, ncu._arg, np.PZERO, -1, -0.5 * np.pi, False
+        yield check_real_value, ncu._arg, np.NZERO, -1, -0.5 * np.pi, False
+
+    #def test_branch_cuts(self):
+    #    _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
+
+    def test_special_values(self):
+        # carg(-np.inf +- yi) returns +-pi for finite y > 0
+        yield check_real_value, ncu._arg, -np.inf, 1, np.pi, False
+        yield check_real_value, ncu._arg, -np.inf, -1, -np.pi, False
+
+        # carg(np.inf +- yi) returns +-0 for finite y > 0
+        yield check_real_value, ncu._arg, np.inf, 1, np.PZERO, False
+        yield check_real_value, ncu._arg, np.inf, -1, np.NZERO, False
+
+        # carg(x +- np.infi) returns +-pi/2 for finite x
+        yield check_real_value, ncu._arg, 1, np.inf, 0.5 * np.pi, False
+        yield check_real_value, ncu._arg, 1, -np.inf, -0.5 * np.pi, False
+
+        # carg(-np.inf +- np.infi) returns +-3pi/4
+        yield check_real_value, ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False
+        yield check_real_value, ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False
+
+        # carg(np.inf +- np.infi) returns +-pi/4
+        yield check_real_value, ncu._arg, np.inf, np.inf, 0.25 * np.pi, False
+        yield check_real_value, ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False
+
+        # carg(x + yi) returns np.nan if x or y is nan
+        yield check_real_value, ncu._arg, np.nan, 0, np.nan, False
+        yield check_real_value, ncu._arg, 0, np.nan, np.nan, False
+
+        yield check_real_value, ncu._arg, np.nan, np.inf, np.nan, False
+        yield check_real_value, ncu._arg, np.inf, np.nan, np.nan, False
+
+def check_real_value(f, x1, y1, x, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    if exact:
+        assert_equal(f(z1), x)
+    else:
+        assert_almost_equal(f(z1), x)
+
+def check_complex_value(f, x1, y1, x2, y2, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    z2 = np.complex(x2, y2)
+    with np.errstate(invalid='ignore'):
+        if exact:
+            assert_equal(f(z1), z2)
+        else:
+            assert_almost_equal(f(z1), z2)
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_unicode.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_unicode.py
new file mode 100644
index 0000000000000..d184b3a9fe99b
--- /dev/null
+++
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_unicode.py @@ -0,0 +1,357 @@ +from __future__ import division, absolute_import, print_function + +import sys + +from numpy.testing import * +from numpy.core import * +from numpy.compat import asbytes, sixu + +# Guess the UCS length for this python interpreter +if sys.version_info[:2] >= (3, 3): + # Python 3.3 uses a flexible string representation + ucs4 = False + def buffer_length(arr): + if isinstance(arr, unicode): + arr = str(arr) + return (sys.getsizeof(arr+"a") - sys.getsizeof(arr)) * len(arr) + v = memoryview(arr) + if v.shape is None: + return len(v) * v.itemsize + else: + return prod(v.shape) * v.itemsize +elif sys.version_info[0] >= 3: + import array as _array + ucs4 = (_array.array('u').itemsize == 4) + def buffer_length(arr): + if isinstance(arr, unicode): + return _array.array('u').itemsize * len(arr) + v = memoryview(arr) + if v.shape is None: + return len(v) * v.itemsize + else: + return prod(v.shape) * v.itemsize +else: + if len(buffer(sixu('u'))) == 4: + ucs4 = True + else: + ucs4 = False + def buffer_length(arr): + if isinstance(arr, ndarray): + return len(arr.data) + return len(buffer(arr)) + +# In both cases below we need to make sure that the byte swapped value (as +# UCS4) is still a valid unicode: +# Value that can be represented in UCS2 interpreters +ucs2_value = sixu('\u0900') +# Value that cannot be represented in UCS2 interpreters (but can in UCS4) +ucs4_value = sixu('\U00100900') + + +############################################################ +# Creation tests +############################################################ + +class create_zeros(object): + """Check the creation of zero-valued arrays""" + + def content_check(self, ua, ua_scalar, nbytes): + + # Check the length of the unicode base type + self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) + # Check the length of the data buffer + self.assertTrue(buffer_length(ua) == nbytes) + # Small check that data in array element is ok + self.assertTrue(ua_scalar == sixu('')) + # Encode to ascii and double check + self.assertTrue(ua_scalar.encode('ascii') == asbytes('')) + # Check buffer lengths for scalars + if ucs4: + self.assertTrue(buffer_length(ua_scalar) == 0) + else: + self.assertTrue(buffer_length(ua_scalar) == 0) + + def test_zeros0D(self): + """Check creation of 0-dimensional objects""" + ua = zeros((), dtype='U%s' % self.ulen) + self.content_check(ua, ua[()], 4*self.ulen) + + def test_zerosSD(self): + """Check creation of single-dimensional objects""" + ua = zeros((2,), dtype='U%s' % self.ulen) + self.content_check(ua, ua[0], 4*self.ulen*2) + self.content_check(ua, ua[1], 4*self.ulen*2) + + def test_zerosMD(self): + """Check creation of multi-dimensional objects""" + ua = zeros((2, 3, 4), dtype='U%s' % self.ulen) + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) + + +class test_create_zeros_1(create_zeros, TestCase): + """Check the creation of zero-valued arrays (size 1)""" + ulen = 1 + + +class test_create_zeros_2(create_zeros, TestCase): + """Check the creation of zero-valued arrays (size 2)""" + ulen = 2 + + +class test_create_zeros_1009(create_zeros, TestCase): + """Check the creation of zero-valued arrays (size 1009)""" + ulen = 1009 + + +class create_values(object): + """Check the creation of unicode arrays with values""" + + def content_check(self, ua, ua_scalar, nbytes): + + # Check the length of the unicode base type + self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) 
+ # Check the length of the data buffer + self.assertTrue(buffer_length(ua) == nbytes) + # Small check that data in array element is ok + self.assertTrue(ua_scalar == self.ucs_value*self.ulen) + # Encode to UTF-8 and double check + self.assertTrue(ua_scalar.encode('utf-8') == \ + (self.ucs_value*self.ulen).encode('utf-8')) + # Check buffer lengths for scalars + if ucs4: + self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) + else: + if self.ucs_value == ucs4_value: + # In UCS2, the \U0010FFFF will be represented using a + # surrogate *pair* + self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) + else: + # In UCS2, the \uFFFF will be represented using a + # regular 2-byte word + self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) + + def test_values0D(self): + """Check creation of 0-dimensional objects with values""" + ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) + self.content_check(ua, ua[()], 4*self.ulen) + + def test_valuesSD(self): + """Check creation of single-dimensional objects with values""" + ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) + self.content_check(ua, ua[0], 4*self.ulen*2) + self.content_check(ua, ua[1], 4*self.ulen*2) + + def test_valuesMD(self): + """Check creation of multi-dimensional objects with values""" + ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) + + +class test_create_values_1_ucs2(create_values, TestCase): + """Check the creation of valued arrays (size 1, UCS2 values)""" + ulen = 1 + ucs_value = ucs2_value + + +class test_create_values_1_ucs4(create_values, TestCase): + """Check the creation of valued arrays (size 1, UCS4 values)""" + ulen = 1 + ucs_value = ucs4_value + + +class test_create_values_2_ucs2(create_values, TestCase): + """Check the creation of valued arrays (size 2, UCS2 values)""" + ulen = 2 + ucs_value = ucs2_value + + +class test_create_values_2_ucs4(create_values, TestCase): + """Check the creation of valued arrays (size 2, UCS4 values)""" + ulen = 2 + ucs_value = ucs4_value + + +class test_create_values_1009_ucs2(create_values, TestCase): + """Check the creation of valued arrays (size 1009, UCS2 values)""" + ulen = 1009 + ucs_value = ucs2_value + + +class test_create_values_1009_ucs4(create_values, TestCase): + """Check the creation of valued arrays (size 1009, UCS4 values)""" + ulen = 1009 + ucs_value = ucs4_value + + +############################################################ +# Assignment tests +############################################################ + +class assign_values(object): + """Check the assignment of unicode arrays with values""" + + def content_check(self, ua, ua_scalar, nbytes): + + # Check the length of the unicode base type + self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) + # Check the length of the data buffer + self.assertTrue(buffer_length(ua) == nbytes) + # Small check that data in array element is ok + self.assertTrue(ua_scalar == self.ucs_value*self.ulen) + # Encode to UTF-8 and double check + self.assertTrue(ua_scalar.encode('utf-8') == \ + (self.ucs_value*self.ulen).encode('utf-8')) + # Check buffer lengths for scalars + if ucs4: + self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) + else: + if self.ucs_value == ucs4_value: + # In UCS2, the \U0010FFFF will be represented using a + # surrogate *pair* + self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) + else: + # In UCS2, the \uFFFF will be 
represented using a + # regular 2-byte word + self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) + + def test_values0D(self): + """Check assignment of 0-dimensional objects with values""" + ua = zeros((), dtype='U%s' % self.ulen) + ua[()] = self.ucs_value*self.ulen + self.content_check(ua, ua[()], 4*self.ulen) + + def test_valuesSD(self): + """Check assignment of single-dimensional objects with values""" + ua = zeros((2,), dtype='U%s' % self.ulen) + ua[0] = self.ucs_value*self.ulen + self.content_check(ua, ua[0], 4*self.ulen*2) + ua[1] = self.ucs_value*self.ulen + self.content_check(ua, ua[1], 4*self.ulen*2) + + def test_valuesMD(self): + """Check assignment of multi-dimensional objects with values""" + ua = zeros((2, 3, 4), dtype='U%s' % self.ulen) + ua[0, 0, 0] = self.ucs_value*self.ulen + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + ua[-1, -1, -1] = self.ucs_value*self.ulen + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) + + +class test_assign_values_1_ucs2(assign_values, TestCase): + """Check the assignment of valued arrays (size 1, UCS2 values)""" + ulen = 1 + ucs_value = ucs2_value + + +class test_assign_values_1_ucs4(assign_values, TestCase): + """Check the assignment of valued arrays (size 1, UCS4 values)""" + ulen = 1 + ucs_value = ucs4_value + + +class test_assign_values_2_ucs2(assign_values, TestCase): + """Check the assignment of valued arrays (size 2, UCS2 values)""" + ulen = 2 + ucs_value = ucs2_value + + +class test_assign_values_2_ucs4(assign_values, TestCase): + """Check the assignment of valued arrays (size 2, UCS4 values)""" + ulen = 2 + ucs_value = ucs4_value + + +class test_assign_values_1009_ucs2(assign_values, TestCase): + """Check the assignment of valued arrays (size 1009, UCS2 values)""" + ulen = 1009 + ucs_value = ucs2_value + + +class test_assign_values_1009_ucs4(assign_values, TestCase): + """Check the assignment of valued arrays (size 1009, UCS4 values)""" + ulen = 1009 + ucs_value = ucs4_value + + + +############################################################ +# Byteorder tests +############################################################ + +class byteorder_values: + """Check the byteorder of unicode arrays in round-trip conversions""" + + def test_values0D(self): + """Check byteorder of 0-dimensional objects""" + ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) + ua2 = ua.newbyteorder() + # This changes the interpretation of the data region (but not the + # actual data), therefore the returned scalars are not + # the same (they are byte-swapped versions of each other). 
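+        # Editorial sketch (not upstream code): newbyteorder() flips only the
+        # dtype's byte-order flag, never the buffer, so the same bytes decode
+        # to a different code point:
+        #   >>> a = array(sixu('\u0900'), dtype='U1')   # doctest: +SKIP
+        #   >>> b = a.newbyteorder()                    # doctest: +SKIP
+        #   >>> a.tostring() == b.tostring()            # doctest: +SKIP
+        #   True
+        #   >>> a[()] == b[()]                          # doctest: +SKIP
+        #   False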
+ self.assertTrue(ua[()] != ua2[()]) + ua3 = ua2.newbyteorder() + # Arrays must be equal after the round-trip + assert_equal(ua, ua3) + + def test_valuesSD(self): + """Check byteorder of single-dimensional objects""" + ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) + ua2 = ua.newbyteorder() + self.assertTrue(ua[0] != ua2[0]) + self.assertTrue(ua[-1] != ua2[-1]) + ua3 = ua2.newbyteorder() + # Arrays must be equal after the round-trip + assert_equal(ua, ua3) + + def test_valuesMD(self): + """Check byteorder of multi-dimensional objects""" + ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, + dtype='U%s' % self.ulen) + ua2 = ua.newbyteorder() + self.assertTrue(ua[0, 0, 0] != ua2[0, 0, 0]) + self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1]) + ua3 = ua2.newbyteorder() + # Arrays must be equal after the round-trip + assert_equal(ua, ua3) + + +class test_byteorder_1_ucs2(byteorder_values, TestCase): + """Check the byteorder in unicode (size 1, UCS2 values)""" + ulen = 1 + ucs_value = ucs2_value + + +class test_byteorder_1_ucs4(byteorder_values, TestCase): + """Check the byteorder in unicode (size 1, UCS4 values)""" + ulen = 1 + ucs_value = ucs4_value + + +class test_byteorder_2_ucs2(byteorder_values, TestCase): + """Check the byteorder in unicode (size 2, UCS2 values)""" + ulen = 2 + ucs_value = ucs2_value + + +class test_byteorder_2_ucs4(byteorder_values, TestCase): + """Check the byteorder in unicode (size 2, UCS4 values)""" + ulen = 2 + ucs_value = ucs4_value + + +class test_byteorder_1009_ucs2(byteorder_values, TestCase): + """Check the byteorder in unicode (size 1009, UCS2 values)""" + ulen = 1009 + ucs_value = ucs2_value + + +class test_byteorder_1009_ucs4(byteorder_values, TestCase): + """Check the byteorder in unicode (size 1009, UCS4 values)""" + ulen = 1009 + ucs_value = ucs4_value + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath.py new file mode 100644 index 0000000000000..30f3b0b135d5c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, 'umath.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath_tests.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath_tests.py new file mode 100644 index 0000000000000..9ae91de7e2b63 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath_tests.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, 'umath_tests.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ctypeslib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ctypeslib.py new file mode 100644 index 0000000000000..961fa601261f8 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ctypeslib.py @@ -0,0 +1,426 @@ +""" +============================ +``ctypes`` Utility Functions +============================ + +See Also +--------- +load_library : Load a C library. +ndpointer : Array restype/argtype with verification. +as_ctypes : Create a ctypes array from an ndarray. 
+as_array : Create an ndarray from a ctypes array. + +References +---------- +.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes + +Examples +-------- +Load the C library: + +>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP + +Our result type, an ndarray that must be of type double, be 1-dimensional +and is C-contiguous in memory: + +>>> array_1d_double = np.ctypeslib.ndpointer( +... dtype=np.double, +... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP + +Our C-function typically takes an array and updates its values +in-place. For example:: + + void foo_func(double* x, int length) + { + int i; + for (i = 0; i < length; i++) { + x[i] = i*i; + } + } + +We wrap it using: + +>>> _lib.foo_func.restype = None #doctest: +SKIP +>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP + +Then, we're ready to call ``foo_func``: + +>>> out = np.empty(15, dtype=np.double) +>>> _lib.foo_func(out, len(out)) #doctest: +SKIP + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library', + 'c_intp', 'as_ctypes', 'as_array'] + +import sys, os +from numpy import integer, ndarray, dtype as _dtype, deprecate, array +from numpy.core.multiarray import _flagdict, flagsobj + +try: + import ctypes +except ImportError: + ctypes = None + +if ctypes is None: + def _dummy(*args, **kwds): + """ + Dummy object that raises an ImportError if ctypes is not available. + + Raises + ------ + ImportError + If ctypes is not available. + + """ + raise ImportError("ctypes is not available.") + ctypes_load_library = _dummy + load_library = _dummy + as_ctypes = _dummy + as_array = _dummy + from numpy import intp as c_intp + _ndptr_base = object +else: + import numpy.core._internal as nic + c_intp = nic._getintp_ctype() + del nic + _ndptr_base = ctypes.c_void_p + + # Adapted from Albert Strasheim + def load_library(libname, loader_path): + if ctypes.__version__ < '1.0.1': + import warnings + warnings.warn("All features of ctypes interface may not work " \ + "with ctypes < 1.0.1") + + ext = os.path.splitext(libname)[1] + if not ext: + # Try to load library with platform-specific name, otherwise + # default to libname.[so|pyd]. Sometimes, these files are built + # erroneously on non-linux platforms. 
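+            # Editorial sketch (not upstream code; names illustrative): on
+            # CPython 3.4 / Linux the candidate list built below would be
+            #   ['libmystuff.cpython-34m.so', 'libmystuff.so']
+            # i.e. the Python-extension suffix is tried before the plain
+            # shared-library suffix, and the first existing path is handed
+            # to ctypes.cdll.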
+            from numpy.distutils.misc_util import get_shared_lib_extension
+            so_ext = get_shared_lib_extension()
+            libname_ext = [libname + so_ext]
+            # mac, windows and linux >= py3.2 shared library and loadable
+            # module have different extensions so try both
+            so_ext2 = get_shared_lib_extension(is_python_ext=True)
+            if not so_ext2 == so_ext:
+                libname_ext.insert(0, libname + so_ext2)
+        else:
+            libname_ext = [libname]
+
+        loader_path = os.path.abspath(loader_path)
+        if not os.path.isdir(loader_path):
+            libdir = os.path.dirname(loader_path)
+        else:
+            libdir = loader_path
+
+        for ln in libname_ext:
+            libpath = os.path.join(libdir, ln)
+            if os.path.exists(libpath):
+                try:
+                    return ctypes.cdll[libpath]
+                except OSError:
+                    ## defective lib file
+                    raise
+        ## if no successful return in the libname_ext loop:
+        raise OSError("no file with expected extension")
+
+    ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
+                                    'load_library')
+
+def _num_fromflags(flaglist):
+    num = 0
+    for val in flaglist:
+        num += _flagdict[val]
+    return num
+
+_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
+              'OWNDATA', 'UPDATEIFCOPY']
+def _flags_fromnum(num):
+    res = []
+    for key in _flagnames:
+        value = _flagdict[key]
+        if (num & value):
+            res.append(key)
+    return res
+
+
+class _ndptr(_ndptr_base):
+
+    def _check_retval_(self):
+        """This method is called when this class is used as the .restype
+        attribute for a shared-library function.  It constructs a numpy
+        array from a void pointer."""
+        return array(self)
+
+    @property
+    def __array_interface__(self):
+        return {'descr': self._dtype_.descr,
+                '__ref': self,
+                'strides': None,
+                'shape': self._shape_,
+                'version': 3,
+                'typestr': self._dtype_.descr[0][1],
+                'data': (self.value, False),
+                }
+
+    @classmethod
+    def from_param(cls, obj):
+        if not isinstance(obj, ndarray):
+            raise TypeError("argument must be an ndarray")
+        if cls._dtype_ is not None \
+               and obj.dtype != cls._dtype_:
+            raise TypeError("array must have data type %s" % cls._dtype_)
+        if cls._ndim_ is not None \
+               and obj.ndim != cls._ndim_:
+            raise TypeError("array must have %d dimension(s)" % cls._ndim_)
+        if cls._shape_ is not None \
+               and obj.shape != cls._shape_:
+            raise TypeError("array must have shape %s" % str(cls._shape_))
+        if cls._flags_ is not None \
+               and ((obj.flags.num & cls._flags_) != cls._flags_):
+            raise TypeError("array must have flags %s" %
+                            _flags_fromnum(cls._flags_))
+        return obj.ctypes
+
+
+# Factory for an array-checking class with from_param defined for
+# use with ctypes argtypes mechanism
+_pointer_type_cache = {}
+def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
+    """
+    Array-checking restype/argtypes.
+
+    An ndpointer instance is used to describe an ndarray in restypes
+    and argtypes specifications.  This approach is more flexible than
+    using, for example, ``POINTER(c_double)``, since several restrictions
+    can be specified, which are verified upon calling the ctypes function.
+    These include data type, number of dimensions, shape and flags.  If a
+    given array does not satisfy the specified restrictions,
+    a ``TypeError`` is raised.
+
+    Parameters
+    ----------
+    dtype : data-type, optional
+        Array data-type.
+    ndim : int, optional
+        Number of array dimensions.
+    shape : tuple of ints, optional
+        Array shape.
+    flags : str or tuple of str
+        Array flags; may be one or more of:
+
+          - C_CONTIGUOUS / C / CONTIGUOUS
+          - F_CONTIGUOUS / F / FORTRAN
+          - OWNDATA / O
+          - WRITEABLE / W
+          - ALIGNED / A
+          - UPDATEIFCOPY / U
+
+    Returns
+    -------
+    klass : ndpointer type object
+        A type object, which is an ``_ndptr`` instance containing
+        dtype, ndim, shape and flags information.
+
+    Raises
+    ------
+    TypeError
+        If a given array does not satisfy the specified restrictions.
+
+    Examples
+    --------
+    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
+    ...                                                  ndim=1,
+    ...                                                  flags='C_CONTIGUOUS')]
+    ... #doctest: +SKIP
+    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
+    ... #doctest: +SKIP
+
+    """
+
+    if dtype is not None:
+        dtype = _dtype(dtype)
+    num = None
+    if flags is not None:
+        if isinstance(flags, str):
+            flags = flags.split(',')
+        elif isinstance(flags, (int, integer)):
+            num = flags
+            flags = _flags_fromnum(num)
+        elif isinstance(flags, flagsobj):
+            num = flags.num
+            flags = _flags_fromnum(num)
+        if num is None:
+            try:
+                flags = [x.strip().upper() for x in flags]
+            except:
+                raise TypeError("invalid flags specification")
+            num = _num_fromflags(flags)
+    try:
+        return _pointer_type_cache[(dtype, ndim, shape, num)]
+    except KeyError:
+        pass
+    if dtype is None:
+        name = 'any'
+    elif dtype.names:
+        name = str(id(dtype))
+    else:
+        name = dtype.str
+    if ndim is not None:
+        name += "_%dd" % ndim
+    if shape is not None:
+        try:
+            strshape = [str(x) for x in shape]
+        except TypeError:
+            strshape = [str(shape)]
+            shape = (shape,)
+        shape = tuple(shape)
+        name += "_"+"x".join(strshape)
+    if flags is not None:
+        name += "_"+"_".join(flags)
+    else:
+        flags = []
+    klass = type("ndpointer_%s"%name, (_ndptr,),
+                 {"_dtype_": dtype,
+                  "_shape_" : shape,
+                  "_ndim_" : ndim,
+                  "_flags_" : num})
+    # Cache under the same composite key used for the lookup above; the
+    # original stored only ``dtype``, so cached classes could never be hit.
+    _pointer_type_cache[(dtype, ndim, shape, num)] = klass
+    return klass
+
+if ctypes is not None:
+    ct = ctypes
+    ################################################################
+    # simple types
+
+    # maps the numpy typecodes like '<f8' ...
[... remainder of numpy/ctypeslib.py and most of the numpy/distutils/__config__.py hunk lost to extraction; the hunk's tail resumes below ...]
+            if k == "sources" and len(v) > 200:
+                v = v[:60] + " ...\n... " + v[-60:]
+            print("    %s = %s" % (k,v))
\ No newline at end of file
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__init__.py
new file mode 100644
index 0000000000000..b43e08b052a1a
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__init__.py
@@ -0,0 +1,39 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+if sys.version_info[0] < 3:
+    from .__version__ import version as __version__
+    # Must import local ccompiler ASAP in order to get
+    # customized CCompiler.spawn effective.
+    from . import ccompiler
+    from . import unixccompiler
+
+    from .info import __doc__
+    from .npy_pkg_config import *
+
+    try:
+        import __config__
+        _INSTALLED = True
+    except ImportError:
+        _INSTALLED = False
+else:
+    from numpy.distutils.__version__ import version as __version__
+    # Must import local ccompiler ASAP in order to get
+    # customized CCompiler.spawn effective.
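+    # Editorial sketch (not upstream code): the "customization" is the
+    # monkey-patching performed in numpy.distutils.ccompiler below, e.g.
+    #   >>> from distutils.ccompiler import CCompiler   # doctest: +SKIP
+    #   >>> import numpy.distutils.ccompiler            # doctest: +SKIP
+    #   >>> CCompiler.spawn   # now the numpy version   # doctest: +SKIP
+    # which is why the import must happen before any compiler is created.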
+ import numpy.distutils.ccompiler + import numpy.distutils.unixccompiler + + from numpy.distutils.info import __doc__ + from numpy.distutils.npy_pkg_config import * + + try: + import numpy.distutils.__config__ + _INSTALLED = True + except ImportError: + _INSTALLED = False + +if _INSTALLED: + from numpy.testing import Tester + test = Tester().test + bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__version__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__version__.py new file mode 100644 index 0000000000000..969decbba20e7 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__version__.py @@ -0,0 +1,6 @@ +from __future__ import division, absolute_import, print_function + +major = 0 +minor = 4 +micro = 0 +version = '%(major)d.%(minor)d.%(micro)d' % (locals()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/ccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/ccompiler.py new file mode 100644 index 0000000000000..8484685c0f975 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/ccompiler.py @@ -0,0 +1,656 @@ +from __future__ import division, absolute_import, print_function + +import re +import os +import sys +import types +from copy import copy + +from distutils.ccompiler import * +from distutils import ccompiler +from distutils.errors import DistutilsExecError, DistutilsModuleError, \ + DistutilsPlatformError +from distutils.sysconfig import customize_compiler +from distutils.version import LooseVersion + +from numpy.distutils import log +from numpy.distutils.exec_command import exec_command +from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ + quote_args +from numpy.distutils.compat import get_exception + + +def replace_method(klass, method_name, func): + if sys.version_info[0] < 3: + m = types.MethodType(func, None, klass) + else: + # Py3k does not have unbound method anymore, MethodType does not work + m = lambda self, *args, **kw: func(self, *args, **kw) + setattr(klass, method_name, m) + +# Using customized CCompiler.spawn. +def CCompiler_spawn(self, cmd, display=None): + """ + Execute a command in a sub-process. + + Parameters + ---------- + cmd : str + The command to execute. + display : str or sequence of str, optional + The text to add to the log file kept by `numpy.distutils`. + If not given, `display` is equal to `cmd`. + + Returns + ------- + None + + Raises + ------ + DistutilsExecError + If the command failed, i.e. the exit status was not 0. + + """ + if display is None: + display = cmd + if is_sequence(display): + display = ' '.join(list(display)) + log.info(display) + s, o = exec_command(cmd) + if s: + if is_sequence(cmd): + cmd = ' '.join(list(cmd)) + try: + print(o) + except UnicodeError: + # When installing through pip, `o` can contain non-ascii chars + pass + if re.search('Too many open files', o): + msg = '\nTry rerunning setup command until build succeeds.' + else: + msg = '' + raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg)) + +replace_method(CCompiler, 'spawn', CCompiler_spawn) + +def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): + """ + Return the name of the object files for the given source files. + + Parameters + ---------- + source_filenames : list of str + The list of paths to source files. Paths can be either relative or + absolute, this is handled transparently. + strip_dir : bool, optional + Whether to strip the directory from the returned paths. 
If True, + the file name prepended by `output_dir` is returned. Default is False. + output_dir : str, optional + If given, this path is prepended to the returned paths to the + object files. + + Returns + ------- + obj_names : list of str + The list of paths to the object files corresponding to the source + files in `source_filenames`. + + """ + if output_dir is None: + output_dir = '' + obj_names = [] + for src_name in source_filenames: + base, ext = os.path.splitext(os.path.normpath(src_name)) + base = os.path.splitdrive(base)[1] # Chop off the drive + base = base[os.path.isabs(base):] # If abs, chop off leading / + if base.startswith('..'): + # Resolve starting relative path components, middle ones + # (if any) have been handled by os.path.normpath above. + i = base.rfind('..')+2 + d = base[:i] + d = os.path.basename(os.path.abspath(d)) + base = d + base[i:] + if ext not in self.src_extensions: + raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) + if strip_dir: + base = os.path.basename(base) + obj_name = os.path.join(output_dir, base + self.obj_extension) + obj_names.append(obj_name) + return obj_names + +replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) + +def CCompiler_compile(self, sources, output_dir=None, macros=None, + include_dirs=None, debug=0, extra_preargs=None, + extra_postargs=None, depends=None): + """ + Compile one or more source files. + + Please refer to the Python distutils API reference for more details. + + Parameters + ---------- + sources : list of str + A list of filenames + output_dir : str, optional + Path to the output directory. + macros : list of tuples + A list of macro definitions. + include_dirs : list of str, optional + The directories to add to the default include file search path for + this compilation only. + debug : bool, optional + Whether or not to output debug symbols in or alongside the object + file(s). + extra_preargs, extra_postargs : ? + Extra pre- and post-arguments. + depends : list of str, optional + A list of file names that all targets depend on. + + Returns + ------- + objects : list of str + A list of object file names, one per source file `sources`. + + Raises + ------ + CompileError + If compilation fails. + + """ + # This method is effective only with Python >=2.3 distutils. + # Any changes here should be applied also to fcompiler.compile + # method to support pre Python 2.3 distutils. 
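+    # Editorial sketch (not upstream code) of the call this wrapper serves:
+    #   >>> from distutils.ccompiler import new_compiler  # doctest: +SKIP
+    #   >>> cc = new_compiler()                           # doctest: +SKIP
+    #   >>> cc.compile(['foo.c'], output_dir='build')     # doctest: +SKIP
+    #   ['build/foo.o']
+    # The body below adds logging and Fortran awareness on top of the
+    # object-file bookkeeping inherited from distutils.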
+ if not sources: + return [] + # FIXME:RELATIVE_IMPORT + if sys.version_info[0] < 3: + from .fcompiler import FCompiler + else: + from numpy.distutils.fcompiler import FCompiler + if isinstance(self, FCompiler): + display = [] + for fc in ['f77', 'f90', 'fix']: + fcomp = getattr(self, 'compiler_'+fc) + if fcomp is None: + continue + display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) + display = '\n'.join(display) + else: + ccomp = self.compiler_so + display = "C compiler: %s\n" % (' '.join(ccomp),) + log.info(display) + macros, objects, extra_postargs, pp_opts, build = \ + self._setup_compile(output_dir, macros, include_dirs, sources, + depends, extra_postargs) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + display = "compile options: '%s'" % (' '.join(cc_args)) + if extra_postargs: + display += "\nextra options: '%s'" % (' '.join(extra_postargs)) + log.info(display) + + # build any sources in same order as they were originally specified + # especially important for fortran .f90 files using modules + if isinstance(self, FCompiler): + objects_to_build = list(build.keys()) + for obj in objects: + if obj in objects_to_build: + src, ext = build[obj] + if self.compiler_type=='absoft': + obj = cyg2win32(obj) + src = cyg2win32(src) + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + else: + for obj, (src, ext) in build.items(): + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + + # Return *all* object filenames, not just the ones we just built. + return objects + +replace_method(CCompiler, 'compile', CCompiler_compile) + +def CCompiler_customize_cmd(self, cmd, ignore=()): + """ + Customize compiler using distutils command. + + Parameters + ---------- + cmd : class instance + An instance inheriting from `distutils.cmd.Command`. + ignore : sequence of str, optional + List of `CCompiler` commands (without ``'set_'``) that should not be + altered. Strings that are checked for are: + ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', + 'rpath', 'link_objects')``. + + Returns + ------- + None + + """ + log.info('customize %s using %s' % (self.__class__.__name__, + cmd.__class__.__name__)) + def allow(attr): + return getattr(cmd, attr, None) is not None and attr not in ignore + + if allow('include_dirs'): + self.set_include_dirs(cmd.include_dirs) + if allow('define'): + for (name, value) in cmd.define: + self.define_macro(name, value) + if allow('undef'): + for macro in cmd.undef: + self.undefine_macro(macro) + if allow('libraries'): + self.set_libraries(self.libraries + cmd.libraries) + if allow('library_dirs'): + self.set_library_dirs(self.library_dirs + cmd.library_dirs) + if allow('rpath'): + self.set_runtime_library_dirs(cmd.rpath) + if allow('link_objects'): + self.set_link_objects(cmd.link_objects) + +replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) + +def _compiler_to_string(compiler): + props = [] + mx = 0 + keys = list(compiler.executables.keys()) + for key in ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch', + 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: + if key not in keys: + keys.append(key) + for key in keys: + if hasattr(compiler, key): + v = getattr(compiler, key) + mx = max(mx, len(key)) + props.append((key, repr(v))) + lines = [] + format = '%-' + repr(mx+1) + 's = %s' + for prop in props: + lines.append(format % prop) + return '\n'.join(lines) + +def CCompiler_show_customization(self): + """ + Print the compiler customizations to stdout. 
+ + Parameters + ---------- + None + + Returns + ------- + None + + Notes + ----- + Printing is only done if the distutils log threshold is < 2. + + """ + if 0: + for attrname in ['include_dirs', 'define', 'undef', + 'libraries', 'library_dirs', + 'rpath', 'link_objects']: + attr = getattr(self, attrname, None) + if not attr: + continue + log.info("compiler '%s' is set to %s" % (attrname, attr)) + try: + self.get_version() + except: + pass + if log._global_log.threshold<2: + print('*'*80) + print(self.__class__) + print(_compiler_to_string(self)) + print('*'*80) + +replace_method(CCompiler, 'show_customization', CCompiler_show_customization) + +def CCompiler_customize(self, dist, need_cxx=0): + """ + Do any platform-specific customization of a compiler instance. + + This method calls `distutils.sysconfig.customize_compiler` for + platform-specific customization, as well as optionally remove a flag + to suppress spurious warnings in case C++ code is being compiled. + + Parameters + ---------- + dist : object + This parameter is not used for anything. + need_cxx : bool, optional + Whether or not C++ has to be compiled. If so (True), the + ``"-Wstrict-prototypes"`` option is removed to prevent spurious + warnings. Default is False. + + Returns + ------- + None + + Notes + ----- + All the default options used by distutils can be extracted with:: + + from distutils import sysconfig + sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', + 'CCSHARED', 'LDSHARED', 'SO') + + """ + # See FCompiler.customize for suggested usage. + log.info('customize %s' % (self.__class__.__name__)) + customize_compiler(self) + if need_cxx: + # In general, distutils uses -Wstrict-prototypes, but this option is + # not valid for C++ code, only for C. Remove it if it's there to + # avoid a spurious warning on every compilation. + try: + self.compiler_so.remove('-Wstrict-prototypes') + except (AttributeError, ValueError): + pass + + if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: + if not self.compiler_cxx: + if self.compiler[0].startswith('gcc'): + a, b = 'gcc', 'g++' + else: + a, b = 'cc', 'c++' + self.compiler_cxx = [self.compiler[0].replace(a, b)]\ + + self.compiler[1:] + else: + if hasattr(self, 'compiler'): + log.warn("#### %s #######" % (self.compiler,)) + log.warn('Missing compiler_cxx fix for '+self.__class__.__name__) + return + +replace_method(CCompiler, 'customize', CCompiler_customize) + +def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): + """ + Simple matching of version numbers, for use in CCompiler and FCompiler. + + Parameters + ---------- + pat : str, optional + A regular expression matching version numbers. + Default is ``r'[-.\\d]+'``. + ignore : str, optional + A regular expression matching patterns to skip. + Default is ``''``, in which case nothing is skipped. + start : str, optional + A regular expression matching the start of where to start looking + for version numbers. + Default is ``''``, in which case searching is started at the + beginning of the version string given to `matcher`. + + Returns + ------- + matcher : callable + A function that is appropriate to use as the ``.version_match`` + attribute of a `CCompiler` class. `matcher` takes a single parameter, + a version string. 
+ + """ + def matcher(self, version_string): + # version string may appear in the second line, so getting rid + # of new lines: + version_string = version_string.replace('\n', ' ') + pos = 0 + if start: + m = re.match(start, version_string) + if not m: + return None + pos = m.end() + while True: + m = re.search(pat, version_string[pos:]) + if not m: + return None + if ignore and re.match(ignore, m.group(0)): + pos = m.end() + continue + break + return m.group(0) + return matcher + +def CCompiler_get_version(self, force=False, ok_status=[0]): + """ + Return compiler version, or None if compiler is not available. + + Parameters + ---------- + force : bool, optional + If True, force a new determination of the version, even if the + compiler already has a version attribute. Default is False. + ok_status : list of int, optional + The list of status values returned by the version look-up process + for which a version string is returned. If the status value is not + in `ok_status`, None is returned. Default is ``[0]``. + + Returns + ------- + version : str or None + Version string, in the format of `distutils.version.LooseVersion`. + + """ + if not force and hasattr(self, 'version'): + return self.version + self.find_executables() + try: + version_cmd = self.version_cmd + except AttributeError: + return None + if not version_cmd or not version_cmd[0]: + return None + try: + matcher = self.version_match + except AttributeError: + try: + pat = self.version_pattern + except AttributeError: + return None + def matcher(version_string): + m = re.match(pat, version_string) + if not m: + return None + version = m.group('version') + return version + + status, output = exec_command(version_cmd, use_tee=0) + + version = None + if status in ok_status: + version = matcher(output) + if version: + version = LooseVersion(version) + self.version = version + return version + +replace_method(CCompiler, 'get_version', CCompiler_get_version) + +def CCompiler_cxx_compiler(self): + """ + Return the C++ compiler. + + Parameters + ---------- + None + + Returns + ------- + cxx : class instance + The C++ compiler, as a `CCompiler` instance. 
+ + """ + if self.compiler_type=='msvc': return self + cxx = copy(self) + cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] + if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: + # AIX needs the ld_so_aix script included with Python + cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ + + cxx.linker_so[2:] + else: + cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] + return cxx + +replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) + +compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', + "Intel C Compiler for 32-bit applications") +compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', + "Intel C Itanium Compiler for Itanium-based applications") +compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', + "Intel C Compiler for 64-bit applications") +compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', + "PathScale Compiler for SiCortex-based applications") +ccompiler._default_compilers += (('linux.*', 'intel'), + ('linux.*', 'intele'), + ('linux.*', 'intelem'), + ('linux.*', 'pathcc')) + +if sys.platform == 'win32': + compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', + "Mingw32 port of GNU C Compiler for Win32"\ + "(for MSC built Python)") + if mingw32(): + # On windows platforms, we want to default to mingw32 (gcc) + # because msvc can't build blitz stuff. + log.info('Setting mingw32 as default compiler for nt.') + ccompiler._default_compilers = (('nt', 'mingw32'),) \ + + ccompiler._default_compilers + + +_distutils_new_compiler = new_compiler +def new_compiler (plat=None, + compiler=None, + verbose=0, + dry_run=0, + force=0): + # Try first C compilers from numpy.distutils. + if plat is None: + plat = os.name + try: + if compiler is None: + compiler = get_default_compiler(plat) + (module_name, class_name, long_description) = compiler_class[compiler] + except KeyError: + msg = "don't know how to compile C/C++ code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler" % compiler + raise DistutilsPlatformError(msg) + module_name = "numpy.distutils." 
+ module_name + try: + __import__ (module_name) + except ImportError: + msg = str(get_exception()) + log.info('%s in numpy.distutils; trying from distutils', + str(msg)) + module_name = module_name[6:] + try: + __import__(module_name) + except ImportError: + msg = str(get_exception()) + raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ + module_name) + try: + module = sys.modules[module_name] + klass = vars(module)[class_name] + except KeyError: + raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + + "in module '%s'") % (class_name, module_name)) + compiler = klass(None, dry_run, force) + log.debug('new_compiler returns %s' % (klass)) + return compiler + +ccompiler.new_compiler = new_compiler + +_distutils_gen_lib_options = gen_lib_options +def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): + library_dirs = quote_args(library_dirs) + runtime_library_dirs = quote_args(runtime_library_dirs) + r = _distutils_gen_lib_options(compiler, library_dirs, + runtime_library_dirs, libraries) + lib_opts = [] + for i in r: + if is_sequence(i): + lib_opts.extend(list(i)) + else: + lib_opts.append(i) + return lib_opts +ccompiler.gen_lib_options = gen_lib_options + +# Also fix up the various compiler modules, which do +# from distutils.ccompiler import gen_lib_options +# Don't bother with mwerks, as we don't support Classic Mac. +for _cc in ['msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: + _m = sys.modules.get('distutils.'+_cc+'compiler') + if _m is not None: + setattr(_m, 'gen_lib_options', gen_lib_options) + +_distutils_gen_preprocess_options = gen_preprocess_options +def gen_preprocess_options (macros, include_dirs): + include_dirs = quote_args(include_dirs) + return _distutils_gen_preprocess_options(macros, include_dirs) +ccompiler.gen_preprocess_options = gen_preprocess_options + +##Fix distutils.util.split_quoted: +# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears +# that removing this fix causes f2py problems on Windows XP (see ticket #723). +# Specifically, on WinXP when gfortran is installed in a directory path, which +# contains spaces, then f2py is unable to find it. +import re +import string +_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) +_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") +_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"') +_has_white_re = re.compile(r'\s') +def split_quoted(s): + s = s.strip() + words = [] + pos = 0 + + while s: + m = _wordchars_re.match(s, pos) + end = m.end() + if end == len(s): + words.append(s[:end]) + break + + if s[end] in string.whitespace: # unescaped, unquoted whitespace: now + words.append(s[:end]) # we definitely have a word delimiter + s = s[end:].lstrip() + pos = 0 + + elif s[end] == '\\': # preserve whatever is being escaped; + # will become part of the current word + s = s[:end] + s[end+1:] + pos = end+1 + + else: + if s[end] == "'": # slurp singly-quoted string + m = _squote_re.match(s, end) + elif s[end] == '"': # slurp doubly-quoted string + m = _dquote_re.match(s, end) + else: + raise RuntimeError("this can't happen (bad char '%c')" % s[end]) + + if m is None: + raise ValueError("bad string (mismatched %s quotes?)" % s[end]) + + (beg, end) = m.span() + if _has_white_re.search(s[beg+1:end-1]): + s = s[:beg] + s[beg+1:end-1] + s[end:] + pos = m.end() - 2 + else: + # Keeping quotes when a quoted word does not contain + # white-space. 
XXX: send a patch to distutils
+                pos = m.end()
+
+        if pos >= len(s):
+            words.append(s)
+            break
+
+    return words
+ccompiler.split_quoted = split_quoted
+##Fix distutils.util.split_quoted:
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/__init__.py
new file mode 100644
index 0000000000000..76a2600723def
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/__init__.py
@@ -0,0 +1,36 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
+
+distutils_all = [  #'build_py',
+                  'clean',
+                  'install_clib',
+                  'install_scripts',
+                  'bdist',
+                  'bdist_dumb',
+                  'bdist_wininst',
+                ]
+
+__import__('distutils.command', globals(), locals(), distutils_all)
+
+__all__ = ['build',
+           'config_compiler',
+           'config',
+           'build_src',
+           'build_py',
+           'build_ext',
+           'build_clib',
+           'build_scripts',
+           'install',
+           'install_data',
+           'install_headers',
+           'install_lib',
+           'bdist_rpm',
+           'sdist',
+          ] + distutils_all
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/autodist.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/autodist.py
new file mode 100644
index 0000000000000..1b9b1dd57c58d
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/autodist.py
@@ -0,0 +1,43 @@
+"""This module implements additional tests ala autoconf which can be useful.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+
+# We put them here since they could be easily reused outside numpy.distutils
+
+def check_inline(cmd):
+    """Return the inline identifier (may be empty)."""
+    cmd._check_compiler()
+    body = """
+#ifndef __cplusplus
+static %(inline)s int static_func (void)
+{
+    return 0;
+}
+%(inline)s int nostatic_func (void)
+{
+    return 0;
+}
+#endif"""
+
+    for kw in ['inline', '__inline__', '__inline']:
+        st = cmd.try_compile(body % {'inline': kw}, None, None)
+        if st:
+            return kw
+
+    return ''
+
+def check_compiler_gcc4(cmd):
+    """Return True if the C compiler is GCC 4.x."""
+    cmd._check_compiler()
+    body = """
+int
+main()
+{
+#if (! defined __GNUC__) || (__GNUC__ < 4)
+#error gcc >= 4 required
+#endif
+}
+"""
+    return cmd.try_compile(body, None, None)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/bdist_rpm.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/bdist_rpm.py
new file mode 100644
index 0000000000000..3e52a503b1721
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/bdist_rpm.py
@@ -0,0 +1,24 @@
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+if 'setuptools' in sys.modules:
+    from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+else:
+    from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+
+class bdist_rpm(old_bdist_rpm):
+
+    def _make_spec_file(self):
+        spec_file = old_bdist_rpm._make_spec_file(self)
+
+        # Replace hardcoded setup.py script name
+        # with the real setup script name.
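+        # Editorial example (not upstream code; the script name is
+        # illustrative): if the build was launched as
+        #   python setup_custom.py bdist_rpm
+        # then a generated spec line such as
+        #   %{__python} setup.py build
+        # is rewritten below to
+        #   %{__python} setup_custom.py build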
+ setup_py = os.path.basename(sys.argv[0]) + if setup_py == 'setup.py': + return spec_file + new_spec_file = [] + for line in spec_file: + line = line.replace('setup.py', setup_py) + new_spec_file.append(line) + return new_spec_file diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build.py new file mode 100644 index 0000000000000..b6912be15e41a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build.py @@ -0,0 +1,39 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +from distutils.command.build import build as old_build +from distutils.util import get_platform +from numpy.distutils.command.config_compiler import show_fortran_compilers + +class build(old_build): + + sub_commands = [('config_cc', lambda *args: True), + ('config_fc', lambda *args: True), + ('build_src', old_build.has_ext_modules), + ] + old_build.sub_commands + + user_options = old_build.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ] + + help_options = old_build.help_options + [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + def initialize_options(self): + old_build.initialize_options(self) + self.fcompiler = None + + def finalize_options(self): + build_scripts = self.build_scripts + old_build.finalize_options(self) + plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) + if build_scripts is None: + self.build_scripts = os.path.join(self.build_base, + 'scripts' + plat_specifier) + + def run(self): + old_build.run(self) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_clib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_clib.py new file mode 100644 index 0000000000000..84ca87250170e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_clib.py @@ -0,0 +1,284 @@ +""" Modified version of build_clib that handles fortran source files. 
+""" +from __future__ import division, absolute_import, print_function + +import os +from glob import glob +import shutil +from distutils.command.build_clib import build_clib as old_build_clib +from distutils.errors import DistutilsSetupError, DistutilsError, \ + DistutilsFileError + +from numpy.distutils import log +from distutils.dep_util import newer_group +from numpy.distutils.misc_util import filter_sources, has_f_sources,\ + has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \ + get_numpy_include_dirs + +# Fix Python distutils bug sf #1718574: +_l = old_build_clib.user_options +for _i in range(len(_l)): + if _l[_i][0] in ['build-clib', 'build-temp']: + _l[_i] = (_l[_i][0]+'=',)+_l[_i][1:] +# + +class build_clib(old_build_clib): + + description = "build C/C++/F libraries used by Python extensions" + + user_options = old_build_clib.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('inplace', 'i', 'Build in-place'), + ] + + boolean_options = old_build_clib.boolean_options + ['inplace'] + + def initialize_options(self): + old_build_clib.initialize_options(self) + self.fcompiler = None + self.inplace = 0 + return + + def have_f_sources(self): + for (lib_name, build_info) in self.libraries: + if has_f_sources(build_info.get('sources', [])): + return True + return False + + def have_cxx_sources(self): + for (lib_name, build_info) in self.libraries: + if has_cxx_sources(build_info.get('sources', [])): + return True + return False + + def run(self): + if not self.libraries: + return + + # Make sure that library sources are complete. + languages = [] + + # Make sure that extension sources are complete. + self.run_command('build_src') + + for (lib_name, build_info) in self.libraries: + l = build_info.get('language', None) + if l and l not in languages: languages.append(l) + + from distutils.ccompiler import new_compiler + self.compiler = new_compiler(compiler=self.compiler, + dry_run=self.dry_run, + force=self.force) + self.compiler.customize(self.distribution, + need_cxx=self.have_cxx_sources()) + + libraries = self.libraries + self.libraries = None + self.compiler.customize_cmd(self) + self.libraries = libraries + + self.compiler.show_customization() + + if self.have_f_sources(): + from numpy.distutils.fcompiler import new_fcompiler + self._f_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90='f90' in languages, + c_compiler=self.compiler) + if self._f_compiler is not None: + self._f_compiler.customize(self.distribution) + + libraries = self.libraries + self.libraries = None + self._f_compiler.customize_cmd(self) + self.libraries = libraries + + self._f_compiler.show_customization() + else: + self._f_compiler = None + + self.build_libraries(self.libraries) + + if self.inplace: + for l in self.distribution.installed_libraries: + libname = self.compiler.library_filename(l.name) + source = os.path.join(self.build_clib, libname) + target = os.path.join(l.target_dir, libname) + self.mkpath(l.target_dir) + shutil.copy(source, target) + + def get_source_files(self): + self.check_library_list(self.libraries) + filenames = [] + for lib in self.libraries: + filenames.extend(get_lib_source_files(lib)) + return filenames + + def build_libraries(self, libraries): + for (lib_name, build_info) in libraries: + self.build_a_library(build_info, lib_name, libraries) + + def build_a_library(self, build_info, lib_name, libraries): + # default compilers + compiler = self.compiler + 
fcompiler = self._f_compiler + + sources = build_info.get('sources') + if sources is None or not is_sequence(sources): + raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + + "'sources' must be present and must be " + + "a list of source filenames") % lib_name) + sources = list(sources) + + c_sources, cxx_sources, f_sources, fmodule_sources \ + = filter_sources(sources) + requiref90 = not not fmodule_sources or \ + build_info.get('language', 'c')=='f90' + + # save source type information so that build_ext can use it. + source_languages = [] + if c_sources: source_languages.append('c') + if cxx_sources: source_languages.append('c++') + if requiref90: source_languages.append('f90') + elif f_sources: source_languages.append('f77') + build_info['source_languages'] = source_languages + + lib_file = compiler.library_filename(lib_name, + output_dir=self.build_clib) + depends = sources + build_info.get('depends', []) + if not (self.force or newer_group(depends, lib_file, 'newer')): + log.debug("skipping '%s' library (up-to-date)", lib_name) + return + else: + log.info("building '%s' library", lib_name) + + config_fc = build_info.get('config_fc', {}) + if fcompiler is not None and config_fc: + log.info('using additional config_fc from setup script '\ + 'for fortran compiler: %s' \ + % (config_fc,)) + from numpy.distutils.fcompiler import new_fcompiler + fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=requiref90, + c_compiler=self.compiler) + if fcompiler is not None: + dist = self.distribution + base_config_fc = dist.get_option_dict('config_fc').copy() + base_config_fc.update(config_fc) + fcompiler.customize(base_config_fc) + + # check availability of Fortran compilers + if (f_sources or fmodule_sources) and fcompiler is None: + raise DistutilsError("library %s has Fortran sources"\ + " but no Fortran compiler found" % (lib_name)) + + if fcompiler is not None: + fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or [] + fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or [] + + macros = build_info.get('macros') + include_dirs = build_info.get('include_dirs') + if include_dirs is None: + include_dirs = [] + extra_postargs = build_info.get('extra_compiler_args') or [] + + include_dirs.extend(get_numpy_include_dirs()) + # where compiled F90 module files are: + module_dirs = build_info.get('module_dirs') or [] + module_build_dir = os.path.dirname(lib_file) + if requiref90: self.mkpath(module_build_dir) + + if compiler.compiler_type=='msvc': + # this hack works around the msvc compiler attributes + # problem, msvc uses its own convention :( + c_sources += cxx_sources + cxx_sources = [] + + objects = [] + if c_sources: + log.info("compiling C sources") + objects = compiler.compile(c_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + + if cxx_sources: + log.info("compiling C++ sources") + cxx_compiler = compiler.cxx_compiler() + cxx_objects = cxx_compiler.compile(cxx_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + objects.extend(cxx_objects) + + if f_sources or fmodule_sources: + extra_postargs = [] + f_objects = [] + + if requiref90: + if fcompiler.module_dir_switch is None: + existing_modules = glob('*.mod') + extra_postargs += fcompiler.module_options(\ + 
module_dirs, module_build_dir) + + if fmodule_sources: + log.info("compiling Fortran 90 module sources") + f_objects += fcompiler.compile(fmodule_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + + if requiref90 and self._f_compiler.module_dir_switch is None: + # move new compiled F90 module files to module_build_dir + for f in glob('*.mod'): + if f in existing_modules: + continue + t = os.path.join(module_build_dir, f) + if os.path.abspath(f)==os.path.abspath(t): + continue + if os.path.isfile(t): + os.remove(t) + try: + self.move_file(f, module_build_dir) + except DistutilsFileError: + log.warn('failed to move %r to %r' \ + % (f, module_build_dir)) + + if f_sources: + log.info("compiling Fortran sources") + f_objects += fcompiler.compile(f_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs) + else: + f_objects = [] + + objects.extend(f_objects) + + # assume that default linker is suitable for + # linking Fortran object files + compiler.create_static_lib(objects, lib_name, + output_dir=self.build_clib, + debug=self.debug) + + # fix library dependencies + clib_libraries = build_info.get('libraries', []) + for lname, binfo in libraries: + if lname in clib_libraries: + clib_libraries.extend(binfo[1].get('libraries', [])) + if clib_libraries: + build_info['libraries'] = clib_libraries diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_ext.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_ext.py new file mode 100644 index 0000000000000..b48e4227a03bf --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_ext.py @@ -0,0 +1,503 @@ +""" Modified version of build_ext that handles fortran source files. + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +from glob import glob + +from distutils.dep_util import newer_group +from distutils.command.build_ext import build_ext as old_build_ext +from distutils.errors import DistutilsFileError, DistutilsSetupError,\ + DistutilsError +from distutils.file_util import copy_file + +from numpy.distutils import log +from numpy.distutils.exec_command import exec_command +from numpy.distutils.system_info import combine_paths +from numpy.distutils.misc_util import filter_sources, has_f_sources, \ + has_cxx_sources, get_ext_source_files, \ + get_numpy_include_dirs, is_sequence, get_build_architecture, \ + msvc_version +from numpy.distutils.command.config_compiler import show_fortran_compilers + +try: + set +except NameError: + from sets import Set as set + +class build_ext (old_build_ext): + + description = "build C/C++/F extensions (compile/link to build directory)" + + user_options = old_build_ext.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ] + + help_options = old_build_ext.help_options + [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + def initialize_options(self): + old_build_ext.initialize_options(self) + self.fcompiler = None + + def finalize_options(self): + incl_dirs = self.include_dirs + old_build_ext.finalize_options(self) + if incl_dirs is not None: + self.include_dirs.extend(self.distribution.include_dirs or []) + + def run(self): + if not self.extensions: + return + + # Make sure that extension sources are complete. 
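+        # Editorial note (assumption based on build_src's role elsewhere in
+        # this patch): build_src renders templated sources first, e.g. a
+        # 'foo.c.src' entry in ext.sources becomes 'build/src.*/foo.c', so
+        # the source lists consumed below contain only real files.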
+ self.run_command('build_src') + + if self.distribution.has_c_libraries(): + if self.inplace: + if self.distribution.have_run.get('build_clib'): + log.warn('build_clib already run, it is too late to ' \ + 'ensure in-place build of build_clib') + build_clib = self.distribution.get_command_obj('build_clib') + else: + build_clib = self.distribution.get_command_obj('build_clib') + build_clib.inplace = 1 + build_clib.ensure_finalized() + build_clib.run() + self.distribution.have_run['build_clib'] = 1 + + else: + self.run_command('build_clib') + build_clib = self.get_finalized_command('build_clib') + self.library_dirs.append(build_clib.build_clib) + else: + build_clib = None + + # Not including C libraries to the list of + # extension libraries automatically to prevent + # bogus linking commands. Extensions must + # explicitly specify the C libraries that they use. + + from distutils.ccompiler import new_compiler + from numpy.distutils.fcompiler import new_fcompiler + + compiler_type = self.compiler + # Initialize C compiler: + self.compiler = new_compiler(compiler=compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) + self.compiler.customize(self.distribution) + self.compiler.customize_cmd(self) + self.compiler.show_customization() + + # Create mapping of libraries built by build_clib: + clibs = {} + if build_clib is not None: + for libname, build_info in build_clib.libraries or []: + if libname in clibs and clibs[libname] != build_info: + log.warn('library %r defined more than once,'\ + ' overwriting build_info\n%s... \nwith\n%s...' \ + % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) + clibs[libname] = build_info + # .. and distribution libraries: + for libname, build_info in self.distribution.libraries or []: + if libname in clibs: + # build_clib libraries have a precedence before distribution ones + continue + clibs[libname] = build_info + + # Determine if C++/Fortran 77/Fortran 90 compilers are needed. + # Update extension libraries, library_dirs, and macros. + all_languages = set() + for ext in self.extensions: + ext_languages = set() + c_libs = [] + c_lib_dirs = [] + macros = [] + for libname in ext.libraries: + if libname in clibs: + binfo = clibs[libname] + c_libs += binfo.get('libraries', []) + c_lib_dirs += binfo.get('library_dirs', []) + for m in binfo.get('macros', []): + if m not in macros: + macros.append(m) + + for l in clibs.get(libname, {}).get('source_languages', []): + ext_languages.add(l) + if c_libs: + new_c_libs = ext.libraries + c_libs + log.info('updating extension %r libraries from %r to %r' + % (ext.name, ext.libraries, new_c_libs)) + ext.libraries = new_c_libs + ext.library_dirs = ext.library_dirs + c_lib_dirs + if macros: + log.info('extending extension %r defined_macros with %r' + % (ext.name, macros)) + ext.define_macros = ext.define_macros + macros + + # determine extension languages + if has_f_sources(ext.sources): + ext_languages.add('f77') + if has_cxx_sources(ext.sources): + ext_languages.add('c++') + l = ext.language or self.compiler.detect_language(ext.sources) + if l: + ext_languages.add(l) + # reset language attribute for choosing proper linker + if 'c++' in ext_languages: + ext_language = 'c++' + elif 'f90' in ext_languages: + ext_language = 'f90' + elif 'f77' in ext_languages: + ext_language = 'f77' + else: + ext_language = 'c' # default + if l and l != ext_language and ext.language: + log.warn('resetting extension %r language from %r to %r.' 
% + (ext.name, l, ext_language)) + ext.language = ext_language + # global language + all_languages.update(ext_languages) + + need_f90_compiler = 'f90' in all_languages + need_f77_compiler = 'f77' in all_languages + need_cxx_compiler = 'c++' in all_languages + + # Initialize C++ compiler: + if need_cxx_compiler: + self._cxx_compiler = new_compiler(compiler=compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) + compiler = self._cxx_compiler + compiler.customize(self.distribution, need_cxx=need_cxx_compiler) + compiler.customize_cmd(self) + compiler.show_customization() + self._cxx_compiler = compiler.cxx_compiler() + else: + self._cxx_compiler = None + + # Initialize Fortran 77 compiler: + if need_f77_compiler: + ctype = self.fcompiler + self._f77_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=False, + c_compiler=self.compiler) + fcompiler = self._f77_compiler + if fcompiler: + ctype = fcompiler.compiler_type + fcompiler.customize(self.distribution) + if fcompiler and fcompiler.get_version(): + fcompiler.customize_cmd(self) + fcompiler.show_customization() + else: + self.warn('f77_compiler=%s is not available.' % + (ctype)) + self._f77_compiler = None + else: + self._f77_compiler = None + + # Initialize Fortran 90 compiler: + if need_f90_compiler: + ctype = self.fcompiler + self._f90_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=True, + c_compiler = self.compiler) + fcompiler = self._f90_compiler + if fcompiler: + ctype = fcompiler.compiler_type + fcompiler.customize(self.distribution) + if fcompiler and fcompiler.get_version(): + fcompiler.customize_cmd(self) + fcompiler.show_customization() + else: + self.warn('f90_compiler=%s is not available.' % + (ctype)) + self._f90_compiler = None + else: + self._f90_compiler = None + + # Build extensions + self.build_extensions() + + + def swig_sources(self, sources): + # Do nothing. Swig sources have beed handled in build_src command. + return sources + + def build_extension(self, ext): + sources = ext.sources + if sources is None or not is_sequence(sources): + raise DistutilsSetupError( + ("in 'ext_modules' option (extension '%s'), " + + "'sources' must be present and must be " + + "a list of source filenames") % ext.name) + sources = list(sources) + + if not sources: + return + + fullname = self.get_ext_fullname(ext.name) + if self.inplace: + modpath = fullname.split('.') + package = '.'.join(modpath[0:-1]) + base = modpath[-1] + build_py = self.get_finalized_command('build_py') + package_dir = build_py.get_package_dir(package) + ext_filename = os.path.join(package_dir, + self.get_ext_filename(base)) + else: + ext_filename = os.path.join(self.build_lib, + self.get_ext_filename(fullname)) + depends = sources + ext.depends + + if not (self.force or newer_group(depends, ext_filename, 'newer')): + log.debug("skipping '%s' extension (up-to-date)", ext.name) + return + else: + log.info("building '%s' extension", ext.name) + + extra_args = ext.extra_compile_args or [] + macros = ext.define_macros[:] + for undef in ext.undef_macros: + macros.append((undef,)) + + c_sources, cxx_sources, f_sources, fmodule_sources = \ + filter_sources(ext.sources) + + + + if self.compiler.compiler_type=='msvc': + if cxx_sources: + # Needed to compile kiva.agg._agg extension. 
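Stepping back from the MSVC-specific workaround that follows, the linker-selection bookkeeping in run() above reduces to a fixed precedence: C++ outranks Fortran 90, which outranks Fortran 77, with plain C as the fallback. A standalone restatement of that rule (helper name invented for illustration):

    def pick_link_language(ext_languages):
        # Same precedence as build_ext.run() above: the most demanding
        # runtime in the extension wins the right to drive the final link.
        for lang in ('c++', 'f90', 'f77'):
            if lang in ext_languages:
                return lang
        return 'c'

    assert pick_link_language({'f77', 'f90'}) == 'f90'
    assert pick_link_language(set()) == 'c'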
+ extra_args.append('/Zm1000') + # this hack works around the msvc compiler attributes + # problem, msvc uses its own convention :( + c_sources += cxx_sources + cxx_sources = [] + + # Set Fortran/C++ compilers for compilation and linking. + if ext.language=='f90': + fcompiler = self._f90_compiler + elif ext.language=='f77': + fcompiler = self._f77_compiler + else: # in case ext.language is c++, for instance + fcompiler = self._f90_compiler or self._f77_compiler + if fcompiler is not None: + fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext, 'extra_f77_compile_args') else [] + fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext, 'extra_f90_compile_args') else [] + cxx_compiler = self._cxx_compiler + + # check for the availability of required compilers + if cxx_sources and cxx_compiler is None: + raise DistutilsError("extension %r has C++ sources" \ + "but no C++ compiler found" % (ext.name)) + if (f_sources or fmodule_sources) and fcompiler is None: + raise DistutilsError("extension %r has Fortran sources " \ + "but no Fortran compiler found" % (ext.name)) + if ext.language in ['f77', 'f90'] and fcompiler is None: + self.warn("extension %r has Fortran libraries " \ + "but no Fortran linker found, using default linker" % (ext.name)) + if ext.language=='c++' and cxx_compiler is None: + self.warn("extension %r has C++ libraries " \ + "but no C++ linker found, using default linker" % (ext.name)) + + kws = {'depends':ext.depends} + output_dir = self.build_temp + + include_dirs = ext.include_dirs + get_numpy_include_dirs() + + c_objects = [] + if c_sources: + log.info("compiling C sources") + c_objects = self.compiler.compile(c_sources, + output_dir=output_dir, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args, + **kws) + + if cxx_sources: + log.info("compiling C++ sources") + c_objects += cxx_compiler.compile(cxx_sources, + output_dir=output_dir, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args, + **kws) + + extra_postargs = [] + f_objects = [] + if fmodule_sources: + log.info("compiling Fortran 90 module sources") + module_dirs = ext.module_dirs[:] + module_build_dir = os.path.join( + self.build_temp, os.path.dirname( + self.get_ext_filename(fullname))) + + self.mkpath(module_build_dir) + if fcompiler.module_dir_switch is None: + existing_modules = glob('*.mod') + extra_postargs += fcompiler.module_options( + module_dirs, module_build_dir) + f_objects += fcompiler.compile(fmodule_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + depends=ext.depends) + + if fcompiler.module_dir_switch is None: + for f in glob('*.mod'): + if f in existing_modules: + continue + t = os.path.join(module_build_dir, f) + if os.path.abspath(f)==os.path.abspath(t): + continue + if os.path.isfile(t): + os.remove(t) + try: + self.move_file(f, module_build_dir) + except DistutilsFileError: + log.warn('failed to move %r to %r' % + (f, module_build_dir)) + if f_sources: + log.info("compiling Fortran sources") + f_objects += fcompiler.compile(f_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + depends=ext.depends) + + objects = c_objects + f_objects + + if ext.extra_objects: + objects.extend(ext.extra_objects) + extra_args = ext.extra_link_args or [] + libraries = self.get_libraries(ext)[:] + 
library_dirs = ext.library_dirs[:] + + linker = self.compiler.link_shared_object + # Always use system linker when using MSVC compiler. + if self.compiler.compiler_type=='msvc': + # expand libraries with fcompiler libraries as we are + # not using fcompiler linker + self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs) + + elif ext.language in ['f77', 'f90'] and fcompiler is not None: + linker = fcompiler.link_shared_object + if ext.language=='c++' and cxx_compiler is not None: + linker = cxx_compiler.link_shared_object + + if sys.version[:3]>='2.3': + kws = {'target_lang':ext.language} + else: + kws = {} + + linker(objects, ext_filename, + libraries=libraries, + library_dirs=library_dirs, + runtime_library_dirs=ext.runtime_library_dirs, + extra_postargs=extra_args, + export_symbols=self.get_export_symbols(ext), + debug=self.debug, + build_temp=self.build_temp,**kws) + + def _add_dummy_mingwex_sym(self, c_sources): + build_src = self.get_finalized_command("build_src").build_src + build_clib = self.get_finalized_command("build_clib").build_clib + objects = self.compiler.compile([os.path.join(build_src, + "gfortran_vs2003_hack.c")], + output_dir=self.build_temp) + self.compiler.create_static_lib(objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) + + def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, + c_library_dirs): + if fcompiler is None: return + + for libname in c_libraries: + if libname.startswith('msvc'): continue + fileexists = False + for libdir in c_library_dirs or []: + libfile = os.path.join(libdir, '%s.lib' % (libname)) + if os.path.isfile(libfile): + fileexists = True + break + if fileexists: continue + # make g77-compiled static libs available to MSVC + fileexists = False + for libdir in c_library_dirs: + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) + if os.path.isfile(libfile): + # copy libname.a file to name.lib so that MSVC linker + # can find it + libfile2 = os.path.join(self.build_temp, libname + '.lib') + copy_file(libfile, libfile2) + if self.build_temp not in c_library_dirs: + c_library_dirs.append(self.build_temp) + fileexists = True + break + if fileexists: continue + log.warn('could not find library %r in directories %s' + % (libname, c_library_dirs)) + + # Always use system linker when using MSVC compiler. 
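Both this MSVC branch and the Fortran-runtime handling that follows rest on a pure naming translation: a GNU-style static archive libfoo.a is copied into the build tree as foo.lib, the name the MSVC linker actually searches for. As a standalone sketch (function and paths are illustrative, not numpy API):

    import os
    import shutil

    def gnu_archive_to_msvc(libfile, build_temp):
        # 'libfoo.a' -> '<build_temp>/foo.lib'; the archive contents are
        # untouched, only the file name changes so MSVC's linker finds it.
        name = os.path.basename(libfile)[3:-2]      # strip 'lib' and '.a'
        dst = os.path.join(build_temp, name + '.lib')
        if not os.path.isfile(dst):
            shutil.copyfile(libfile, dst)
        return dst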
+ f_lib_dirs = [] + for dir in fcompiler.library_dirs: + # correct path when compiling in Cygwin but with normal Win + # Python + if dir.startswith('/usr/lib'): + s, o = exec_command(['cygpath', '-w', dir], use_tee=False) + if not s: + dir = o + f_lib_dirs.append(dir) + c_library_dirs.extend(f_lib_dirs) + + # make g77-compiled static libs available to MSVC + for lib in fcompiler.libraries: + if not lib.startswith('msvc'): + c_libraries.append(lib) + p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') + if p: + dst_name = os.path.join(self.build_temp, lib + '.lib') + if not os.path.isfile(dst_name): + copy_file(p[0], dst_name) + if self.build_temp not in c_library_dirs: + c_library_dirs.append(self.build_temp) + + def get_source_files (self): + self.check_extensions_list(self.extensions) + filenames = [] + for ext in self.extensions: + filenames.extend(get_ext_source_files(ext)) + return filenames + + def get_outputs (self): + self.check_extensions_list(self.extensions) + + outputs = [] + for ext in self.extensions: + if not ext.sources: + continue + fullname = self.get_ext_fullname(ext.name) + outputs.append(os.path.join(self.build_lib, + self.get_ext_filename(fullname))) + return outputs diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_py.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_py.py new file mode 100644 index 0000000000000..54dcde4350839 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_py.py @@ -0,0 +1,33 @@ +from __future__ import division, absolute_import, print_function + +from distutils.command.build_py import build_py as old_build_py +from numpy.distutils.misc_util import is_string + +class build_py(old_build_py): + + def run(self): + build_src = self.get_finalized_command('build_src') + if build_src.py_modules_dict and self.packages is None: + self.packages = list(build_src.py_modules_dict.keys ()) + old_build_py.run(self) + + def find_package_modules(self, package, package_dir): + modules = old_build_py.find_package_modules(self, package, package_dir) + + # Find build_src generated *.py files. + build_src = self.get_finalized_command('build_src') + modules += build_src.py_modules_dict.get(package, []) + + return modules + + def find_modules(self): + old_py_modules = self.py_modules[:] + new_py_modules = [_m for _m in self.py_modules if is_string(_m)] + self.py_modules[:] = new_py_modules + modules = old_build_py.find_modules(self) + self.py_modules[:] = old_py_modules + + return modules + + # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple + # and item[2] is source file. diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_scripts.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_scripts.py new file mode 100644 index 0000000000000..c8b25fc719b59 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_scripts.py @@ -0,0 +1,51 @@ +""" Modified version of build_scripts that handles building scripts from functions. 
+ +""" +from __future__ import division, absolute_import, print_function + +from distutils.command.build_scripts import build_scripts as old_build_scripts +from numpy.distutils import log +from numpy.distutils.misc_util import is_string + +class build_scripts(old_build_scripts): + + def generate_scripts(self, scripts): + new_scripts = [] + func_scripts = [] + for script in scripts: + if is_string(script): + new_scripts.append(script) + else: + func_scripts.append(script) + if not func_scripts: + return new_scripts + + build_dir = self.build_dir + self.mkpath(build_dir) + for func in func_scripts: + script = func(build_dir) + if not script: + continue + if is_string(script): + log.info(" adding '%s' to scripts" % (script,)) + new_scripts.append(script) + else: + [log.info(" adding '%s' to scripts" % (s,)) for s in script] + new_scripts.extend(list(script)) + return new_scripts + + def run (self): + if not self.scripts: + return + + self.scripts = self.generate_scripts(self.scripts) + # Now make sure that the distribution object has this list of scripts. + # setuptools' develop command requires that this be a list of filenames, + # not functions. + self.distribution.scripts = self.scripts + + return old_build_scripts.run(self) + + def get_source_files(self): + from numpy.distutils.misc_util import get_script_files + return get_script_files(self.scripts) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_src.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_src.py new file mode 100644 index 0000000000000..7463a0e1745f9 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_src.py @@ -0,0 +1,806 @@ +""" Build swig, f2py, pyrex sources. +""" +from __future__ import division, absolute_import, print_function + +import os +import re +import sys +import shlex +import copy + +from distutils.command import build_ext +from distutils.dep_util import newer_group, newer +from distutils.util import get_platform +from distutils.errors import DistutilsError, DistutilsSetupError + +def have_pyrex(): + try: + import Pyrex.Compiler.Main + return True + except ImportError: + return False + +# this import can't be done here, as it uses numpy stuff only available +# after it's installed +#import numpy.f2py +from numpy.distutils import log +from numpy.distutils.misc_util import fortran_ext_match, \ + appendpath, is_string, is_sequence, get_cmd +from numpy.distutils.from_template import process_file as process_f_file +from numpy.distutils.conv_template import process_file as process_c_file + +def subst_vars(target, source, d): + """Substitute any occurence of @foo@ by d['foo'] from source file into + target.""" + var = re.compile('@([a-zA-Z_]+)@') + fs = open(source, 'r') + try: + ft = open(target, 'w') + try: + for l in fs: + m = var.search(l) + if m: + ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) + else: + ft.write(l) + finally: + ft.close() + finally: + fs.close() + +class build_src(build_ext.build_ext): + + description = "build sources from SWIG, F2PY files or a function" + + user_options = [ + ('build-src=', 'd', "directory to \"build\" sources to"), + ('f2py-opts=', None, "list of f2py command line options"), + ('swig=', None, "path to the SWIG executable"), + ('swig-opts=', None, "list of SWIG command line options"), + ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), + ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete + ('swigflags=', None, 
"additional flags to swig (use --swig-opts= instead)"), # obsolete + ('force', 'f', "forcibly build everything (ignore file timestamps)"), + ('inplace', 'i', + "ignore build-lib and put compiled extensions into the source " + + "directory alongside your pure Python modules"), + ] + + boolean_options = ['force', 'inplace'] + + help_options = [] + + def initialize_options(self): + self.extensions = None + self.package = None + self.py_modules = None + self.py_modules_dict = None + self.build_src = None + self.build_lib = None + self.build_base = None + self.force = None + self.inplace = None + self.package_dir = None + self.f2pyflags = None # obsolete + self.f2py_opts = None + self.swigflags = None # obsolete + self.swig_opts = None + self.swig_cpp = None + self.swig = None + + def finalize_options(self): + self.set_undefined_options('build', + ('build_base', 'build_base'), + ('build_lib', 'build_lib'), + ('force', 'force')) + if self.package is None: + self.package = self.distribution.ext_package + self.extensions = self.distribution.ext_modules + self.libraries = self.distribution.libraries or [] + self.py_modules = self.distribution.py_modules or [] + self.data_files = self.distribution.data_files or [] + + if self.build_src is None: + plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) + self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) + + # py_modules_dict is used in build_py.find_package_modules + self.py_modules_dict = {} + + if self.f2pyflags: + if self.f2py_opts: + log.warn('ignoring --f2pyflags as --f2py-opts already used') + else: + self.f2py_opts = self.f2pyflags + self.f2pyflags = None + if self.f2py_opts is None: + self.f2py_opts = [] + else: + self.f2py_opts = shlex.split(self.f2py_opts) + + if self.swigflags: + if self.swig_opts: + log.warn('ignoring --swigflags as --swig-opts already used') + else: + self.swig_opts = self.swigflags + self.swigflags = None + + if self.swig_opts is None: + self.swig_opts = [] + else: + self.swig_opts = shlex.split(self.swig_opts) + + # use options from build_ext command + build_ext = self.get_finalized_command('build_ext') + if self.inplace is None: + self.inplace = build_ext.inplace + if self.swig_cpp is None: + self.swig_cpp = build_ext.swig_cpp + for c in ['swig', 'swig_opt']: + o = '--'+c.replace('_', '-') + v = getattr(build_ext, c, None) + if v: + if getattr(self, c): + log.warn('both build_src and build_ext define %s option' % (o)) + else: + log.info('using "%s=%s" option from build_ext command' % (o, v)) + setattr(self, c, v) + + def run(self): + log.info("build_src") + if not (self.extensions or self.libraries): + return + self.build_sources() + + def build_sources(self): + + if self.inplace: + self.get_package_dir = \ + self.get_finalized_command('build_py').get_package_dir + + self.build_py_modules_sources() + + for libname_info in self.libraries: + self.build_library_sources(*libname_info) + + if self.extensions: + self.check_extensions_list(self.extensions) + + for ext in self.extensions: + self.build_extension_sources(ext) + + self.build_data_files_sources() + self.build_npy_pkg_config() + + def build_data_files_sources(self): + if not self.data_files: + return + log.info('building data_files sources') + from numpy.distutils.misc_util import get_data_files + new_data_files = [] + for data in self.data_files: + if isinstance(data, str): + new_data_files.append(data) + elif isinstance(data, tuple): + d, files = data + if self.inplace: + build_dir = self.get_package_dir('.'.join(d.split(os.sep))) + 
else: + build_dir = os.path.join(self.build_src, d) + funcs = [f for f in files if hasattr(f, '__call__')] + files = [f for f in files if not hasattr(f, '__call__')] + for f in funcs: + if f.__code__.co_argcount==1: + s = f(build_dir) + else: + s = f() + if s is not None: + if isinstance(s, list): + files.extend(s) + elif isinstance(s, str): + files.append(s) + else: + raise TypeError(repr(s)) + filenames = get_data_files((d, files)) + new_data_files.append((d, filenames)) + else: + raise TypeError(repr(data)) + self.data_files[:] = new_data_files + + + def _build_npy_pkg_config(self, info, gd): + import shutil + template, install_dir, subst_dict = info + template_dir = os.path.dirname(template) + for k, v in gd.items(): + subst_dict[k] = v + + if self.inplace == 1: + generated_dir = os.path.join(template_dir, install_dir) + else: + generated_dir = os.path.join(self.build_src, template_dir, + install_dir) + generated = os.path.basename(os.path.splitext(template)[0]) + generated_path = os.path.join(generated_dir, generated) + if not os.path.exists(generated_dir): + os.makedirs(generated_dir) + + subst_vars(generated_path, template, subst_dict) + + # Where to install relatively to install prefix + full_install_dir = os.path.join(template_dir, install_dir) + return full_install_dir, generated_path + + def build_npy_pkg_config(self): + log.info('build_src: building npy-pkg config files') + + # XXX: another ugly workaround to circumvent distutils brain damage. We + # need the install prefix here, but finalizing the options of the + # install command when only building sources cause error. Instead, we + # copy the install command instance, and finalize the copy so that it + # does not disrupt how distutils want to do things when with the + # original install command instance. + install_cmd = copy.copy(get_cmd('install')) + if not install_cmd.finalized == 1: + install_cmd.finalize_options() + build_npkg = False + gd = {} + if self.inplace == 1: + top_prefix = '.' 
+ build_npkg = True + elif hasattr(install_cmd, 'install_libbase'): + top_prefix = install_cmd.install_libbase + build_npkg = True + + if build_npkg: + for pkg, infos in self.distribution.installed_pkg_config.items(): + pkg_path = self.distribution.package_dir[pkg] + prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) + d = {'prefix': prefix} + for info in infos: + install_dir, generated = self._build_npy_pkg_config(info, d) + self.distribution.data_files.append((install_dir, + [generated])) + + def build_py_modules_sources(self): + if not self.py_modules: + return + log.info('building py_modules sources') + new_py_modules = [] + for source in self.py_modules: + if is_sequence(source) and len(source)==3: + package, module_base, source = source + if self.inplace: + build_dir = self.get_package_dir(package) + else: + build_dir = os.path.join(self.build_src, + os.path.join(*package.split('.'))) + if hasattr(source, '__call__'): + target = os.path.join(build_dir, module_base + '.py') + source = source(target) + if source is None: + continue + modules = [(package, module_base, source)] + if package not in self.py_modules_dict: + self.py_modules_dict[package] = [] + self.py_modules_dict[package] += modules + else: + new_py_modules.append(source) + self.py_modules[:] = new_py_modules + + def build_library_sources(self, lib_name, build_info): + sources = list(build_info.get('sources', [])) + + if not sources: + return + + log.info('building library "%s" sources' % (lib_name)) + + sources = self.generate_sources(sources, (lib_name, build_info)) + + sources = self.template_sources(sources, (lib_name, build_info)) + + sources, h_files = self.filter_h_files(sources) + + if h_files: + log.info('%s - nothing done with h_files = %s', + self.package, h_files) + + #for f in h_files: + # self.distribution.headers.append((lib_name,f)) + + build_info['sources'] = sources + return + + def build_extension_sources(self, ext): + + sources = list(ext.sources) + + log.info('building extension "%s" sources' % (ext.name)) + + fullname = self.get_ext_fullname(ext.name) + + modpath = fullname.split('.') + package = '.'.join(modpath[0:-1]) + + if self.inplace: + self.ext_target_dir = self.get_package_dir(package) + + sources = self.generate_sources(sources, ext) + + sources = self.template_sources(sources, ext) + + sources = self.swig_sources(sources, ext) + + sources = self.f2py_sources(sources, ext) + + sources = self.pyrex_sources(sources, ext) + + sources, py_files = self.filter_py_files(sources) + + if package not in self.py_modules_dict: + self.py_modules_dict[package] = [] + modules = [] + for f in py_files: + module = os.path.splitext(os.path.basename(f))[0] + modules.append((package, module, f)) + self.py_modules_dict[package] += modules + + sources, h_files = self.filter_h_files(sources) + + if h_files: + log.info('%s - nothing done with h_files = %s', + package, h_files) + #for f in h_files: + # self.distribution.headers.append((package,f)) + + ext.sources = sources + + def generate_sources(self, sources, extension): + new_sources = [] + func_sources = [] + for source in sources: + if is_string(source): + new_sources.append(source) + else: + func_sources.append(source) + if not func_sources: + return new_sources + if self.inplace and not is_sequence(extension): + build_dir = self.ext_target_dir + else: + if is_sequence(extension): + name = extension[0] + # if 'include_dirs' not in extension[1]: + # extension[1]['include_dirs'] = [] + # incl_dirs = extension[1]['include_dirs'] + else: + name = 
extension.name + # incl_dirs = extension.include_dirs + #if self.build_src not in incl_dirs: + # incl_dirs.append(self.build_src) + build_dir = os.path.join(*([self.build_src]\ + +name.split('.')[:-1])) + self.mkpath(build_dir) + for func in func_sources: + source = func(extension, build_dir) + if not source: + continue + if is_sequence(source): + [log.info(" adding '%s' to sources." % (s,)) for s in source] + new_sources.extend(source) + else: + log.info(" adding '%s' to sources." % (source,)) + new_sources.append(source) + + return new_sources + + def filter_py_files(self, sources): + return self.filter_files(sources, ['.py']) + + def filter_h_files(self, sources): + return self.filter_files(sources, ['.h', '.hpp', '.inc']) + + def filter_files(self, sources, exts = []): + new_sources = [] + files = [] + for source in sources: + (base, ext) = os.path.splitext(source) + if ext in exts: + files.append(source) + else: + new_sources.append(source) + return new_sources, files + + def template_sources(self, sources, extension): + new_sources = [] + if is_sequence(extension): + depends = extension[1].get('depends') + include_dirs = extension[1].get('include_dirs') + else: + depends = extension.depends + include_dirs = extension.include_dirs + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.src': # Template file + if self.inplace: + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + self.mkpath(target_dir) + target_file = os.path.join(target_dir, os.path.basename(base)) + if (self.force or newer_group([source] + depends, target_file)): + if _f_pyf_ext_match(base): + log.info("from_template:> %s" % (target_file)) + outstr = process_f_file(source) + else: + log.info("conv_template:> %s" % (target_file)) + outstr = process_c_file(source) + fid = open(target_file, 'w') + fid.write(outstr) + fid.close() + if _header_ext_match(target_file): + d = os.path.dirname(target_file) + if d not in include_dirs: + log.info(" adding '%s' to include_dirs." 
% (d)) + include_dirs.append(d) + new_sources.append(target_file) + else: + new_sources.append(source) + return new_sources + + def pyrex_sources(self, sources, extension): + new_sources = [] + ext_name = extension.name.split('.')[-1] + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.pyx': + target_file = self.generate_a_pyrex_source(base, ext_name, + source, + extension) + new_sources.append(target_file) + else: + new_sources.append(source) + return new_sources + + def generate_a_pyrex_source(self, base, ext_name, source, extension): + if self.inplace or not have_pyrex(): + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + target_file = os.path.join(target_dir, ext_name + '.c') + depends = [source] + extension.depends + if self.force or newer_group(depends, target_file, 'newer'): + if have_pyrex(): + import Pyrex.Compiler.Main + log.info("pyrexc:> %s" % (target_file)) + self.mkpath(target_dir) + options = Pyrex.Compiler.Main.CompilationOptions( + defaults=Pyrex.Compiler.Main.default_options, + include_path=extension.include_dirs, + output_file=target_file) + pyrex_result = Pyrex.Compiler.Main.compile(source, + options=options) + if pyrex_result.num_errors != 0: + raise DistutilsError("%d errors while compiling %r with Pyrex" \ + % (pyrex_result.num_errors, source)) + elif os.path.isfile(target_file): + log.warn("Pyrex required for compiling %r but not available,"\ + " using old target %r"\ + % (source, target_file)) + else: + raise DistutilsError("Pyrex required for compiling %r"\ + " but notavailable" % (source,)) + return target_file + + def f2py_sources(self, sources, extension): + new_sources = [] + f2py_sources = [] + f_sources = [] + f2py_targets = {} + target_dirs = [] + ext_name = extension.name.split('.')[-1] + skip_f2py = 0 + + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.pyf': # F2PY interface file + if self.inplace: + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + if os.path.isfile(source): + name = get_f2py_modulename(source) + if name != ext_name: + raise DistutilsSetupError('mismatch of extension names: %s ' + 'provides %r but expected %r' % ( + source, name, ext_name)) + target_file = os.path.join(target_dir, name+'module.c') + else: + log.debug(' source %s does not exist: skipping f2py\'ing.' \ + % (source)) + name = ext_name + skip_f2py = 1 + target_file = os.path.join(target_dir, name+'module.c') + if not os.path.isfile(target_file): + log.warn(' target %s does not exist:\n '\ + 'Assuming %smodule.c was generated with '\ + '"build_src --inplace" command.' \ + % (target_file, name)) + target_dir = os.path.dirname(base) + target_file = os.path.join(target_dir, name+'module.c') + if not os.path.isfile(target_file): + raise DistutilsSetupError("%r missing" % (target_file,)) + log.info(' Yes! Using %r as up-to-date target.' 
\ + % (target_file)) + target_dirs.append(target_dir) + f2py_sources.append(source) + f2py_targets[source] = target_file + new_sources.append(target_file) + elif fortran_ext_match(ext): + f_sources.append(source) + else: + new_sources.append(source) + + if not (f2py_sources or f_sources): + return new_sources + + for d in target_dirs: + self.mkpath(d) + + f2py_options = extension.f2py_options + self.f2py_opts + + if self.distribution.libraries: + for name, build_info in self.distribution.libraries: + if name in extension.libraries: + f2py_options.extend(build_info.get('f2py_options', [])) + + log.info("f2py options: %s" % (f2py_options)) + + if f2py_sources: + if len(f2py_sources) != 1: + raise DistutilsSetupError( + 'only one .pyf file is allowed per extension module but got'\ + ' more: %r' % (f2py_sources,)) + source = f2py_sources[0] + target_file = f2py_targets[source] + target_dir = os.path.dirname(target_file) or '.' + depends = [source] + extension.depends + if (self.force or newer_group(depends, target_file, 'newer')) \ + and not skip_f2py: + log.info("f2py: %s" % (source)) + import numpy.f2py + numpy.f2py.run_main(f2py_options + + ['--build-dir', target_dir, source]) + else: + log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) + else: + #XXX TODO: --inplace support for sdist command + if is_sequence(extension): + name = extension[0] + else: name = extension.name + target_dir = os.path.join(*([self.build_src]\ + +name.split('.')[:-1])) + target_file = os.path.join(target_dir, ext_name + 'module.c') + new_sources.append(target_file) + depends = f_sources + extension.depends + if (self.force or newer_group(depends, target_file, 'newer')) \ + and not skip_f2py: + log.info("f2py:> %s" % (target_file)) + self.mkpath(target_dir) + import numpy.f2py + numpy.f2py.run_main(f2py_options + ['--lower', + '--build-dir', target_dir]+\ + ['-m', ext_name]+f_sources) + else: + log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ + % (target_file)) + + if not os.path.isfile(target_file): + raise DistutilsError("f2py target file %r not generated" % (target_file,)) + + target_c = os.path.join(self.build_src, 'fortranobject.c') + target_h = os.path.join(self.build_src, 'fortranobject.h') + log.info(" adding '%s' to sources." % (target_c)) + new_sources.append(target_c) + if self.build_src not in extension.include_dirs: + log.info(" adding '%s' to include_dirs." \ + % (self.build_src)) + extension.include_dirs.append(self.build_src) + + if not skip_f2py: + import numpy.f2py + d = os.path.dirname(numpy.f2py.__file__) + source_c = os.path.join(d, 'src', 'fortranobject.c') + source_h = os.path.join(d, 'src', 'fortranobject.h') + if newer(source_c, target_c) or newer(source_h, target_h): + self.mkpath(os.path.dirname(target_c)) + self.copy_file(source_c, target_c) + self.copy_file(source_h, target_h) + else: + if not os.path.isfile(target_c): + raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) + if not os.path.isfile(target_h): + raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) + + for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: + filename = os.path.join(target_dir, ext_name + name_ext) + if os.path.isfile(filename): + log.info(" adding '%s' to sources." % (filename)) + f_sources.append(filename) + + return new_sources + f_sources + + def swig_sources(self, sources, extension): + # Assuming SWIG 1.3.14 or later. 
See compatibility note in + # http://www.swig.org/Doc1.3/Python.html#Python_nn6 + + new_sources = [] + swig_sources = [] + swig_targets = {} + target_dirs = [] + py_files = [] # swig generated .py files + target_ext = '.c' + if '-c++' in extension.swig_opts: + typ = 'c++' + is_cpp = True + extension.swig_opts.remove('-c++') + elif self.swig_cpp: + typ = 'c++' + is_cpp = True + else: + typ = None + is_cpp = False + skip_swig = 0 + ext_name = extension.name.split('.')[-1] + + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.i': # SWIG interface file + # the code below assumes that the sources list + # contains not more than one .i SWIG interface file + if self.inplace: + target_dir = os.path.dirname(base) + py_target_dir = self.ext_target_dir + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + py_target_dir = target_dir + if os.path.isfile(source): + name = get_swig_modulename(source) + if name != ext_name[1:]: + raise DistutilsSetupError( + 'mismatch of extension names: %s provides %r' + ' but expected %r' % (source, name, ext_name[1:])) + if typ is None: + typ = get_swig_target(source) + is_cpp = typ=='c++' + else: + typ2 = get_swig_target(source) + if typ2 is None: + log.warn('source %r does not define swig target, assuming %s swig target' \ + % (source, typ)) + elif typ!=typ2: + log.warn('expected %r but source %r defines %r swig target' \ + % (typ, source, typ2)) + if typ2=='c++': + log.warn('resetting swig target to c++ (some targets may have .c extension)') + is_cpp = True + else: + log.warn('assuming that %r has c++ swig target' % (source)) + if is_cpp: + target_ext = '.cpp' + target_file = os.path.join(target_dir, '%s_wrap%s' \ + % (name, target_ext)) + else: + log.warn(' source %s does not exist: skipping swig\'ing.' \ + % (source)) + name = ext_name[1:] + skip_swig = 1 + target_file = _find_swig_target(target_dir, name) + if not os.path.isfile(target_file): + log.warn(' target %s does not exist:\n '\ + 'Assuming %s_wrap.{c,cpp} was generated with '\ + '"build_src --inplace" command.' \ + % (target_file, name)) + target_dir = os.path.dirname(base) + target_file = _find_swig_target(target_dir, name) + if not os.path.isfile(target_file): + raise DistutilsSetupError("%r missing" % (target_file,)) + log.warn(' Yes! Using %r as up-to-date target.' 
\ + % (target_file)) + target_dirs.append(target_dir) + new_sources.append(target_file) + py_files.append(os.path.join(py_target_dir, name+'.py')) + swig_sources.append(source) + swig_targets[source] = new_sources[-1] + else: + new_sources.append(source) + + if not swig_sources: + return new_sources + + if skip_swig: + return new_sources + py_files + + for d in target_dirs: + self.mkpath(d) + + swig = self.swig or self.find_swig() + swig_cmd = [swig, "-python"] + extension.swig_opts + if is_cpp: + swig_cmd.append('-c++') + for d in extension.include_dirs: + swig_cmd.append('-I'+d) + for source in swig_sources: + target = swig_targets[source] + depends = [source] + extension.depends + if self.force or newer_group(depends, target, 'newer'): + log.info("%s: %s" % (os.path.basename(swig) \ + + (is_cpp and '++' or ''), source)) + self.spawn(swig_cmd + self.swig_opts \ + + ["-o", target, '-outdir', py_target_dir, source]) + else: + log.debug(" skipping '%s' swig interface (up-to-date)" \ + % (source)) + + return new_sources + py_files + +_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match +_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match + +#### SWIG related auxiliary functions #### +_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', + re.I).match +_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search +_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search + +def get_swig_target(source): + f = open(source, 'r') + result = None + line = f.readline() + if _has_cpp_header(line): + result = 'c++' + if _has_c_header(line): + result = 'c' + f.close() + return result + +def get_swig_modulename(source): + f = open(source, 'r') + name = None + for line in f: + m = _swig_module_name_match(line) + if m: + name = m.group('name') + break + f.close() + return name + +def _find_swig_target(target_dir, name): + for ext in ['.cpp', '.c']: + target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) + if os.path.isfile(target): + break + return target + +#### F2PY related auxiliary functions #### + +_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', + re.I).match +_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?'\ + '__user__[\w_]*)', re.I).match + +def get_f2py_modulename(source): + name = None + f = open(source) + for line in f: + m = _f2py_module_name_match(line) + if m: + if _f2py_user_module_name_match(line): # skip *__user__* names + continue + name = m.group('name') + break + f.close() + return name + +########################################## diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config.py new file mode 100644 index 0000000000000..1b688bdd67adb --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config.py @@ -0,0 +1,476 @@ +# Added Fortran compiler support to config. Currently useful only for +# try_compile call. try_run works but is untested for most of Fortran +# compilers (they must define linker_exe first). 
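In a setup script these checks are reached through a finalized config command object; a sketch of a Fortran 77 probe, assuming such an instance cfg is already in hand (how it is obtained depends on the surrounding setup machinery):

    # LANG_EXT below maps 'f77' to '.f', so this body is written out as a
    # fixed-form Fortran source file and handed to the Fortran compiler.
    body = '''
          program probe
          end
    '''
    ok = cfg.try_compile(body, lang='f77')   # True if compilation succeeded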
+# Pearu Peterson +from __future__ import division, absolute_import, print_function + +import os, signal +import warnings +import sys + +from distutils.command.config import config as old_config +from distutils.command.config import LANG_EXT +from distutils import log +from distutils.file_util import copy_file +from distutils.ccompiler import CompileError, LinkError +import distutils +from numpy.distutils.exec_command import exec_command +from numpy.distutils.mingw32ccompiler import generate_manifest +from numpy.distutils.command.autodist import check_inline, check_compiler_gcc4 +from numpy.distutils.compat import get_exception + +LANG_EXT['f77'] = '.f' +LANG_EXT['f90'] = '.f90' + +class config(old_config): + old_config.user_options += [ + ('fcompiler=', None, "specify the Fortran compiler type"), + ] + + def initialize_options(self): + self.fcompiler = None + old_config.initialize_options(self) + + def try_run(self, body, headers=None, include_dirs=None, + libraries=None, library_dirs=None, lang="c"): + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ + "Usage of try_run is deprecated: please do not \n" \ + "use it anymore, and avoid configuration checks \n" \ + "involving running executable on the target machine.\n" \ + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning) + return old_config.try_run(self, body, headers, include_dirs, libraries, + library_dirs, lang) + + def _check_compiler (self): + old_config._check_compiler(self) + from numpy.distutils.fcompiler import FCompiler, new_fcompiler + + if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc': + # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: + # initialize call query_vcvarsall, which throws an IOError, and + # causes an error along the way without much information. We try to + # catch it here, hoping it is early enough, and print an helpful + # message instead of Error: None. + if not self.compiler.initialized: + try: + self.compiler.initialize() + except IOError: + e = get_exception() + msg = """\ +Could not initialize compiler instance: do you have Visual Studio +installed? If you are trying to build with MinGW, please use "python setup.py +build -c mingw32" instead. If you have Visual Studio installed, check it is +correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2, +VS 2010 for >= 3.3). + +Original exception was: %s, and the Compiler class was %s +============================================================================""" \ + % (e, self.compiler.__class__.__name__) + print ("""\ +============================================================================""") + raise distutils.errors.DistutilsPlatformError(msg) + + # After MSVC is initialized, add an explicit /MANIFEST to linker + # flags. See issues gh-4245 and gh-4101 for details. Also + # relevant are issues 4431 and 16296 on the Python bug tracker. 
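Concretely, the fix described above keys on msvc9compiler.get_build_version() (10 corresponds to Visual Studio 2010) and appends a single flag to each shared-link flag list. Its effect in isolation (helper name invented for illustration):

    def ensure_manifest_flag(*flag_lists):
        # VS2010+ linkers no longer emit the .manifest file by default,
        # but distutils' MSVC support still expects one, so ask explicitly.
        for ldflags in flag_lists:
            if '/MANIFEST' not in ldflags:
                ldflags.append('/MANIFEST')

    shared = ['/DLL']
    ensure_manifest_flag(shared)
    assert shared == ['/DLL', '/MANIFEST']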
+ from distutils import msvc9compiler + if msvc9compiler.get_build_version() >= 10: + for ldflags in [self.compiler.ldflags_shared, + self.compiler.ldflags_shared_debug]: + if '/MANIFEST' not in ldflags: + ldflags.append('/MANIFEST') + + if not isinstance(self.fcompiler, FCompiler): + self.fcompiler = new_fcompiler(compiler=self.fcompiler, + dry_run=self.dry_run, force=1, + c_compiler=self.compiler) + if self.fcompiler is not None: + self.fcompiler.customize(self.distribution) + if self.fcompiler.get_version(): + self.fcompiler.customize_cmd(self) + self.fcompiler.show_customization() + + def _wrap_method(self, mth, lang, args): + from distutils.ccompiler import CompileError + from distutils.errors import DistutilsExecError + save_compiler = self.compiler + if lang in ['f77', 'f90']: + self.compiler = self.fcompiler + try: + ret = mth(*((self,)+args)) + except (DistutilsExecError, CompileError): + msg = str(get_exception()) + self.compiler = save_compiler + raise CompileError + self.compiler = save_compiler + return ret + + def _compile (self, body, headers, include_dirs, lang): + return self._wrap_method(old_config._compile, lang, + (body, headers, include_dirs, lang)) + + def _link (self, body, + headers, include_dirs, + libraries, library_dirs, lang): + if self.compiler.compiler_type=='msvc': + libraries = (libraries or [])[:] + library_dirs = (library_dirs or [])[:] + if lang in ['f77', 'f90']: + lang = 'c' # always use system linker when using MSVC compiler + if self.fcompiler: + for d in self.fcompiler.library_dirs or []: + # correct path when compiling in Cygwin but with + # normal Win Python + if d.startswith('/usr/lib'): + s, o = exec_command(['cygpath', '-w', d], + use_tee=False) + if not s: d = o + library_dirs.append(d) + for libname in self.fcompiler.libraries or []: + if libname not in libraries: + libraries.append(libname) + for libname in libraries: + if libname.startswith('msvc'): continue + fileexists = False + for libdir in library_dirs or []: + libfile = os.path.join(libdir, '%s.lib' % (libname)) + if os.path.isfile(libfile): + fileexists = True + break + if fileexists: continue + # make g77-compiled static libs available to MSVC + fileexists = False + for libdir in library_dirs: + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) + if os.path.isfile(libfile): + # copy libname.a file to name.lib so that MSVC linker + # can find it + libfile2 = os.path.join(libdir, '%s.lib' % (libname)) + copy_file(libfile, libfile2) + self.temp_files.append(libfile2) + fileexists = True + break + if fileexists: continue + log.warn('could not find library %r in directories %s' \ + % (libname, library_dirs)) + elif self.compiler.compiler_type == 'mingw32': + generate_manifest(self) + return self._wrap_method(old_config._link, lang, + (body, headers, include_dirs, + libraries, library_dirs, lang)) + + def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): + self._check_compiler() + return self.try_compile( + "/* we need a dummy line to make distutils happy */", + [header], include_dirs) + + def check_decl(self, symbol, + headers=None, include_dirs=None): + self._check_compiler() + body = """ +int main() +{ +#ifndef %s + (void) %s; +#endif + ; + return 0; +}""" % (symbol, symbol) + + return self.try_compile(body, headers, include_dirs) + + def check_macro_true(self, symbol, + headers=None, include_dirs=None): + self._check_compiler() + body = """ +int main() +{ +#if %s +#else +#error false or undefined macro +#endif + ; + return 0; +}""" % (symbol,) + + return 
self.try_compile(body, headers, include_dirs) + + def check_type(self, type_name, headers=None, include_dirs=None, + library_dirs=None): + """Check type availability. Return True if the type can be compiled, + False otherwise""" + self._check_compiler() + + # First check the type can be compiled + body = r""" +int main() { + if ((%(name)s *) 0) + return 0; + if (sizeof (%(name)s)) + return 0; +} +""" % {'name': type_name} + + st = False + try: + try: + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + st = True + except distutils.errors.CompileError: + st = False + finally: + self._clean() + + return st + + def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): + """Check size of a given type.""" + self._check_compiler() + + # First check the type can be compiled + body = r""" +typedef %(type)s npy_check_sizeof_type; +int main () +{ + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; + test_array [0] = 0 + + ; + return 0; +} +""" + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + self._clean() + + if expected: + body = r""" +typedef %(type)s npy_check_sizeof_type; +int main () +{ + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; + test_array [0] = 0 + + ; + return 0; +} +""" + for size in expected: + try: + self._compile(body % {'type': type_name, 'size': size}, + headers, include_dirs, 'c') + self._clean() + return size + except CompileError: + pass + + # this fails to *compile* if size > sizeof(type) + body = r""" +typedef %(type)s npy_check_sizeof_type; +int main () +{ + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; + test_array [0] = 0 + + ; + return 0; +} +""" + + # The principle is simple: we first find low and high bounds of size + # for the type, where low/high are looked up on a log scale. Then, we + # do a binary search to find the exact size between low and high + low = 0 + mid = 0 + while True: + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + break + except CompileError: + #log.info("failure to test for bound %d" % mid) + low = mid + 1 + mid = 2 * mid + 1 + + high = mid + # Binary search: + while low != high: + mid = (high - low) // 2 + low + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + high = mid + except CompileError: + low = mid + 1 + return low + + def check_func(self, func, + headers=None, include_dirs=None, + libraries=None, library_dirs=None, + decl=False, call=False, call_args=None): + # clean up distutils's config a bit: add void to main(), and + # return a value. + self._check_compiler() + body = [] + if decl: + if type(decl) == str: + body.append(decl) + else: + body.append("int %s (void);" % func) + # Handle MSVC intrinsics: force MS compiler to make a function call. + # Useful to test for some functions when built with optimization on, to + # avoid build error because the intrinsic and our 'fake' test + # declaration do not match. 
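The type and size checks above never execute anything: they lean on C's prohibition of negative array sizes so that the predicate is decided entirely at compile time. Rendered for a concrete query, the generated probe is, in effect:

    # Worked instance of the probe template used by check_type_size above;
    # the concrete type and size are example values.
    probe = r'''
    typedef %(type)s npy_check_sizeof_type;
    int main (void)
    {
        /* negative array size => compile error unless the predicate holds */
        static int test_array[1 - 2 * !(sizeof(npy_check_sizeof_type) == %(size)d)];
        test_array[0] = 0;
        return 0;
    }
    '''
    print(probe % {'type': 'int', 'size': 4})  # compiles only where sizeof(int) == 4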
+ body.append("#ifdef _MSC_VER") + body.append("#pragma function(%s)" % func) + body.append("#endif") + body.append("int main (void) {") + if call: + if call_args is None: + call_args = '' + body.append(" %s(%s);" % (func, call_args)) + else: + body.append(" %s;" % func) + body.append(" return 0;") + body.append("}") + body = '\n'.join(body) + "\n" + + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) + + def check_funcs_once(self, funcs, + headers=None, include_dirs=None, + libraries=None, library_dirs=None, + decl=False, call=False, call_args=None): + """Check a list of functions at once. + + This is useful to speed up things, since all the functions in the funcs + list will be put in one compilation unit. + + Arguments + --------- + funcs : seq + list of functions to test + include_dirs : seq + list of header paths + libraries : seq + list of libraries to link the code snippet to + libraru_dirs : seq + list of library paths + decl : dict + for every (key, value), the declaration in the value will be + used for function in key. If a function is not in the + dictionay, no declaration will be used. + call : dict + for every item (f, value), if the value is True, a call will be + done to the function f. + """ + self._check_compiler() + body = [] + if decl: + for f, v in decl.items(): + if v: + body.append("int %s (void);" % f) + + # Handle MS intrinsics. See check_func for more info. + body.append("#ifdef _MSC_VER") + for func in funcs: + body.append("#pragma function(%s)" % func) + body.append("#endif") + + body.append("int main (void) {") + if call: + for f in funcs: + if f in call and call[f]: + if not (call_args and f in call_args and call_args[f]): + args = '' + else: + args = call_args[f] + body.append(" %s(%s);" % (f, args)) + else: + body.append(" %s;" % f) + else: + for f in funcs: + body.append(" %s;" % f) + body.append(" return 0;") + body.append("}") + body = '\n'.join(body) + "\n" + + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) + + def check_inline(self): + """Return the inline keyword recognized by the compiler, empty string + otherwise.""" + return check_inline(self) + + def check_compiler_gcc4(self): + """Return True if the C compiler is gcc >= 4.""" + return check_compiler_gcc4(self) + + def get_output(self, body, headers=None, include_dirs=None, + libraries=None, library_dirs=None, + lang="c", use_tee=None): + """Try to compile, link to an executable, and run a program + built from 'body' and 'headers'. Returns the exit status code + of the program and its output. 
+ """ + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ + "Usage of get_output is deprecated: please do not \n" \ + "use it anymore, and avoid configuration checks \n" \ + "involving running executable on the target machine.\n" \ + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning) + from distutils.ccompiler import CompileError, LinkError + self._check_compiler() + exitcode, output = 255, '' + try: + grabber = GrabStdout() + try: + src, obj, exe = self._link(body, headers, include_dirs, + libraries, library_dirs, lang) + grabber.restore() + except: + output = grabber.data + grabber.restore() + raise + exe = os.path.join('.', exe) + exitstatus, output = exec_command(exe, execute_in='.', + use_tee=use_tee) + if hasattr(os, 'WEXITSTATUS'): + exitcode = os.WEXITSTATUS(exitstatus) + if os.WIFSIGNALED(exitstatus): + sig = os.WTERMSIG(exitstatus) + log.error('subprocess exited with signal %d' % (sig,)) + if sig == signal.SIGINT: + # control-C + raise KeyboardInterrupt + else: + exitcode = exitstatus + log.info("success!") + except (CompileError, LinkError): + log.info("failure.") + self._clean() + return exitcode, output + +class GrabStdout(object): + + def __init__(self): + self.sys_stdout = sys.stdout + self.data = '' + sys.stdout = self + + def write (self, data): + self.sys_stdout.write(data) + self.data += data + + def flush (self): + self.sys_stdout.flush() + + def restore(self): + sys.stdout = self.sys_stdout diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config_compiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config_compiler.py new file mode 100644 index 0000000000000..5e638feccce04 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config_compiler.py @@ -0,0 +1,125 @@ +from __future__ import division, absolute_import, print_function + +from distutils.core import Command +from numpy.distutils import log + +#XXX: Linker flags + +def show_fortran_compilers(_cache=[]): + # Using cache to prevent infinite recursion + if _cache: return + _cache.append(1) + from numpy.distutils.fcompiler import show_fcompilers + import distutils.core + dist = distutils.core._setup_distribution + show_fcompilers(dist) + +class config_fc(Command): + """ Distutils command to hold user specified options + to Fortran compilers. + + config_fc command is used by the FCompiler.customize() method. 
+ """ + + description = "specify Fortran 77/Fortran 90 compiler information" + + user_options = [ + ('fcompiler=', None, "specify Fortran compiler type"), + ('f77exec=', None, "specify F77 compiler command"), + ('f90exec=', None, "specify F90 compiler command"), + ('f77flags=', None, "specify F77 compiler flags"), + ('f90flags=', None, "specify F90 compiler flags"), + ('opt=', None, "specify optimization flags"), + ('arch=', None, "specify architecture specific optimization flags"), + ('debug', 'g', "compile with debugging information"), + ('noopt', None, "compile without optimization"), + ('noarch', None, "compile without arch-dependent optimization"), + ] + + help_options = [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + boolean_options = ['debug', 'noopt', 'noarch'] + + def initialize_options(self): + self.fcompiler = None + self.f77exec = None + self.f90exec = None + self.f77flags = None + self.f90flags = None + self.opt = None + self.arch = None + self.debug = None + self.noopt = None + self.noarch = None + + def finalize_options(self): + log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') + build_clib = self.get_finalized_command('build_clib') + build_ext = self.get_finalized_command('build_ext') + config = self.get_finalized_command('config') + build = self.get_finalized_command('build') + cmd_list = [self, config, build_clib, build_ext, build] + for a in ['fcompiler']: + l = [] + for c in cmd_list: + v = getattr(c, a) + if v is not None: + if not isinstance(v, str): v = v.compiler_type + if v not in l: l.append(v) + if not l: v1 = None + else: v1 = l[0] + if len(l)>1: + log.warn(' commands have different --%s options: %s'\ + ', using first in list as default' % (a, l)) + if v1: + for c in cmd_list: + if getattr(c, a) is None: setattr(c, a, v1) + + def run(self): + # Do nothing. + return + +class config_cc(Command): + """ Distutils command to hold user specified options + to C/C++ compilers. + """ + + description = "specify C/C++ compiler information" + + user_options = [ + ('compiler=', None, "specify C/C++ compiler type"), + ] + + def initialize_options(self): + self.compiler = None + + def finalize_options(self): + log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') + build_clib = self.get_finalized_command('build_clib') + build_ext = self.get_finalized_command('build_ext') + config = self.get_finalized_command('config') + build = self.get_finalized_command('build') + cmd_list = [self, config, build_clib, build_ext, build] + for a in ['compiler']: + l = [] + for c in cmd_list: + v = getattr(c, a) + if v is not None: + if not isinstance(v, str): v = v.compiler_type + if v not in l: l.append(v) + if not l: v1 = None + else: v1 = l[0] + if len(l)>1: + log.warn(' commands have different --%s options: %s'\ + ', using first in list as default' % (a, l)) + if v1: + for c in cmd_list: + if getattr(c, a) is None: setattr(c, a, v1) + return + + def run(self): + # Do nothing. 
+ return diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/develop.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/develop.py new file mode 100644 index 0000000000000..1410ab2a00fd4 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/develop.py @@ -0,0 +1,17 @@ +""" Override the develop command from setuptools so we can ensure that our +generated files (from build_src or build_scripts) are properly converted to real +files with filenames. + +""" +from __future__ import division, absolute_import, print_function + +from setuptools.command.develop import develop as old_develop + +class develop(old_develop): + __doc__ = old_develop.__doc__ + def install_for_development(self): + # Build sources in-place, too. + self.reinitialize_command('build_src', inplace=1) + # Make sure scripts are built. + self.run_command('build_scripts') + old_develop.install_for_development(self) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/egg_info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/egg_info.py new file mode 100644 index 0000000000000..b7104de5be409 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/egg_info.py @@ -0,0 +1,11 @@ +from __future__ import division, absolute_import, print_function + +from setuptools.command.egg_info import egg_info as _egg_info + +class egg_info(_egg_info): + def run(self): + # We need to ensure that build_src has been executed in order to give + # setuptools' egg_info command real filenames instead of functions which + # generate files. + self.run_command("build_src") + _egg_info.run(self) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install.py new file mode 100644 index 0000000000000..a1dd47755c64a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install.py @@ -0,0 +1,82 @@ +from __future__ import division, absolute_import, print_function + +import sys +if 'setuptools' in sys.modules: + import setuptools.command.install as old_install_mod + have_setuptools = True +else: + import distutils.command.install as old_install_mod + have_setuptools = False +from distutils.file_util import write_file + +old_install = old_install_mod.install + +class install(old_install): + + # Always run install_clib - the command is cheap, so no need to bypass it; + # but it's not run by setuptools -- so it's run again in install_data + sub_commands = old_install.sub_commands + [ + ('install_clib', lambda x: True) + ] + + def finalize_options (self): + old_install.finalize_options(self) + self.install_lib = self.install_libbase + + def setuptools_run(self): + """ The setuptools version of the .run() method. + + We must pull in the entire code so we can override the level used in the + _getframe() call since we wrap this call by one more level. + """ + from distutils.command.install import install as distutils_install + + # Explicit request for old-style install? Just do it + if self.old_and_unmanageable or self.single_version_externally_managed: + return distutils_install.run(self) + + # Attempt to detect whether we were called from setup() or by another + # command. If we were called by setup(), our caller will be the + # 'run_command' method in 'distutils.dist', and *its* caller will be + # the 'run_commands' method. 
If we were called any other way, our + # immediate caller *might* be 'run_command', but it won't have been + # called by 'run_commands'. This is slightly kludgy, but seems to + # work. + # + caller = sys._getframe(3) + caller_module = caller.f_globals.get('__name__', '') + caller_name = caller.f_code.co_name + + if caller_module != 'distutils.dist' or caller_name!='run_commands': + # We weren't called from the command line or setup(), so we + # should run in backward-compatibility mode to support bdist_* + # commands. + distutils_install.run(self) + else: + self.do_egg_install() + + def run(self): + if not have_setuptools: + r = old_install.run(self) + else: + r = self.setuptools_run() + if self.record: + # bdist_rpm fails when INSTALLED_FILES contains + # paths with spaces. Such paths must be enclosed + # with double-quotes. + f = open(self.record, 'r') + lines = [] + need_rewrite = False + for l in f: + l = l.rstrip() + if ' ' in l: + need_rewrite = True + l = '"%s"' % (l) + lines.append(l) + f.close() + if need_rewrite: + self.execute(write_file, + (self.record, lines), + "re-writing list of installed files to '%s'" % + self.record) + return r diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_clib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_clib.py new file mode 100644 index 0000000000000..662aa00bda9b4 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_clib.py @@ -0,0 +1,39 @@ +from __future__ import division, absolute_import, print_function + +import os +from distutils.core import Command +from distutils.ccompiler import new_compiler +from numpy.distutils.misc_util import get_cmd + +class install_clib(Command): + description = "Command to install installable C libraries" + + user_options = [] + + def initialize_options(self): + self.install_dir = None + self.outfiles = [] + + def finalize_options(self): + self.set_undefined_options('install', ('install_lib', 'install_dir')) + + def run (self): + build_clib_cmd = get_cmd("build_clib") + build_dir = build_clib_cmd.build_clib + + # We need the compiler to get the library name -> filename association + if not build_clib_cmd.compiler: + compiler = new_compiler(compiler=None) + compiler.customize(self.distribution) + else: + compiler = build_clib_cmd.compiler + + for l in self.distribution.installed_libraries: + target_dir = os.path.join(self.install_dir, l.target_dir) + name = compiler.library_filename(l.name) + source = os.path.join(build_dir, name) + self.mkpath(target_dir) + self.outfiles.append(self.copy_file(source, target_dir)[0]) + + def get_outputs(self): + return self.outfiles diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_data.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_data.py new file mode 100644 index 0000000000000..996cf7e4017a6 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_data.py @@ -0,0 +1,26 @@ +from __future__ import division, absolute_import, print_function + +import sys +have_setuptools = ('setuptools' in sys.modules) + +from distutils.command.install_data import install_data as old_install_data + +#data installer with improved intelligence over distutils +#data files are copied into the project directory instead +#of willy-nilly +class install_data (old_install_data): + + def run(self): + old_install_data.run(self) + + if have_setuptools: + # Run install_clib again, since setuptools does not run sub-commands + # of install 
automatically + self.run_command('install_clib') + + def finalize_options (self): + self.set_undefined_options('install', + ('install_lib', 'install_dir'), + ('root', 'root'), + ('force', 'force'), + ) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_headers.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_headers.py new file mode 100644 index 0000000000000..f3f58aa2876fd --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_headers.py @@ -0,0 +1,27 @@ +from __future__ import division, absolute_import, print_function + +import os +from distutils.command.install_headers import install_headers as old_install_headers + +class install_headers (old_install_headers): + + def run (self): + headers = self.distribution.headers + if not headers: + return + + prefix = os.path.dirname(self.install_dir) + for header in headers: + if isinstance(header, tuple): + # Kind of a hack, but I don't know where else to change this... + if header[0] == 'numpy.core': + header = ('numpy', header[1]) + if os.path.splitext(header[1])[1] == '.inc': + continue + d = os.path.join(*([prefix]+header[0].split('.'))) + header = header[1] + else: + d = self.install_dir + self.mkpath(d) + (out, _) = self.copy_file(header, d) + self.outfiles.append(out) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/sdist.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/sdist.py new file mode 100644 index 0000000000000..bfaab1c8ffa18 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/sdist.py @@ -0,0 +1,29 @@ +from __future__ import division, absolute_import, print_function + +import sys +if 'setuptools' in sys.modules: + from setuptools.command.sdist import sdist as old_sdist +else: + from distutils.command.sdist import sdist as old_sdist + +from numpy.distutils.misc_util import get_data_files + +class sdist(old_sdist): + + def add_defaults (self): + old_sdist.add_defaults(self) + + dist = self.distribution + + if dist.has_data_files(): + for data in dist.data_files: + self.filelist.extend(get_data_files(data)) + + if dist.has_headers(): + headers = [] + for h in dist.headers: + if isinstance(h, str): headers.append(h) + else: headers.append(h[1]) + self.filelist.extend(headers) + + return diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/compat.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/compat.py new file mode 100644 index 0000000000000..9a81cd392fc4a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/compat.py @@ -0,0 +1,10 @@ +"""Small modules to cope with python 2 vs 3 incompatibilities inside +numpy.distutils + +""" +from __future__ import division, absolute_import, print_function + +import sys + +def get_exception(): + return sys.exc_info()[1] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/conv_template.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/conv_template.py new file mode 100644 index 0000000000000..a67fe4e511446 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/conv_template.py @@ -0,0 +1,337 @@ +#!/usr/bin/python +""" +takes templated file .xxx.src and produces .xxx file where .xxx is +.i or .c or .h, using the following template rules + +/**begin repeat -- on a line by itself marks the start of a repeated code + segment +/**end repeat**/ -- on a line by itself marks it's end + +After the /**begin repeat and before the */, all the named templates are placed +these should all have 
the same number of replacements + +Repeat blocks can be nested, with each nested block labeled with its depth, +i.e. +/**begin repeat1 + *.... + */ +/**end repeat1**/ + +When using nested loops, you can optionally exlude particular +combinations of the variables using (inside the comment portion of the inner loop): + + :exclude: var1=value1, var2=value2, ... + +This will exlude the pattern where var1 is value1 and var2 is value2 when +the result is being generated. + + +In the main body each replace will use one entry from the list of named replacements + + Note that all #..# forms in a block must have the same number of + comma-separated entries. + +Example: + + An input file containing + + /**begin repeat + * #a = 1,2,3# + * #b = 1,2,3# + */ + + /**begin repeat1 + * #c = ted, jim# + */ + @a@, @b@, @c@ + /**end repeat1**/ + + /**end repeat**/ + + produces + + line 1 "template.c.src" + + /* + ********************************************************************* + ** This file was autogenerated from a template DO NOT EDIT!!** + ** Changes should be made to the original source (.src) file ** + ********************************************************************* + */ + + #line 9 + 1, 1, ted + + #line 9 + 1, 1, jim + + #line 9 + 2, 2, ted + + #line 9 + 2, 2, jim + + #line 9 + 3, 3, ted + + #line 9 + 3, 3, jim + +""" +from __future__ import division, absolute_import, print_function + + +__all__ = ['process_str', 'process_file'] + +import os +import sys +import re + +from numpy.distutils.compat import get_exception + +# names for replacement that are already global. +global_names = {} + +# header placed at the front of head processed file +header =\ +""" +/* + ***************************************************************************** + ** This file was autogenerated from a template DO NOT EDIT!!!! ** + ** Changes should be made to the original source (.src) file ** + ***************************************************************************** + */ + +""" +# Parse string for repeat loops +def parse_structure(astr, level): + """ + The returned line number is from the beginning of the string, starting + at zero. Returns an empty list if no loops found. + + """ + if level == 0 : + loopbeg = "/**begin repeat" + loopend = "/**end repeat**/" + else : + loopbeg = "/**begin repeat%d" % level + loopend = "/**end repeat%d**/" % level + + ind = 0 + line = 0 + spanlist = [] + while True: + start = astr.find(loopbeg, ind) + if start == -1: + break + start2 = astr.find("*/", start) + start2 = astr.find("\n", start2) + fini1 = astr.find(loopend, start2) + fini2 = astr.find("\n", fini1) + line += astr.count("\n", ind, start2+1) + spanlist.append((start, start2+1, fini1, fini2+1, line)) + line += astr.count("\n", start2+1, fini2) + ind = fini2 + spanlist.sort() + return spanlist + + +def paren_repl(obj): + torep = obj.group(1) + numrep = obj.group(2) + return ','.join([torep]*int(numrep)) + +parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)") +plainrep = re.compile(r"([^*]+)\*(\d+)") +def parse_values(astr): + # replaces all occurrences of '(a,b,c)*4' in astr + # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate + # empty values, i.e., ()*4 yields ',,,'. The result is + # split at ',' and a list of values returned. 
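+ # NOTE (editor): concretely, parse_values("(a,b)*2, x*2") returns
+ # ['a', 'b', 'a', 'b', 'x', 'x'] and parse_values("()*3") returns
+ # ['', '', ''].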
+ astr = parenrep.sub(paren_repl, astr) + # replaces occurrences of xxx*3 with xxx, xxx, xxx + astr = ','.join([plainrep.sub(paren_repl, x.strip()) + for x in astr.split(',')]) + return astr.split(',') + + +stripast = re.compile(r"\n\s*\*?") +named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") +exclude_vars_re = re.compile(r"(\w*)=(\w*)") +exclude_re = re.compile(":exclude:") +def parse_loop_header(loophead) : + """Find all named replacements in the header + + Returns a list of dictionaries, one for each loop iteration, + where each key is a name to be substituted and the corresponding + value is the replacement string. + + Also return a list of exclusions. The exclusions are dictionaries + of key value pairs. There can be more than one exclusion. + [{'var1':'value1', 'var2':'value2'[,...]}, ...] + + """ + # Strip out '\n' and leading '*', if any, in continuation lines. + # This should not affect code previous to this change as + # continuation lines were not allowed. + loophead = stripast.sub("", loophead) + # parse out the names and lists of values + names = [] + reps = named_re.findall(loophead) + nsub = None + for rep in reps: + name = rep[0] + vals = parse_values(rep[1]) + size = len(vals) + if nsub is None : + nsub = size + elif nsub != size : + msg = "Mismatch in number of values:\n%s = %s" % (name, vals) + raise ValueError(msg) + names.append((name, vals)) + + + # Find any exclude variables + excludes = [] + + for obj in exclude_re.finditer(loophead): + span = obj.span() + # find next newline + endline = loophead.find('\n', span[1]) + substr = loophead[span[1]:endline] + ex_names = exclude_vars_re.findall(substr) + excludes.append(dict(ex_names)) + + # generate list of dictionaries, one for each template iteration + dlist = [] + if nsub is None : + raise ValueError("No substitution variables found") + for i in range(nsub) : + tmp = {} + for name, vals in names : + tmp[name] = vals[i] + dlist.append(tmp) + return dlist + +replace_re = re.compile(r"@([\w]+)@") +def parse_string(astr, env, level, line) : + lineno = "#line %d\n" % line + + # local function for string replacement, uses env + def replace(match): + name = match.group(1) + try : + val = env[name] + except KeyError: + msg = 'line %d: no definition of key "%s"'%(line, name) + raise ValueError(msg) + return val + + code = [lineno] + struct = parse_structure(astr, level) + if struct : + # recurse over inner loops + oldend = 0 + newlevel = level + 1 + for sub in struct: + pref = astr[oldend:sub[0]] + head = astr[sub[0]:sub[1]] + text = astr[sub[1]:sub[2]] + oldend = sub[3] + newline = line + sub[4] + code.append(replace_re.sub(replace, pref)) + try : + envlist = parse_loop_header(head) + except ValueError: + e = get_exception() + msg = "line %d: %s" % (newline, e) + raise ValueError(msg) + for newenv in envlist : + newenv.update(env) + newcode = parse_string(text, newenv, newlevel, newline) + code.extend(newcode) + suff = astr[oldend:] + code.append(replace_re.sub(replace, suff)) + else : + # replace keys + code.append(replace_re.sub(replace, astr)) + code.append('\n') + return ''.join(code) + +def process_str(astr): + code = [header] + code.extend(parse_string(astr, global_names, 0, 1)) + return ''.join(code) + + +include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" + r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + fid = open(source) + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn =
os.path.join(d, fn) + if os.path.isfile(fn): + print('Including file', fn) + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + fid.close() + return lines + +def process_file(source): + lines = resolve_includes(source) + sourcefile = os.path.normcase(source).replace("\\", "\\\\") + try: + code = process_str(''.join(lines)) + except ValueError: + e = get_exception() + raise ValueError('In "%s" loop at %s' % (sourcefile, e)) + return '#line 1 "%s"\n%s' % (sourcefile, code) + + +def unique_key(adict): + # this obtains a unique key given a dictionary + # currently it works by appending together n of the letters of the + # current keys and increasing n until a unique key is found + # -- not particularly quick + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = "".join([x[:n] for x in allkeys]) + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +if __name__ == "__main__": + + try: + file = sys.argv[1] + except IndexError: + fid = sys.stdin + outfile = sys.stdout + else: + fid = open(file, 'r') + (base, ext) = os.path.splitext(file) + newname = base + outfile = open(newname, 'w') + + allstr = fid.read() + try: + writestr = process_str(allstr) + except ValueError: + e = get_exception() + raise ValueError("In %s loop at %s" % (file, e)) + outfile.write(writestr) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/core.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/core.py new file mode 100644 index 0000000000000..3f0fd464a0d39 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/core.py @@ -0,0 +1,210 @@ +from __future__ import division, absolute_import, print_function + +import sys +from distutils.core import * + +if 'setuptools' in sys.modules: + have_setuptools = True + from setuptools import setup as old_setup + # easy_install imports math, it may be picked up from cwd + from setuptools.command import easy_install + try: + # very old versions of setuptools don't have this + from setuptools.command import bdist_egg + except ImportError: + have_setuptools = False +else: + from distutils.core import setup as old_setup + have_setuptools = False + +import warnings +import distutils.core +import distutils.dist + +from numpy.distutils.extension import Extension +from numpy.distutils.numpy_distribution import NumpyDistribution +from numpy.distutils.command import config, config_compiler, \ + build, build_py, build_ext, build_clib, build_src, build_scripts, \ + sdist, install_data, install_headers, install, bdist_rpm, \ + install_clib +from numpy.distutils.misc_util import get_data_files, is_sequence, is_string + +numpy_cmdclass = {'build': build.build, + 'build_src': build_src.build_src, + 'build_scripts': build_scripts.build_scripts, + 'config_cc': config_compiler.config_cc, + 'config_fc': config_compiler.config_fc, + 'config': config.config, + 'build_ext': build_ext.build_ext, + 'build_py': build_py.build_py, + 'build_clib': build_clib.build_clib, + 'sdist': sdist.sdist, + 'install_data': install_data.install_data, + 'install_headers': install_headers.install_headers, + 'install_clib': install_clib.install_clib, + 'install': install.install, + 'bdist_rpm': bdist_rpm.bdist_rpm, + } +if have_setuptools: + # Use our own versions of develop and egg_info to ensure that build_src is + # handled appropriately. 
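+ # NOTE (editor): numpy_cmdclass is only the default table; setup() below
+ # copies it and then applies any user-supplied cmdclass on top, so a
+ # project can still override individual commands, e.g. a hypothetical
+ # setup(..., cmdclass={'sdist': MySdist}).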
+ from numpy.distutils.command import develop, egg_info + numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg + numpy_cmdclass['develop'] = develop.develop + numpy_cmdclass['easy_install'] = easy_install.easy_install + numpy_cmdclass['egg_info'] = egg_info.egg_info + +def _dict_append(d, **kws): + for k, v in kws.items(): + if k not in d: + d[k] = v + continue + dv = d[k] + if isinstance(dv, tuple): + d[k] = dv + tuple(v) + elif isinstance(dv, list): + d[k] = dv + list(v) + elif isinstance(dv, dict): + _dict_append(dv, **v) + elif is_string(dv): + d[k] = dv + v + else: + raise TypeError(repr(type(dv))) + +def _command_line_ok(_cache=[]): + """ Return True if command line does not contain any + help or display requests. + """ + if _cache: + return _cache[0] + ok = True + display_opts = ['--'+n for n in Distribution.display_option_names] + for o in Distribution.display_options: + if o[1]: + display_opts.append('-'+o[1]) + for arg in sys.argv: + if arg.startswith('--help') or arg=='-h' or arg in display_opts: + ok = False + break + _cache.append(ok) + return ok + +def get_distribution(always=False): + dist = distutils.core._setup_distribution + # XXX Hack to get numpy installable with easy_install. + # The problem is easy_install runs it's own setup(), which + # sets up distutils.core._setup_distribution. However, + # when our setup() runs, that gets overwritten and lost. + # We can't use isinstance, as the DistributionWithoutHelpCommands + # class is local to a function in setuptools.command.easy_install + if dist is not None and \ + 'DistributionWithoutHelpCommands' in repr(dist): + dist = None + if always and dist is None: + dist = NumpyDistribution() + return dist + +def setup(**attr): + + cmdclass = numpy_cmdclass.copy() + + new_attr = attr.copy() + if 'cmdclass' in new_attr: + cmdclass.update(new_attr['cmdclass']) + new_attr['cmdclass'] = cmdclass + + if 'configuration' in new_attr: + # To avoid calling configuration if there are any errors + # or help request in command in the line. 
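+ # NOTE (editor): the recursive setup() call below runs with
+ # _setup_stop_after = "commandline", so distutils parses sys.argv without
+ # executing any command; configuration() is invoked only once the command
+ # line is known to be free of help/display requests.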
+ configuration = new_attr.pop('configuration') + + old_dist = distutils.core._setup_distribution + old_stop = distutils.core._setup_stop_after + distutils.core._setup_distribution = None + distutils.core._setup_stop_after = "commandline" + try: + dist = setup(**new_attr) + finally: + distutils.core._setup_distribution = old_dist + distutils.core._setup_stop_after = old_stop + if dist.help or not _command_line_ok(): + # probably displayed help, skip running any commands + return dist + + # create setup dictionary and append to new_attr + config = configuration() + if hasattr(config, 'todict'): + config = config.todict() + _dict_append(new_attr, **config) + + # Move extension source libraries to libraries + libraries = [] + for ext in new_attr.get('ext_modules', []): + new_libraries = [] + for item in ext.libraries: + if is_sequence(item): + lib_name, build_info = item + _check_append_ext_library(libraries, lib_name, build_info) + new_libraries.append(lib_name) + elif is_string(item): + new_libraries.append(item) + else: + raise TypeError("invalid description of extension module " + "library %r" % (item,)) + ext.libraries = new_libraries + if libraries: + if 'libraries' not in new_attr: + new_attr['libraries'] = [] + for item in libraries: + _check_append_library(new_attr['libraries'], item) + + # sources in ext_modules or libraries may contain header files + if ('ext_modules' in new_attr or 'libraries' in new_attr) \ + and 'headers' not in new_attr: + new_attr['headers'] = [] + + # Use our custom NumpyDistribution class instead of distutils' one + new_attr['distclass'] = NumpyDistribution + + return old_setup(**new_attr) + +def _check_append_library(libraries, item): + for libitem in libraries: + if is_sequence(libitem): + if is_sequence(item): + if item[0]==libitem[0]: + if item[1] is libitem[1]: + return + warnings.warn("[0] libraries list contains %r with" + " different build_info" % (item[0],)) + break + else: + if item==libitem[0]: + warnings.warn("[1] libraries list contains %r with" + " no build_info" % (item[0],)) + break + else: + if is_sequence(item): + if item[0]==libitem: + warnings.warn("[2] libraries list contains %r with" + " no build_info" % (item[0],)) + break + else: + if item==libitem: + return + libraries.append(item) + +def _check_append_ext_library(libraries, lib_name, build_info): + for item in libraries: + if is_sequence(item): + if item[0]==lib_name: + if item[1] is build_info: + return + warnings.warn("[3] libraries list contains %r with" + " different build_info" % (lib_name,)) + break + elif item==lib_name: + warnings.warn("[4] libraries list contains %r with" + " no build_info" % (lib_name,)) + break + libraries.append((lib_name, build_info)) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/cpuinfo.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/cpuinfo.py new file mode 100644 index 0000000000000..020f2c02fee63 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/cpuinfo.py @@ -0,0 +1,693 @@ +#!/usr/bin/env python +""" +cpuinfo + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
+Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['cpu'] + +import sys, re, types +import os + +if sys.version_info[0] >= 3: + from subprocess import getstatusoutput +else: + from commands import getstatusoutput + +import warnings +import platform + +from numpy.distutils.compat import get_exception + +def getoutput(cmd, successful_status=(0,), stacklevel=1): + try: + status, output = getstatusoutput(cmd) + except EnvironmentError: + e = get_exception() + warnings.warn(str(e), UserWarning, stacklevel=stacklevel) + return False, output + if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: + return True, output + return False, output + +def command_info(successful_status=(0,), stacklevel=1, **kw): + info = {} + for key in kw: + ok, output = getoutput(kw[key], successful_status=successful_status, + stacklevel=stacklevel+1) + if ok: + info[key] = output.strip() + return info + +def command_by_line(cmd, successful_status=(0,), stacklevel=1): + ok, output = getoutput(cmd, successful_status=successful_status, + stacklevel=stacklevel+1) + if not ok: + return + for line in output.splitlines(): + yield line.strip() + +def key_value_from_command(cmd, sep, successful_status=(0,), + stacklevel=1): + d = {} + for line in command_by_line(cmd, successful_status=successful_status, + stacklevel=stacklevel+1): + l = [s.strip() for s in line.split(sep, 1)] + if len(l) == 2: + d[l[0]] = l[1] + return d + +class CPUInfoBase(object): + """Holds CPU information and provides methods for requiring + the availability of various CPU features. + """ + + def _try_call(self, func): + try: + return func() + except: + pass + + def __getattr__(self, name): + if not name.startswith('_'): + if hasattr(self, '_'+name): + attr = getattr(self, '_'+name) + if isinstance(attr, types.MethodType): + return lambda func=self._try_call,attr=attr : func(attr) + else: + return lambda : None + raise AttributeError(name) + + def _getNCPUs(self): + return 1 + + def __get_nbits(self): + abits = platform.architecture()[0] + nbits = re.compile('(\d+)bit').search(abits).group(1) + return nbits + + def _is_32bit(self): + return self.__get_nbits() == '32' + + def _is_64bit(self): + return self.__get_nbits() == '64' + +class LinuxCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = [ {} ] + ok, output = getoutput('uname -m') + if ok: + info[0]['uname_m'] = output.strip() + try: + fo = open('/proc/cpuinfo') + except EnvironmentError: + e = get_exception() + warnings.warn(str(e), UserWarning) + else: + for line in fo: + name_value = [s.strip() for s in line.split(':', 1)] + if len(name_value) != 2: + continue + name, value = name_value + if not info or name in info[-1]: # next processor + info.append({}) + info[-1][name] = value + fo.close() + self.__class__.info = info + + def _not_impl(self): pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['vendor_id']=='AuthenticAMD' + + def _is_AthlonK6_2(self): + return self._is_AMD() and self.info[0]['model'] == '2' + + def _is_AthlonK6_3(self): + return self._is_AMD() and self.info[0]['model'] == '3' + + def _is_AthlonK6(self): + return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None + + def _is_AthlonK7(self): + return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None + + def _is_AthlonMP(self): + return re.match(r'.*?Athlon\(tm\) MP\b', + self.info[0]['model name']) is not None + + def _is_AMD64(self): + return self.is_AMD() and 
self.info[0]['family'] == '15' + + def _is_Athlon64(self): + return re.match(r'.*?Athlon\(tm\) 64\b', + self.info[0]['model name']) is not None + + def _is_AthlonHX(self): + return re.match(r'.*?Athlon HX\b', + self.info[0]['model name']) is not None + + def _is_Opteron(self): + return re.match(r'.*?Opteron\b', + self.info[0]['model name']) is not None + + def _is_Hammer(self): + return re.match(r'.*?Hammer\b', + self.info[0]['model name']) is not None + + # Alpha + + def _is_Alpha(self): + return self.info[0]['cpu']=='Alpha' + + def _is_EV4(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' + + def _is_EV5(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' + + def _is_EV56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' + + def _is_PCA56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' + + # Intel + + #XXX + _is_i386 = _not_impl + + def _is_Intel(self): + return self.info[0]['vendor_id']=='GenuineIntel' + + def _is_i486(self): + return self.info[0]['cpu']=='i486' + + def _is_i586(self): + return self.is_Intel() and self.info[0]['cpu family'] == '5' + + def _is_i686(self): + return self.is_Intel() and self.info[0]['cpu family'] == '6' + + def _is_Celeron(self): + return re.match(r'.*?Celeron', + self.info[0]['model name']) is not None + + def _is_Pentium(self): + return re.match(r'.*?Pentium', + self.info[0]['model name']) is not None + + def _is_PentiumII(self): + return re.match(r'.*?Pentium.*?II\b', + self.info[0]['model name']) is not None + + def _is_PentiumPro(self): + return re.match(r'.*?PentiumPro\b', + self.info[0]['model name']) is not None + + def _is_PentiumMMX(self): + return re.match(r'.*?Pentium.*?MMX\b', + self.info[0]['model name']) is not None + + def _is_PentiumIII(self): + return re.match(r'.*?Pentium.*?III\b', + self.info[0]['model name']) is not None + + def _is_PentiumIV(self): + return re.match(r'.*?Pentium.*?(IV|4)\b', + self.info[0]['model name']) is not None + + def _is_PentiumM(self): + return re.match(r'.*?Pentium.*?M\b', + self.info[0]['model name']) is not None + + def _is_Prescott(self): + return self.is_PentiumIV() and self.has_sse3() + + def _is_Nocona(self): + return self.is_Intel() \ + and (self.info[0]['cpu family'] == '6' \ + or self.info[0]['cpu family'] == '15' ) \ + and (self.has_sse3() and not self.has_ssse3())\ + and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None + + def _is_Core2(self): + return self.is_64bit() and self.is_Intel() and \ + re.match(r'.*?Core\(TM\)2\b', \ + self.info[0]['model name']) is not None + + def _is_Itanium(self): + return re.match(r'.*?Itanium\b', + self.info[0]['family']) is not None + + def _is_XEON(self): + return re.match(r'.*?XEON\b', + self.info[0]['model name'], re.IGNORECASE) is not None + + _is_Xeon = _is_XEON + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_fdiv_bug(self): + return self.info[0]['fdiv_bug']=='yes' + + def _has_f00f_bug(self): + return self.info[0]['f00f_bug']=='yes' + + def _has_mmx(self): + return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None + + def _has_sse(self): + return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None + + def _has_sse2(self): + return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None + + def _has_sse3(self): + return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None + + def _has_ssse3(self): + return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is 
not None + + def _has_3dnow(self): + return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None + + def _has_3dnowext(self): + return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None + +class IRIXCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = key_value_from_command('sysconf', sep=' ', + successful_status=(0, 1)) + self.__class__.info = info + + def _not_impl(self): pass + + def _is_singleCPU(self): + return self.info.get('NUM_PROCESSORS') == '1' + + def _getNCPUs(self): + return int(self.info.get('NUM_PROCESSORS', 1)) + + def __cputype(self, n): + return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) + def _is_r2000(self): return self.__cputype(2000) + def _is_r3000(self): return self.__cputype(3000) + def _is_r3900(self): return self.__cputype(3900) + def _is_r4000(self): return self.__cputype(4000) + def _is_r4100(self): return self.__cputype(4100) + def _is_r4300(self): return self.__cputype(4300) + def _is_r4400(self): return self.__cputype(4400) + def _is_r4600(self): return self.__cputype(4600) + def _is_r4650(self): return self.__cputype(4650) + def _is_r5000(self): return self.__cputype(5000) + def _is_r6000(self): return self.__cputype(6000) + def _is_r8000(self): return self.__cputype(8000) + def _is_r10000(self): return self.__cputype(10000) + def _is_r12000(self): return self.__cputype(12000) + def _is_rorion(self): return self.__cputype('orion') + + def get_ip(self): + try: return self.info.get('MACHINE') + except: pass + def __machine(self, n): + return self.info.get('MACHINE').lower() == 'ip%s' % (n) + def _is_IP19(self): return self.__machine(19) + def _is_IP20(self): return self.__machine(20) + def _is_IP21(self): return self.__machine(21) + def _is_IP22(self): return self.__machine(22) + def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() + def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() + def _is_IP24(self): return self.__machine(24) + def _is_IP25(self): return self.__machine(25) + def _is_IP26(self): return self.__machine(26) + def _is_IP27(self): return self.__machine(27) + def _is_IP28(self): return self.__machine(28) + def _is_IP30(self): return self.__machine(30) + def _is_IP32(self): return self.__machine(32) + def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() + def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() + + +class DarwinCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + machine='machine') + info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Power_Macintosh(self): + return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' + + def _is_i386(self): + return self.info['arch']=='i386' + def _is_ppc(self): + return self.info['arch']=='ppc' + + def __machine(self, n): + return self.info['machine'] == 'ppc%s'%n + def _is_ppc601(self): return self.__machine(601) + def _is_ppc602(self): return self.__machine(602) + def _is_ppc603(self): return self.__machine(603) + def _is_ppc603e(self): return self.__machine('603e') + def _is_ppc604(self): return self.__machine(604) + def _is_ppc604e(self): return self.__machine('604e') + def _is_ppc620(self): return self.__machine(620) + def _is_ppc630(self): return self.__machine(630) + def 
_is_ppc740(self): return self.__machine(740) + def _is_ppc7400(self): return self.__machine(7400) + def _is_ppc7450(self): return self.__machine(7450) + def _is_ppc750(self): return self.__machine(750) + def _is_ppc403(self): return self.__machine(403) + def _is_ppc505(self): return self.__machine(505) + def _is_ppc801(self): return self.__machine(801) + def _is_ppc821(self): return self.__machine(821) + def _is_ppc823(self): return self.__machine(823) + def _is_ppc860(self): return self.__machine(860) + + +class SunOSCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + mach='mach', + uname_i='uname_i', + isainfo_b='isainfo -b', + isainfo_n='isainfo -n', + ) + info['uname_X'] = key_value_from_command('uname -X', sep='=') + for line in command_by_line('psrinfo -v 0'): + m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) + if m: + info['processor'] = m.group('p') + break + self.__class__.info = info + + def _not_impl(self): pass + + def _is_i386(self): + return self.info['isainfo_n']=='i386' + def _is_sparc(self): + return self.info['isainfo_n']=='sparc' + def _is_sparcv9(self): + return self.info['isainfo_n']=='sparcv9' + + def _getNCPUs(self): + return int(self.info['uname_X'].get('NumCPU', 1)) + + def _is_sun4(self): + return self.info['arch']=='sun4' + + def _is_SUNW(self): + return re.match(r'SUNW', self.info['uname_i']) is not None + def _is_sparcstation5(self): + return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None + def _is_ultra1(self): + return re.match(r'.*Ultra-1', self.info['uname_i']) is not None + def _is_ultra250(self): + return re.match(r'.*Ultra-250', self.info['uname_i']) is not None + def _is_ultra2(self): + return re.match(r'.*Ultra-2', self.info['uname_i']) is not None + def _is_ultra30(self): + return re.match(r'.*Ultra-30', self.info['uname_i']) is not None + def _is_ultra4(self): + return re.match(r'.*Ultra-4', self.info['uname_i']) is not None + def _is_ultra5_10(self): + return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None + def _is_ultra5(self): + return re.match(r'.*Ultra-5', self.info['uname_i']) is not None + def _is_ultra60(self): + return re.match(r'.*Ultra-60', self.info['uname_i']) is not None + def _is_ultra80(self): + return re.match(r'.*Ultra-80', self.info['uname_i']) is not None + def _is_ultraenterprice(self): + return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None + def _is_ultraenterprice10k(self): + return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None + def _is_sunfire(self): + return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None + def _is_ultra(self): + return re.match(r'.*Ultra', self.info['uname_i']) is not None + + def _is_cpusparcv7(self): + return self.info['processor']=='sparcv7' + def _is_cpusparcv8(self): + return self.info['processor']=='sparcv8' + def _is_cpusparcv9(self): + return self.info['processor']=='sparcv9' + +class Win32CPUInfo(CPUInfoBase): + + info = None + pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" + # XXX: what does the value of + # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 + # mean? + + def __init__(self): + if self.info is not None: + return + info = [] + try: + #XXX: Bad style to use so long `try:...except:...`. Fix it! 
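+ # NOTE (editor): the registry "Identifier" value parsed below typically
+ # looks like "x86 Family 6 Model 15 Stepping 11, GenuineIntel", which is
+ # the layout the family/model/stepping pattern prgx expects.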
+ if sys.version_info[0] >= 3: + import winreg + else: + import _winreg as winreg + + prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\ + "\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE) + chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) + pnum=0 + while True: + try: + proc=winreg.EnumKey(chnd, pnum) + except winreg.error: + break + else: + pnum+=1 + info.append({"Processor":proc}) + phnd=winreg.OpenKey(chnd, proc) + pidx=0 + while True: + try: + name, value, vtpe=winreg.EnumValue(phnd, pidx) + except winreg.error: + break + else: + pidx=pidx+1 + info[-1][name]=value + if name=="Identifier": + srch=prgx.search(value) + if srch: + info[-1]["Family"]=int(srch.group("FML")) + info[-1]["Model"]=int(srch.group("MDL")) + info[-1]["Stepping"]=int(srch.group("STP")) + except: + print(sys.exc_info()[1], '(ignoring)') + self.__class__.info = info + + def _not_impl(self): pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['VendorIdentifier']=='AuthenticAMD' + + def _is_Am486(self): + return self.is_AMD() and self.info[0]['Family']==4 + + def _is_Am5x86(self): + return self.is_AMD() and self.info[0]['Family']==4 + + def _is_AMDK5(self): + return self.is_AMD() and self.info[0]['Family']==5 \ + and self.info[0]['Model'] in [0, 1, 2, 3] + + def _is_AMDK6(self): + return self.is_AMD() and self.info[0]['Family']==5 \ + and self.info[0]['Model'] in [6, 7] + + def _is_AMDK6_2(self): + return self.is_AMD() and self.info[0]['Family']==5 \ + and self.info[0]['Model']==8 + + def _is_AMDK6_3(self): + return self.is_AMD() and self.info[0]['Family']==5 \ + and self.info[0]['Model']==9 + + def _is_AMDK7(self): + return self.is_AMD() and self.info[0]['Family'] == 6 + + # To reliably distinguish between the different types of AMD64 chips + # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.)
would + # require looking at the 'brand' from cpuid + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['Family'] == 15 + + # Intel + + def _is_Intel(self): + return self.info[0]['VendorIdentifier']=='GenuineIntel' + + def _is_i386(self): + return self.info[0]['Family']==3 + + def _is_i486(self): + return self.info[0]['Family']==4 + + def _is_i586(self): + return self.is_Intel() and self.info[0]['Family']==5 + + def _is_i686(self): + return self.is_Intel() and self.info[0]['Family']==6 + + def _is_Pentium(self): + return self.is_Intel() and self.info[0]['Family']==5 + + def _is_PentiumMMX(self): + return self.is_Intel() and self.info[0]['Family']==5 \ + and self.info[0]['Model']==4 + + def _is_PentiumPro(self): + return self.is_Intel() and self.info[0]['Family']==6 \ + and self.info[0]['Model']==1 + + def _is_PentiumII(self): + return self.is_Intel() and self.info[0]['Family']==6 \ + and self.info[0]['Model'] in [3, 5, 6] + + def _is_PentiumIII(self): + return self.is_Intel() and self.info[0]['Family']==6 \ + and self.info[0]['Model'] in [7, 8, 9, 10, 11] + + def _is_PentiumIV(self): + return self.is_Intel() and self.info[0]['Family']==15 + + def _is_PentiumM(self): + return self.is_Intel() and self.info[0]['Family'] == 6 \ + and self.info[0]['Model'] in [9, 13, 14] + + def _is_Core2(self): + return self.is_Intel() and self.info[0]['Family'] == 6 \ + and self.info[0]['Model'] in [15, 16, 17] + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_mmx(self): + if self.is_Intel(): + return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ + or (self.info[0]['Family'] in [6, 15]) + elif self.is_AMD(): + return self.info[0]['Family'] in [5, 6, 15] + else: + return False + + def _has_sse(self): + if self.is_Intel(): + return (self.info[0]['Family']==6 and \ + self.info[0]['Model'] in [7, 8, 9, 10, 11]) \ + or self.info[0]['Family']==15 + elif self.is_AMD(): + return (self.info[0]['Family']==6 and \ + self.info[0]['Model'] in [6, 7, 8, 10]) \ + or self.info[0]['Family']==15 + else: + return False + + def _has_sse2(self): + if self.is_Intel(): + return self.is_Pentium4() or self.is_PentiumM() \ + or self.is_Core2() + elif self.is_AMD(): + return self.is_AMD64() + else: + return False + + def _has_3dnow(self): + return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] + + def _has_3dnowext(self): + return self.is_AMD() and self.info[0]['Family'] in [6, 15] + +if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) + cpuinfo = LinuxCPUInfo +elif sys.platform.startswith('irix'): + cpuinfo = IRIXCPUInfo +elif sys.platform == 'darwin': + cpuinfo = DarwinCPUInfo +elif sys.platform.startswith('sunos'): + cpuinfo = SunOSCPUInfo +elif sys.platform.startswith('win32'): + cpuinfo = Win32CPUInfo +elif sys.platform.startswith('cygwin'): + cpuinfo = LinuxCPUInfo +#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
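+# NOTE (editor): cygwin is mapped to LinuxCPUInfo above because it emulates
+# /proc/cpuinfo; any platform not listed falls back to CPUInfoBase, whose
+# feature queries simply return None.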
+else: + cpuinfo = CPUInfoBase + +cpu = cpuinfo() + +#if __name__ == "__main__": +# +# cpu.is_blaa() +# cpu.is_Intel() +# cpu.is_Alpha() +# +# print 'CPU information:', +# for name in dir(cpuinfo): +# if name[0]=='_' and name[1]!='_': +# r = getattr(cpu,name[1:])() +# if r: +# if r!=1: +# print '%s=%s' %(name[1:],r), +# else: +# print name[1:], +# print diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/environment.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/environment.py new file mode 100644 index 0000000000000..3798e16f5da7b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/environment.py @@ -0,0 +1,72 @@ +from __future__ import division, absolute_import, print_function + +import os +from distutils.dist import Distribution + +__metaclass__ = type + +class EnvironmentConfig(object): + def __init__(self, distutils_section='ALL', **kw): + self._distutils_section = distutils_section + self._conf_keys = kw + self._conf = None + self._hook_handler = None + + def dump_variable(self, name): + conf_desc = self._conf_keys[name] + hook, envvar, confvar, convert = conf_desc + if not convert: + convert = lambda x : x + print('%s.%s:' % (self._distutils_section, name)) + v = self._hook_handler(name, hook) + print(' hook : %s' % (convert(v),)) + if envvar: + v = os.environ.get(envvar, None) + print(' environ: %s' % (convert(v),)) + if confvar and self._conf: + v = self._conf.get(confvar, (None, None))[1] + print(' config : %s' % (convert(v),)) + + def dump_variables(self): + for name in self._conf_keys: + self.dump_variable(name) + + def __getattr__(self, name): + try: + conf_desc = self._conf_keys[name] + except KeyError: + raise AttributeError(name) + return self._get_var(name, conf_desc) + + def get(self, name, default=None): + try: + conf_desc = self._conf_keys[name] + except KeyError: + return default + var = self._get_var(name, conf_desc) + if var is None: + var = default + return var + + def _get_var(self, name, conf_desc): + hook, envvar, confvar, convert = conf_desc + var = self._hook_handler(name, hook) + if envvar is not None: + var = os.environ.get(envvar, var) + if confvar is not None and self._conf: + var = self._conf.get(confvar, (None, var))[1] + if convert is not None: + var = convert(var) + return var + + def clone(self, hook_handler): + ec = self.__class__(distutils_section=self._distutils_section, + **self._conf_keys) + ec._hook_handler = hook_handler + return ec + + def use_distribution(self, dist): + if isinstance(dist, Distribution): + self._conf = dist.get_option_dict(self._distutils_section) + else: + self._conf = dist diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/exec_command.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/exec_command.py new file mode 100644 index 0000000000000..baf81f337aa2e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/exec_command.py @@ -0,0 +1,618 @@ +#!/usr/bin/env python +""" +exec_command + +Implements exec_command function that is (almost) equivalent to +commands.getstatusoutput function but on NT, DOS systems the +returned status is actually correct (though, the returned status +values may be different by a factor). In addition, exec_command +takes keyword arguments for (re-)defining environment variables. + +Provides functions: + exec_command --- execute command in a specified directory and + in the modified environment. + find_executable --- locate a command using info from environment + variable PATH. Equivalent to posix `which` + command. 
+ +Author: Pearu Peterson +Created: 11 January 2003 + +Requires: Python 2.x + +Succesfully tested on: + os.name | sys.platform | comments + --------+--------------+---------- + posix | linux2 | Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 + PyCrust 0.9.3, Idle 1.0.2 + posix | linux2 | Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 + posix | sunos5 | SunOS 5.9, Python 2.2, 2.3.2 + posix | darwin | Darwin 7.2.0, Python 2.3 + nt | win32 | Windows Me + Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 + Python 2.1.1 Idle 0.8 + nt | win32 | Windows 98, Python 2.1.1. Idle 0.8 + nt | win32 | Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests + fail i.e. redefining environment variables may + not work. FIXED: don't use cygwin echo! + Comment: also `cmd /c echo` will not work + but redefining environment variables do work. + posix | cygwin | Cygwin 98-4.10, Python 2.3.3(cygming special) + nt | win32 | Windows XP, Python 2.3.3 + +Known bugs: +- Tests, that send messages to stderr, fail when executed from MSYS prompt + because the messages are lost at some point. +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['exec_command', 'find_executable'] + +import os +import sys +import shlex + +from numpy.distutils.misc_util import is_sequence, make_temp_file +from numpy.distutils import log +from numpy.distutils.compat import get_exception + +from numpy.compat import open_latin1 + +def temp_file_name(): + fo, name = make_temp_file() + fo.close() + return name + +def get_pythonexe(): + pythonexe = sys.executable + if os.name in ['nt', 'dos']: + fdir, fn = os.path.split(pythonexe) + fn = fn.upper().replace('PYTHONW', 'PYTHON') + pythonexe = os.path.join(fdir, fn) + assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) + return pythonexe + +def splitcmdline(line): + import warnings + warnings.warn('splitcmdline is deprecated; use shlex.split', + DeprecationWarning) + return shlex.split(line) + +def find_executable(exe, path=None, _cache={}): + """Return full path of a executable or None. + + Symbolic links are not followed. 
+ """ + key = exe, path + try: + return _cache[key] + except KeyError: + pass + log.debug('find_executable(%r)' % exe) + orig_exe = exe + + if path is None: + path = os.environ.get('PATH', os.defpath) + if os.name=='posix': + realpath = os.path.realpath + else: + realpath = lambda a:a + + if exe.startswith('"'): + exe = exe[1:-1] + + suffixes = [''] + if os.name in ['nt', 'dos', 'os2']: + fn, ext = os.path.splitext(exe) + extra_suffixes = ['.exe', '.com', '.bat'] + if ext.lower() not in extra_suffixes: + suffixes = extra_suffixes + + if os.path.isabs(exe): + paths = [''] + else: + paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] + + for path in paths: + fn = os.path.join(path, exe) + for s in suffixes: + f_ext = fn+s + if not os.path.islink(f_ext): + f_ext = realpath(f_ext) + if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): + log.info('Found executable %s' % f_ext) + _cache[key] = f_ext + return f_ext + + log.warn('Could not locate executable %s' % orig_exe) + return None + +############################################################ + +def _preserve_environment( names ): + log.debug('_preserve_environment(%r)' % (names)) + env = {} + for name in names: + env[name] = os.environ.get(name) + return env + +def _update_environment( **env ): + log.debug('_update_environment(...)') + for name, value in env.items(): + os.environ[name] = value or '' + +def _supports_fileno(stream): + """ + Returns True if 'stream' supports the file descriptor and allows fileno(). + """ + if hasattr(stream, 'fileno'): + try: + r = stream.fileno() + return True + except IOError: + return False + else: + return False + +def exec_command( command, + execute_in='', use_shell=None, use_tee = None, + _with_python = 1, + **env ): + """ Return (status,output) of executed command. + + command is a concatenated string of executable and arguments. + The output contains both stdout and stderr messages. + The following special keyword arguments can be used: + use_shell - execute `sh -c command` + use_tee - pipe the output of command through tee + execute_in - before run command `cd execute_in` and after `cd -`. + + On NT, DOS systems the returned status is correct for external commands. + Wild cards will not work for non-posix systems or when use_shell=0. + """ + log.debug('exec_command(%r,%s)' % (command,\ + ','.join(['%s=%r'%kv for kv in env.items()]))) + + if use_tee is None: + use_tee = os.name=='posix' + if use_shell is None: + use_shell = os.name=='posix' + execute_in = os.path.abspath(execute_in) + oldcwd = os.path.abspath(os.getcwd()) + + if __name__[-12:] == 'exec_command': + exec_dir = os.path.dirname(os.path.abspath(__file__)) + elif os.path.isfile('exec_command.py'): + exec_dir = os.path.abspath('.') + else: + exec_dir = os.path.abspath(sys.argv[0]) + if os.path.isfile(exec_dir): + exec_dir = os.path.dirname(exec_dir) + + if oldcwd!=execute_in: + os.chdir(execute_in) + log.debug('New cwd: %s' % execute_in) + else: + log.debug('Retaining cwd: %s' % oldcwd) + + oldenv = _preserve_environment( list(env.keys()) ) + _update_environment( **env ) + + try: + # _exec_command is robust but slow, it relies on + # usable sys.std*.fileno() descriptors. If they + # are bad (like in win32 Idle, PyCrust environments) + # then _exec_command_python (even slower) + # will be used as a last resort. + # + # _exec_command_posix uses os.system and is faster + # but not on all platforms os.system will return + # a correct status. 
+ if (_with_python and _supports_fileno(sys.stdout) and + sys.stdout.fileno() == -1): + st = _exec_command_python(command, + exec_command_dir = exec_dir, + **env) + elif os.name=='posix': + st = _exec_command_posix(command, + use_shell=use_shell, + use_tee=use_tee, + **env) + else: + st = _exec_command(command, use_shell=use_shell, + use_tee=use_tee,**env) + finally: + if oldcwd!=execute_in: + os.chdir(oldcwd) + log.debug('Restored cwd to %s' % oldcwd) + _update_environment(**oldenv) + + return st + +def _exec_command_posix( command, + use_shell = None, + use_tee = None, + **env ): + log.debug('_exec_command_posix(...)') + + if is_sequence(command): + command_str = ' '.join(list(command)) + else: + command_str = command + + tmpfile = temp_file_name() + stsfile = None + if use_tee: + stsfile = temp_file_name() + filter = '' + if use_tee == 2: + filter = r'| tr -cd "\n" | tr "\n" "."; echo' + command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\ + % (command_str, stsfile, tmpfile, filter) + else: + stsfile = temp_file_name() + command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\ + % (command_str, stsfile, tmpfile) + #command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile) + + log.debug('Running os.system(%r)' % (command_posix)) + status = os.system(command_posix) + + if use_tee: + if status: + # if command_tee fails then fall back to robust exec_command + log.warn('_exec_command_posix failed (status=%s)' % status) + return _exec_command(command, use_shell=use_shell, **env) + + if stsfile is not None: + f = open_latin1(stsfile, 'r') + status_text = f.read() + status = int(status_text) + f.close() + os.remove(stsfile) + + f = open_latin1(tmpfile, 'r') + text = f.read() + f.close() + os.remove(tmpfile) + + if text[-1:]=='\n': + text = text[:-1] + + return status, text + + +def _exec_command_python(command, + exec_command_dir='', **env): + log.debug('_exec_command_python(...)') + + python_exe = get_pythonexe() + cmdfile = temp_file_name() + stsfile = temp_file_name() + outfile = temp_file_name() + + f = open(cmdfile, 'w') + f.write('import os\n') + f.write('import sys\n') + f.write('sys.path.insert(0,%r)\n' % (exec_command_dir)) + f.write('from exec_command import exec_command\n') + f.write('del sys.path[0]\n') + f.write('cmd = %r\n' % command) + f.write('os.environ = %r\n' % (os.environ)) + f.write('s,o = exec_command(cmd, _with_python=0, **%r)\n' % (env)) + f.write('f=open(%r,"w")\nf.write(str(s))\nf.close()\n' % (stsfile)) + f.write('f=open(%r,"w")\nf.write(o)\nf.close()\n' % (outfile)) + f.close() + + cmd = '%s %s' % (python_exe, cmdfile) + status = os.system(cmd) + if status: + raise RuntimeError("%r failed" % (cmd,)) + os.remove(cmdfile) + + f = open_latin1(stsfile, 'r') + status = int(f.read()) + f.close() + os.remove(stsfile) + + f = open_latin1(outfile, 'r') + text = f.read() + f.close() + os.remove(outfile) + + return status, text + +def quote_arg(arg): + if arg[0]!='"' and ' ' in arg: + return '"%s"' % arg + return arg + +def _exec_command( command, use_shell=None, use_tee = None, **env ): + log.debug('_exec_command(...)') + + if use_shell is None: + use_shell = os.name=='posix' + if use_tee is None: + use_tee = os.name=='posix' + using_command = 0 + if use_shell: + # We use shell (unless use_shell==0) so that wildcards can be + # used. 
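+ # NOTE (editor): when a sequence is flattened with ' '.join below,
+ # arguments that themselves contain spaces lose their grouping, so
+ # callers passing a list should pre-quote such arguments.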
+ sh = os.environ.get('SHELL', '/bin/sh') + if is_sequence(command): + argv = [sh, '-c', ' '.join(list(command))] + else: + argv = [sh, '-c', command] + else: + # On NT, DOS we avoid using command.com as it's exit status is + # not related to the exit status of a command. + if is_sequence(command): + argv = command[:] + else: + argv = shlex.split(command) + + if hasattr(os, 'spawnvpe'): + spawn_command = os.spawnvpe + else: + spawn_command = os.spawnve + argv[0] = find_executable(argv[0]) or argv[0] + if not os.path.isfile(argv[0]): + log.warn('Executable %s does not exist' % (argv[0])) + if os.name in ['nt', 'dos']: + # argv[0] might be internal command + argv = [os.environ['COMSPEC'], '/C'] + argv + using_command = 1 + + _so_has_fileno = _supports_fileno(sys.stdout) + _se_has_fileno = _supports_fileno(sys.stderr) + so_flush = sys.stdout.flush + se_flush = sys.stderr.flush + if _so_has_fileno: + so_fileno = sys.stdout.fileno() + so_dup = os.dup(so_fileno) + if _se_has_fileno: + se_fileno = sys.stderr.fileno() + se_dup = os.dup(se_fileno) + + outfile = temp_file_name() + fout = open(outfile, 'w') + if using_command: + errfile = temp_file_name() + ferr = open(errfile, 'w') + + log.debug('Running %s(%s,%r,%r,os.environ)' \ + % (spawn_command.__name__, os.P_WAIT, argv[0], argv)) + + argv0 = argv[0] + if not using_command: + argv[0] = quote_arg(argv0) + + so_flush() + se_flush() + if _so_has_fileno: + os.dup2(fout.fileno(), so_fileno) + + if _se_has_fileno: + if using_command: + #XXX: disabled for now as it does not work from cmd under win32. + # Tests fail on msys + os.dup2(ferr.fileno(), se_fileno) + else: + os.dup2(fout.fileno(), se_fileno) + try: + status = spawn_command(os.P_WAIT, argv0, argv, os.environ) + except OSError: + errmess = str(get_exception()) + status = 999 + sys.stderr.write('%s: %s'%(errmess, argv[0])) + + so_flush() + se_flush() + if _so_has_fileno: + os.dup2(so_dup, so_fileno) + if _se_has_fileno: + os.dup2(se_dup, se_fileno) + + fout.close() + fout = open_latin1(outfile, 'r') + text = fout.read() + fout.close() + os.remove(outfile) + + if using_command: + ferr.close() + ferr = open_latin1(errfile, 'r') + errmess = ferr.read() + ferr.close() + os.remove(errfile) + if errmess and not status: + # Not sure how to handle the case where errmess + # contains only warning messages and that should + # not be treated as errors. 
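+ # NOTE (editor): as written, stderr output is appended to the captured
+ # text and echoed, but the exit status is left untouched, so warnings on
+ # stderr never turn a successful command into a failure by themselves.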
+ #status = 998 + if text: + text = text + '\n' + #text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess) + text = text + errmess + print (errmess) + if text[-1:]=='\n': + text = text[:-1] + if status is None: + status = 0 + + if use_tee: + print (text) + + return status, text + + +def test_nt(**kws): + pythonexe = get_pythonexe() + echo = find_executable('echo') + using_cygwin_echo = echo != 'echo' + if using_cygwin_echo: + log.warn('Using cygwin echo in win32 environment is not supported') + + s, o=exec_command(pythonexe\ + +' -c "import os;print os.environ.get(\'AAA\',\'\')"') + assert s==0 and o=='', (s, o) + + s, o=exec_command(pythonexe\ + +' -c "import os;print os.environ.get(\'AAA\')"', + AAA='Tere') + assert s==0 and o=='Tere', (s, o) + + os.environ['BBB'] = 'Hi' + s, o=exec_command(pythonexe\ + +' -c "import os;print os.environ.get(\'BBB\',\'\')"') + assert s==0 and o=='Hi', (s, o) + + s, o=exec_command(pythonexe\ + +' -c "import os;print os.environ.get(\'BBB\',\'\')"', + BBB='Hey') + assert s==0 and o=='Hey', (s, o) + + s, o=exec_command(pythonexe\ + +' -c "import os;print os.environ.get(\'BBB\',\'\')"') + assert s==0 and o=='Hi', (s, o) + elif 0: + s, o=exec_command('echo Hello') + assert s==0 and o=='Hello', (s, o) + + s, o=exec_command('echo a%AAA%') + assert s==0 and o=='a', (s, o) + + s, o=exec_command('echo a%AAA%', AAA='Tere') + assert s==0 and o=='aTere', (s, o) + + os.environ['BBB'] = 'Hi' + s, o=exec_command('echo a%BBB%') + assert s==0 and o=='aHi', (s, o) + + s, o=exec_command('echo a%BBB%', BBB='Hey') + assert s==0 and o=='aHey', (s, o) + s, o=exec_command('echo a%BBB%') + assert s==0 and o=='aHi', (s, o) + + s, o=exec_command('this_is_not_a_command') + assert s and o!='', (s, o) + + s, o=exec_command('type not_existing_file') + assert s and o!='', (s, o) + + s, o=exec_command('echo path=%path%') + assert s==0 and o!='', (s, o) + + s, o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \ + % pythonexe) + assert s==0 and o=='win32', (s, o) + + s, o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe) + assert s==1 and o, (s, o) + + s, o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\ + % pythonexe) + assert s==0 and o=='012', (s, o) + + s, o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe) + assert s==15 and o=='', (s, o) + + s, o=exec_command('%s -c "print \'Heipa\'"' % pythonexe) + assert s==0 and o=='Heipa', (s, o) + + print ('ok') + +def test_posix(**kws): + s, o=exec_command("echo Hello",**kws) + assert s==0 and o=='Hello', (s, o) + + s, o=exec_command('echo $AAA',**kws) + assert s==0 and o=='', (s, o) + + s, o=exec_command('echo "$AAA"',AAA='Tere',**kws) + assert s==0 and o=='Tere', (s, o) + + + s, o=exec_command('echo "$AAA"',**kws) + assert s==0 and o=='', (s, o) + + os.environ['BBB'] = 'Hi' + s, o=exec_command('echo "$BBB"',**kws) + assert s==0 and o=='Hi', (s, o) + + s, o=exec_command('echo "$BBB"',BBB='Hey',**kws) + assert s==0 and o=='Hey', (s, o) + + s, o=exec_command('echo "$BBB"',**kws) + assert s==0 and o=='Hi', (s, o) + + + s, o=exec_command('this_is_not_a_command',**kws) + assert s!=0 and o!='', (s, o) + + s, o=exec_command('echo path=$PATH',**kws) + assert s==0 and o!='', (s, o) + + s, o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws) + assert s==0 and o=='posix', (s, o) + + s, o=exec_command('python -c "raise \'Ignore me.\'"',**kws) + assert s==1 and o, (s, o) + + s, o=exec_command('python -c "import 
sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws) + assert s==0 and o=='012', (s, o) + + s, o=exec_command('python -c "import sys;sys.exit(15)"',**kws) + assert s==15 and o=='', (s, o) + + s, o=exec_command('python -c "print \'Heipa\'"',**kws) + assert s==0 and o=='Heipa', (s, o) + + print ('ok') + +def test_execute_in(**kws): + pythonexe = get_pythonexe() + tmpfile = temp_file_name() + fn = os.path.basename(tmpfile) + tmpdir = os.path.dirname(tmpfile) + f = open(tmpfile, 'w') + f.write('Hello') + f.close() + + s, o = exec_command('%s -c "print \'Ignore the following IOError:\','\ + 'open(%r,\'r\')"' % (pythonexe, fn),**kws) + assert s and o!='', (s, o) + s, o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe, fn), + execute_in = tmpdir,**kws) + assert s==0 and o=='Hello', (s, o) + os.remove(tmpfile) + print ('ok') + +def test_svn(**kws): + s, o = exec_command(['svn', 'status'],**kws) + assert s, (s, o) + print ('svn ok') + +def test_cl(**kws): + if os.name=='nt': + s, o = exec_command(['cl', '/V'],**kws) + assert s, (s, o) + print ('cl ok') + +if os.name=='posix': + test = test_posix +elif os.name in ['nt', 'dos']: + test = test_nt +else: + raise NotImplementedError('exec_command tests for ', os.name) + +############################################################ + +if __name__ == "__main__": + + test(use_tee=0) + test(use_tee=1) + test_execute_in(use_tee=0) + test_execute_in(use_tee=1) + test_svn(use_tee=1) + test_cl(use_tee=1) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/extension.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/extension.py new file mode 100644 index 0000000000000..344c66da02875 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/extension.py @@ -0,0 +1,90 @@ +"""distutils.extension + +Provides the Extension class, used to describe C/C++ extension +modules in setup scripts. + +Overridden to support f2py. + +""" +from __future__ import division, absolute_import, print_function + +import sys +import re +from distutils.extension import Extension as old_Extension + +if sys.version_info[0] >= 3: + basestring = str + + +cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match +fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match + +class Extension(old_Extension): + def __init__ (self, name, sources, + include_dirs=None, + define_macros=None, + undef_macros=None, + library_dirs=None, + libraries=None, + runtime_library_dirs=None, + extra_objects=None, + extra_compile_args=None, + extra_link_args=None, + export_symbols=None, + swig_opts=None, + depends=None, + language=None, + f2py_options=None, + module_dirs=None, + extra_f77_compile_args=None, + extra_f90_compile_args=None, + ): + old_Extension.__init__(self, name, [], + include_dirs, + define_macros, + undef_macros, + library_dirs, + libraries, + runtime_library_dirs, + extra_objects, + extra_compile_args, + extra_link_args, + export_symbols) + # Avoid assert statements checking that sources contains strings: + self.sources = sources + + # Python 2.4 distutils new features + self.swig_opts = swig_opts or [] + # swig_opts is assumed to be a list. Here we handle the case where it + # is specified as a string instead. 
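+        # For instance (illustrative), swig_opts='-c++ -shadow' is split
+        # into ['-c++', '-shadow'] by the branch below.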
+ if isinstance(self.swig_opts, basestring): + import warnings + msg = "swig_opts is specified as a string instead of a list" + warnings.warn(msg, SyntaxWarning) + self.swig_opts = self.swig_opts.split() + + # Python 2.3 distutils new features + self.depends = depends or [] + self.language = language + + # numpy_distutils features + self.f2py_options = f2py_options or [] + self.module_dirs = module_dirs or [] + self.extra_f77_compile_args = extra_f77_compile_args or [] + self.extra_f90_compile_args = extra_f90_compile_args or [] + + return + + def has_cxx_sources(self): + for source in self.sources: + if cxx_ext_re(str(source)): + return True + return False + + def has_f2py_sources(self): + for source in self.sources: + if fortran_pyf_ext_re(source): + return True + return False + +# class Extension diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/__init__.py new file mode 100644 index 0000000000000..0b1b1ee6d9a85 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/__init__.py @@ -0,0 +1,989 @@ +"""numpy.distutils.fcompiler + +Contains FCompiler, an abstract base class that defines the interface +for the numpy.distutils Fortran compiler abstraction model. + +Terminology: + +To be consistent, where the term 'executable' is used, it means the single +file, like 'gcc', that is executed, and should be a string. In contrast, +'command' means the entire command line, like ['gcc', '-c', 'file.c'], and +should be a list. + +But note that FCompiler.executables is actually a dictionary of commands. + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', + 'dummy_fortran_file'] + +import os +import sys +import re +import types +try: + set +except NameError: + from sets import Set as set + +from numpy.compat import open_latin1 + +from distutils.sysconfig import get_python_lib +from distutils.fancy_getopt import FancyGetopt +from distutils.errors import DistutilsModuleError, \ + DistutilsExecError, CompileError, LinkError, DistutilsPlatformError +from distutils.util import split_quoted, strtobool + +from numpy.distutils.ccompiler import CCompiler, gen_lib_options +from numpy.distutils import log +from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ + make_temp_file, get_shared_lib_extension +from numpy.distutils.environment import EnvironmentConfig +from numpy.distutils.exec_command import find_executable +from numpy.distutils.compat import get_exception + +__metaclass__ = type + +class CompilerNotFound(Exception): + pass + +def flaglist(s): + if is_string(s): + return split_quoted(s) + else: + return s + +def str2bool(s): + if is_string(s): + return strtobool(s) + return bool(s) + +def is_sequence_of_strings(seq): + return is_sequence(seq) and all_strings(seq) + +class FCompiler(CCompiler): + """Abstract base class to define the interface that must be implemented + by real Fortran compiler classes. 
+
+    Methods that subclasses may redefine:
+
+        update_executables(), find_executables(), get_version()
+        get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
+        get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
+        get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
+        get_flags_arch_f90(), get_flags_debug_f90(),
+        get_flags_fix(), get_flags_linker_so()
+
+    DON'T call these methods (except get_version) after
+    constructing a compiler instance or inside any other method.
+    All methods, except update_executables() and find_executables(),
+    may call the get_version() method.
+
+    After constructing a compiler instance, always call the
+    customize(dist=None) method, which finalizes compiler construction
+    and makes the following attributes available:
+      compiler_f77
+      compiler_f90
+      compiler_fix
+      linker_so
+      archiver
+      ranlib
+      libraries
+      library_dirs
+    """
+
+    # These are the environment variables and distutils keys used.
+    # Each configuration description is
+    # (<hook name>, <environment variable>, <distutils config key>, <convert>)
+    # The hook names are handled by the self._environment_hook method.
+    #  - names starting with 'self.' call methods in this class
+    #  - names starting with 'exe.' return the key in the executables dict
+    #  - names like 'flags.YYY' return self.get_flag_YYY()
+    # convert is either None or a function to convert a string to the
+    # appropriate type used.
+
+    distutils_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        noopt = (None, None, 'noopt', str2bool),
+        noarch = (None, None, 'noarch', str2bool),
+        debug = (None, None, 'debug', str2bool),
+        verbose = (None, None, 'verbose', str2bool),
+    )
+
+    command_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None),
+        compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None),
+        compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None),
+        version_cmd = ('exe.version_cmd', None, None, None),
+        linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None),
+        linker_exe = ('exe.linker_exe', 'LD', 'ld', None),
+        archiver = (None, 'AR', 'ar', None),
+        ranlib = (None, 'RANLIB', 'ranlib', None),
+    )
+
+    flag_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist),
+        f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist),
+        free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist),
+        fix = ('flags.fix', None, None, flaglist),
+        opt = ('flags.opt', 'FOPT', 'opt', flaglist),
+        opt_f77 = ('flags.opt_f77', None, None, flaglist),
+        opt_f90 = ('flags.opt_f90', None, None, flaglist),
+        arch = ('flags.arch', 'FARCH', 'arch', flaglist),
+        arch_f77 = ('flags.arch_f77', None, None, flaglist),
+        arch_f90 = ('flags.arch_f90', None, None, flaglist),
+        debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist),
+        debug_f77 = ('flags.debug_f77', None, None, flaglist),
+        debug_f90 = ('flags.debug_f90', None, None, flaglist),
+        flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist),
+        linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist),
+        linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist),
+        ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist),
+    )
+
+    language_map = {'.f': 'f77',
+                    '.for': 'f77',
+                    '.F': 'f77',    # XXX: needs preprocessor
+                    '.ftn': 'f77',
+                    '.f77': 'f77',
+                    '.f90': 'f90',
+                    '.F90': 'f90',  # XXX: needs preprocessor
+                    '.f95': 'f90',
+                    }
+    language_order = ['f90', 'f77']
+
+
+    # These will be set by the subclass
+
+    compiler_type = None
+    compiler_aliases = ()
+    version_pattern = None
+
+    possible_executables = []
+
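+    # The executables dict below holds command templates; subclasses
+    # override it (and possible_executables) with concrete commands.
+    # As a hypothetical illustration, a subclass might define
+    #   executables = {'compiler_f90': ["myf90", "-O"], ...}
+    # and let find_executables() locate 'myf90' on the PATH.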
executables = { + 'version_cmd': ["f77", "-v"], + 'compiler_f77': ["f77"], + 'compiler_f90': ["f90"], + 'compiler_fix': ["f90", "-fixed"], + 'linker_so': ["f90", "-shared"], + 'linker_exe': ["f90"], + 'archiver': ["ar", "-cr"], + 'ranlib': None, + } + + # If compiler does not support compiling Fortran 90 then it can + # suggest using another compiler. For example, gnu would suggest + # gnu95 compiler type when there are F90 sources. + suggested_f90_compiler = None + + compile_switch = "-c" + object_switch = "-o " # Ending space matters! It will be stripped + # but if it is missing then object_switch + # will be prefixed to object file name by + # string concatenation. + library_switch = "-o " # Ditto! + + # Switch to specify where module files are created and searched + # for USE statement. Normally it is a string and also here ending + # space matters. See above. + module_dir_switch = None + + # Switch to specify where module files are searched for USE statement. + module_include_switch = '-I' + + pic_flags = [] # Flags to create position-independent code + + src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] + obj_extension = ".o" + + shared_lib_extension = get_shared_lib_extension() + static_lib_extension = ".a" # or .lib + static_lib_format = "lib%s%s" # or %s%s + shared_lib_format = "%s%s" + exe_extension = "" + + _exe_cache = {} + + _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', + 'ranlib'] + + # This will be set by new_fcompiler when called in + # command/{build_ext.py, build_clib.py, config.py} files. + c_compiler = None + + # extra_{f77,f90}_compile_args are set by build_ext.build_extension method + extra_f77_compile_args = [] + extra_f90_compile_args = [] + + def __init__(self, *args, **kw): + CCompiler.__init__(self, *args, **kw) + self.distutils_vars = self.distutils_vars.clone(self._environment_hook) + self.command_vars = self.command_vars.clone(self._environment_hook) + self.flag_vars = self.flag_vars.clone(self._environment_hook) + self.executables = self.executables.copy() + for e in self._executable_keys: + if e not in self.executables: + self.executables[e] = None + + # Some methods depend on .customize() being called first, so + # this keeps track of whether that's happened yet. + self._is_customised = False + + def __copy__(self): + obj = self.__new__(self.__class__) + obj.__dict__.update(self.__dict__) + obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) + obj.command_vars = obj.command_vars.clone(obj._environment_hook) + obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) + obj.executables = obj.executables.copy() + return obj + + def copy(self): + return self.__copy__() + + # Use properties for the attributes used by CCompiler. Setting them + # as attributes from the self.executables dictionary is error-prone, + # so we get them from there each time. + def _command_property(key): + def fget(self): + assert self._is_customised + return self.executables[key] + return property(fget=fget) + version_cmd = _command_property('version_cmd') + compiler_f77 = _command_property('compiler_f77') + compiler_f90 = _command_property('compiler_f90') + compiler_fix = _command_property('compiler_fix') + linker_so = _command_property('linker_so') + linker_exe = _command_property('linker_exe') + archiver = _command_property('archiver') + ranlib = _command_property('ranlib') + + # Make our terminology consistent. 
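+    # set_executable() keeps the CCompiler spelling but forwards to
+    # set_command(); e.g. (illustrative)
+    #   fc.set_commands(compiler_f90='gfortran -g -O2')
+    # stores ['gfortran', '-g', '-O2'] in fc.executables['compiler_f90'].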
+    def set_executable(self, key, value):
+        self.set_command(key, value)
+
+    def set_commands(self, **kw):
+        for k, v in kw.items():
+            self.set_command(k, v)
+
+    def set_command(self, key, value):
+        if not key in self._executable_keys:
+            raise ValueError(
+                "unknown executable '%s' for class %s" %
+                (key, self.__class__.__name__))
+        if is_string(value):
+            value = split_quoted(value)
+        assert value is None or is_sequence_of_strings(value[1:]), (key, value)
+        self.executables[key] = value
+
+    ######################################################################
+    ## Methods that subclasses may redefine. But don't call these methods!
+    ## They are private to FCompiler class and may return unexpected
+    ## results if used elsewhere. So, you have been warned..
+
+    def find_executables(self):
+        """Go through the self.executables dictionary, and attempt to
+        find and assign appropriate executables.
+
+        Executable names are looked for in the environment (environment
+        variables, the distutils.cfg, and command line), the 0th-element of
+        the command list, and the self.possible_executables list.
+
+        Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
+        or the Fortran 90 compiler executable is used, unless overridden
+        by an environment setting.
+
+        Subclasses should call this if overridden.
+        """
+        assert self._is_customised
+        exe_cache = self._exe_cache
+        def cached_find_executable(exe):
+            if exe in exe_cache:
+                return exe_cache[exe]
+            fc_exe = find_executable(exe)
+            exe_cache[exe] = exe_cache[fc_exe] = fc_exe
+            return fc_exe
+        def verify_command_form(name, value):
+            if value is not None and not is_sequence_of_strings(value):
+                raise ValueError(
+                    "%s value %r is invalid in class %s" %
+                    (name, value, self.__class__.__name__))
+        def set_exe(exe_key, f77=None, f90=None):
+            cmd = self.executables.get(exe_key, None)
+            if not cmd:
+                return None
+            # Note that we get cmd[0] here if the environment doesn't
+            # have anything set
+            exe_from_environ = getattr(self.command_vars, exe_key)
+            if not exe_from_environ:
+                possibles = [f90, f77] + self.possible_executables
+            else:
+                possibles = [exe_from_environ] + self.possible_executables
+
+            seen = set()
+            unique_possibles = []
+            for e in possibles:
+                if e == '<F77>':
+                    e = f77
+                elif e == '<F90>':
+                    e = f90
+                if not e or e in seen:
+                    continue
+                seen.add(e)
+                unique_possibles.append(e)
+
+            for exe in unique_possibles:
+                fc_exe = cached_find_executable(exe)
+                if fc_exe:
+                    cmd[0] = fc_exe
+                    return fc_exe
+            self.set_command(exe_key, None)
+            return None
+
+        ctype = self.compiler_type
+        f90 = set_exe('compiler_f90')
+        if not f90:
+            f77 = set_exe('compiler_f77')
+            if f77:
+                log.warn('%s: no Fortran 90 compiler found' % ctype)
+            else:
+                raise CompilerNotFound('%s: f90 nor f77' % ctype)
+        else:
+            f77 = set_exe('compiler_f77', f90=f90)
+            if not f77:
+                log.warn('%s: no Fortran 77 compiler found' % ctype)
+            set_exe('compiler_fix', f90=f90)
+
+        set_exe('linker_so', f77=f77, f90=f90)
+        set_exe('linker_exe', f77=f77, f90=f90)
+        set_exe('version_cmd', f77=f77, f90=f90)
+        set_exe('archiver')
+        set_exe('ranlib')
+
+    def update_executables(self):
+        """Called at the beginning of customisation. Subclasses should
+        override this if they need to set up the executables dictionary.
+
+        Note that self.find_executables() is run afterwards, so the
+        self.executables dictionary values can contain <F77> or <F90> as
+        the command, which will be replaced by the found F77 or F90
+        compiler.
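+
+        For example, an illustrative subclass (not one from this file)
+        could set:
+
+            self.executables['version_cmd'] = ['<F90>', '--version']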
+ """ + pass + + def get_flags(self): + """List of flags common to all compiler types.""" + return [] + self.pic_flags + + def _get_command_flags(self, key): + cmd = self.executables.get(key, None) + if cmd is None: + return [] + return cmd[1:] + + def get_flags_f77(self): + """List of Fortran 77 specific flags.""" + return self._get_command_flags('compiler_f77') + def get_flags_f90(self): + """List of Fortran 90 specific flags.""" + return self._get_command_flags('compiler_f90') + def get_flags_free(self): + """List of Fortran 90 free format specific flags.""" + return [] + def get_flags_fix(self): + """List of Fortran 90 fixed format specific flags.""" + return self._get_command_flags('compiler_fix') + def get_flags_linker_so(self): + """List of linker flags to build a shared library.""" + return self._get_command_flags('linker_so') + def get_flags_linker_exe(self): + """List of linker flags to build an executable.""" + return self._get_command_flags('linker_exe') + def get_flags_ar(self): + """List of archiver flags. """ + return self._get_command_flags('archiver') + def get_flags_opt(self): + """List of architecture independent compiler flags.""" + return [] + def get_flags_arch(self): + """List of architecture dependent compiler flags.""" + return [] + def get_flags_debug(self): + """List of compiler flags to compile with debugging information.""" + return [] + + get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt + get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch + get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug + + def get_libraries(self): + """List of compiler libraries.""" + return self.libraries[:] + def get_library_dirs(self): + """List of compiler library directories.""" + return self.library_dirs[:] + + def get_version(self, force=False, ok_status=[0]): + assert self._is_customised + version = CCompiler.get_version(self, force=force, ok_status=ok_status) + if version is None: + raise CompilerNotFound() + return version + + ############################################################ + + ## Public methods: + + def customize(self, dist = None): + """Customize Fortran compiler. + + This method gets Fortran compiler specific information from + (i) class definition, (ii) environment, (iii) distutils config + files, and (iv) command line (later overrides earlier). + + This method should be always called after constructing a + compiler instance. But not in __init__ because Distribution + instance is needed for (iii) and (iv). + """ + log.info('customize %s' % (self.__class__.__name__)) + + self._is_customised = True + + self.distutils_vars.use_distribution(dist) + self.command_vars.use_distribution(dist) + self.flag_vars.use_distribution(dist) + + self.update_executables() + + # find_executables takes care of setting the compiler commands, + # version_cmd, linker_so, linker_exe, ar, and ranlib + self.find_executables() + + noopt = self.distutils_vars.get('noopt', False) + noarch = self.distutils_vars.get('noarch', noopt) + debug = self.distutils_vars.get('debug', False) + + f77 = self.command_vars.compiler_f77 + f90 = self.command_vars.compiler_f90 + + f77flags = [] + f90flags = [] + freeflags = [] + fixflags = [] + + if f77: + f77flags = self.flag_vars.f77 + if f90: + f90flags = self.flag_vars.f90 + freeflags = self.flag_vars.free + # XXX Assuming that free format is default for f90 compiler. 
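+        # (The flag variables read here honor environment overrides; e.g.,
+        # illustratively, F90FLAGS="-O0 -g" surfaces via self.flag_vars.f90
+        # above.)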
+ fix = self.command_vars.compiler_fix + if fix: + fixflags = self.flag_vars.fix + f90flags + + oflags, aflags, dflags = [], [], [] + # examine get_flags__ for extra flags + # only add them if the method is different from get_flags_ + def get_flags(tag, flags): + # note that self.flag_vars. calls self.get_flags_() + flags.extend(getattr(self.flag_vars, tag)) + this_get = getattr(self, 'get_flags_' + tag) + for name, c, flagvar in [('f77', f77, f77flags), + ('f90', f90, f90flags), + ('f90', fix, fixflags)]: + t = '%s_%s' % (tag, name) + if c and this_get is not getattr(self, 'get_flags_' + t): + flagvar.extend(getattr(self.flag_vars, t)) + if not noopt: + get_flags('opt', oflags) + if not noarch: + get_flags('arch', aflags) + if debug: + get_flags('debug', dflags) + + fflags = self.flag_vars.flags + dflags + oflags + aflags + + if f77: + self.set_commands(compiler_f77=[f77]+f77flags+fflags) + if f90: + self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags) + if fix: + self.set_commands(compiler_fix=[fix]+fixflags+fflags) + + + #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS + linker_so = self.linker_so + if linker_so: + linker_so_flags = self.flag_vars.linker_so + if sys.platform.startswith('aix'): + python_lib = get_python_lib(standard_lib=1) + ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') + python_exp = os.path.join(python_lib, 'config', 'python.exp') + linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] + self.set_commands(linker_so=linker_so+linker_so_flags) + + linker_exe = self.linker_exe + if linker_exe: + linker_exe_flags = self.flag_vars.linker_exe + self.set_commands(linker_exe=linker_exe+linker_exe_flags) + + ar = self.command_vars.archiver + if ar: + arflags = self.flag_vars.ar + self.set_commands(archiver=[ar]+arflags) + + self.set_library_dirs(self.get_library_dirs()) + self.set_libraries(self.get_libraries()) + + def dump_properties(self): + """Print out the attributes of a compiler instance.""" + props = [] + for key in list(self.executables.keys()) + \ + ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch']: + if hasattr(self, key): + v = getattr(self, key) + props.append((key, None, '= '+repr(v))) + props.sort() + + pretty_printer = FancyGetopt(props) + for l in pretty_printer.generate_help("%s instance properties:" \ + % (self.__class__.__name__)): + if l[:4]==' --': + l = ' ' + l[4:] + print(l) + + ################### + + def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile 'src' to product 'obj'.""" + src_flags = {} + if is_f_file(src) and not has_f90_header(src): + flavor = ':f77' + compiler = self.compiler_f77 + src_flags = get_f77flags(src) + extra_compile_args = self.extra_f77_compile_args or [] + elif is_free_format(src): + flavor = ':f90' + compiler = self.compiler_f90 + if compiler is None: + raise DistutilsExecError('f90 not supported by %s needed for %s'\ + % (self.__class__.__name__, src)) + extra_compile_args = self.extra_f90_compile_args or [] + else: + flavor = ':fix' + compiler = self.compiler_fix + if compiler is None: + raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ + % (self.__class__.__name__, src)) + extra_compile_args = self.extra_f90_compile_args or [] + if self.object_switch[-1]==' ': + o_args = [self.object_switch.strip(), obj] + else: + o_args = [self.object_switch.strip()+obj] + + assert self.compile_switch.strip() + s_args = [self.compile_switch, src] + + if extra_compile_args: + log.info('extra %s options: %r' \ + % (flavor[1:], ' 
'.join(extra_compile_args))) + + extra_flags = src_flags.get(self.compiler_type, []) + if extra_flags: + log.info('using compile options from source: %r' \ + % ' '.join(extra_flags)) + + command = compiler + cc_args + extra_flags + s_args + o_args \ + + extra_postargs + extra_compile_args + + display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, + src) + try: + self.spawn(command, display=display) + except DistutilsExecError: + msg = str(get_exception()) + raise CompileError(msg) + + def module_options(self, module_dirs, module_build_dir): + options = [] + if self.module_dir_switch is not None: + if self.module_dir_switch[-1]==' ': + options.extend([self.module_dir_switch.strip(), module_build_dir]) + else: + options.append(self.module_dir_switch.strip()+module_build_dir) + else: + print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) + print('XXX: Fix module_dir_switch for ', self.__class__.__name__) + if self.module_include_switch is not None: + for d in [module_build_dir]+module_dirs: + options.append('%s%s' % (self.module_include_switch, d)) + else: + print('XXX: module_dirs=%r option ignored' % (module_dirs)) + print('XXX: Fix module_include_switch for ', self.__class__.__name__) + return options + + def library_option(self, lib): + return "-l" + lib + def library_dir_option(self, dir): + return "-L" + dir + + def link(self, target_desc, objects, + output_filename, output_dir=None, libraries=None, + library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None): + objects, output_dir = self._fix_object_args(objects, output_dir) + libraries, library_dirs, runtime_library_dirs = \ + self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) + + lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, + libraries) + if is_string(output_dir): + output_filename = os.path.join(output_dir, output_filename) + elif output_dir is not None: + raise TypeError("'output_dir' must be a string or None") + + if self._need_link(objects, output_filename): + if self.library_switch[-1]==' ': + o_args = [self.library_switch.strip(), output_filename] + else: + o_args = [self.library_switch.strip()+output_filename] + + if is_string(self.objects): + ld_args = objects + [self.objects] + else: + ld_args = objects + self.objects + ld_args = ld_args + lib_opts + o_args + if debug: + ld_args[:0] = ['-g'] + if extra_preargs: + ld_args[:0] = extra_preargs + if extra_postargs: + ld_args.extend(extra_postargs) + self.mkpath(os.path.dirname(output_filename)) + if target_desc == CCompiler.EXECUTABLE: + linker = self.linker_exe[:] + else: + linker = self.linker_so[:] + command = linker + ld_args + try: + self.spawn(command) + except DistutilsExecError: + msg = str(get_exception()) + raise LinkError(msg) + else: + log.debug("skipping %s (up-to-date)", output_filename) + + def _environment_hook(self, name, hook_name): + if hook_name is None: + return None + if is_string(hook_name): + if hook_name.startswith('self.'): + hook_name = hook_name[5:] + hook = getattr(self, hook_name) + return hook() + elif hook_name.startswith('exe.'): + hook_name = hook_name[4:] + var = self.executables[hook_name] + if var: + return var[0] + else: + return None + elif hook_name.startswith('flags.'): + hook_name = hook_name[6:] + hook = getattr(self, 'get_flags_' + hook_name) + return hook() + else: + return hook_name() + + ## class FCompiler + +_default_compilers = ( + # sys.platform mappings + ('win32', 
('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', + 'intelvem', 'intelem')), + ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), + ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', + 'intele', 'intelem', 'gnu', 'g95', 'pathf95')), + ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), + ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), + ('irix.*', ('mips', 'gnu', 'gnu95',)), + ('aix.*', ('ibm', 'gnu', 'gnu95',)), + # os.name mappings + ('posix', ('gnu', 'gnu95',)), + ('nt', ('gnu', 'gnu95',)), + ('mac', ('gnu95', 'gnu', 'pg')), + ) + +fcompiler_class = None +fcompiler_aliases = None + +def load_all_fcompiler_classes(): + """Cache all the FCompiler classes found in modules in the + numpy.distutils.fcompiler package. + """ + from glob import glob + global fcompiler_class, fcompiler_aliases + if fcompiler_class is not None: + return + pys = os.path.join(os.path.dirname(__file__), '*.py') + fcompiler_class = {} + fcompiler_aliases = {} + for fname in glob(pys): + module_name, ext = os.path.splitext(os.path.basename(fname)) + module_name = 'numpy.distutils.fcompiler.' + module_name + __import__ (module_name) + module = sys.modules[module_name] + if hasattr(module, 'compilers'): + for cname in module.compilers: + klass = getattr(module, cname) + desc = (klass.compiler_type, klass, klass.description) + fcompiler_class[klass.compiler_type] = desc + for alias in klass.compiler_aliases: + if alias in fcompiler_aliases: + raise ValueError("alias %r defined for both %s and %s" + % (alias, klass.__name__, + fcompiler_aliases[alias][1].__name__)) + fcompiler_aliases[alias] = desc + +def _find_existing_fcompiler(compiler_types, + osname=None, platform=None, + requiref90=False, + c_compiler=None): + from numpy.distutils.core import get_distribution + dist = get_distribution(always=True) + for compiler_type in compiler_types: + v = None + try: + c = new_fcompiler(plat=platform, compiler=compiler_type, + c_compiler=c_compiler) + c.customize(dist) + v = c.get_version() + if requiref90 and c.compiler_f90 is None: + v = None + new_compiler = c.suggested_f90_compiler + if new_compiler: + log.warn('Trying %r compiler as suggested by %r ' + 'compiler for f90 support.' % (compiler_type, + new_compiler)) + c = new_fcompiler(plat=platform, compiler=new_compiler, + c_compiler=c_compiler) + c.customize(dist) + v = c.get_version() + if v is not None: + compiler_type = new_compiler + if requiref90 and c.compiler_f90 is None: + raise ValueError('%s does not support compiling f90 codes, ' + 'skipping.' 
% (c.__class__.__name__)) + except DistutilsModuleError: + log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) + except CompilerNotFound: + log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) + if v is not None: + return compiler_type + return None + +def available_fcompilers_for_platform(osname=None, platform=None): + if osname is None: + osname = os.name + if platform is None: + platform = sys.platform + matching_compiler_types = [] + for pattern, compiler_type in _default_compilers: + if re.match(pattern, platform) or re.match(pattern, osname): + for ct in compiler_type: + if ct not in matching_compiler_types: + matching_compiler_types.append(ct) + if not matching_compiler_types: + matching_compiler_types.append('gnu') + return matching_compiler_types + +def get_default_fcompiler(osname=None, platform=None, requiref90=False, + c_compiler=None): + """Determine the default Fortran compiler to use for the given + platform.""" + matching_compiler_types = available_fcompilers_for_platform(osname, + platform) + compiler_type = _find_existing_fcompiler(matching_compiler_types, + osname=osname, + platform=platform, + requiref90=requiref90, + c_compiler=c_compiler) + return compiler_type + +# Flag to avoid rechecking for Fortran compiler every time +failed_fcompilers = set() + +def new_fcompiler(plat=None, + compiler=None, + verbose=0, + dry_run=0, + force=0, + requiref90=False, + c_compiler = None): + """Generate an instance of some FCompiler subclass for the supplied + platform/compiler combination. + """ + global failed_fcompilers + fcompiler_key = (plat, compiler) + if fcompiler_key in failed_fcompilers: + return None + + load_all_fcompiler_classes() + if plat is None: + plat = os.name + if compiler is None: + compiler = get_default_fcompiler(plat, requiref90=requiref90, + c_compiler=c_compiler) + if compiler in fcompiler_class: + module_name, klass, long_description = fcompiler_class[compiler] + elif compiler in fcompiler_aliases: + module_name, klass, long_description = fcompiler_aliases[compiler] + else: + msg = "don't know how to compile Fortran code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler." % compiler + msg = msg + " Supported compilers are: %s)" \ + % (','.join(fcompiler_class.keys())) + log.warn(msg) + failed_fcompilers.add(fcompiler_key) + return None + + compiler = klass(verbose=verbose, dry_run=dry_run, force=force) + compiler.c_compiler = c_compiler + return compiler + +def show_fcompilers(dist=None): + """Print list of available compilers (used by the "--help-fcompiler" + option to "config_fc"). 
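+
+    Typically reached via (illustrative):
+
+        python setup.py config_fc --help-fcompiler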
+    """
+    if dist is None:
+        from distutils.dist import Distribution
+        from numpy.distutils.command.config_compiler import config_fc
+        dist = Distribution()
+        dist.script_name = os.path.basename(sys.argv[0])
+        dist.script_args = ['config_fc'] + sys.argv[1:]
+        try:
+            dist.script_args.remove('--help-fcompiler')
+        except ValueError:
+            pass
+        dist.cmdclass['config_fc'] = config_fc
+        dist.parse_config_files()
+        dist.parse_command_line()
+    compilers = []
+    compilers_na = []
+    compilers_ni = []
+    if not fcompiler_class:
+        load_all_fcompiler_classes()
+    platform_compilers = available_fcompilers_for_platform()
+    for compiler in platform_compilers:
+        v = None
+        log.set_verbosity(-2)
+        try:
+            c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
+            c.customize(dist)
+            v = c.get_version()
+        except (DistutilsModuleError, CompilerNotFound):
+            e = get_exception()
+            log.debug("show_fcompilers: %s not found" % (compiler,))
+            log.debug(repr(e))
+
+        if v is None:
+            compilers_na.append(("fcompiler="+compiler, None,
+                                 fcompiler_class[compiler][2]))
+        else:
+            c.dump_properties()
+            compilers.append(("fcompiler="+compiler, None,
+                              fcompiler_class[compiler][2] + ' (%s)' % v))
+
+    compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
+    compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
+                    for fc in compilers_ni]
+
+    compilers.sort()
+    compilers_na.sort()
+    compilers_ni.sort()
+    pretty_printer = FancyGetopt(compilers)
+    pretty_printer.print_help("Fortran compilers found:")
+    pretty_printer = FancyGetopt(compilers_na)
+    pretty_printer.print_help("Compilers available for this "
+                              "platform, but not found:")
+    if compilers_ni:
+        pretty_printer = FancyGetopt(compilers_ni)
+        pretty_printer.print_help("Compilers not available on this platform:")
+    print("For compiler details, run 'config_fc --verbose' setup command.")
+
+
+def dummy_fortran_file():
+    fo, name = make_temp_file(suffix='.f')
+    fo.write("      subroutine dummy()\n      end\n")
+    fo.close()
+    return name[:-2]
+
+
+is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
+_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
+_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
+_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
+_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match
+
+def is_free_format(file):
+    """Check if file is in free format Fortran."""
+    # f90 allows both fixed and free format, assuming fixed unless
+    # signs of free format are detected.
+    result = 0
+    f = open_latin1(file, 'r')
+    line = f.readline()
+    n = 10000 # the number of non-comment lines to scan for hints
+    if _has_f_header(line):
+        n = 0
+    elif _has_f90_header(line):
+        n = 0
+        result = 1
+    while n>0 and line:
+        line = line.rstrip()
+        if line and line[0]!='!':
+            n -= 1
+            if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
+                result = 1
+                break
+        line = f.readline()
+    f.close()
+    return result
+
+def has_f90_header(src):
+    f = open_latin1(src, 'r')
+    line = f.readline()
+    f.close()
+    return _has_f90_header(line) or _has_fix_header(line)
+
+_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
+def get_f77flags(src):
+    """
+    Search the first 20 lines of fortran 77 code for line pattern
+      `CF77FLAGS(<fcompiler type>)=<f77 flags>`
+    Return a dictionary {<fcompiler type>:<f77 flags>}.
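+
+    For example, a source line such as (illustrative)
+
+        CF77FLAGS(gnu) = -fno-automatic
+
+    yields {'gnu': ['-fno-automatic']} for the 'gnu' (g77) compiler type.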
+    """
+    flags = {}
+    f = open_latin1(src, 'r')
+    i = 0
+    for line in f:
+        i += 1
+        if i>20: break
+        m = _f77flags_re.match(line)
+        if not m: continue
+        fcname = m.group('fcname').strip()
+        fflags = m.group('fflags').strip()
+        flags[fcname] = split_quoted(fflags)
+    f.close()
+    return flags
+
+# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
+
+if __name__ == '__main__':
+    show_fcompilers()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/absoft.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/absoft.py
new file mode 100644
index 0000000000000..bde0529bea082
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/absoft.py
@@ -0,0 +1,160 @@
+
+# http://www.absoft.com/literature/osxuserguide.pdf
+# http://www.absoft.com/documentation.html
+
+# Notes:
+# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
+#   generated extension modules (works for f2py v2.45.241_1936 and up)
+from __future__ import division, absolute_import, print_function
+
+import os
+
+from numpy.distutils.cpuinfo import cpu
+from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from numpy.distutils.misc_util import cyg2win32
+
+compilers = ['AbsoftFCompiler']
+
+class AbsoftFCompiler(FCompiler):
+
+    compiler_type = 'absoft'
+    description = 'Absoft Corp Fortran Compiler'
+    #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
+    version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
+                      r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
+
+    # on windows: f90 -V -c dummy.f
+    # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006  13:05:16
+
+    # samt5735(8)$ f90 -V -c dummy.f
+    # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
+    # Note that fink installs g77 as f77, so need to use f90 for detection.
+
+    executables = {
+        'version_cmd'  : None,          # set by update_executables
+        'compiler_f77' : ["f77"],
+        'compiler_fix' : ["f90"],
+        'compiler_f90' : ["f90"],
+        'linker_so'    : ["<F90>"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+
+    if os.name=='nt':
+        library_switch = '/out:'      #No space after /out:!
+
+    module_dir_switch = None
+    module_include_switch = '-p'
+
+    def update_executables(self):
+        f = cyg2win32(dummy_fortran_file())
+        self.executables['version_cmd'] = ['<F90>', '-V', '-c',
+                                           f+'.f', '-o', f+'.o']
+
+    def get_flags_linker_so(self):
+        if os.name=='nt':
+            opt = ['/dll']
+        # The "-K shared" switches are being left in for pre-9.0 versions
+        # of Absoft though I don't think versions earlier than 9 can
+        # actually be used to build shared libraries.  In fact, version
+        # 8 of Absoft doesn't recognize "-K shared" and will fail.
+        elif self.get_version() >= '9.0':
+            opt = ['-shared']
+        else:
+            opt = ["-K", "shared"]
+        return opt
+
+    def library_dir_option(self, dir):
+        if os.name=='nt':
+            return ['-link', '/PATH:"%s"' % (dir)]
+        return "-L" + dir
+
+    def library_option(self, lib):
+        if os.name=='nt':
+            return '%s.lib' % (lib)
+        return "-l" + lib
+
+    def get_library_dirs(self):
+        opt = FCompiler.get_library_dirs(self)
+        d = os.environ.get('ABSOFT')
+        if d:
+            if self.get_version() >= '10.0':
+                # use shared libraries, the static libraries were not compiled -fPIC
+                prefix = 'sh'
+            else:
+                prefix = ''
+            if cpu.is_64bit():
+                suffix = '64'
+            else:
+                suffix = ''
+            opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
+        return opt
+
+    def get_libraries(self):
+        opt = FCompiler.get_libraries(self)
+        if self.get_version() >= '11.0':
+            opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
+        elif self.get_version() >= '10.0':
+            opt.extend(['af90math', 'afio', 'af77math', 'U77'])
+        elif self.get_version() >= '8.0':
+            opt.extend(['f90math', 'fio', 'f77math', 'U77'])
+        else:
+            opt.extend(['fio', 'f90math', 'fmath', 'U77'])
+        if os.name =='nt':
+            opt.append('COMDLG32')
+        return opt
+
+    def get_flags(self):
+        opt = FCompiler.get_flags(self)
+        if os.name != 'nt':
+            opt.extend(['-s'])
+            if self.get_version():
+                if self.get_version()>='8.2':
+                    opt.append('-fpic')
+        return opt
+
+    def get_flags_f77(self):
+        opt = FCompiler.get_flags_f77(self)
+        opt.extend(['-N22', '-N90', '-N110'])
+        v = self.get_version()
+        if os.name == 'nt':
+            if v and v>='8.0':
+                opt.extend(['-f', '-N15'])
+        else:
+            opt.append('-f')
+            if v:
+                if v<='4.6':
+                    opt.append('-B108')
+                else:
+                    # Though -N15 is undocumented, it works with
+                    # Absoft 8.0 on Linux
+                    opt.append('-N15')
+        return opt
+
+    def get_flags_f90(self):
+        opt = FCompiler.get_flags_f90(self)
+        opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
+                    "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
+        if self.get_version():
+            if self.get_version()>'4.6':
+                opt.extend(["-YDEALLOC=ALL"])
+        return opt
+
+    def get_flags_fix(self):
+        opt = FCompiler.get_flags_fix(self)
+        opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
+                    "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
+        opt.extend(["-f", "fixed"])
+        return opt
+
+    def get_flags_opt(self):
+        opt = ['-O']
+        return opt
+
+if __name__ == '__main__':
+    from distutils import log
+    log.set_verbosity(2)
+    from numpy.distutils.fcompiler import new_fcompiler
+    compiler = new_fcompiler(compiler='absoft')
+    compiler.customize()
+    print(compiler.get_version())
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/compaq.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/compaq.py
new file mode 100644
index 0000000000000..5162b168c1609
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/compaq.py
@@ -0,0 +1,128 @@
+
+#http://www.compaq.com/fortran/docs/
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+
+from numpy.distutils.fcompiler import FCompiler
+from numpy.distutils.compat import get_exception
+from distutils.errors import DistutilsPlatformError
+
+compilers = ['CompaqFCompiler']
+if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
+    # Otherwise we'd get a false positive on posix systems with
+    # case-insensitive filesystems (like darwin), because we'll pick
+    # up /bin/df
+    compilers.append('CompaqVisualFCompiler')
+
+class CompaqFCompiler(FCompiler):
+
+    compiler_type = 'compaq'
+    description = 'Compaq Fortran Compiler'
+    version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
+
+    if sys.platform[:5]=='linux':
+        fc_exe = 'fort'
+    else:
+        fc_exe = 'f90'
+
+    executables = {
+        'version_cmd'  : ['<F90>', "-version"],
+        'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
+        'compiler_fix' : [fc_exe, "-fixed"],
+        'compiler_f90' : [fc_exe],
+        'linker_so'    : ['<F90>'],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+
+    module_dir_switch = '-module ' # not tested
+    module_include_switch = '-I'
+
+    def get_flags(self):
+        return ['-assume no2underscore', '-nomixed_str_len_arg']
+    def get_flags_debug(self):
+        return ['-g', '-check bounds']
+    def get_flags_opt(self):
+        return ['-O4', '-align dcommons', '-assume bigarrays',
+                '-assume nozsize', '-math_library fast']
+    def get_flags_arch(self):
+        return ['-arch host', '-tune host']
+    def get_flags_linker_so(self):
+        if sys.platform[:5]=='linux':
+            return ['-shared']
+        return ['-shared', '-Wl,-expect_unresolved,*']
+
+class CompaqVisualFCompiler(FCompiler):
+
+    compiler_type = 'compaqv'
+    description = 'DIGITAL or Compaq Visual Fortran Compiler'
+    version_pattern = r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'\
+                      ' Version (?P<version>[^\s]*).*'
+
+    compile_switch = '/compile_only'
+    object_switch = '/object:'
+    library_switch = '/OUT:'      #No space after /OUT:!
+
+    static_lib_extension = ".lib"
+    static_lib_format = "%s%s"
+    module_dir_switch = '/module:'
+    module_include_switch = '/I'
+
+    ar_exe = 'lib.exe'
+    fc_exe = 'DF'
+
+    if sys.platform=='win32':
+        from distutils.msvccompiler import MSVCCompiler
+
+        try:
+            m = MSVCCompiler()
+            m.initialize()
+            ar_exe = m.lib
+        except DistutilsPlatformError:
+            pass
+        except AttributeError:
+            msg = get_exception()
+            if '_MSVCCompiler__root' in str(msg):
+                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg))
+            else:
+                raise
+        except IOError:
+            e = get_exception()
+            if not "vcvarsall.bat" in str(e):
+                print("Unexpected IOError in", __file__)
+                raise e
+        except ValueError:
+            e = get_exception()
+            if not "path']" in str(e):
+                print("Unexpected ValueError in", __file__)
+                raise e
+
+    executables = {
+        'version_cmd'  : ['<F90>', "/what"],
+        'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
+        'compiler_fix' : [fc_exe, "/fixed"],
+        'compiler_f90' : [fc_exe],
+        'linker_so'    : ['<F90>'],
+        'archiver'     : [ar_exe, "/OUT:"],
+        'ranlib'       : None
+        }
+
+    def get_flags(self):
+        return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
+                '/names:lowercase', '/assume:underscore']
+    def get_flags_opt(self):
+        return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
+    def get_flags_arch(self):
+        return ['/threads']
+    def get_flags_debug(self):
+        return ['/debug']
+
+if __name__ == '__main__':
+    from distutils import log
+    log.set_verbosity(2)
+    from numpy.distutils.fcompiler import new_fcompiler
+    compiler = new_fcompiler(compiler='compaq')
+    compiler.customize()
+    print(compiler.get_version())
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/g95.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/g95.py
new file mode 100644
index 0000000000000..26f73b530e84c
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/g95.py
@@ -0,0 +1,45 @@
+# http://g95.sourceforge.net/
+from __future__ import division, absolute_import, print_function
+
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['G95FCompiler']
+
+class G95FCompiler(FCompiler):
+    compiler_type = 'g95'
+    description = 'G95 Fortran Compiler'
+
+#    version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
+    # $ g95 --version
+    # G95 (GCC 4.0.3 (g95!) May 22 2006)
+
+    version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
+    # $ g95 --version
+    # G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)
+
+    executables = {
+        'version_cmd'  : ["<F90>", "--version"],
+        'compiler_f77' : ["g95", "-ffixed-form"],
+        'compiler_fix' : ["g95", "-ffixed-form"],
+        'compiler_f90' : ["g95"],
+        'linker_so'    : ["<F90>", "-shared"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+    pic_flags = ['-fpic']
+    module_dir_switch = '-fmod='
+    module_include_switch = '-I'
+
+    def get_flags(self):
+        return ['-fno-second-underscore']
+    def get_flags_opt(self):
+        return ['-O']
+    def get_flags_debug(self):
+        return ['-g']
+
+if __name__ == '__main__':
+    from distutils import log
+    log.set_verbosity(2)
+    compiler = G95FCompiler()
+    compiler.customize()
+    print(compiler.get_version())
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/gnu.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/gnu.py
new file mode 100644
index 0000000000000..368506470ad43
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/gnu.py
@@ -0,0 +1,390 @@
+from __future__ import division, absolute_import, print_function
+
+import re
+import os
+import sys
+import warnings
+import platform
+import tempfile
+from subprocess import Popen, PIPE, STDOUT
+
+from numpy.distutils.cpuinfo import cpu
+from numpy.distutils.fcompiler import FCompiler
+from numpy.distutils.exec_command import exec_command
+from numpy.distutils.misc_util import msvc_runtime_library
+from numpy.distutils.compat import get_exception
+
+compilers = ['GnuFCompiler', 'Gnu95FCompiler']
+
+TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)")
+
+# XXX: handle cross compilation
+def is_win64():
+    return sys.platform == "win32" and platform.architecture()[0] == "64bit"
+
+if is_win64():
+    #_EXTRAFLAGS = ["-fno-leading-underscore"]
+    _EXTRAFLAGS = []
+else:
+    _EXTRAFLAGS = []
+
+class GnuFCompiler(FCompiler):
+    compiler_type = 'gnu'
+    compiler_aliases = ('g77',)
+    description = 'GNU Fortran 77 compiler'
+
+    def gnu_version_match(self, version_string):
+        """Handle the different versions of GNU fortran compilers"""
+        m = re.search(r'GNU Fortran', version_string)
+        if not m:
+            return None
+        m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
+        if m:
+            return ('gfortran', m.group(1))
+        m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
+        if m:
+            v = m.group(1)
+            if v.startswith('0') or v.startswith('2') or v.startswith('3'):
+                # the '0' is for early g77's
+                return ('g77', v)
+            else:
+                # at some point in the 4.x series, the ' 95' was dropped
+                # from the version string
+                return ('gfortran', v)
+
+    def version_match(self, version_string):
+        v = self.gnu_version_match(version_string)
+        if not v or v[0] != 'g77':
+            return None
+        return v[1]
+
+    # 'g77 --version' results
+    # SunOS: GNU Fortran (GCC 3.2) 3.2 20020814 (release)
+    # Debian: GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)
+    #         GNU Fortran (GCC) 3.3.3 (Debian 20040401)
+    #         GNU Fortran 0.5.25 20010319 (prerelease)
+    # Redhat: GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2 20030222 (Red Hat Linux 3.2.2-5)
+    # GNU Fortran (GCC) 3.4.2 (mingw-special)
+
+    possible_executables = ['g77', 'f77']
+    executables = {
+        'version_cmd'  : [None, "--version"],
+        'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
+        'compiler_f90' : None,  # Use --fcompiler=gnu95 for f90 codes
+        'compiler_fix' : None,
+        'linker_so'
: [None, "-g", "-Wall"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-g", "-Wall"] + } + module_dir_switch = None + module_include_switch = None + + # Cygwin: f771: warning: -fPIC ignored for target (all code is + # position independent) + if os.name != 'nt' and sys.platform != 'cygwin': + pic_flags = ['-fPIC'] + + # use -mno-cygwin for g77 when Python is not Cygwin-Python + if sys.platform == 'win32': + for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: + executables[key].append('-mno-cygwin') + + g2c = 'g2c' + + suggested_f90_compiler = 'gnu95' + + #def get_linker_so(self): + # # win32 linking should be handled by standard linker + # # Darwin g77 cannot be used as a linker. + # #if re.match(r'(darwin)', sys.platform): + # # return + # return FCompiler.get_linker_so(self) + + def get_flags_linker_so(self): + opt = self.linker_so[1:] + if sys.platform=='darwin': + target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. But, distutils will complain if the + # environment's value is different from the one in the Python + # Makefile used to build Python. We let disutils handle this + # error checking. + if not target: + # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, + # we try to get it first from the Python Makefile and then we + # fall back to setting it to 10.3 to maximize the set of + # versions we can work with. This is a reasonable default + # even when using the official Python dist and those derived + # from it. + import distutils.sysconfig as sc + g = {} + filename = sc.get_makefile_filename() + sc.parse_makefile(filename, g) + target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') + os.environ['MACOSX_DEPLOYMENT_TARGET'] = target + if target == '10.3': + s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' + warnings.warn(s) + + opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) + else: + opt.append("-shared") + if sys.platform.startswith('sunos'): + # SunOS often has dynamically loaded symbols defined in the + # static library libg2c.a The linker doesn't like this. To + # ignore the problem, use the -mimpure-text flag. It isn't + # the safest thing, but seems to work. 'man gcc' says: + # ".. Instead of using -mimpure-text, you should compile all + # source code with -fpic or -fPIC." 
+ opt.append('-mimpure-text') + return opt + + def get_libgcc_dir(self): + status, output = exec_command(self.compiler_f77 + + ['-print-libgcc-file-name'], + use_tee=0) + if not status: + return os.path.dirname(output) + return None + + def get_library_dirs(self): + opt = [] + if sys.platform[:5] != 'linux': + d = self.get_libgcc_dir() + if d: + # if windows and not cygwin, libg2c lies in a different folder + if sys.platform == 'win32' and not d.startswith('/usr/lib'): + d = os.path.normpath(d) + if not os.path.exists(os.path.join(d, "lib%s.a" % self.g2c)): + d2 = os.path.abspath(os.path.join(d, + '../../../../lib')) + if os.path.exists(os.path.join(d2, "lib%s.a" % self.g2c)): + opt.append(d2) + opt.append(d) + return opt + + def get_libraries(self): + opt = [] + d = self.get_libgcc_dir() + if d is not None: + g2c = self.g2c + '-pic' + f = self.static_lib_format % (g2c, self.static_lib_extension) + if not os.path.isfile(os.path.join(d, f)): + g2c = self.g2c + else: + g2c = self.g2c + + if g2c is not None: + opt.append(g2c) + c_compiler = self.c_compiler + if sys.platform == 'win32' and c_compiler and \ + c_compiler.compiler_type=='msvc': + # the following code is not needed (read: breaks) when using MinGW + # in case want to link F77 compiled code with MSVC + opt.append('gcc') + runtime_lib = msvc_runtime_library() + if runtime_lib: + opt.append(runtime_lib) + if sys.platform == 'darwin': + opt.append('cc_dynamic') + return opt + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + v = self.get_version() + if v and v<='3.3.3': + # With this compiler version building Fortran BLAS/LAPACK + # with -O3 caused failures in lib.lapack heevr,syevr tests. + opt = ['-O2'] + else: + opt = ['-O3'] + opt.append('-funroll-loops') + return opt + + def _c_arch_flags(self): + """ Return detected arch flags from CFLAGS """ + from distutils import sysconfig + try: + cflags = sysconfig.get_config_vars()['CFLAGS'] + except KeyError: + return [] + arch_re = re.compile(r"-arch\s+(\w+)") + arch_flags = [] + for arch in arch_re.findall(cflags): + arch_flags += ['-arch', arch] + return arch_flags + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + return '-Wl,-rpath="%s"' % dir + +class Gnu95FCompiler(GnuFCompiler): + compiler_type = 'gnu95' + compiler_aliases = ('gfortran',) + description = 'GNU Fortran 95 compiler' + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'gfortran': + return None + v = v[1] + if v>='4.': + # gcc-4 series releases do not support -mno-cygwin option + pass + else: + # use -mno-cygwin flag for gfortran when Python is not Cygwin-Python + if sys.platform == 'win32': + for key in ['version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe']: + self.executables[key].append('-mno-cygwin') + return v + + # 'gfortran --version' results: + # XXX is the below right? 
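+    # For instance, gnu_version_match('GNU Fortran (GCC) 4.8.2') returns
+    # ('gfortran', '4.8.2'), which version_match() then accepts here.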
+    # Debian: GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))
+    #         GNU Fortran 95 (GCC) 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)
+    # OS X: GNU Fortran 95 (GCC) 4.1.0
+    #       GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)
+    #       GNU Fortran (GCC) 4.3.0 20070316 (experimental)
+
+    possible_executables = ['gfortran', 'f95']
+    executables = {
+        'version_cmd'  : ["<F90>", "--version"],
+        'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
+                          "-fno-second-underscore"] + _EXTRAFLAGS,
+        'compiler_f90' : [None, "-Wall", "-g",
+                          "-fno-second-underscore"] + _EXTRAFLAGS,
+        'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
+                          "-fno-second-underscore"] + _EXTRAFLAGS,
+        'linker_so'    : ["<F90>", "-Wall", "-g"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"],
+        'linker_exe'   : [None, "-Wall"]
+        }
+
+    module_dir_switch = '-J'
+    module_include_switch = '-I'
+
+    g2c = 'gfortran'
+
+    def _universal_flags(self, cmd):
+        """Return a list of -arch flags for every supported architecture."""
+        if not sys.platform == 'darwin':
+            return []
+        arch_flags = []
+        # get arches the C compiler gets.
+        c_archs = self._c_arch_flags()
+        if "i386" in c_archs:
+            c_archs[c_archs.index("i386")] = "i686"
+        # check the arches the Fortran compiler supports, and compare with
+        # arch flags from C compiler
+        for arch in ["ppc", "i686", "x86_64", "ppc64"]:
+            if _can_target(cmd, arch) and arch in c_archs:
+                arch_flags.extend(["-arch", arch])
+        return arch_flags
+
+    def get_flags(self):
+        flags = GnuFCompiler.get_flags(self)
+        arch_flags = self._universal_flags(self.compiler_f90)
+        if arch_flags:
+            flags[:0] = arch_flags
+        return flags
+
+    def get_flags_linker_so(self):
+        flags = GnuFCompiler.get_flags_linker_so(self)
+        arch_flags = self._universal_flags(self.linker_so)
+        if arch_flags:
+            flags[:0] = arch_flags
+        return flags
+
+    def get_library_dirs(self):
+        opt = GnuFCompiler.get_library_dirs(self)
+        if sys.platform == 'win32':
+            c_compiler = self.c_compiler
+            if c_compiler and c_compiler.compiler_type == "msvc":
+                target = self.get_target()
+                if target:
+                    d = os.path.normpath(self.get_libgcc_dir())
+                    root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir)
+                    mingwdir = os.path.normpath(os.path.join(root, target, "lib"))
+                    full = os.path.join(mingwdir, "libmingwex.a")
+                    if os.path.exists(full):
+                        opt.append(mingwdir)
+        return opt
+
+    def get_libraries(self):
+        opt = GnuFCompiler.get_libraries(self)
+        if sys.platform == 'darwin':
+            opt.remove('cc_dynamic')
+        if sys.platform == 'win32':
+            c_compiler = self.c_compiler
+            if c_compiler and c_compiler.compiler_type == "msvc":
+                if "gcc" in opt:
+                    i = opt.index("gcc")
+                    opt.insert(i+1, "mingwex")
+                    opt.insert(i+1, "mingw32")
+        # XXX: fix this mess, does not work for mingw
+        if is_win64():
+            c_compiler = self.c_compiler
+            if c_compiler and c_compiler.compiler_type == "msvc":
+                return []
+            else:
+                raise NotImplementedError("Only MS compiler supported with gfortran on win64")
+        return opt
+
+    def get_target(self):
+        status, output = exec_command(self.compiler_f77 +
+                                      ['-v'],
+                                      use_tee=0)
+        if not status:
+            m = TARGET_R.search(output)
+            if m:
+                return m.group(1)
+        return ""
+
+    def get_flags_opt(self):
+        if is_win64():
+            return ['-O0']
+        else:
+            return GnuFCompiler.get_flags_opt(self)
+
+def _can_target(cmd, arch):
+    """Return true if the command supports the -arch flag for the given
+    architecture."""
+    newcmd = cmd[:]
+    fid, filename = tempfile.mkstemp(suffix=".f")
+    try:
+        d = os.path.dirname(filename)
+        output = os.path.splitext(filename)[0] + ".o"
+        try:
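+            # Probe: compile a throwaway file with '-arch <arch>'; a zero
+            # return code below means the compiler accepts that target.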
newcmd.extend(["-arch", arch, "-c", filename]) + p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) + p.communicate() + return p.returncode == 0 + finally: + if os.path.exists(output): + os.remove(output) + finally: + os.remove(filename) + return False + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + + compiler = GnuFCompiler() + compiler.customize() + print(compiler.get_version()) + + try: + compiler = Gnu95FCompiler() + compiler.customize() + print(compiler.get_version()) + except Exception: + msg = get_exception() + print(msg) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/hpux.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/hpux.py new file mode 100644 index 0000000000000..9004961e1de73 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/hpux.py @@ -0,0 +1,45 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['HPUXFCompiler'] + +class HPUXFCompiler(FCompiler): + + compiler_type = 'hpux' + description = 'HP Fortran 90 Compiler' + version_pattern = r'HP F90 (?P[^\s*,]*)' + + executables = { + 'version_cmd' : ["f90", "+version"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["ld", "-b"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['+Z'] + def get_flags(self): + return self.pic_flags + ['+ppu', '+DD64'] + def get_flags_opt(self): + return ['-O3'] + def get_libraries(self): + return ['m'] + def get_library_dirs(self): + opt = ['/usr/lib/hpux64'] + return opt + def get_version(self, force=0, ok_status=[256, 0, 1]): + # XXX status==256 may indicate 'unrecognized option' or + # 'no input file'. So, version_cmd needs more work. 
+ return FCompiler.get_version(self, force, ok_status) + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(10) + from numpy.distutils.fcompiler import new_fcompiler + compiler = new_fcompiler(compiler='hpux') + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/ibm.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/ibm.py new file mode 100644 index 0000000000000..cc65df9721f9e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/ibm.py @@ -0,0 +1,96 @@ +from __future__ import division, absolute_import, print_function + +import os +import re +import sys + +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils.exec_command import exec_command, find_executable +from numpy.distutils.misc_util import make_temp_file +from distutils import log + +compilers = ['IBMFCompiler'] + +class IBMFCompiler(FCompiler): + compiler_type = 'ibm' + description = 'IBM XL Fortran Compiler' + version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P[^\s*]*)' + #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 + + executables = { + 'version_cmd' : ["", "-qversion"], + 'compiler_f77' : ["xlf"], + 'compiler_fix' : ["xlf90", "-qfixed"], + 'compiler_f90' : ["xlf90"], + 'linker_so' : ["xlf95"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_version(self,*args,**kwds): + version = FCompiler.get_version(self,*args,**kwds) + + if version is None and sys.platform.startswith('aix'): + # use lslpp to find out xlf version + lslpp = find_executable('lslpp') + xlf = find_executable('xlf') + if os.path.exists(xlf) and os.path.exists(lslpp): + s, o = exec_command(lslpp + ' -Lc xlfcmp') + m = re.search('xlfcmp:(?P\d+([.]\d+)+)', o) + if m: version = m.group('version') + + xlf_dir = '/etc/opt/ibmcmp/xlf' + if version is None and os.path.isdir(xlf_dir): + # linux: + # If the output of xlf does not contain version info + # (that's the case with xlf 8.1, for instance) then + # let's try another method: + l = sorted(os.listdir(xlf_dir)) + l.reverse() + l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] + if l: + from distutils.version import LooseVersion + self.version = version = LooseVersion(l[0]) + return version + + def get_flags(self): + return ['-qextname'] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_linker_so(self): + opt = [] + if sys.platform=='darwin': + opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') + else: + opt.append('-bshared') + version = self.get_version(ok_status=[0, 40]) + if version is not None: + if sys.platform.startswith('aix'): + xlf_cfg = '/etc/xlf.cfg' + else: + xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version + fo, new_cfg = make_temp_file(suffix='_xlf.cfg') + log.info('Creating '+new_cfg) + fi = open(xlf_cfg, 'r') + crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P.*)/crt1.o').match + for line in fi: + m = crt1_match(line) + if m: + fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) + else: + fo.write(line) + fi.close() + fo.close() + opt.append('-F'+new_cfg) + return opt + + def get_flags_opt(self): + return ['-O3'] + +if __name__ == '__main__': + log.set_verbosity(2) + compiler = IBMFCompiler() + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/intel.py 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/intel.py new file mode 100644 index 0000000000000..a80e525e3c7aa --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/intel.py @@ -0,0 +1,205 @@ +# http://developer.intel.com/software/products/compilers/flin/ +from __future__ import division, absolute_import, print_function + +import sys + +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file + +compilers = ['IntelFCompiler', 'IntelVisualFCompiler', + 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', + 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] + +def intel_version_match(type): + # Match against the important stuff in the version string + return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) + +class BaseIntelFCompiler(FCompiler): + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['', '-FI', '-V', '-c', + f + '.f', '-o', f + '.o'] + +class IntelFCompiler(BaseIntelFCompiler): + + compiler_type = 'intel' + compiler_aliases = ('ifort',) + description = 'Intel Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + possible_executables = ['ifort', 'ifc'] + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : [None, "-72", "-w90", "-w95"], + 'compiler_f90' : [None], + 'compiler_fix' : [None, "-FI"], + 'linker_so' : ["", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space! + module_include_switch = '-I' + + def get_flags_free(self): + return ["-FR"] + + def get_flags(self): + return ['-fPIC'] + + def get_flags_opt(self): + #return ['-i8 -xhost -openmp -fp-model strict'] + return ['-xhost -openmp -fp-model strict'] + + def get_flags_arch(self): + return [] + + def get_flags_linker_so(self): + opt = FCompiler.get_flags_linker_so(self) + v = self.get_version() + if v and v >= '8.0': + opt.append('-nofor_main') + if sys.platform == 'darwin': + # Here, it's -dynamiclib + try: + idx = opt.index('-shared') + opt.remove('-shared') + except ValueError: + idx = 0 + opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] + return opt + +class IntelItaniumFCompiler(IntelFCompiler): + compiler_type = 'intele' + compiler_aliases = () + description = 'Intel Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium|IA-64') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + +class IntelEM64TFCompiler(IntelFCompiler): + compiler_type = 'intelem' + compiler_aliases = () + description = 'Intel Fortran Compiler for 64-bit apps' + + version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags(self): + return ['-fPIC'] + + def get_flags_opt(self): + #return ['-i8 -xhost -openmp -fp-model strict'] + return ['-xhost -openmp -fp-model strict'] + + def 
get_flags_arch(self): + return [] + +# Is there no difference in the version string between the above compilers +# and the Visual compilers? + +class IntelVisualFCompiler(BaseIntelFCompiler): + compiler_type = 'intelv' + description = 'Intel Visual Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['', '/FI', '/c', + f + '.f', '/o', f + '.o'] + + ar_exe = 'lib.exe' + possible_executables = ['ifort', 'ifl'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + compile_switch = '/c ' + object_switch = '/Fo' #No space after /Fo! + library_switch = '/OUT:' #No space after /OUT:! + module_dir_switch = '/module:' #No space after /module: + module_include_switch = '/I' + + def get_flags(self): + opt = ['/nologo', '/MD', '/nbs', '/Qlowercase', '/us'] + return opt + + def get_flags_free(self): + return ["-FR"] + + def get_flags_debug(self): + return ['/4Yb', '/d2'] + + def get_flags_opt(self): + return ['/O1'] # Scipy test failures with /O2 + + def get_flags_arch(self): + return ["/arch:IA-32", "/QaxSSE3"] + +class IntelItaniumVisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelev' + description = 'Intel Visual Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium') + + possible_executables = ['efl'] # XXX this is a wild guess + ar_exe = IntelVisualFCompiler.ar_exe + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + +class IntelEM64VisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelvem' + description = 'Intel Visual Fortran Compiler for 64-bit apps' + + version_match = simple_version_match(start='Intel\(R\).*?64,') + + def get_flags_arch(self): + return ["/arch:SSE2"] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils.fcompiler import new_fcompiler + compiler = new_fcompiler(compiler='intel') + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/lahey.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/lahey.py new file mode 100644 index 0000000000000..7a33b4b63ce5d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/lahey.py @@ -0,0 +1,49 @@ +from __future__ import division, absolute_import, print_function + +import os + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['LaheyFCompiler'] + +class LaheyFCompiler(FCompiler): + + compiler_type = 'lahey' + description = 'Lahey/Fujitsu Fortran 95 Compiler' + version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' + + executables = { + 'version_cmd' : ["", "--version"], + 'compiler_f77' : ["lf95", "--fix"], + 'compiler_fix' : ["lf95", "--fix"], + 'compiler_f90' : ["lf95"], + 'linker_so' : ["lf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def get_flags_opt(self): + return ['-O'] + def get_flags_debug(self): + return 
['-g', '--chk', '--chkglobal'] + def get_library_dirs(self): + opt = [] + d = os.environ.get('LAHEY') + if d: + opt.append(os.path.join(d, 'lib')) + return opt + def get_libraries(self): + opt = [] + opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils.fcompiler import new_fcompiler + compiler = new_fcompiler(compiler='lahey') + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/mips.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/mips.py new file mode 100644 index 0000000000000..6a8d230992266 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/mips.py @@ -0,0 +1,58 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler + +compilers = ['MIPSFCompiler'] + +class MIPSFCompiler(FCompiler): + + compiler_type = 'mips' + description = 'MIPSpro Fortran Compiler' + version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' + + executables = { + 'version_cmd' : ["", "-version"], + 'compiler_f77' : ["f77", "-f77"], + 'compiler_fix' : ["f90", "-fixedform"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["f90", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : None + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['-KPIC'] + + def get_flags(self): + return self.pic_flags + ['-n32'] + def get_flags_opt(self): + return ['-O3'] + def get_flags_arch(self): + opt = [] + for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): + if getattr(cpu, 'is_IP%s'%a)(): + opt.append('-TARG:platform=IP%s' % a) + break + return opt + def get_flags_arch_f77(self): + r = None + if cpu.is_r10000(): r = 10000 + elif cpu.is_r12000(): r = 12000 + elif cpu.is_r8000(): r = 8000 + elif cpu.is_r5000(): r = 5000 + elif cpu.is_r4000(): r = 4000 + if r is not None: + return ['r%s' % (r)] + return [] + def get_flags_arch_f90(self): + r = self.get_flags_arch_f77() + if r: + r[0] = '-' + r[0] + return r + +if __name__ == '__main__': + from numpy.distutils.fcompiler import new_fcompiler + compiler = new_fcompiler(compiler='mips') + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/nag.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/nag.py new file mode 100644 index 0000000000000..ae1b96faf3e8b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/nag.py @@ -0,0 +1,45 @@ +from __future__ import division, absolute_import, print_function + +import sys +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NAGFCompiler'] + +class NAGFCompiler(FCompiler): + + compiler_type = 'nag' + description = 'NAGWare Fortran 95 Compiler' + version_pattern = r'NAGWare Fortran 95 compiler Release (?P[^\s]*)' + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f95", "-fixed"], + 'compiler_fix' : ["f95", "-fixed"], + 'compiler_f90' : ["f95"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform=='darwin': + return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return ["-Wl,-shared"] + def get_flags_opt(self): + return ['-O4'] + def get_flags_arch(self): + version = self.get_version() + if version 
and version < '5.1': + return ['-target=native'] + else: + return [''] + def get_flags_debug(self): + return ['-g', '-gline', '-g90', '-nan', '-C'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils.fcompiler import new_fcompiler + compiler = new_fcompiler(compiler='nag') + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/none.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/none.py new file mode 100644 index 0000000000000..6f602d734d56a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/none.py @@ -0,0 +1,31 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NoneFCompiler'] + +class NoneFCompiler(FCompiler): + + compiler_type = 'none' + description = 'Fake Fortran compiler' + + executables = {'compiler_f77': None, + 'compiler_f90': None, + 'compiler_fix': None, + 'linker_so': None, + 'linker_exe': None, + 'archiver': None, + 'ranlib': None, + 'version_cmd': None, + } + + def find_executables(self): + pass + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + compiler = NoneFCompiler() + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pathf95.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pathf95.py new file mode 100644 index 0000000000000..1902bbc242ca8 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pathf95.py @@ -0,0 +1,38 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['PathScaleFCompiler'] + +class PathScaleFCompiler(FCompiler): + + compiler_type = 'pathf95' + description = 'PathScale Fortran Compiler' + version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' + + executables = { + 'version_cmd' : ["pathf95", "-version"], + 'compiler_f77' : ["pathf95", "-fixedform"], + 'compiler_fix' : ["pathf95", "-fixedform"], + 'compiler_f90' : ["pathf95"], + 'linker_so' : ["pathf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space! 
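
To make the "Don't remove ending space!" warnings on module_dir_switch concrete: as far as I can tell, numpy.distutils composes the module-directory option from this attribute roughly as sketched below, so a trailing space turns the switch and the directory into two separate command-line arguments instead of one fused token. A minimal sketch (the helper name is made up for illustration):

    def module_options(module_dir_switch, module_build_dir):
        # A switch ending in a space becomes two argv items
        # ('-module', 'build/mods'); otherwise one fused item ('-Jbuild/mods').
        if module_dir_switch.endswith(' '):
            return [module_dir_switch.strip(), module_build_dir]
        return [module_dir_switch.strip() + module_build_dir]

    print(module_options('-module ', 'build/mods'))  # ['-module', 'build/mods']
    print(module_options('-J', 'build/mods'))        # ['-Jbuild/mods']
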
+ module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + #compiler = PathScaleFCompiler() + from numpy.distutils.fcompiler import new_fcompiler + compiler = new_fcompiler(compiler='pathf95') + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pg.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pg.py new file mode 100644 index 0000000000000..f3f5ea22ba755 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pg.py @@ -0,0 +1,60 @@ +# http://www.pgroup.com +from __future__ import division, absolute_import, print_function + +from numpy.distutils.fcompiler import FCompiler +from sys import platform + +compilers = ['PGroupFCompiler'] + +class PGroupFCompiler(FCompiler): + + compiler_type = 'pg' + description = 'Portland Group Fortran Compiler' + version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' + + if platform == 'darwin': + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["pgfortran", "-dynamiclib"], + 'compiler_fix' : ["pgfortran", "-Mfixed", "-dynamiclib"], + 'compiler_f90' : ["pgfortran", "-dynamiclib"], + 'linker_so' : ["libtool"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = [''] + else: + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["pgfortran"], + 'compiler_fix' : ["pgfortran", "-Mfixed"], + 'compiler_f90' : ["pgfortran"], + 'linker_so' : ["pgfortran", "-shared", "-fpic"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-fpic'] + + + module_dir_switch = '-module ' + module_include_switch = '-I' + + def get_flags(self): + opt = ['-Minform=inform', '-Mnosecond_underscore'] + return self.pic_flags + opt + def get_flags_opt(self): + return ['-fast'] + def get_flags_debug(self): + return ['-g'] + + if platform == 'darwin': + def get_flags_linker_so(self): + return ["-dynamic", '-undefined', 'dynamic_lookup'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils.fcompiler import new_fcompiler + compiler = new_fcompiler(compiler='pg') + compiler.customize() + print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/sun.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/sun.py new file mode 100644 index 0000000000000..0955f14a1c42b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/sun.py @@ -0,0 +1,52 @@ +from __future__ import division, absolute_import, print_function + +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler + +compilers = ['SunFCompiler'] + +class SunFCompiler(FCompiler): + + compiler_type = 'sun' + description = 'Sun or Forte Fortran 95 Compiler' + # ex: + # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 + version_match = simple_version_match( + start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90", "-fixed"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["", "-Bdynamic", "-G"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = '-moddir=' + module_include_switch = '-M' + pic_flags = ['-xcode=pic32'] + + def get_flags_f77(self): + ret = ["-ftrap=%none"] + if 
(self.get_version() or '') >= '7':
+            ret.append("-f77")
+        else:
+            ret.append("-fixed")
+        return ret
+    def get_opt(self):
+        return ['-fast', '-dalign']
+    def get_arch(self):
+        return ['-xtarget=generic']
+    def get_libraries(self):
+        opt = []
+        opt.extend(['fsu', 'sunmath', 'mvec'])
+        return opt
+
+if __name__ == '__main__':
+    from distutils import log
+    log.set_verbosity(2)
+    from numpy.distutils.fcompiler import new_fcompiler
+    compiler = new_fcompiler(compiler='sun')
+    compiler.customize()
+    print(compiler.get_version())
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/vast.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/vast.py
new file mode 100644
index 0000000000000..05bbc10badb13
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/vast.py
@@ -0,0 +1,56 @@
+from __future__ import division, absolute_import, print_function
+
+import os
+
+from numpy.distutils.fcompiler.gnu import GnuFCompiler
+
+compilers = ['VastFCompiler']
+
+class VastFCompiler(GnuFCompiler):
+    compiler_type = 'vast'
+    compiler_aliases = ()
+    description = 'Pacific-Sierra Research Fortran 90 Compiler'
+    version_pattern = r'\s*Pacific-Sierra Research vf90 '\
+                      '(Personal|Professional)\s+(?P<version>[^\s]*)'
+
+    # VAST f90 does not support -o with -c. So, object files are created
+    # to the current directory and then moved to build directory
+    object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '
+
+    executables = {
+        'version_cmd'  : ["vf90", "-v"],
+        'compiler_f77' : ["g77"],
+        'compiler_fix' : ["f90", "-Wv,-ya"],
+        'compiler_f90' : ["f90"],
+        'linker_so'    : ["<F90>"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+    module_dir_switch = None #XXX Fix me
+    module_include_switch = None #XXX Fix me
+
+    def find_executables(self):
+        pass
+
+    def get_version_cmd(self):
+        f90 = self.compiler_f90[0]
+        d, b = os.path.split(f90)
+        vf90 = os.path.join(d, 'v'+b)
+        return vf90
+
+    def get_flags_arch(self):
+        vast_version = self.get_version()
+        gnu = GnuFCompiler()
+        gnu.customize(None)
+        self.version = gnu.get_version()
+        opt = GnuFCompiler.get_flags_arch(self)
+        self.version = vast_version
+        return opt
+
+if __name__ == '__main__':
+    from distutils import log
+    log.set_verbosity(2)
+    from numpy.distutils.fcompiler import new_fcompiler
+    compiler = new_fcompiler(compiler='vast')
+    compiler.customize()
+    print(compiler.get_version())
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/from_template.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/from_template.py
new file mode 100644
index 0000000000000..d10b50218d2aa
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/from_template.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+"""
+
+process_file(filename)
+
+  takes templated file .xxx.src and produces .xxx file where .xxx
+  is .pyf .f90 or .f using the following template rules:
+
+  '<..>' denotes a template.
+
+  All function and subroutine blocks in a source file with names that
+  contain '<..>' will be replicated according to the rules in '<..>'.
+
+  The number of comma-separated words in '<..>' will determine the number of
+  replicates.
+
+  '<..>' may have two different forms, named and short. For example,
+
+  named:
+   <p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
+   'd', 's', 'z', and 'c' for each replicate of the block.
+
+   <_c>  is already defined: <_c=s,d,c,z>
+   <_t>  is already defined: <_t=real,double precision,complex,double complex>
+
+  short:
+   <s,d,c,z>, a short form of the named, useful when no <p> appears inside
+   a block.
+
+  In general, '<..>' contains a comma separated list of arbitrary
+  expressions. If these expressions must contain a comma|leftarrow|rightarrow,
+  then prepend the comma|leftarrow|rightarrow with a backslash.
+
+  If an expression matches '\\<index>' then it will be replaced
+  by <index>-th expression.
+
+  Note that all '<..>' forms in a block must have the same number of
+  comma-separated entries.
+
+  Predefined named template rules:
+   <prefix=s,d,c,z>
+   <ftype=real,double precision,complex,double complex>
+   <ftypereal=real,double precision,\\0,\\1>
+   <ctype=float,double,complex_float,complex_double>
+   <ctypereal=float,double,\\0,\\1>
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['process_str', 'process_file']
+
+import os
+import sys
+import re
+
+routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
+function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
+
+def parse_structure(astr):
+    """ Return a list of tuples for each function or subroutine; each
+    tuple is the start and end of a subroutine or function to be
+    expanded.
+    """
+
+    spanlist = []
+    ind = 0
+    while True:
+        m = routine_start_re.search(astr, ind)
+        if m is None:
+            break
+        start = m.start()
+        if function_start_re.match(astr, start, m.end()):
+            while True:
+                i = astr.rfind('\n', ind, start)
+                if i==-1:
+                    break
+                start = i
+                if astr[i:i+7]!='\n     $':
+                    break
+            start += 1
+        m = routine_end_re.search(astr, m.end())
+        ind = end = m and m.end()-1 or len(astr)
+        spanlist.append((start, end))
+    return spanlist
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+    reps = named_re.findall(astr)
+    names = {}
+    for rep in reps:
+        name = rep[0].strip() or unique_key(names)
+        repl = rep[1].replace('\,', '@comma@')
+        thelist = conv(repl)
+        names[name] = thelist
+    return names
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+    b = astr.split(',')
+    l = [x.strip() for x in b]
+    for i in range(len(l)):
+        m = item_re.match(l[i])
+        if m:
+            j = int(m.group('index'))
+            l[i] = l[j]
+    return ','.join(l)
+
+def unique_key(adict):
+    """ Obtain a unique key given a dictionary."""
+    allkeys = list(adict.keys())
+    done = False
+    n = 1
+    while not done:
+        newkey = '__l%s' % (n)
+        if newkey in allkeys:
+            n += 1
+        else:
+            done = True
+    return newkey
+
+
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr, names):
+    substr = substr.replace('\>', '@rightarrow@')
+    substr = substr.replace('\<', '@leftarrow@')
+    lnames = find_repl_patterns(substr)
+    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates
+
+    def listrepl(mobj):
+        thelist = conv(mobj.group(1).replace('\,', '@comma@'))
+        if template_name_re.match(thelist):
+            return "<%s>" % (thelist)
+        name = None
+        for key in lnames.keys():    # see if list is already in dictionary
+            if lnames[key] == thelist:
+                name = key
+        if name is None:      # this list is not in the dictionary yet
+            name = unique_key(lnames)
+            lnames[name] = thelist
+        return "<%s>" % name
+
+    substr = list_re.sub(listrepl, substr) # convert all lists to named templates
+                                           # newnames are constructed as needed
+
+    numsubs = None
+    base_rule = None
+    rules = {}
+    for r in template_re.findall(substr):
+        if r not in rules:
+            thelist = lnames.get(r, names.get(r, None))
+            if thelist is None:
+                raise ValueError('No replicates found for <%s>' % (r))
+            if r not in names and not thelist.startswith('_'):
+                names[r] = thelist
+            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
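
To make the template rules documented above concrete, here is a tiny usage sketch (the Fortran fragment is invented; it assumes the file is importable as numpy.distutils.from_template, which matches the path this hunk adds). An inline named rule such as <t=s,d> replicates the block once per comma-separated entry:

    from numpy.distutils.from_template import process_str

    src = '''
          subroutine <t=s,d>copy(n, x)
          <type=real,double precision> x(n)
          end subroutine <t=s,d>copy
    '''
    print(process_str(src))
    # Emits two replicates: "subroutine scopy" with "real x(n)" and
    # "subroutine dcopy" with "double precision x(n)".
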
+            num = len(rule)
+
+            if numsubs is None:
+                numsubs = num
+                rules[r] = rule
+                base_rule = r
+            elif num == numsubs:
+                rules[r] = rule
+            else:
+                print("Mismatch in number of replacements (base <%s=%s>)"
+                      " for <%s=%s>. Ignoring." %
+                      (base_rule, ','.join(rules[base_rule]), r, thelist))
+    if not rules:
+        return substr
+
+    def namerepl(mobj):
+        name = mobj.group(1)
+        return rules.get(name, (k+1)*[name])[k]
+
+    newstr = ''
+    for k in range(numsubs):
+        newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+    newstr = newstr.replace('@rightarrow@', '>')
+    newstr = newstr.replace('@leftarrow@', '<')
+    return newstr
+
+def process_str(allstr):
+    newstr = allstr
+    writestr = '' #_head # using _head will break free-format files
+
+    struct = parse_structure(newstr)
+
+    oldend = 0
+    names = {}
+    names.update(_special_names)
+    for sub in struct:
+        writestr += newstr[oldend:sub[0]]
+        names.update(find_repl_patterns(newstr[oldend:sub[0]]))
+        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
+        oldend = sub[1]
+    writestr += newstr[oldend:]
+
+    return writestr
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
+
+def resolve_includes(source):
+    d = os.path.dirname(source)
+    fid = open(source)
+    lines = []
+    for line in fid:
+        m = include_src_re.match(line)
+        if m:
+            fn = m.group('name')
+            if not os.path.isabs(fn):
+                fn = os.path.join(d, fn)
+            if os.path.isfile(fn):
+                print('Including file', fn)
+                lines.extend(resolve_includes(fn))
+            else:
+                lines.append(line)
+        else:
+            lines.append(line)
+    fid.close()
+    return lines
+
+def process_file(source):
+    lines = resolve_includes(source)
+    return process_str(''.join(lines))
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ctype=float,double,complex_float,complex_double>
+<ftypereal=real,double precision,\\0,\\1>
+<ctypereal=float,double,\\0,\\1>
+''')
+
+if __name__ == "__main__":
+
+    try:
+        file = sys.argv[1]
+    except IndexError:
+        fid = sys.stdin
+        outfile = sys.stdout
+    else:
+        fid = open(file, 'r')
+        (base, ext) = os.path.splitext(file)
+        newname = base
+        outfile = open(newname, 'w')
+
+    allstr = fid.read()
+    writestr = process_str(allstr)
+    outfile.write(writestr)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/info.py
new file mode 100644
index 0000000000000..2f5310665cef3
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/info.py
@@ -0,0 +1,6 @@
+"""
+Enhanced distutils with Fortran compilers support and more.
+""" +from __future__ import division, absolute_import, print_function + +postpone_import = True diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/intelccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/intelccompiler.py new file mode 100644 index 0000000000000..1d8dcd9fd88dc --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/intelccompiler.py @@ -0,0 +1,45 @@ +from __future__ import division, absolute_import, print_function + +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.exec_command import find_executable + +class IntelCCompiler(UnixCCompiler): + """ A modified Intel compiler compatible with an gcc built Python.""" + compiler_type = 'intel' + cc_exe = 'icc' + cc_args = 'fPIC' + + def __init__ (self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__ (self, verbose, dry_run, force) + self.cc_exe = 'icc -fPIC' + compiler = self.cc_exe + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + linker_exe=compiler, + linker_so=compiler + ' -shared') + +class IntelItaniumCCompiler(IntelCCompiler): + compiler_type = 'intele' + + # On Itanium, the Intel Compiler used to be called ecc, let's search for + # it (now it's also icc, so ecc is last in the search). + for cc_exe in map(find_executable, ['icc', 'ecc']): + if cc_exe: + break + +class IntelEM64TCCompiler(UnixCCompiler): + """ A modified Intel x86_64 compiler compatible with a 64bit gcc built Python. + """ + compiler_type = 'intelem' + cc_exe = 'icc -m64 -fPIC' + cc_args = "-fPIC" + def __init__ (self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__ (self, verbose, dry_run, force) + self.cc_exe = 'icc -m64 -fPIC' + compiler = self.cc_exe + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + linker_exe=compiler, + linker_so=compiler + ' -shared') diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/lib2def.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/lib2def.py new file mode 100644 index 0000000000000..7316547a37b5c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/lib2def.py @@ -0,0 +1,116 @@ +from __future__ import division, absolute_import, print_function + +import re +import sys +import os +import subprocess + +__doc__ = """This module generates a DEF file from the symbols in +an MSVC-compiled DLL import library. It correctly discriminates between +data and functions. The data is collected from the output of the program +nm(1). + +Usage: + python lib2def.py [libname.lib] [output.def] +or + python lib2def.py [libname.lib] > output.def + +libname.lib defaults to python.lib and output.def defaults to stdout + +Author: Robert Kern +Last Update: April 30, 1999 +""" + +__version__ = '0.1a' + +py_ver = "%d%d" % tuple(sys.version_info[:2]) + +DEFAULT_NM = 'nm -Cs' + +DEF_HEADER = """LIBRARY python%s.dll +;CODE PRELOAD MOVEABLE DISCARDABLE +;DATA PRELOAD SINGLE + +EXPORTS +""" % py_ver +# the header of the DEF file + +FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) +DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) + +def parse_cmd(): + """Parses the command-line arguments. 
+ +libfile, deffile = parse_cmd()""" + if len(sys.argv) == 3: + if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': + libfile, deffile = sys.argv[1:] + elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': + deffile, libfile = sys.argv[1:] + else: + print("I'm assuming that your first argument is the library") + print("and the second is the DEF file.") + elif len(sys.argv) == 2: + if sys.argv[1][-4:] == '.def': + deffile = sys.argv[1] + libfile = 'python%s.lib' % py_ver + elif sys.argv[1][-4:] == '.lib': + deffile = None + libfile = sys.argv[1] + else: + libfile = 'python%s.lib' % py_ver + deffile = None + return libfile, deffile + +def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]): + """Returns the output of nm_cmd via a pipe. + +nm_output = getnam(nm_cmd = 'nm -Cs py_lib')""" + f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE) + nm_output = f.stdout.read() + f.stdout.close() + return nm_output + +def parse_nm(nm_output): + """Returns a tuple of lists: dlist for the list of data +symbols and flist for the list of function symbols. + +dlist, flist = parse_nm(nm_output)""" + data = DATA_RE.findall(nm_output) + func = FUNC_RE.findall(nm_output) + + flist = [] + for sym in data: + if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): + flist.append(sym) + + dlist = [] + for sym in data: + if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): + dlist.append(sym) + + dlist.sort() + flist.sort() + return dlist, flist + +def output_def(dlist, flist, header, file = sys.stdout): + """Outputs the final DEF file to a file defaulting to stdout. + +output_def(dlist, flist, header, file = sys.stdout)""" + for data_sym in dlist: + header = header + '\t%s DATA\n' % data_sym + header = header + '\n' # blank line + for func_sym in flist: + header = header + '\t%s\n' % func_sym + file.write(header) + +if __name__ == '__main__': + libfile, deffile = parse_cmd() + if deffile is None: + deffile = sys.stdout + else: + deffile = open(deffile, 'w') + nm_cmd = [str(DEFAULT_NM), str(libfile)] + nm_output = getnm(nm_cmd) + dlist, flist = parse_nm(nm_output) + output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/line_endings.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/line_endings.py new file mode 100644 index 0000000000000..5ecb104ffdf51 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/line_endings.py @@ -0,0 +1,76 @@ +""" Functions for converting from DOS to UNIX line endings + +""" +from __future__ import division, absolute_import, print_function + +import sys, re, os + +def dos2unix(file): + "Replace CRLF with LF in argument files. Print names of changed files." + if os.path.isdir(file): + print(file, "Directory!") + return + + data = open(file, "rb").read() + if '\0' in data: + print(file, "Binary!") + return + + newdata = re.sub("\r\n", "\n", data) + if newdata != data: + print('dos2unix:', file) + f = open(file, "wb") + f.write(newdata) + f.close() + return file + else: + print(file, 'ok') + +def dos2unix_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + file = dos2unix(full_path) + if file is not None: + modified_files.append(file) + +def dos2unix_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, dos2unix_one_dir, modified_files) + return modified_files +#---------------------------------- + +def unix2dos(file): + "Replace LF with CRLF in argument files. 
Print names of changed files." + if os.path.isdir(file): + print(file, "Directory!") + return + + data = open(file, "rb").read() + if '\0' in data: + print(file, "Binary!") + return + newdata = re.sub("\r\n", "\n", data) + newdata = re.sub("\n", "\r\n", newdata) + if newdata != data: + print('unix2dos:', file) + f = open(file, "wb") + f.write(newdata) + f.close() + return file + else: + print(file, 'ok') + +def unix2dos_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + unix2dos(full_path) + if file is not None: + modified_files.append(file) + +def unix2dos_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, unix2dos_one_dir, modified_files) + return modified_files + +if __name__ == "__main__": + dos2unix_dir(sys.argv[1]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/log.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/log.py new file mode 100644 index 0000000000000..37f9fe5dd0ef6 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/log.py @@ -0,0 +1,93 @@ +# Colored log, requires Python 2.3 or up. +from __future__ import division, absolute_import, print_function + +import sys +from distutils.log import * +from distutils.log import Log as old_Log +from distutils.log import _global_log + +if sys.version_info[0] < 3: + from .misc_util import (red_text, default_text, cyan_text, green_text, + is_sequence, is_string) +else: + from numpy.distutils.misc_util import (red_text, default_text, cyan_text, + green_text, is_sequence, is_string) + + +def _fix_args(args,flag=1): + if is_string(args): + return args.replace('%', '%%') + if flag and is_sequence(args): + return tuple([_fix_args(a, flag=0) for a in args]) + return args + + +class Log(old_Log): + def _log(self, level, msg, args): + if level >= self.threshold: + if args: + msg = msg % _fix_args(args) + if 0: + if msg.startswith('copying ') and msg.find(' -> ') != -1: + return + if msg.startswith('byte-compiling '): + return + print(_global_color_map[level](msg)) + sys.stdout.flush() + + def good(self, msg, *args): + """ + If we log WARN messages, log this message as a 'nice' anti-warn + message. + + """ + if WARN >= self.threshold: + if args: + print(green_text(msg % _fix_args(args))) + else: + print(green_text(msg)) + sys.stdout.flush() + + +_global_log.__class__ = Log + +good = _global_log.good + +def set_threshold(level, force=False): + prev_level = _global_log.threshold + if prev_level > DEBUG or force: + # If we're running at DEBUG, don't change the threshold, as there's + # likely a good reason why we're running at this level. + _global_log.threshold = level + if level <= DEBUG: + info('set_threshold: setting threshold to DEBUG level,' + ' it can be changed only with force argument') + else: + info('set_threshold: not changing threshold from DEBUG level' + ' %s to %s' % (prev_level, level)) + return prev_level + + +def set_verbosity(v, force=False): + prev_level = _global_log.threshold + if v < 0: + set_threshold(ERROR, force) + elif v == 0: + set_threshold(WARN, force) + elif v == 1: + set_threshold(INFO, force) + elif v >= 2: + set_threshold(DEBUG, force) + return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) + + +_global_color_map = { + DEBUG:cyan_text, + INFO:default_text, + WARN:red_text, + ERROR:red_text, + FATAL:red_text +} + +# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
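
As a usage illustration of the verbosity-to-threshold mapping above (a minimal sketch; the messages are invented):

    from numpy.distutils import log

    prev = log.set_verbosity(2)           # 2 maps to DEBUG; returns the previous level
    log.debug('now visible: threshold is DEBUG')
    log.good('an anti-warning, printed in green on colour-capable terminals')
    log.set_verbosity(prev, force=True)   # restore; force bypasses the sticky-DEBUG guard
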
+set_verbosity(0, force=True) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/mingw32ccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/mingw32ccompiler.py new file mode 100644 index 0000000000000..c720d142a0f90 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/mingw32ccompiler.py @@ -0,0 +1,582 @@ +""" +Support code for building Python extensions on Windows. + + # NT stuff + # 1. Make sure libpython.a exists for gcc. If not, build it. + # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) + # 3. Force windows to use g77 + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import subprocess +import re + +# Overwrite certain distutils.ccompiler functions: +import numpy.distutils.ccompiler + +if sys.version_info[0] < 3: + from . import log +else: + from numpy.distutils import log +# NT stuff +# 1. Make sure libpython.a exists for gcc. If not, build it. +# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) +# --> this is done in numpy/distutils/ccompiler.py +# 3. Force windows to use g77 + +import distutils.cygwinccompiler +from distutils.version import StrictVersion +from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options +from distutils.errors import DistutilsExecError, CompileError, UnknownFileError + +from distutils.unixccompiler import UnixCCompiler +from distutils.msvccompiler import get_build_version as get_build_msvc_version +from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture + +# Useful to generate table of symbols from a dll +_START = re.compile(r'\[Ordinal/Name Pointer\] Table') +_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') + +# the same as cygwin plus some additional parameters +class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): + """ A modified MingW32 compiler compatible with an MSVC built Python. + + """ + + compiler_type = 'mingw32' + + def __init__ (self, + verbose=0, + dry_run=0, + force=0): + + distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, + verbose, dry_run, force) + + # we need to support 3.2 which doesn't match the standard + # get_versions methods regex + if self.gcc_version is None: + import re + p = subprocess.Popen(['gcc', '-dumpversion'], shell=True, + stdout=subprocess.PIPE) + out_string = p.stdout.read() + p.stdout.close() + result = re.search('(\d+\.\d+)', out_string) + if result: + self.gcc_version = StrictVersion(result.group(1)) + + # A real mingw32 doesn't need to specify a different entry point, + # but cygwin 2.91.57 in no-cygwin-mode needs it. + if self.gcc_version <= "2.91.57": + entry_point = '--entry _DllMain@12' + else: + entry_point = '' + + if self.linker_dll == 'dllwrap': + # Commented out '--driver-name g++' part that fixes weird + # g++.exe: g++: No such file or directory + # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). + # If the --driver-name part is required for some environment + # then make the inclusion of this part specific to that environment. + self.linker = 'dllwrap' # --driver-name g++' + elif self.linker_dll == 'gcc': + self.linker = 'g++' + + # **changes: eric jones 4/11/01 + # 1. Check for import library on Windows. Build if it doesn't exist. + + build_import_library() + + # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
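
A worked example of the __MSVCRT_VERSION__ hint computed a few lines below (the value is invented for illustration: a Python linked against msvcr90):

    name = 'msvcr90'                  # what msvc_runtime_library() would return
    ver = int(name.lstrip('msvcr'))   # lstrip removes the letters m,s,v,c,r -> 90
    print('0x%03i0' % ver)            # '0x0900', the version hint handed to MinGW
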
+ msvcr_success = build_msvcr_library() + msvcr_dbg_success = build_msvcr_library(debug=True) + if msvcr_success or msvcr_dbg_success: + # add preprocessor statement for using customized msvcr lib + self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') + + # Define the MSVC version as hint for MinGW + msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr')) + self.define_macro('__MSVCRT_VERSION__', msvcr_version) + + # **changes: eric jones 4/11/01 + # 2. increased optimization and turned off all warnings + # 3. also added --driver-name g++ + #self.set_executables(compiler='gcc -mno-cygwin -O2 -w', + # compiler_so='gcc -mno-cygwin -mdll -O2 -w', + # linker_exe='gcc -mno-cygwin', + # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s' + # % (self.linker, entry_point)) + + # MS_WIN64 should be defined when building for amd64 on windows, but + # python headers define it only for MS compilers, which has all kind of + # bad consequences, like using Py_ModuleInit4 instead of + # Py_ModuleInit4_64, etc... So we add it here + if get_build_architecture() == 'AMD64': + if self.gcc_version < "4.0": + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes', + linker_exe='gcc -g -mno-cygwin', + linker_so='gcc -g -mno-cygwin -shared') + else: + # gcc-4 series releases do not support -mno-cygwin option + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', + linker_exe='gcc -g', + linker_so='gcc -g -shared') + else: + if self.gcc_version <= "3.0.0": + self.set_executables(compiler='gcc -mno-cygwin -O2 -w', + compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='%s -mno-cygwin -mdll -static %s' + % (self.linker, entry_point)) + elif self.gcc_version < "4.0": + self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', + compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='g++ -mno-cygwin -shared') + else: + # gcc-4 series releases do not support -mno-cygwin option + self.set_executables(compiler='gcc -O2 -Wall', + compiler_so='gcc -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ ', + linker_so='g++ -shared') + # added for python2.3 support + # we can't pass it through set_executables because pre 2.2 would fail + self.compiler_cxx = ['g++'] + + # Maybe we should also append -mthreads, but then the finished + # dlls need another dll (mingwm10.dll see Mingw32 docs) + # (-mthreads: Support thread-safe exception handling on `Mingw32') + + # no additional libraries needed + #self.dll_libraries=[] + return + + # __init__ () + + def link(self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + export_symbols = None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + # Include the appropiate MSVC runtime library if Python was built + # with MSVC >= 7.0 (MinGW standard is msvcrt) + runtime_library = msvc_runtime_library() + if runtime_library: + if not libraries: + libraries = [] + libraries.append(runtime_library) + args = (self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + None, #export_symbols, we do this in our def-file + debug, + extra_preargs, + extra_postargs, + build_temp, + target_lang) + if 
self.gcc_version < "3.0.0": + func = distutils.cygwinccompiler.CygwinCCompiler.link + else: + func = UnixCCompiler.link + func(*args[:func.__code__.co_argcount]) + return + + def object_filenames (self, + source_filenames, + strip_dir=0, + output_dir=''): + if output_dir is None: output_dir = '' + obj_names = [] + for src_name in source_filenames: + # use normcase to make sure '.rc' is really '.rc' and not '.RC' + (base, ext) = os.path.splitext (os.path.normcase(src_name)) + + # added these lines to strip off windows drive letters + # without it, .o files are placed next to .c files + # instead of the build directory + drv, base = os.path.splitdrive(base) + if drv: + base = base[1:] + + if ext not in (self.src_extensions + ['.rc', '.res']): + raise UnknownFileError( + "unknown file type '%s' (from '%s')" % \ + (ext, src_name)) + if strip_dir: + base = os.path.basename (base) + if ext == '.res' or ext == '.rc': + # these need to be compiled to object files + obj_names.append (os.path.join (output_dir, + base + ext + self.obj_extension)) + else: + obj_names.append (os.path.join (output_dir, + base + self.obj_extension)) + return obj_names + + # object_filenames () + + +def find_python_dll(): + maj, min, micro = [int(i) for i in sys.version_info[:3]] + dllname = 'python%d%d.dll' % (maj, min) + print("Looking for %s" % dllname) + + # We can't do much here: + # - find it in python main dir + # - in system32, + # - ortherwise (Sxs), I don't know how to get it. + lib_dirs = [] + lib_dirs.append(sys.prefix) + lib_dirs.append(os.path.join(sys.prefix, 'lib')) + try: + lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32')) + except KeyError: + pass + + for d in lib_dirs: + dll = os.path.join(d, dllname) + if os.path.exists(dll): + return dll + + raise ValueError("%s not found in %s" % (dllname, lib_dirs)) + +def dump_table(dll): + st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE) + return st.stdout.readlines() + +def generate_def(dll, dfile): + """Given a dll file location, get all its exported symbols and dump them + into the given def file. + + The .def file will be overwritten""" + dump = dump_table(dll) + for i in range(len(dump)): + if _START.match(dump[i].decode()): + break + else: + raise ValueError("Symbol table not found") + + syms = [] + for j in range(i+1, len(dump)): + m = _TABLE.match(dump[j].decode()) + if m: + syms.append((int(m.group(1).strip()), m.group(2))) + else: + break + + if len(syms) == 0: + log.warn('No symbols found in %s' % dll) + + d = open(dfile, 'w') + d.write('LIBRARY %s\n' % os.path.basename(dll)) + d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') + d.write(';DATA PRELOAD SINGLE\n') + d.write('\nEXPORTS\n') + for s in syms: + #d.write('@%d %s\n' % (s[0], s[1])) + d.write('%s\n' % s[1]) + d.close() + +def find_dll(dll_name): + + arch = {'AMD64' : 'amd64', + 'Intel' : 'x86'}[get_build_architecture()] + + def _find_dll_in_winsxs(dll_name): + # Walk through the WinSxS directory to find the dll. + winsxs_path = os.path.join(os.environ['WINDIR'], 'winsxs') + if not os.path.exists(winsxs_path): + return None + for root, dirs, files in os.walk(winsxs_path): + if dll_name in files and arch in root: + return os.path.join(root, dll_name) + return None + + def _find_dll_in_path(dll_name): + # First, look in the Python directory, then scan PATH for + # the given dll name. 
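
For reference, the symbol-table scrape in generate_def above works on objdump output shaped roughly like the fabricated two-line excerpt below (real tables are much longer; the regex is the _TABLE pattern from this file):

    import re
    _TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')

    for line in ['\t[   4] PyObject_GetItem', '\t[ 218] Py_Initialize']:
        m = _TABLE.match(line)
        if m:
            print(int(m.group(1).strip()), m.group(2))
    # -> (4, 'PyObject_GetItem') and (218, 'Py_Initialize'),
    #    which generate_def writes out as EXPORTS entries.
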
+ for path in [sys.prefix] + os.environ['PATH'].split(';'): + filepath = os.path.join(path, dll_name) + if os.path.exists(filepath): + return os.path.abspath(filepath) + + return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) + +def build_msvcr_library(debug=False): + if os.name != 'nt': + return False + + msvcr_name = msvc_runtime_library() + + # Skip using a custom library for versions < MSVC 8.0 + if int(msvcr_name.lstrip('msvcr')) < 80: + log.debug('Skip building msvcr library: custom functionality not present') + return False + + if debug: + msvcr_name += 'd' + + # Skip if custom library already exists + out_name = "lib%s.a" % msvcr_name + out_file = os.path.join(sys.prefix, 'libs', out_name) + if os.path.isfile(out_file): + log.debug('Skip building msvcr library: "%s" exists' % (out_file)) + return True + + # Find the msvcr dll + msvcr_dll_name = msvcr_name + '.dll' + dll_file = find_dll(msvcr_dll_name) + if not dll_file: + log.warn('Cannot build msvcr library: "%s" not found' % msvcr_dll_name) + return False + + def_name = "lib%s.def" % msvcr_name + def_file = os.path.join(sys.prefix, 'libs', def_name) + + log.info('Building msvcr library: "%s" (from %s)' \ + % (out_file, dll_file)) + + # Generate a symbol definition file from the msvcr dll + generate_def(dll_file, def_file) + + # Create a custom mingw library for the given symbol definitions + cmd = ['dlltool', '-d', def_file, '-l', out_file] + retcode = subprocess.call(cmd) + + # Clean up symbol definitions + os.remove(def_file) + + return (not retcode) + +def build_import_library(): + if os.name != 'nt': + return + + arch = get_build_architecture() + if arch == 'AMD64': + return _build_import_library_amd64() + elif arch == 'Intel': + return _build_import_library_x86() + else: + raise ValueError("Unhandled arch %s" % arch) + +def _build_import_library_amd64(): + dll_file = find_python_dll() + + out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) + out_file = os.path.join(sys.prefix, 'libs', out_name) + if os.path.isfile(out_file): + log.debug('Skip building import library: "%s" exists' % (out_file)) + return + + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + + log.info('Building import library (arch=AMD64): "%s" (from %s)' \ + % (out_file, dll_file)) + + generate_def(dll_file, def_file) + + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.Popen(cmd) + +def _build_import_library_x86(): + """ Build the import libraries for Mingw32-gcc on Windows + """ + lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) + lib_file = os.path.join(sys.prefix, 'libs', lib_name) + out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) + out_file = os.path.join(sys.prefix, 'libs', out_name) + if not os.path.isfile(lib_file): + log.warn('Cannot build import library: "%s" not found' % (lib_file)) + return + if os.path.isfile(out_file): + log.debug('Skip building import library: "%s" exists' % (out_file)) + return + log.info('Building import library (ARCH=x86): "%s"' % (out_file)) + + from numpy.distutils import lib2def + + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file) + nm_output = lib2def.getnm(nm_cmd) + dlist, flist = lib2def.parse_nm(nm_output) + lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w')) + + dll_name = "python%d%d.dll" % tuple(sys.version_info[:2]) + args = (dll_name, def_file, out_file) + 
cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args
+    status = os.system(cmd)
+    # for now, fail silently
+    if status:
+        log.warn('Failed to build import library for gcc. Linking will fail.')
+    #if not success:
+    #    msg = "Couldn't find import library, and failed to build it."
+    #    raise DistutilsPlatformError(msg)
+    return
+
+#=====================================
+# Dealing with Visual Studio MANIFESTS
+#=====================================
+
+# Functions to deal with visual studio manifests. Manifests are a mechanism to
+# enforce strong DLL versioning on Windows, and have nothing to do with
+# distutils MANIFEST. Manifests are XML files with version info, and are used
+# by the OS loader; they are necessary when linking against a DLL not in the
+# system path; in particular, official python 2.6 binary is built against the
+# MS runtime 9 (the one from VS 2008), which is not available on most windows
+# systems; python 2.6 installer does install it in the Win SxS (Side by side)
+# directory, but this requires the manifest for this to work. This is a big
+# mess, thanks MS for a wonderful system.
+
+# XXX: ideally, we should use exactly the same version as used by python. I
+# submitted a patch to get this version, but it was only included for python
+# 2.6.1 and above. So for versions below, we use a "best guess".
+_MSVCRVER_TO_FULLVER = {}
+if sys.platform == 'win32':
+    try:
+        import msvcrt
+        # I took one version in my SxS directory: no idea if it is the good
+        # one, and we can't retrieve it from python
+        _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
+        _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
+        # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 on Windows XP:
+        _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
+        if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
+            major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
+            _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
+            del major, minor, rest
+    except ImportError:
+        # If we are here, it means python was not built with MSVC. Not sure
+        # what to do in that case: manifest building will fail, but it should
+        # not be used in that case anyway
+        log.warn('Cannot import msvcrt: using manifest will not be possible')
+
+def msvc_manifest_xml(maj, min):
+    """Given a major and minor version of the MSVCR, returns the
+    corresponding XML file."""
+    try:
+        fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
+    except KeyError:
+        raise ValueError("Version %d,%d of MSVCRT not supported yet" \
+                         % (maj, min))
+    # Don't be fooled, it looks like an XML, but it is not. In particular, it
+    # should not have any space before starting, and its size should be
+    # divisible by 4, most likely for alignment constraints when the xml is
+    # embedded in the binary...
+    # This template was copied directly from the python 2.6 binary (using
+    # strings.exe from mingw on python.exe).
+    template = """\
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+  <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+    <security>
+      <requestedPrivileges>
+        <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
+      </requestedPrivileges>
+    </security>
+  </trustInfo>
+  <dependency>
+    <dependentAssembly>
+      <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
+    </dependentAssembly>
+  </dependency>
+</assembly>"""
+
+    return template % {'fullver': fullver, 'maj': maj, 'min': min}
+
+def manifest_rc(name, type='dll'):
+    """Return the rc file used to generate the res file which will be embedded
+    as manifest for given manifest file name, of given type ('dll' or
+    'exe').
+ + Parameters + ---------- + name : str + name of the manifest file to embed + type : str {'dll', 'exe'} + type of the binary which will embed the manifest + + """ + if type == 'dll': + rctype = 2 + elif type == 'exe': + rctype = 1 + else: + raise ValueError("Type %s not supported" % type) + + return """\ +#include "winuser.h" +%d RT_MANIFEST %s""" % (rctype, name) + +def check_embedded_msvcr_match_linked(msver): + """msver is the ms runtime version used for the MANIFEST.""" + # check msvcr major version are the same for linking and + # embedding + msvcv = msvc_runtime_library() + if msvcv: + assert msvcv.startswith("msvcr"), msvcv + # Dealing with something like "mscvr90" or "mscvr100", the last + # last digit is the minor release, want int("9") or int("10"): + maj = int(msvcv[5:-1]) + if not maj == int(msver): + raise ValueError( + "Discrepancy between linked msvcr " \ + "(%d) and the one about to be embedded " \ + "(%d)" % (int(msver), maj)) + +def configtest_name(config): + base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) + return os.path.splitext(base)[0] + +def manifest_name(config): + # Get configest name (including suffix) + root = configtest_name(config) + exext = config.compiler.exe_extension + return root + exext + ".manifest" + +def rc_name(config): + # Get configest name (including suffix) + root = configtest_name(config) + return root + ".rc" + +def generate_manifest(config): + msver = get_build_msvc_version() + if msver is not None: + if msver >= 8: + check_embedded_msvcr_match_linked(msver) + ma = int(msver) + mi = int((msver - ma) * 10) + # Write the manifest file + manxml = msvc_manifest_xml(ma, mi) + man = open(manifest_name(config), "w") + config.temp_files.append(manifest_name(config)) + man.write(manxml) + man.close() + # # Write the rc file + # manrc = manifest_rc(manifest_name(self), "exe") + # rc = open(rc_name(self), "w") + # self.temp_files.append(manrc) + # rc.write(manrc) + # rc.close() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/misc_util.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/misc_util.py new file mode 100644 index 0000000000000..c146178f06479 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/misc_util.py @@ -0,0 +1,2271 @@ +from __future__ import division, absolute_import, print_function + +import os +import re +import sys +import imp +import copy +import glob +import atexit +import tempfile +import subprocess +import shutil + +import distutils +from distutils.errors import DistutilsError + +try: + set +except NameError: + from sets import Set as set + +from numpy.distutils.compat import get_exception + +__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', + 'dict_append', 'appendpath', 'generate_config_py', + 'get_cmd', 'allpath', 'get_mathlibs', + 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', + 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', + 'has_f_sources', 'has_cxx_sources', 'filter_sources', + 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', + 'get_script_files', 'get_lib_source_files', 'get_data_files', + 'dot_join', 'get_frame', 'minrelpath', 'njoin', + 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', + 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info'] + +class InstallableLib(object): + """ + Container to hold information on an installable library. + + Parameters + ---------- + name : str + Name of the installed library. 
+    build_info : dict
+        Dictionary holding build information.
+    target_dir : str
+        Absolute path specifying where to install the library.
+
+    See Also
+    --------
+    Configuration.add_installed_library
+
+    Notes
+    -----
+    The three parameters are stored as attributes with the same names.
+
+    """
+    def __init__(self, name, build_info, target_dir):
+        self.name = name
+        self.build_info = build_info
+        self.target_dir = target_dir
+
+def quote_args(args):
+    # don't use _nt_quote_args as it does not check if
+    # args items already have quotes or not.
+    args = list(args)
+    for i in range(len(args)):
+        a = args[i]
+        if ' ' in a and a[0] not in '"\'':
+            args[i] = '"%s"' % (a)
+    return args
+
+def allpath(name):
+    "Convert a /-separated pathname to one using the OS's path separator."
+    splitted = name.split('/')
+    return os.path.join(*splitted)
+
+def rel_path(path, parent_path):
+    """Return path relative to parent_path.
+    """
+    pd = os.path.abspath(parent_path)
+    apath = os.path.abspath(path)
+    if len(apath) < len(pd):
+        return path
+    if apath == pd:
+        return ''
+    if pd == apath[:len(pd)]:
+        assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
+        path = apath[len(pd)+1:]
+    return path
+
+def terminal_has_colors():
+    if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
+        # Avoid importing curses: it is known to cause an illegal
+        # operation when ssh'ing into a cygwin environment.
+        return 0
+    if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+        try:
+            import curses
+            curses.setupterm()
+            if (curses.tigetnum("colors") >= 0
+                and curses.tigetnum("pairs") >= 0
+                and ((curses.tigetstr("setf") is not None
+                      and curses.tigetstr("setb") is not None)
+                     or (curses.tigetstr("setaf") is not None
+                         and curses.tigetstr("setab") is not None)
+                     or curses.tigetstr("scp") is not None)):
+                return 1
+        except Exception:
+            pass
+    return 0
+
+if terminal_has_colors():
+    _colour_codes = dict(black=0, red=1, green=2, yellow=3,
+                         blue=4, magenta=5, cyan=6, white=7, default=9)
+    def colour_text(s, fg=None, bg=None, bold=False):
+        seq = []
+        if bold:
+            seq.append('1')
+        if fg:
+            fgcode = 30 + _colour_codes.get(fg.lower(), 0)
+            seq.append(str(fgcode))
+        if bg:
+            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
+            seq.append(str(bgcode))
+        if seq:
+            return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
+        else:
+            return s
+else:
+    def colour_text(s, fg=None, bg=None, bold=False):
+        return s
+
+def default_text(s):
+    return colour_text(s, 'default')
+def red_text(s):
+    return colour_text(s, 'red')
+def green_text(s):
+    return colour_text(s, 'green')
+def yellow_text(s):
+    return colour_text(s, 'yellow')
+def cyan_text(s):
+    return colour_text(s, 'cyan')
+def blue_text(s):
+    return colour_text(s, 'blue')
+
+#########################
+
+def cyg2win32(path):
+    if sys.platform=='cygwin' and path.startswith('/cygdrive'):
+        path = path[10] + ':' + os.path.normcase(path[11:])
+    return path
+
+def mingw32():
+    """Return true when using mingw32 environment.
+    """
+    if sys.platform=='win32':
+        if os.environ.get('OSTYPE', '')=='msys':
+            return True
+        if os.environ.get('MSYSTEM', '')=='MINGW32':
+            return True
+    return False
+
+def msvc_runtime_library():
+    "Return name of MSVC runtime library if Python was built with MSVC >= 7"
+    msc_pos = sys.version.find('MSC v.')
+    if msc_pos != -1:
+        msc_ver = sys.version[msc_pos+6:msc_pos+10]
+        lib = {'1300': 'msvcr70',  # MSVC 7.0
+               '1310': 'msvcr71',  # MSVC 7.1
+               '1400': 'msvcr80',  # MSVC 8
+               '1500': 'msvcr90',  # MSVC 9 (VS 2008)
+               '1600': 'msvcr100', # MSVC 10 (aka 2010)
+              }.get(msc_ver, None)
+    else:
+        lib = None
+    return lib
+
+
+#########################
+
+#XXX need support for .C that is also C++
+cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
+fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
+f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
+f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
+def _get_f90_modules(source):
+    """Return a list of Fortran f90 module names that
+    given source file defines.
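+
+    For illustration, a file declaring ``module fft_mod`` and
+    ``module helpers`` (hypothetical module names) yields
+    ``['fft_mod', 'helpers']``.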
+ """ + if not f90_ext_match(source): + return [] + modules = [] + f = open(source, 'r') + for line in f: + m = f90_module_name_match(line) + if m: + name = m.group('name') + modules.append(name) + # break # XXX can we assume that there is one module per file? + f.close() + return modules + +def is_string(s): + return isinstance(s, str) + +def all_strings(lst): + """Return True if all items in lst are string objects. """ + for item in lst: + if not is_string(item): + return False + return True + +def is_sequence(seq): + if is_string(seq): + return False + try: + len(seq) + except: + return False + return True + +def is_glob_pattern(s): + return is_string(s) and ('*' in s or '?' is s) + +def as_list(seq): + if is_sequence(seq): + return list(seq) + else: + return [seq] + +def get_language(sources): + # not used in numpy/scipy packages, use build_ext.detect_language instead + """Determine language value (c,f77,f90) from sources """ + language = None + for source in sources: + if isinstance(source, str): + if f90_ext_match(source): + language = 'f90' + break + elif fortran_ext_match(source): + language = 'f77' + return language + +def has_f_sources(sources): + """Return True if sources contains Fortran files """ + for source in sources: + if fortran_ext_match(source): + return True + return False + +def has_cxx_sources(sources): + """Return True if sources contains C++ files """ + for source in sources: + if cxx_ext_match(source): + return True + return False + +def filter_sources(sources): + """Return four lists of filenames containing + C, C++, Fortran, and Fortran 90 module sources, + respectively. + """ + c_sources = [] + cxx_sources = [] + f_sources = [] + fmodule_sources = [] + for source in sources: + if fortran_ext_match(source): + modules = _get_f90_modules(source) + if modules: + fmodule_sources.append(source) + else: + f_sources.append(source) + elif cxx_ext_match(source): + cxx_sources.append(source) + else: + c_sources.append(source) + return c_sources, cxx_sources, f_sources, fmodule_sources + + +def _get_headers(directory_list): + # get *.h files from list of directories + headers = [] + for d in directory_list: + head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? + headers.extend(head) + return headers + +def _get_directories(list_of_sources): + # get unique directories from list of sources. + direcs = [] + for f in list_of_sources: + d = os.path.split(f) + if d[0] != '' and not d[0] in direcs: + direcs.append(d[0]) + return direcs + +def get_dependencies(sources): + #XXX scan sources for include statements + return _get_headers(_get_directories(sources)) + +def is_local_src_dir(directory): + """Return true if directory is local directory. 
+ """ + if not is_string(directory): + return False + abs_dir = os.path.abspath(directory) + c = os.path.commonprefix([os.getcwd(), abs_dir]) + new_dir = abs_dir[len(c):].split(os.sep) + if new_dir and not new_dir[0]: + new_dir = new_dir[1:] + if new_dir and new_dir[0]=='build': + return False + new_dir = os.sep.join(new_dir) + return os.path.isdir(new_dir) + +def general_source_files(top_path): + pruned_directories = {'CVS':1, '.svn':1, 'build':1} + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for f in filenames: + if not prune_file_pat.search(f): + yield os.path.join(dirpath, f) + +def general_source_directories_files(top_path): + """Return a directory name relative to top_path and + files contained. + """ + pruned_directories = ['CVS', '.svn', 'build'] + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for d in dirnames: + dpath = os.path.join(dirpath, d) + rpath = rel_path(dpath, top_path) + files = [] + for f in os.listdir(dpath): + fn = os.path.join(dpath, f) + if os.path.isfile(fn) and not prune_file_pat.search(fn): + files.append(fn) + yield rpath, files + dpath = top_path + rpath = rel_path(dpath, top_path) + filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ + if not prune_file_pat.search(f)] + files = [f for f in filenames if os.path.isfile(f)] + yield rpath, files + + +def get_ext_source_files(ext): + # Get sources and any include files in the same directory. + filenames = [] + sources = [_m for _m in ext.sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + for d in ext.depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_script_files(scripts): + scripts = [_m for _m in scripts if is_string(_m)] + return scripts + +def get_lib_source_files(lib): + filenames = [] + sources = lib[1].get('sources', []) + sources = [_m for _m in sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + depends = lib[1].get('depends', []) + for d in depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_shared_lib_extension(is_python_ext=False): + """Return the correct file extension for shared libraries. + + Parameters + ---------- + is_python_ext : bool, optional + Whether the shared library is a Python extension. Default is False. + + Returns + ------- + so_ext : str + The shared library extension. + + Notes + ----- + For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, + and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on + POSIX systems according to PEP 3149. For Python 3.2 this is implemented on + Linux, but not on OS X. 
+ + """ + confvars = distutils.sysconfig.get_config_vars() + # SO is deprecated in 3.3.1, use EXT_SUFFIX instead + so_ext = confvars.get('EXT_SUFFIX', None) + if so_ext is None: + so_ext = confvars.get('SO', '') + + if not is_python_ext: + # hardcode known values, config vars (including SHLIB_SUFFIX) are + # unreliable (see #3182) + # darwin, windows and debug linux are wrong in 3.3.1 and older + if (sys.platform.startswith('linux') or + sys.platform.startswith('gnukfreebsd')): + so_ext = '.so' + elif sys.platform.startswith('darwin'): + so_ext = '.dylib' + elif sys.platform.startswith('win'): + so_ext = '.dll' + else: + # fall back to config vars for unknown platforms + # fix long extension for Python >=3.2, see PEP 3149. + if 'SOABI' in confvars: + # Does nothing unless SOABI config var exists + so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) + + return so_ext + +def get_data_files(data): + if is_string(data): + return [data] + sources = data[1] + filenames = [] + for s in sources: + if hasattr(s, '__call__'): + continue + if is_local_src_dir(s): + filenames.extend(list(general_source_files(s))) + elif is_string(s): + if os.path.isfile(s): + filenames.append(s) + else: + print('Not existing data file:', s) + else: + raise TypeError(repr(s)) + return filenames + +def dot_join(*args): + return '.'.join([a for a in args if a]) + +def get_frame(level=0): + """Return frame object from call stack with given level. + """ + try: + return sys._getframe(level+1) + except AttributeError: + frame = sys.exc_info()[2].tb_frame + for _ in range(level+1): + frame = frame.f_back + return frame + + +###################### + +class Configuration(object): + + _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', + 'libraries', 'headers', 'scripts', 'py_modules', + 'installed_libraries', 'define_macros'] + _dict_keys = ['package_dir', 'installed_pkg_config'] + _extra_keys = ['name', 'version'] + + numpy_include_dirs = [] + + def __init__(self, + package_name=None, + parent_name=None, + top_path=None, + package_path=None, + caller_level=1, + setup_name='setup.py', + **attrs): + """Construct configuration instance of a package. + + package_name -- name of the package + Ex.: 'distutils' + parent_name -- name of the parent package + Ex.: 'numpy' + top_path -- directory of the toplevel package + Ex.: the directory where the numpy package source sits + package_path -- directory of package. Will be computed by magic from the + directory of the caller module if not specified + Ex.: the directory where numpy.distutils is + caller_level -- frame level to caller namespace, internal parameter. + """ + self.name = dot_join(parent_name, package_name) + self.version = None + + caller_frame = get_frame(caller_level) + self.local_path = get_path_from_frame(caller_frame, top_path) + # local_path -- directory of a file (usually setup.py) that + # defines a configuration() function. + # local_path -- directory of a file (usually setup.py) that + # defines a configuration() function. 
+ if top_path is None: + top_path = self.local_path + self.local_path = '' + if package_path is None: + package_path = self.local_path + elif os.path.isdir(njoin(self.local_path, package_path)): + package_path = njoin(self.local_path, package_path) + if not os.path.isdir(package_path or '.'): + raise ValueError("%r is not a directory" % (package_path,)) + self.top_path = top_path + self.package_path = package_path + # this is the relative path in the installed package + self.path_in_package = os.path.join(*self.name.split('.')) + + self.list_keys = self._list_keys[:] + self.dict_keys = self._dict_keys[:] + + for n in self.list_keys: + v = copy.copy(attrs.get(n, [])) + setattr(self, n, as_list(v)) + + for n in self.dict_keys: + v = copy.copy(attrs.get(n, {})) + setattr(self, n, v) + + known_keys = self.list_keys + self.dict_keys + self.extra_keys = self._extra_keys[:] + for n in attrs.keys(): + if n in known_keys: + continue + a = attrs[n] + setattr(self, n, a) + if isinstance(a, list): + self.list_keys.append(n) + elif isinstance(a, dict): + self.dict_keys.append(n) + else: + self.extra_keys.append(n) + + if os.path.exists(njoin(package_path, '__init__.py')): + self.packages.append(self.name) + self.package_dir[self.name] = package_path + + self.options = dict( + ignore_setup_xxx_py = False, + assume_default_configuration = False, + delegate_options_to_subpackages = False, + quiet = False, + ) + + caller_instance = None + for i in range(1, 3): + try: + f = get_frame(i) + except ValueError: + break + try: + caller_instance = eval('self', f.f_globals, f.f_locals) + break + except NameError: + pass + if isinstance(caller_instance, self.__class__): + if caller_instance.options['delegate_options_to_subpackages']: + self.set_options(**caller_instance.options) + + self.setup_name = setup_name + + def todict(self): + """ + Return a dictionary compatible with the keyword arguments of distutils + setup function. + + Examples + -------- + >>> setup(**config.todict()) #doctest: +SKIP + """ + + self._optimize_data_files() + d = {} + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for n in known_keys: + a = getattr(self, n) + if a: + d[n] = a + return d + + def info(self, message): + if not self.options['quiet']: + print(message) + + def warn(self, message): + sys.stderr.write('Warning: %s' % (message,)) + + def set_options(self, **options): + """ + Configure Configuration instance. 
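+
+        For instance, ``config.set_options(quiet=True)`` (any of the keys
+        listed below) suppresses the informational messages printed while
+        configuring.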
+ + The following options are available: + - ignore_setup_xxx_py + - assume_default_configuration + - delegate_options_to_subpackages + - quiet + + """ + for key, value in options.items(): + if key in self.options: + self.options[key] = value + else: + raise ValueError('Unknown option: '+key) + + def get_distribution(self): + """Return the distutils distribution object for self.""" + from numpy.distutils.core import get_distribution + return get_distribution() + + def _wildcard_get_subpackage(self, subpackage_name, + parent_name, + caller_level = 1): + l = subpackage_name.split('.') + subpackage_path = njoin([self.local_path]+l) + dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)] + config_list = [] + for d in dirs: + if not os.path.isfile(njoin(d, '__init__.py')): + continue + if 'build' in d.split(os.sep): + continue + n = '.'.join(d.split(os.sep)[-len(l):]) + c = self.get_subpackage(n, + parent_name = parent_name, + caller_level = caller_level+1) + config_list.extend(c) + return config_list + + def _get_configuration_from_setup_py(self, setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = 1): + # In case setup_py imports local modules: + sys.path.insert(0, os.path.dirname(setup_py)) + try: + fo_setup_py = open(setup_py, 'U') + setup_name = os.path.splitext(os.path.basename(setup_py))[0] + n = dot_join(self.name, subpackage_name, setup_name) + setup_module = imp.load_module('_'.join(n.split('.')), + fo_setup_py, + setup_py, + ('.py', 'U', 1)) + fo_setup_py.close() + if not hasattr(setup_module, 'configuration'): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s does not define configuration())'\ + % (setup_module)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level + 1) + else: + pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) + args = (pn,) + def fix_args_py2(args): + if setup_module.configuration.__code__.co_argcount > 1: + args = args + (self.top_path,) + return args + def fix_args_py3(args): + if setup_module.configuration.__code__.co_argcount > 1: + args = args + (self.top_path,) + return args + if sys.version_info[0] < 3: + args = fix_args_py2(args) + else: + args = fix_args_py3(args) + config = setup_module.configuration(*args) + if config.name!=dot_join(parent_name, subpackage_name): + self.warn('Subpackage %r configuration returned as %r' % \ + (dot_join(parent_name, subpackage_name), config.name)) + finally: + del sys.path[0] + return config + + def get_subpackage(self,subpackage_name, + subpackage_path=None, + parent_name=None, + caller_level = 1): + """Return list of subpackage configurations. + + Parameters + ---------- + subpackage_name : str or None + Name of the subpackage to get the configuration. '*' in + subpackage_name is handled as a wildcard. + subpackage_path : str + If None, then the path is assumed to be the local path plus the + subpackage_name. If a setup.py file is not found in the + subpackage_path, then a default configuration is used. + parent_name : str + Parent name. 
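+
+        Returns
+        -------
+        config_list : list
+            List of subpackage configurations; a wildcard pattern may
+            match more than one subpackage, and the list is empty when no
+            configuration was found.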
+ """ + if subpackage_name is None: + if subpackage_path is None: + raise ValueError( + "either subpackage_name or subpackage_path must be specified") + subpackage_name = os.path.basename(subpackage_path) + + # handle wildcards + l = subpackage_name.split('.') + if subpackage_path is None and '*' in subpackage_name: + return self._wildcard_get_subpackage(subpackage_name, + parent_name, + caller_level = caller_level+1) + assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) + if subpackage_path is None: + subpackage_path = njoin([self.local_path] + l) + else: + subpackage_path = njoin([subpackage_path] + l[:-1]) + subpackage_path = self.paths([subpackage_path])[0] + setup_py = njoin(subpackage_path, self.setup_name) + if not self.options['ignore_setup_xxx_py']: + if not os.path.isfile(setup_py): + setup_py = njoin(subpackage_path, + 'setup_%s.py' % (subpackage_name)) + if not os.path.isfile(setup_py): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s/{setup_%s,setup}.py was not found)' \ + % (os.path.dirname(setup_py), subpackage_name)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level+1) + else: + config = self._get_configuration_from_setup_py( + setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = caller_level + 1) + if config: + return [config] + else: + return [] + + def add_subpackage(self,subpackage_name, + subpackage_path=None, + standalone = False): + """Add a sub-package to the current Configuration instance. + + This is useful in a setup.py script for adding sub-packages to a + package. + + Parameters + ---------- + subpackage_name : str + name of the subpackage + subpackage_path : str + if given, the subpackage path such as the subpackage is in + subpackage_path / subpackage_name. If None,the subpackage is + assumed to be located in the local path / subpackage_name. + standalone : bool + """ + + if standalone: + parent_name = None + else: + parent_name = self.name + config_list = self.get_subpackage(subpackage_name, subpackage_path, + parent_name = parent_name, + caller_level = 2) + if not config_list: + self.warn('No configuration returned, assuming unavailable.') + for config in config_list: + d = config + if isinstance(config, Configuration): + d = config.todict() + assert isinstance(d, dict), repr(type(d)) + + self.info('Appending %s configuration to %s' \ + % (d.get('name'), self.name)) + self.dict_append(**d) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a subpackage '+ subpackage_name) + + def add_data_dir(self, data_path): + """Recursively add files under data_path to data_files list. + + Recursively add files under data_path to the list of data_files to be + installed (and distributed). The data_path can be either a relative + path-name, or an absolute path-name, or a 2-tuple where the first + argument shows where in the install directory the data directory + should be installed to. + + Parameters + ---------- + data_path : seq or str + Argument can be either + + * 2-sequence (, ) + * path to data directory where python datadir suffix defaults + to package dir. 
+ + Notes + ----- + Rules for installation paths: + foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar + (gun, foo/bar) -> parent/gun + foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b + (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun + (gun/*, foo/*) -> parent/gun/a, parent/gun/b + /foo/bar -> (bar, /foo/bar) -> parent/bar + (gun, /foo/bar) -> parent/gun + (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar + + Examples + -------- + For example suppose the source directory contains fun/foo.dat and + fun/bar/car.dat:: + + >>> self.add_data_dir('fun') #doctest: +SKIP + >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP + >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP + + Will install data-files to the locations:: + + / + fun/ + foo.dat + bar/ + car.dat + sun/ + foo.dat + bar/ + car.dat + gun/ + foo.dat + car.dat + """ + if is_sequence(data_path): + d, data_path = data_path + else: + d = None + if is_sequence(data_path): + [self.add_data_dir((d, p)) for p in data_path] + return + if not is_string(data_path): + raise TypeError("not a string: %r" % (data_path,)) + if d is None: + if os.path.isabs(data_path): + return self.add_data_dir((os.path.basename(data_path), data_path)) + return self.add_data_dir((data_path, data_path)) + paths = self.paths(data_path, include_non_existing=False) + if is_glob_pattern(data_path): + if is_glob_pattern(d): + pattern_list = allpath(d).split(os.sep) + pattern_list.reverse() + # /a/*//b/ -> /a/*/b + rl = list(range(len(pattern_list)-1)); rl.reverse() + for i in rl: + if not pattern_list[i]: + del pattern_list[i] + # + for path in paths: + if not os.path.isdir(path): + print('Not a directory, skipping', path) + continue + rpath = rel_path(path, self.local_path) + path_list = rpath.split(os.sep) + path_list.reverse() + target_list = [] + i = 0 + for s in pattern_list: + if is_glob_pattern(s): + if i>=len(path_list): + raise ValueError('cannot fill pattern %r with %r' \ + % (d, path)) + target_list.append(path_list[i]) + else: + assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) + target_list.append(s) + i += 1 + if path_list[i:]: + self.warn('mismatch of pattern_list=%s and path_list=%s'\ + % (pattern_list, path_list)) + target_list.reverse() + self.add_data_dir((os.sep.join(target_list), path)) + else: + for path in paths: + self.add_data_dir((d, path)) + return + assert not is_glob_pattern(d), repr(d) + + dist = self.get_distribution() + if dist is not None and dist.data_files is not None: + data_files = dist.data_files + else: + data_files = self.data_files + + for path in paths: + for d1, f in list(general_source_directories_files(path)): + target_path = os.path.join(self.path_in_package, d, d1) + data_files.append((target_path, f)) + + def _optimize_data_files(self): + data_dict = {} + for p, files in self.data_files: + if p not in data_dict: + data_dict[p] = set() + for f in files: + data_dict[p].add(f) + self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] + + def add_data_files(self,*files): + """Add data files to configuration data_files. + + Parameters + ---------- + files : sequence + Argument(s) can be either + + * 2-sequence (,) + * paths to data files where python datadir prefix defaults + to package dir. + + Notes + ----- + The form of each element of the files sequence is very flexible + allowing many combinations of where to get the files from the package + and where they should ultimately be installed on the system. 
The most + basic usage is for an element of the files argument sequence to be a + simple filename. This will cause that file from the local path to be + installed to the installation path of the self.name package (package + path). The file argument can also be a relative path in which case the + entire relative path will be installed into the package directory. + Finally, the file can be an absolute path name in which case the file + will be found at the absolute path name but installed to the package + path. + + This basic behavior can be augmented by passing a 2-tuple in as the + file argument. The first element of the tuple should specify the + relative path (under the package install directory) where the + remaining sequence of files should be installed to (it has nothing to + do with the file-names in the source distribution). The second element + of the tuple is the sequence of files that should be installed. The + files in this sequence can be filenames, relative paths, or absolute + paths. For absolute paths the file will be installed in the top-level + package installation directory (regardless of the first argument). + Filenames and relative path names will be installed in the package + install directory under the path name given as the first element of + the tuple. + + Rules for installation paths: + + #. file.txt -> (., file.txt)-> parent/file.txt + #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt + #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt + #. *.txt -> parent/a.txt, parent/b.txt + #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt + #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt + #. (sun, file.txt) -> parent/sun/file.txt + #. (sun, bar/file.txt) -> parent/sun/file.txt + #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt + #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt + #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt + #. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt + + An additional feature is that the path to a data-file can actually be + a function that takes no arguments and returns the actual path(s) to + the data-files. This is useful when the data files are generated while + building the package. + + Examples + -------- + Add files to the list of data_files to be included with the package. + + >>> self.add_data_files('foo.dat', + ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), + ... 'bar/cat.dat', + ... '/full/path/to/can.dat') #doctest: +SKIP + + will install these data files to:: + + / + foo.dat + fun/ + gun.dat + nun/ + pun.dat + sun.dat + bar/ + car.dat + can.dat + + where is the package (or sub-package) + directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: + \\Python2.4 \\Lib \\site-packages \\mypackage') or + '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: + \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). 
+ """ + + if len(files)>1: + for f in files: + self.add_data_files(f) + return + assert len(files)==1 + if is_sequence(files[0]): + d, files = files[0] + else: + d = None + if is_string(files): + filepat = files + elif is_sequence(files): + if len(files)==1: + filepat = files[0] + else: + for f in files: + self.add_data_files((d, f)) + return + else: + raise TypeError(repr(type(files))) + + if d is None: + if hasattr(filepat, '__call__'): + d = '' + elif os.path.isabs(filepat): + d = '' + else: + d = os.path.dirname(filepat) + self.add_data_files((d, files)) + return + + paths = self.paths(filepat, include_non_existing=False) + if is_glob_pattern(filepat): + if is_glob_pattern(d): + pattern_list = d.split(os.sep) + pattern_list.reverse() + for path in paths: + path_list = path.split(os.sep) + path_list.reverse() + path_list.pop() # filename + target_list = [] + i = 0 + for s in pattern_list: + if is_glob_pattern(s): + target_list.append(path_list[i]) + i += 1 + else: + target_list.append(s) + target_list.reverse() + self.add_data_files((os.sep.join(target_list), path)) + else: + self.add_data_files((d, paths)) + return + assert not is_glob_pattern(d), repr((d, filepat)) + + dist = self.get_distribution() + if dist is not None and dist.data_files is not None: + data_files = dist.data_files + else: + data_files = self.data_files + + data_files.append((os.path.join(self.path_in_package, d), paths)) + + ### XXX Implement add_py_modules + + def add_define_macros(self, macros): + """Add define macros to configuration + + Add the given sequence of macro name and value duples to the beginning + of the define_macros list This list will be visible to all extension + modules of the current package. + """ + dist = self.get_distribution() + if dist is not None: + if not hasattr(dist, 'define_macros'): + dist.define_macros = [] + dist.define_macros.extend(macros) + else: + self.define_macros.extend(macros) + + + def add_include_dirs(self,*paths): + """Add paths to configuration include directories. + + Add the given sequence of paths to the beginning of the include_dirs + list. This list will be visible to all extension modules of the + current package. + """ + include_dirs = self.paths(paths) + dist = self.get_distribution() + if dist is not None: + if dist.include_dirs is None: + dist.include_dirs = [] + dist.include_dirs.extend(include_dirs) + else: + self.include_dirs.extend(include_dirs) + + def add_headers(self,*files): + """Add installable headers to configuration. + + Add the given sequence of files to the beginning of the headers list. + By default, headers will be installed under // directory. If an item of files + is a tuple, then its first argument specifies the actual installation + location relative to the path. + + Parameters + ---------- + files : str or seq + Argument(s) can be either: + + * 2-sequence (,) + * path(s) to header file(s) where python includedir suffix will + default to package name. + """ + headers = [] + for path in files: + if is_string(path): + [headers.append((self.name, p)) for p in self.paths(path)] + else: + if not isinstance(path, (tuple, list)) or len(path) != 2: + raise TypeError(repr(path)) + [headers.append((path[0], p)) for p in self.paths(path[1])] + dist = self.get_distribution() + if dist is not None: + if dist.headers is None: + dist.headers = [] + dist.headers.extend(headers) + else: + self.headers.extend(headers) + + def paths(self,*paths,**kws): + """Apply glob to paths and prepend local_path if needed. + + Applies glob.glob(...) 
to each path in the sequence (if needed) and + pre-pends the local_path if needed. Because this is called on all + source lists, this allows wildcard characters to be specified in lists + of sources for extension modules and libraries and scripts and allows + path-names be relative to the source directory. + + """ + include_non_existing = kws.get('include_non_existing', True) + return gpaths(paths, + local_path = self.local_path, + include_non_existing=include_non_existing) + + def _fix_paths_dict(self, kw): + for k in kw.keys(): + v = kw[k] + if k in ['sources', 'depends', 'include_dirs', 'library_dirs', + 'module_dirs', 'extra_objects']: + new_v = self.paths(v) + kw[k] = new_v + + def add_extension(self,name,sources,**kw): + """Add extension to configuration. + + Create and add an Extension instance to the ext_modules list. This + method also takes the following optional keyword arguments that are + passed on to the Extension constructor. + + Parameters + ---------- + name : str + name of the extension + sources : seq + list of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + include_dirs : + define_macros : + undef_macros : + library_dirs : + libraries : + runtime_library_dirs : + extra_objects : + extra_compile_args : + extra_link_args : + extra_f77_compile_args : + extra_f90_compile_args : + export_symbols : + swig_opts : + depends : + The depends list contains paths to files or directories that the + sources of the extension module depend on. If any path in the + depends list is newer than the extension module, then the module + will be rebuilt. + language : + f2py_options : + module_dirs : + extra_info : dict or list + dict or list of dict of keywords to be appended to keywords. + + Notes + ----- + The self.paths(...) method is applied to all lists that may contain + paths. 
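+
+        Examples
+        --------
+        A minimal extension built from one C source, with an extra include
+        directory (illustrative names)::
+
+            >>> config.add_extension('_spam',
+            ...                      sources=['src/spammodule.c'],
+            ...                      include_dirs=['src'])  #doctest: +SKIP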
+ """ + ext_args = copy.copy(kw) + ext_args['name'] = dot_join(self.name, name) + ext_args['sources'] = sources + + if 'extra_info' in ext_args: + extra_info = ext_args['extra_info'] + del ext_args['extra_info'] + if isinstance(extra_info, dict): + extra_info = [extra_info] + for info in extra_info: + assert isinstance(info, dict), repr(info) + dict_append(ext_args,**info) + + self._fix_paths_dict(ext_args) + + # Resolve out-of-tree dependencies + libraries = ext_args.get('libraries', []) + libnames = [] + ext_args['libraries'] = [] + for libname in libraries: + if isinstance(libname, tuple): + self._fix_paths_dict(libname[1]) + + # Handle library names of the form libname@relative/path/to/library + if '@' in libname: + lname, lpath = libname.split('@', 1) + lpath = os.path.abspath(njoin(self.local_path, lpath)) + if os.path.isdir(lpath): + c = self.get_subpackage(None, lpath, + caller_level = 2) + if isinstance(c, Configuration): + c = c.todict() + for l in [l[0] for l in c.get('libraries', [])]: + llname = l.split('__OF__', 1)[0] + if llname == lname: + c.pop('name', None) + dict_append(ext_args,**c) + break + continue + libnames.append(libname) + + ext_args['libraries'] = libnames + ext_args['libraries'] + ext_args['define_macros'] = \ + self.define_macros + ext_args.get('define_macros', []) + + from numpy.distutils.core import Extension + ext = Extension(**ext_args) + self.ext_modules.append(ext) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add an extension '+name) + return ext + + def add_library(self,name,sources,**build_info): + """ + Add library to configuration. + + Parameters + ---------- + name : str + Name of the extension. + sources : sequence + List of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compiler_args + * extra_f90_compiler_args + * f2py_options + * language + + """ + self._add_library(name, sources, None, build_info) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a library '+ name) + + def _add_library(self, name, sources, install_dir, build_info): + """Common implementation for add_library and add_installed_library. Do + not use directly""" + build_info = copy.copy(build_info) + name = name #+ '__OF__' + self.name + build_info['sources'] = sources + + # Sometimes, depends is not set up to an empty list by default, and if + # depends is not given to add_library, distutils barfs (#1134) + if not 'depends' in build_info: + build_info['depends'] = [] + + self._fix_paths_dict(build_info) + + # Add to libraries list so that it is build with build_clib + self.libraries.append((name, build_info)) + + def add_installed_library(self, name, sources, install_dir, build_info=None): + """ + Similar to add_library, but the specified library is installed. 
+ + Most C libraries used with `distutils` are only used to build python + extensions, but libraries built through this method will be installed + so that they can be reused by third-party packages. + + Parameters + ---------- + name : str + Name of the installed library. + sources : sequence + List of the library's source files. See `add_library` for details. + install_dir : str + Path to install the library, relative to the current sub-package. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compiler_args + * extra_f90_compiler_args + * f2py_options + * language + + Returns + ------- + None + + See Also + -------- + add_library, add_npy_pkg_config, get_info + + Notes + ----- + The best way to encode the options required to link against the specified + C libraries is to use a "libname.ini" file, and use `get_info` to + retrieve the required options (see `add_npy_pkg_config` for more + information). + + """ + if not build_info: + build_info = {} + + install_dir = os.path.join(self.package_path, install_dir) + self._add_library(name, sources, install_dir, build_info) + self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) + + def add_npy_pkg_config(self, template, install_dir, subst_dict=None): + """ + Generate and install a npy-pkg config file from a template. + + The config file generated from `template` is installed in the + given install directory, using `subst_dict` for variable substitution. + + Parameters + ---------- + template : str + The path of the template, relatively to the current package path. + install_dir : str + Where to install the npy-pkg config file, relatively to the current + package path. + subst_dict : dict, optional + If given, any string of the form ``@key@`` will be replaced by + ``subst_dict[key]`` in the template file when installed. The install + prefix is always available through the variable ``@prefix@``, since the + install prefix is not easy to get reliably from setup.py. + + See also + -------- + add_installed_library, get_info + + Notes + ----- + This works for both standard installs and in-place builds, i.e. the + ``@prefix@`` refer to the source directory for in-place builds. + + Examples + -------- + :: + + config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) + + Assuming the foo.ini.in file has the following content:: + + [meta] + Name=@foo@ + Version=1.0 + Description=dummy description + + [default] + Cflags=-I@prefix@/include + Libs= + + The generated file will have the following content:: + + [meta] + Name=bar + Version=1.0 + Description=dummy description + + [default] + Cflags=-Iprefix_dir/include + Libs= + + and will be installed as foo.ini in the 'lib' subpath. + + """ + if subst_dict is None: + subst_dict = {} + basename = os.path.splitext(template)[0] + template = os.path.join(self.package_path, template) + + if self.name in self.installed_pkg_config: + self.installed_pkg_config[self.name].append((template, install_dir, + subst_dict)) + else: + self.installed_pkg_config[self.name] = [(template, install_dir, + subst_dict)] + + + def add_scripts(self,*files): + """Add scripts to configuration. + + Add the sequence of files to the beginning of the scripts list. + Scripts will be installed under the /bin/ directory. 
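+
+        For instance (hypothetical script path)::
+
+            >>> config.add_scripts('scripts/mytool')  #doctest: +SKIP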
+ + """ + scripts = self.paths(files) + dist = self.get_distribution() + if dist is not None: + if dist.scripts is None: + dist.scripts = [] + dist.scripts.extend(scripts) + else: + self.scripts.extend(scripts) + + def dict_append(self,**dict): + for key in self.list_keys: + a = getattr(self, key) + a.extend(dict.get(key, [])) + for key in self.dict_keys: + a = getattr(self, key) + a.update(dict.get(key, {})) + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for key in dict.keys(): + if key not in known_keys: + a = getattr(self, key, None) + if a and a==dict[key]: continue + self.warn('Inheriting attribute %r=%r from %r' \ + % (key, dict[key], dict.get('name', '?'))) + setattr(self, key, dict[key]) + self.extra_keys.append(key) + elif key in self.extra_keys: + self.info('Ignoring attempt to set %r (from %r to %r)' \ + % (key, getattr(self, key), dict[key])) + elif key in known_keys: + # key is already processed above + pass + else: + raise ValueError("Don't know about key=%r" % (key)) + + def __str__(self): + from pprint import pformat + known_keys = self.list_keys + self.dict_keys + self.extra_keys + s = '<'+5*'-' + '\n' + s += 'Configuration of '+self.name+':\n' + known_keys.sort() + for k in known_keys: + a = getattr(self, k, None) + if a: + s += '%s = %s\n' % (k, pformat(a)) + s += 5*'-' + '>' + return s + + def get_config_cmd(self): + """ + Returns the numpy.distutils config command instance. + """ + cmd = get_cmd('config') + cmd.ensure_finalized() + cmd.dump_source = 0 + cmd.noisy = 0 + old_path = os.environ.get('PATH') + if old_path: + path = os.pathsep.join(['.', old_path]) + os.environ['PATH'] = path + return cmd + + def get_build_temp_dir(self): + """ + Return a path to a temporary directory where temporary files should be + placed. + """ + cmd = get_cmd('build') + cmd.ensure_finalized() + return cmd.build_temp + + def have_f77c(self): + """Check for availability of Fortran 77 compiler. + + Use it inside source generating function to ensure that + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 77 compiler is available (because a simple Fortran 77 + code was able to be compiled successfully). + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') + return flag + + def have_f90c(self): + """Check for availability of Fortran 90 compiler. + + Use it inside source generating function to ensure that + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 90 compiler is available (because a simple Fortran + 90 code was able to be compiled successfully) + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') + return flag + + def append_to(self, extlib): + """Append libraries, include_dirs to extension or library item. + """ + if is_sequence(extlib): + lib_name, build_info = extlib + dict_append(build_info, + libraries=self.libraries, + include_dirs=self.include_dirs) + else: + from numpy.distutils.core import Extension + assert isinstance(extlib, Extension), repr(extlib) + extlib.libraries.extend(self.libraries) + extlib.include_dirs.extend(self.include_dirs) + + def _get_svn_revision(self, path): + """Return path's SVN revision number. 
+ """ + revision = None + m = None + cwd = os.getcwd() + try: + os.chdir(path or '.') + p = subprocess.Popen(['svnversion'], shell=True, + stdout=subprocess.PIPE, stderr=None, + close_fds=True) + sout = p.stdout + m = re.match(r'(?P\d+)', sout.read()) + except: + pass + os.chdir(cwd) + if m: + revision = int(m.group('revision')) + return revision + if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): + entries = njoin(path, '_svn', 'entries') + else: + entries = njoin(path, '.svn', 'entries') + if os.path.isfile(entries): + f = open(entries) + fstr = f.read() + f.close() + if fstr[:5] == '\d+)"', fstr) + if m: + revision = int(m.group('revision')) + else: # non-xml entries file --- check to be sure that + m = re.search(r'dir[\n\r]+(?P\d+)', fstr) + if m: + revision = int(m.group('revision')) + return revision + + def _get_hg_revision(self, path): + """Return path's Mercurial revision number. + """ + revision = None + m = None + cwd = os.getcwd() + try: + os.chdir(path or '.') + p = subprocess.Popen(['hg identify --num'], shell=True, + stdout=subprocess.PIPE, stderr=None, + close_fds=True) + sout = p.stdout + m = re.match(r'(?P\d+)', sout.read()) + except: + pass + os.chdir(cwd) + if m: + revision = int(m.group('revision')) + return revision + branch_fn = njoin(path, '.hg', 'branch') + branch_cache_fn = njoin(path, '.hg', 'branch.cache') + + if os.path.isfile(branch_fn): + branch0 = None + f = open(branch_fn) + revision0 = f.read().strip() + f.close() + + branch_map = {} + for line in file(branch_cache_fn, 'r'): + branch1, revision1 = line.split()[:2] + if revision1==revision0: + branch0 = branch1 + try: + revision1 = int(revision1) + except ValueError: + continue + branch_map[branch1] = revision1 + + revision = branch_map.get(branch0) + return revision + + + def get_version(self, version_file=None, version_variable=None): + """Try to get version string of a package. + + Return a version string of the current package or None if the version + information could not be detected. + + Notes + ----- + This method scans files named + __version__.py, _version.py, version.py, and + __svn_version__.py for string variables version, __version\__, and + _version, until a version number is found. + """ + version = getattr(self, 'version', None) + if version is not None: + return version + + # Get version from version file. 
+ if version_file is None: + files = ['__version__.py', + self.name.split('.')[-1]+'_version.py', + 'version.py', + '__svn_version__.py', + '__hg_version__.py'] + else: + files = [version_file] + if version_variable is None: + version_vars = ['version', + '__version__', + self.name.split('.')[-1]+'_version'] + else: + version_vars = [version_variable] + for f in files: + fn = njoin(self.local_path, f) + if os.path.isfile(fn): + info = (open(fn), fn, ('.py', 'U', 1)) + name = os.path.splitext(os.path.basename(fn))[0] + n = dot_join(self.name, name) + try: + version_module = imp.load_module('_'.join(n.split('.')),*info) + except ImportError: + msg = get_exception() + self.warn(str(msg)) + version_module = None + if version_module is None: + continue + + for a in version_vars: + version = getattr(version_module, a, None) + if version is not None: + break + if version is not None: + break + + if version is not None: + self.version = version + return version + + # Get version as SVN or Mercurial revision number + revision = self._get_svn_revision(self.local_path) + if revision is None: + revision = self._get_hg_revision(self.local_path) + + if revision is not None: + version = str(revision) + self.version = version + + return version + + def make_svn_version_py(self, delete=True): + """Appends a data function to the data_files list that will generate + __svn_version__.py file to the current package directory. + + Generate package __svn_version__.py file from SVN revision number, + it will be removed after python exits but will be available + when sdist, etc commands are executed. + + Notes + ----- + If __svn_version__.py existed before, nothing is done. + + This is + intended for working with source directories that are in an SVN + repository. + """ + target = njoin(self.local_path, '__svn_version__.py') + revision = self._get_svn_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_svn_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + f = open(target, 'w') + f.write('version = %r\n' % (version)) + f.close() + + import atexit + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_svn_version_py())) + + def make_hg_version_py(self, delete=True): + """Appends a data function to the data_files list that will generate + __hg_version__.py file to the current package directory. + + Generate package __hg_version__.py file from Mercurial revision, + it will be removed after python exits but will be available + when sdist, etc commands are executed. + + Notes + ----- + If __hg_version__.py existed before, nothing is done. + + This is intended for working with source directories that are + in an Mercurial repository. 
+ """ + target = njoin(self.local_path, '__hg_version__.py') + revision = self._get_hg_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_hg_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + f = open(target, 'w') + f.write('version = %r\n' % (version)) + f.close() + + import atexit + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_hg_version_py())) + + def make_config_py(self,name='__config__'): + """Generate package __config__.py file containing system_info + information used during building the package. + + This file is installed to the + package installation directory. + + """ + self.py_modules.append((self.name, name, generate_config_py)) + + + def get_info(self,*names): + """Get resources information. + + Return information (from system_info.get_info) for all of the names in + the argument list in a single dictionary. + """ + from .system_info import get_info, dict_append + info_dict = {} + for a in names: + dict_append(info_dict,**get_info(a)) + return info_dict + + +def get_cmd(cmdname, _cache={}): + if cmdname not in _cache: + import distutils.core + dist = distutils.core._setup_distribution + if dist is None: + from distutils.errors import DistutilsInternalError + raise DistutilsInternalError( + 'setup distribution instance not initialized') + cmd = dist.get_command_obj(cmdname) + _cache[cmdname] = cmd + return _cache[cmdname] + +def get_numpy_include_dirs(): + # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] + include_dirs = Configuration.numpy_include_dirs[:] + if not include_dirs: + import numpy + include_dirs = [ numpy.get_include() ] + # else running numpy/core/setup.py + return include_dirs + +def get_npy_pkg_dir(): + """Return the path where to find the npy-pkg-config directory.""" + # XXX: import here for bootstrapping reasons + import numpy + d = os.path.join(os.path.dirname(numpy.__file__), + 'core', 'lib', 'npy-pkg-config') + return d + +def get_pkg_info(pkgname, dirs=None): + """ + Return library info for the given package. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). + dirs : sequence, optional + If given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are searched prior to the + NumPy directory. + + Returns + ------- + pkginfo : class instance + The `LibraryInfo` instance containing the build information. + + Raises + ------ + PkgNotFound + If the package is not found. + + See Also + -------- + Configuration.add_npy_pkg_config, Configuration.add_installed_library, + get_info + + """ + from numpy.distutils.npy_pkg_config import read_config + + if dirs: + dirs.append(get_npy_pkg_dir()) + else: + dirs = [get_npy_pkg_dir()] + return read_config(pkgname, dirs) + +def get_info(pkgname, dirs=None): + """ + Return an info dict for a given C library. + + The info dict contains the necessary options to use the C library. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). 
+ dirs : sequence, optional + If given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are searched prior to the + NumPy directory. + + Returns + ------- + info : dict + The dictionary with build information. + + Raises + ------ + PkgNotFound + If the package is not found. + + See Also + -------- + Configuration.add_npy_pkg_config, Configuration.add_installed_library, + get_pkg_info + + Examples + -------- + To get the necessary information for the npymath library from NumPy: + + >>> npymath_info = np.distutils.misc_util.get_info('npymath') + >>> npymath_info #doctest: +SKIP + {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': + ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']} + + This info dict can then be used as input to a `Configuration` instance:: + + config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) + + """ + from numpy.distutils.npy_pkg_config import parse_flags + pkg_info = get_pkg_info(pkgname, dirs) + + # Translate LibraryInfo instance into a build_info dict + info = parse_flags(pkg_info.cflags()) + for k, v in parse_flags(pkg_info.libs()).items(): + info[k].extend(v) + + # add_extension extra_info argument is ANAL + info['define_macros'] = info['macros'] + del info['macros'] + del info['ignored'] + + return info + +def is_bootstrapping(): + if sys.version_info[0] >= 3: + import builtins + else: + import __builtin__ as builtins + + try: + builtins.__NUMPY_SETUP__ + return True + except AttributeError: + return False + __NUMPY_SETUP__ = False + + +######################### + +def default_config_dict(name = None, parent_name = None, local_path=None): + """Return a configuration dictionary for usage in + configuration() function defined in file setup_.py. + """ + import warnings + warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ + 'deprecated default_config_dict(%r,%r,%r)' + % (name, parent_name, local_path, + name, parent_name, local_path, + )) + c = Configuration(name, parent_name, local_path) + return c.todict() + + +def dict_append(d, **kws): + for k, v in kws.items(): + if k in d: + ov = d[k] + if isinstance(ov, str): + d[k] = v + else: + d[k].extend(v) + else: + d[k] = v + +def appendpath(prefix, path): + if os.path.sep != '/': + prefix = prefix.replace('/', os.path.sep) + path = path.replace('/', os.path.sep) + drive = '' + if os.path.isabs(path): + drive = os.path.splitdrive(prefix)[0] + absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] + pathdrive, path = os.path.splitdrive(path) + d = os.path.commonprefix([absprefix, path]) + if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ + or os.path.join(path[:len(d)], path[len(d):]) != path: + # Handle invalid paths + d = os.path.dirname(d) + subpath = path[len(d):] + if os.path.isabs(subpath): + subpath = subpath[1:] + else: + subpath = path + return os.path.normpath(njoin(drive + prefix, subpath)) + +def generate_config_py(target): + """Generate config.py file containing system_info information + used during building the package. 
+ + Usage: + config['py_modules'].append((packagename, '__config__',generate_config_py)) + """ + from numpy.distutils.system_info import system_info + from distutils.dir_util import mkpath + mkpath(os.path.dirname(target)) + f = open(target, 'w') + f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0]))) + f.write('# It contains system_info results at the time of building this package.\n') + f.write('__all__ = ["get_info","show"]\n\n') + for k, i in system_info.saved_results.items(): + f.write('%s=%r\n' % (k, i)) + f.write(r''' +def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + +def show(): + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + ''') + + f.close() + return target + +def msvc_version(compiler): + """Return version major and minor of compiler instance if it is + MSVC, raise an exception otherwise.""" + if not compiler.compiler_type == "msvc": + raise ValueError("Compiler instance is not msvc (%s)"\ + % compiler.compiler_type) + return compiler._MSVCCompiler__version + +if sys.version[:3] >= '2.5': + def get_build_architecture(): + from distutils.msvccompiler import get_build_architecture + return get_build_architecture() +else: + #copied from python 2.5.1 distutils/msvccompiler.py + def get_build_architecture(): + """Return the processor architecture. + + Possible results are "Intel", "Itanium", or "AMD64". + """ + prefix = " bit (" + i = sys.version.find(prefix) + if i == -1: + return "Intel" + j = sys.version.find(")", i) + return sys.version[i+len(prefix):j] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/npy_pkg_config.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/npy_pkg_config.py new file mode 100644 index 0000000000000..ceab906a4edff --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/npy_pkg_config.py @@ -0,0 +1,464 @@ +from __future__ import division, absolute_import, print_function + +import sys +import re +import os +import shlex + +if sys.version_info[0] < 3: + from ConfigParser import SafeConfigParser, NoOptionError +else: + from configparser import ConfigParser, SafeConfigParser, NoOptionError + +__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', + 'read_config', 'parse_flags'] + +_VAR = re.compile('\$\{([a-zA-Z0-9_-]+)\}') + +class FormatError(IOError): + """ + Exception thrown when there is a problem parsing a configuration file. + + """ + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +class PkgNotFound(IOError): + """Exception raised when a package can not be located.""" + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +def parse_flags(line): + """ + Parse a line from a config file containing compile flags. + + Parameters + ---------- + line : str + A single line containing one or more compile flags. + + Returns + ------- + d : dict + Dictionary of parsed flags, split into relevant categories. 
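As a concrete illustration of the categories documented below, a GCC-style flag line splits like this (a sketch using the parse_flags defined in this module; paths are illustrative):

from numpy.distutils.npy_pkg_config import parse_flags

d = parse_flags('-I/usr/local/include -L/usr/local/lib -lnpymath -DUSE_SSE2')
# d['include_dirs'] == ['/usr/local/include']
# d['library_dirs'] == ['/usr/local/lib']
# d['libraries']    == ['npymath']
# d['macros']       == ['USE_SSE2']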
+        These categories are the keys of `d`:
+
+        * 'include_dirs'
+        * 'library_dirs'
+        * 'libraries'
+        * 'macros'
+        * 'ignored'
+
+    """
+    lexer = shlex.shlex(line)
+    lexer.whitespace_split = True
+
+    d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
+         'macros': [], 'ignored': []}
+    def next_token(t):
+        if t.startswith('-I'):
+            if len(t) > 2:
+                d['include_dirs'].append(t[2:])
+            else:
+                t = lexer.get_token()
+                d['include_dirs'].append(t)
+        elif t.startswith('-L'):
+            if len(t) > 2:
+                d['library_dirs'].append(t[2:])
+            else:
+                t = lexer.get_token()
+                d['library_dirs'].append(t)
+        elif t.startswith('-l'):
+            d['libraries'].append(t[2:])
+        elif t.startswith('-D'):
+            d['macros'].append(t[2:])
+        else:
+            d['ignored'].append(t)
+        return lexer.get_token()
+
+    t = lexer.get_token()
+    while t:
+        t = next_token(t)
+
+    return d
+
+def _escape_backslash(val):
+    return val.replace('\\', '\\\\')
+
+class LibraryInfo(object):
+    """
+    Object containing build information about a library.
+
+    Parameters
+    ----------
+    name : str
+        The library name.
+    description : str
+        Description of the library.
+    version : str
+        Version string.
+    sections : dict
+        The sections of the configuration file for the library. The keys are
+        the section headers, the values the text under each header.
+    vars : class instance
+        A `VariableSet` instance, which contains ``(name, value)`` pairs for
+        variables defined in the configuration file for the library.
+    requires : sequence, optional
+        The required libraries for the library to be installed.
+
+    Notes
+    -----
+    All input parameters (except "sections" which is a method) are available as
+    attributes of the same name.
+
+    """
+    def __init__(self, name, description, version, sections, vars, requires=None):
+        self.name = name
+        self.description = description
+        if requires:
+            self.requires = requires
+        else:
+            self.requires = []
+        self.version = version
+        self._sections = sections
+        self.vars = vars
+
+    def sections(self):
+        """
+        Return the section headers of the config file.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        keys : list of str
+            The list of section headers.
+
+        """
+        return list(self._sections.keys())
+
+    def cflags(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['cflags'])
+        return _escape_backslash(val)
+
+    def libs(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['libs'])
+        return _escape_backslash(val)
+
+    def __str__(self):
+        m = ['Name: %s' % self.name]
+        m.append('Description: %s' % self.description)
+        if self.requires:
+            m.append('Requires: %s' % ",".join(self.requires))
+        else:
+            m.append('Requires:')
+        m.append('Version: %s' % self.version)
+
+        return "\n".join(m)
+
+class VariableSet(object):
+    """
+    Container object for the variables defined in a config file.
+
+    `VariableSet` can be used as a plain dictionary, with the variable names
+    as keys.
+
+    Parameters
+    ----------
+    d : dict
+        Dict of items in the "variables" section of the configuration file.
+ + """ + def __init__(self, d): + self._raw_data = dict([(k, v) for k, v in d.items()]) + + self._re = {} + self._re_sub = {} + + self._init_parse() + + def _init_parse(self): + for k, v in self._raw_data.items(): + self._init_parse_var(k, v) + + def _init_parse_var(self, name, value): + self._re[name] = re.compile(r'\$\{%s\}' % name) + self._re_sub[name] = value + + def interpolate(self, value): + # Brute force: we keep interpolating until there is no '${var}' anymore + # or until interpolated string is equal to input string + def _interpolate(value): + for k in self._re.keys(): + value = self._re[k].sub(self._re_sub[k], value) + return value + while _VAR.search(value): + nvalue = _interpolate(value) + if nvalue == value: + break + value = nvalue + + return value + + def variables(self): + """ + Return the list of variable names. + + Parameters + ---------- + None + + Returns + ------- + names : list of str + The names of all variables in the `VariableSet` instance. + + """ + return list(self._raw_data.keys()) + + # Emulate a dict to set/get variables values + def __getitem__(self, name): + return self._raw_data[name] + + def __setitem__(self, name, value): + self._raw_data[name] = value + self._init_parse_var(name, value) + +def parse_meta(config): + if not config.has_section('meta'): + raise FormatError("No meta section found !") + + d = {} + for name, value in config.items('meta'): + d[name] = value + + for k in ['name', 'description', 'version']: + if not k in d: + raise FormatError("Option %s (section [meta]) is mandatory, " + "but not found" % k) + + if not 'requires' in d: + d['requires'] = [] + + return d + +def parse_variables(config): + if not config.has_section('variables'): + raise FormatError("No variables section found !") + + d = {} + + for name, value in config.items("variables"): + d[name] = value + + return VariableSet(d) + +def parse_sections(config): + return meta_d, r + +def pkg_to_filename(pkg_name): + return "%s.ini" % pkg_name + +def parse_config(filename, dirs=None): + if dirs: + filenames = [os.path.join(d, filename) for d in dirs] + else: + filenames = [filename] + + if sys.version[:3] > '3.1': + # SafeConfigParser is deprecated in py-3.2 and renamed to ConfigParser + config = ConfigParser() + else: + config = SafeConfigParser() + + n = config.read(filenames) + if not len(n) >= 1: + raise PkgNotFound("Could not find file(s) %s" % str(filenames)) + + # Parse meta and variables sections + meta = parse_meta(config) + + vars = {} + if config.has_section('variables'): + for name, value in config.items("variables"): + vars[name] = _escape_backslash(value) + + # Parse "normal" sections + secs = [s for s in config.sections() if not s in ['meta', 'variables']] + sections = {} + + requires = {} + for s in secs: + d = {} + if config.has_option(s, "requires"): + requires[s] = config.get(s, 'requires') + + for name, value in config.items(s): + d[name] = value + sections[s] = d + + return meta, vars, sections, requires + +def _read_config_imp(filenames, dirs=None): + def _read_config(f): + meta, vars, sections, reqs = parse_config(f, dirs) + # recursively add sections and variables of required libraries + for rname, rvalue in reqs.items(): + nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) + + # Update var dict for variables not in 'top' config file + for k, v in nvars.items(): + if not k in vars: + vars[k] = v + + # Update sec dict + for oname, ovalue in nsections[rname].items(): + if ovalue: + sections[rname][oname] += ' %s' % ovalue + + return meta, 
vars, sections, reqs + + meta, vars, sections, reqs = _read_config(filenames) + + # FIXME: document this. If pkgname is defined in the variables section, and + # there is no pkgdir variable defined, pkgdir is automatically defined to + # the path of pkgname. This requires the package to be imported to work + if not 'pkgdir' in vars and "pkgname" in vars: + pkgname = vars["pkgname"] + if not pkgname in sys.modules: + raise ValueError("You should import %s to get information on %s" % + (pkgname, meta["name"])) + + mod = sys.modules[pkgname] + vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) + + return LibraryInfo(name=meta["name"], description=meta["description"], + version=meta["version"], sections=sections, vars=VariableSet(vars)) + +# Trivial cache to cache LibraryInfo instances creation. To be really +# efficient, the cache should be handled in read_config, since a same file can +# be parsed many time outside LibraryInfo creation, but I doubt this will be a +# problem in practice +_CACHE = {} +def read_config(pkgname, dirs=None): + """ + Return library info for a package from its configuration file. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). + dirs : sequence, optional + If given, should be a sequence of directories - usually including + the NumPy base directory - where to look for npy-pkg-config files. + + Returns + ------- + pkginfo : class instance + The `LibraryInfo` instance containing the build information. + + Raises + ------ + PkgNotFound + If the package is not found. + + See Also + -------- + misc_util.get_info, misc_util.get_pkg_info + + Examples + -------- + >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') + >>> type(npymath_info) + + >>> print npymath_info + Name: npymath + Description: Portable, core math library implementing C99 standard + Requires: + Version: 0.1 #random + + """ + try: + return _CACHE[pkgname] + except KeyError: + v = _read_config_imp(pkg_to_filename(pkgname), dirs) + _CACHE[pkgname] = v + return v + +# TODO: +# - implements version comparison (modversion + atleast) + +# pkg-config simple emulator - useful for debugging, and maybe later to query +# the system +if __name__ == '__main__': + import sys + from optparse import OptionParser + import glob + + parser = OptionParser() + parser.add_option("--cflags", dest="cflags", action="store_true", + help="output all preprocessor and compiler flags") + parser.add_option("--libs", dest="libs", action="store_true", + help="output all linker flags") + parser.add_option("--use-section", dest="section", + help="use this section instead of default for options") + parser.add_option("--version", dest="version", action="store_true", + help="output version") + parser.add_option("--atleast-version", dest="min_version", + help="Minimal version") + parser.add_option("--list-all", dest="list_all", action="store_true", + help="Minimal version") + parser.add_option("--define-variable", dest="define_variable", + help="Replace variable with the given value") + + (options, args) = parser.parse_args(sys.argv) + + if len(args) < 2: + raise ValueError("Expect package name on the command line:") + + if options.list_all: + files = glob.glob("*.ini") + for f in files: + info = read_config(f) + print("%s\t%s - %s" % (info.name, info.name, info.description)) + + pkg_name = args[1] + import os + d = os.environ.get('NPY_PKG_CONFIG_PATH') + if d: + info = read_config(pkg_name, 
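parse_config() and read_config() consume npy-pkg-config style .ini files shaped like the hypothetical foo.ini below; all names and paths are illustrative, and ${...} references are interpolated through VariableSet:

[meta]
Name = foo
Description = A hypothetical example library
Version = 0.1

[variables]
prefix = /usr/local
libdir = ${prefix}/lib

[default]
Libs = -L${libdir} -lfoo
Cflags = -I${prefix}/include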
['numpy/core/lib/npy-pkg-config', '.', d])
+    else:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
+
+    if options.section:
+        section = options.section
+    else:
+        section = "default"
+
+    if options.define_variable:
+        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
+        if not m:
+            raise ValueError("--define-variable option should be of " \
+                "the form --define-variable=foo=bar")
+        else:
+            name = m.group(1)
+            value = m.group(2)
+        info.vars[name] = value
+
+    if options.cflags:
+        print(info.cflags(section))
+    if options.libs:
+        print(info.libs(section))
+    if options.version:
+        print(info.version)
+    if options.min_version:
+        print(info.version >= options.min_version)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/numpy_distribution.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/numpy_distribution.py
new file mode 100644
index 0000000000000..6ae19d16b18f3
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/numpy_distribution.py
@@ -0,0 +1,19 @@
+# XXX: Handle setuptools ?
+from __future__ import division, absolute_import, print_function
+
+from distutils.core import Distribution
+
+# This class is used because we add new files (sconscripts, and so on) with the
+# scons command
+class NumpyDistribution(Distribution):
+    def __init__(self, attrs = None):
+        # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
+        self.scons_data = []
+        # A list of installable libraries
+        self.installed_libraries = []
+        # A dict of pkg_config files to generate/install
+        self.installed_pkg_config = {}
+        Distribution.__init__(self, attrs)
+
+    def has_scons_scripts(self):
+        return bool(self.scons_data)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/pathccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/pathccompiler.py
new file mode 100644
index 0000000000000..fc9872db34da8
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/pathccompiler.py
@@ -0,0 +1,23 @@
+from __future__ import division, absolute_import, print_function
+
+from distutils.unixccompiler import UnixCCompiler
+
+class PathScaleCCompiler(UnixCCompiler):
+
+    """
+    PathScale compiler compatible with a gcc-built Python.
+ """ + + compiler_type = 'pathcc' + cc_exe = 'pathcc' + cxx_exe = 'pathCC' + + def __init__ (self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__ (self, verbose, dry_run, force) + cc_compiler = self.cc_exe + cxx_compiler = self.cxx_exe + self.set_executables(compiler=cc_compiler, + compiler_so=cc_compiler, + compiler_cxx=cxx_compiler, + linker_exe=cc_compiler, + linker_so=cc_compiler + ' -shared') diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/setup.py new file mode 100644 index 0000000000000..82a53bd08dbe3 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/setup.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('distutils', parent_package, top_path) + config.add_subpackage('command') + config.add_subpackage('fcompiler') + config.add_data_dir('tests') + config.add_data_files('site.cfg') + config.add_data_files('mingw/gfortran_vs2003_hack.c') + config.make_config_py() + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/system_info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/system_info.py new file mode 100644 index 0000000000000..48c92c5482248 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/system_info.py @@ -0,0 +1,2242 @@ +#!/bin/env python +""" +This file defines a set of system_info classes for getting +information about various resources (libraries, library directories, +include directories, etc.) in the system. Currently, the following +classes are available: + + atlas_info + atlas_threads_info + atlas_blas_info + atlas_blas_threads_info + lapack_atlas_info + blas_info + lapack_info + openblas_info + blas_opt_info # usage recommended + lapack_opt_info # usage recommended + fftw_info,dfftw_info,sfftw_info + fftw_threads_info,dfftw_threads_info,sfftw_threads_info + djbfft_info + x11_info + lapack_src_info + blas_src_info + numpy_info + numarray_info + numpy_info + boost_python_info + agg2_info + wx_info + gdk_pixbuf_xlib_2_info + gdk_pixbuf_2_info + gdk_x11_2_info + gtkp_x11_2_info + gtkp_2_info + xft_info + freetype2_info + umfpack_info + +Usage: + info_dict = get_info() + where is a string 'atlas','x11','fftw','lapack','blas', + 'lapack_src', 'blas_src', etc. For a complete list of allowed names, + see the definition of get_info() function below. + + Returned info_dict is a dictionary which is compatible with + distutils.setup keyword arguments. If info_dict == {}, then the + asked resource is not available (system_info could not find it). + + Several *_info classes specify an environment variable to specify + the locations of software. When setting the corresponding environment + variable to 'None' then the software will be ignored, even when it + is available in system. + +Global parameters: + system_info.search_static_first - search static libraries (.a) + in precedence to shared ones (.so, .sl) if enabled. + system_info.verbosity - output the results to stdout if enabled. + +The file 'site.cfg' is looked for in + +1) Directory of main setup.py file being run. +2) Home directory of user running the setup.py file as ~/.numpy-site.cfg +3) System wide directory (location of this file...) 
+
+The first one found is used to get system configuration options. The
+format is that used by ConfigParser (i.e., Windows .INI style). The
+section ALL has options that are the default for each section. The
+available sections are fftw, atlas, and x11. Appropriate defaults are
+used if nothing is specified.
+
+The order of finding the locations of resources is the following:
+ 1. environment variable
+ 2. section in site.cfg
+ 3. ALL section in site.cfg
+Only the first complete match is returned.
+
+Example:
+----------
+[ALL]
+library_dirs = /usr/lib:/usr/local/lib:/opt/lib
+include_dirs = /usr/include:/usr/local/include:/opt/include
+src_dirs = /usr/local/src:/opt/src
+# search static libraries (.a) in preference to shared ones (.so)
+search_static_first = 0
+
+[fftw]
+fftw_libs = rfftw, fftw
+fftw_opt_libs = rfftw_threaded, fftw_threaded
+# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
+
+[atlas]
+library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
+# for overriding the names of the atlas libraries
+atlas_libs = lapack, f77blas, cblas, atlas
+
+[x11]
+library_dirs = /usr/X11R6/lib
+include_dirs = /usr/X11R6/include
+----------
+
+Authors:
+  Pearu Peterson , February 2002
+  David M. Cooke , April 2002
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import os
+import re
+import copy
+import warnings
+from glob import glob
+from functools import reduce
+if sys.version_info[0] < 3:
+    from ConfigParser import NoOptionError, ConfigParser
+else:
+    from configparser import NoOptionError, ConfigParser
+
+from distutils.errors import DistutilsError
+from distutils.dist import Distribution
+import distutils.sysconfig
+from distutils import log
+from distutils.util import get_platform
+
+from numpy.distutils.exec_command import \
+    find_executable, exec_command, get_pythonexe
+from numpy.distutils.misc_util import is_sequence, is_string, \
+    get_shared_lib_extension
+from numpy.distutils.command.config import config as cmd_config
+from numpy.distutils.compat import get_exception
+import distutils.ccompiler
+import tempfile
+import shutil
+
+
+# Determine number of bits
+import platform
+_bits = {'32bit': 32, '64bit': 64}
+platform_bits = _bits[platform.architecture()[0]]
+
+
+def libpaths(paths, bits):
+    """Return a list of library paths valid on 32 or 64 bit systems.
+
+    Inputs:
+      paths : sequence
+        A sequence of strings (typically paths)
+      bits : int
+        An integer, the only valid values are 32 or 64. A ValueError exception
+        is raised otherwise.
+ + Examples: + + Consider a list of directories + >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] + + For a 32-bit platform, this is already valid: + >>> np.distutils.system_info.libpaths(paths,32) + ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] + + On 64 bits, we prepend the '64' postfix + >>> np.distutils.system_info.libpaths(paths,64) + ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', + '/usr/lib64', '/usr/lib'] + """ + if bits not in (32, 64): + raise ValueError("Invalid bit size in libpaths: 32 or 64 only") + + # Handle 32bit case + if bits == 32: + return paths + + # Handle 64bit case + out = [] + for p in paths: + out.extend([p + '64', p]) + + return out + + +if sys.platform == 'win32': + default_lib_dirs = ['C:\\', + os.path.join(distutils.sysconfig.EXEC_PREFIX, + 'libs')] + default_include_dirs = [] + default_src_dirs = ['.'] + default_x11_lib_dirs = [] + default_x11_include_dirs = [] +else: + default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', + '/opt/local/lib', '/sw/lib'], platform_bits) + default_include_dirs = ['/usr/local/include', + '/opt/include', '/usr/include', + # path of umfpack under macports + '/opt/local/include/ufsparse', + '/opt/local/include', '/sw/include', + '/usr/include/suitesparse'] + default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] + + default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', + '/usr/lib'], platform_bits) + default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include', + '/usr/include'] + + if os.path.exists('/usr/lib/X11'): + globbed_x11_dir = glob('/usr/lib/*/libX11.so') + if globbed_x11_dir: + x11_so_dir = os.path.split(globbed_x11_dir[0])[0] + default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) + default_x11_include_dirs.extend(['/usr/lib/X11/include', + '/usr/include/X11']) + + import subprocess as sp + tmp = None + try: + # Explicitly open/close file to avoid ResourceWarning when + # tests are run in debug mode Python 3. + tmp = open(os.devnull, 'w') + p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE, + stderr=tmp) + except (OSError, DistutilsError): + # OSError if gcc is not installed, or SandboxViolation (DistutilsError + # subclass) if an old setuptools bug is triggered (see gh-3160). 
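The gcc probe above amounts to the following standalone sketch; on Debian-style multiarch systems the printed triplet (e.g. 'x86_64-linux-gnu') is appended to the default library search paths:

# Standalone sketch of the multiarch probe used above.
import subprocess as sp

p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE, stderr=sp.PIPE)
triplet = p.communicate()[0].decode().strip()
if p.returncode == 0:
    print("/usr/lib/" + triplet)   # e.g. /usr/lib/x86_64-linux-gnu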
+ pass + else: + triplet = str(p.communicate()[0].decode().strip()) + if p.returncode == 0: + # gcc supports the "-print-multiarch" option + default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] + default_lib_dirs += [os.path.join("/usr/lib/", triplet)] + finally: + if tmp is not None: + tmp.close() + +if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: + default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) + default_include_dirs.append(os.path.join(sys.prefix, 'include')) + default_src_dirs.append(os.path.join(sys.prefix, 'src')) + +default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] +default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] +default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] + +so_ext = get_shared_lib_extension() + + +def get_standard_file(fname): + """Returns a list of files named 'fname' from + 1) System-wide directory (directory-location of this module) + 2) Users HOME directory (os.environ['HOME']) + 3) Local directory + """ + # System-wide file + filenames = [] + try: + f = __file__ + except NameError: + f = sys.argv[0] + else: + sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], + fname) + if os.path.isfile(sysfile): + filenames.append(sysfile) + + # Home directory + # And look for the user config file + try: + f = os.path.expanduser('~') + except KeyError: + pass + else: + user_file = os.path.join(f, fname) + if os.path.isfile(user_file): + filenames.append(user_file) + + # Local file + if os.path.isfile(fname): + filenames.append(os.path.abspath(fname)) + + return filenames + + +def get_info(name, notfound_action=0): + """ + notfound_action: + 0 - do nothing + 1 - display warning message + 2 - raise error + """ + cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead + 'atlas_threads': atlas_threads_info, # ditto + 'atlas_blas': atlas_blas_info, + 'atlas_blas_threads': atlas_blas_threads_info, + 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead + 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto + 'mkl': mkl_info, + # openblas which may or may not have embedded lapack + 'openblas': openblas_info, # use blas_opt instead + # openblas with embedded lapack + 'openblas_lapack': openblas_lapack_info, # use blas_opt instead + 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead + 'blas_mkl': blas_mkl_info, # use blas_opt instead + 'x11': x11_info, + 'fft_opt': fft_opt_info, + 'fftw': fftw_info, + 'fftw2': fftw2_info, + 'fftw3': fftw3_info, + 'dfftw': dfftw_info, + 'sfftw': sfftw_info, + 'fftw_threads': fftw_threads_info, + 'dfftw_threads': dfftw_threads_info, + 'sfftw_threads': sfftw_threads_info, + 'djbfft': djbfft_info, + 'blas': blas_info, # use blas_opt instead + 'lapack': lapack_info, # use lapack_opt instead + 'lapack_src': lapack_src_info, + 'blas_src': blas_src_info, + 'numpy': numpy_info, + 'f2py': f2py_info, + 'Numeric': Numeric_info, + 'numeric': Numeric_info, + 'numarray': numarray_info, + 'numerix': numerix_info, + 'lapack_opt': lapack_opt_info, + 'blas_opt': blas_opt_info, + 'boost_python': boost_python_info, + 'agg2': agg2_info, + 'wx': wx_info, + 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, + 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, + 'gdk_pixbuf_2': gdk_pixbuf_2_info, + 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, + 'gdk': gdk_info, + 'gdk_2': gdk_2_info, + 'gdk-2.0': gdk_2_info, + 'gdk_x11_2': gdk_x11_2_info, + 'gdk-x11-2.0': gdk_x11_2_info, + 'gtkp_x11_2': gtkp_x11_2_info, + 'gtk+-x11-2.0': gtkp_x11_2_info, + 'gtkp_2': 
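The notfound_action levels documented above behave as follows; a hedged sketch, with ATLAS chosen only as an example:

from numpy.distutils.system_info import get_info

get_info('atlas', notfound_action=0)  # silently returns {} when absent
get_info('atlas', notfound_action=1)  # issues a warning, returns {}
get_info('atlas', notfound_action=2)  # raises AtlasNotFoundError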
gtkp_2_info, + 'gtk+-2.0': gtkp_2_info, + 'xft': xft_info, + 'freetype2': freetype2_info, + 'umfpack': umfpack_info, + 'amd': amd_info, + }.get(name.lower(), system_info) + return cl().get_info(notfound_action) + + +class NotFoundError(DistutilsError): + """Some third-party program or library is not found.""" + + +class AtlasNotFoundError(NotFoundError): + """ + Atlas (http://math-atlas.sourceforge.net/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [atlas]) or by setting + the ATLAS environment variable.""" + + +class LapackNotFoundError(NotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [lapack]) or by setting + the LAPACK environment variable.""" + + +class LapackSrcNotFoundError(LapackNotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [lapack_src]) or by setting + the LAPACK_SRC environment variable.""" + + +class BlasNotFoundError(NotFoundError): + """ + Blas (http://www.netlib.org/blas/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [blas]) or by setting + the BLAS environment variable.""" + + +class BlasSrcNotFoundError(BlasNotFoundError): + """ + Blas (http://www.netlib.org/blas/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [blas_src]) or by setting + the BLAS_SRC environment variable.""" + + +class FFTWNotFoundError(NotFoundError): + """ + FFTW (http://www.fftw.org/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [fftw]) or by setting + the FFTW environment variable.""" + + +class DJBFFTNotFoundError(NotFoundError): + """ + DJBFFT (http://cr.yp.to/djbfft.html) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [djbfft]) or by setting + the DJBFFT environment variable.""" + + +class NumericNotFoundError(NotFoundError): + """ + Numeric (http://www.numpy.org/) module not found. + Get it from above location, install it, and retry setup.py.""" + + +class X11NotFoundError(NotFoundError): + """X11 libraries not found.""" + + +class UmfpackNotFoundError(NotFoundError): + """ + UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/) + not found. Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [umfpack]) or by setting + the UMFPACK environment variable.""" + + +class system_info: + + """ get_info() is the only public method. Don't use others. + """ + section = 'ALL' + dir_env_var = None + search_static_first = 0 # XXX: disabled by default, may disappear in + # future unless it is proved to be useful. 
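New resources follow a common pattern: subclass system_info, point it at a site.cfg section and an environment variable, and record results via set_info(). A minimal hypothetical subclass:

# Hypothetical minimal subclass following the pattern used in this file.
class foo_info(system_info):
    section = 'foo'
    dir_env_var = 'FOO'
    _lib_names = ['foo']

    def calc_info(self):
        lib_dirs = self.get_lib_dirs()
        libs = self.get_libs('foo_libs', self._lib_names)
        info = self.check_libs(lib_dirs, libs)
        if info is not None:
            self.set_info(**info)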
+    verbosity = 1
+    saved_results = {}
+
+    notfounderror = NotFoundError
+
+    def __init__(self,
+                  default_lib_dirs=default_lib_dirs,
+                  default_include_dirs=default_include_dirs,
+                  verbosity=1,
+                  ):
+        self.__class__.info = {}
+        self.local_prefixes = []
+        defaults = {}
+        defaults['library_dirs'] = os.pathsep.join(default_lib_dirs)
+        defaults['include_dirs'] = os.pathsep.join(default_include_dirs)
+        defaults['src_dirs'] = os.pathsep.join(default_src_dirs)
+        defaults['search_static_first'] = str(self.search_static_first)
+        self.cp = ConfigParser(defaults)
+        self.files = []
+        self.files.extend(get_standard_file('.numpy-site.cfg'))
+        self.files.extend(get_standard_file('site.cfg'))
+        self.parse_config_files()
+        if self.section is not None:
+            self.search_static_first = self.cp.getboolean(
+                self.section, 'search_static_first')
+        assert isinstance(self.search_static_first, int)
+
+    def parse_config_files(self):
+        self.cp.read(self.files)
+        if not self.cp.has_section(self.section):
+            if self.section is not None:
+                self.cp.add_section(self.section)
+
+    def calc_libraries_info(self):
+        libs = self.get_libraries()
+        dirs = self.get_lib_dirs()
+        info = {}
+        for lib in libs:
+            i = self.check_libs(dirs, [lib])
+            if i is not None:
+                dict_append(info, **i)
+            else:
+                log.info('Library %s was not found. Ignoring' % (lib))
+        return info
+
+    def set_info(self, **info):
+        if info:
+            lib_info = self.calc_libraries_info()
+            dict_append(info, **lib_info)
+        self.saved_results[self.__class__.__name__] = info
+
+    def has_info(self):
+        return self.__class__.__name__ in self.saved_results
+
+    def get_info(self, notfound_action=0):
+        """ Return a dictionary with items that are compatible
+            with numpy.distutils.setup keyword arguments.
+        """
+        flag = 0
+        if not self.has_info():
+            flag = 1
+            log.info(self.__class__.__name__ + ':')
+            if hasattr(self, 'calc_info'):
+                self.calc_info()
+            if notfound_action:
+                if not self.has_info():
+                    if notfound_action == 1:
+                        warnings.warn(self.notfounderror.__doc__)
+                    elif notfound_action == 2:
+                        raise self.notfounderror(self.notfounderror.__doc__)
+                    else:
+                        raise ValueError(repr(notfound_action))
+
+            if not self.has_info():
+                log.info('  NOT AVAILABLE')
+                self.set_info()
+            else:
+                log.info('  FOUND:')
+
+        res = self.saved_results.get(self.__class__.__name__)
+        if self.verbosity > 0 and flag:
+            for k, v in res.items():
+                v = str(v)
+                if k in ['sources', 'libraries'] and len(v) > 270:
+                    v = v[:120] + '...\n...\n...'
+ v[-120:] + log.info(' %s = %s', k, v) + log.info('') + + return copy.deepcopy(res) + + def get_paths(self, section, key): + dirs = self.cp.get(section, key).split(os.pathsep) + env_var = self.dir_env_var + if env_var: + if is_sequence(env_var): + e0 = env_var[-1] + for e in env_var: + if e in os.environ: + e0 = e + break + if not env_var[0] == e0: + log.info('Setting %s=%s' % (env_var[0], e0)) + env_var = e0 + if env_var and env_var in os.environ: + d = os.environ[env_var] + if d == 'None': + log.info('Disabled %s: %s', + self.__class__.__name__, '(%s is None)' + % (env_var,)) + return [] + if os.path.isfile(d): + dirs = [os.path.dirname(d)] + dirs + l = getattr(self, '_lib_names', []) + if len(l) == 1: + b = os.path.basename(d) + b = os.path.splitext(b)[0] + if b[:3] == 'lib': + log.info('Replacing _lib_names[0]==%r with %r' \ + % (self._lib_names[0], b[3:])) + self._lib_names[0] = b[3:] + else: + ds = d.split(os.pathsep) + ds2 = [] + for d in ds: + if os.path.isdir(d): + ds2.append(d) + for dd in ['include', 'lib']: + d1 = os.path.join(d, dd) + if os.path.isdir(d1): + ds2.append(d1) + dirs = ds2 + dirs + default_dirs = self.cp.get(self.section, key).split(os.pathsep) + dirs.extend(default_dirs) + ret = [] + for d in dirs: + if not os.path.isdir(d): + warnings.warn('Specified path %s is invalid.' % d) + continue + + if d not in ret: + ret.append(d) + + log.debug('( %s = %s )', key, ':'.join(ret)) + return ret + + def get_lib_dirs(self, key='library_dirs'): + return self.get_paths(self.section, key) + + def get_include_dirs(self, key='include_dirs'): + return self.get_paths(self.section, key) + + def get_src_dirs(self, key='src_dirs'): + return self.get_paths(self.section, key) + + def get_libs(self, key, default): + try: + libs = self.cp.get(self.section, key) + except NoOptionError: + if not default: + return [] + if is_string(default): + return [default] + return default + return [b for b in [a.strip() for a in libs.split(',')] if b] + + def get_libraries(self, key='libraries'): + return self.get_libs(key, '') + + def library_extensions(self): + static_exts = ['.a'] + if sys.platform == 'win32': + static_exts.append('.lib') # .lib is used by MSVC + if self.search_static_first: + exts = static_exts + [so_ext] + else: + exts = [so_ext] + static_exts + if sys.platform == 'cygwin': + exts.append('.dll.a') + if sys.platform == 'darwin': + exts.append('.dylib') + # Debian and Ubuntu added a g3f suffix to shared library to deal with + # g77 -> gfortran ABI transition + # XXX: disabled, it hides more problem than it solves. + #if sys.platform[:5] == 'linux': + # exts.append('.so.3gf') + return exts + + def check_libs(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks for all libraries as shared libraries first, then + static (or vice versa if self.search_static_first is True). + """ + exts = self.library_extensions() + info = None + for ext in exts: + info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) + if info is not None: + break + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + return info + + def check_libs2(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks each library for shared or static. 
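get_paths() above interprets a resource's environment variable in three ways; illustrative settings (the FFTW paths are hypothetical):

import os

os.environ['FFTW'] = 'None'       # disables the resource entirely
os.environ['FFTW'] = '/opt/fftw'  # prepends /opt/fftw plus its include/
                                  # and lib/ subdirectories to the search
os.environ['FFTW'] = '/opt/fftw/lib/libfftw3.so'
                                  # a file: its directory is searched and,
                                  # for single-library resources, the lib
                                  # name is derived from the file name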
+ """ + exts = self.library_extensions() + info = self._check_libs(lib_dirs, libs, opt_libs, exts) + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + return info + + def _lib_list(self, lib_dir, libs, exts): + assert is_string(lib_dir) + liblist = [] + # under windows first try without 'lib' prefix + if sys.platform == 'win32': + lib_prefixes = ['', 'lib'] + else: + lib_prefixes = ['lib'] + # for each library name, see if we can find a file for it. + for l in libs: + for ext in exts: + for prefix in lib_prefixes: + p = self.combine_paths(lib_dir, prefix + l + ext) + if p: + break + if p: + assert len(p) == 1 + # ??? splitext on p[0] would do this for cygwin + # doesn't seem correct + if ext == '.dll.a': + l += '.dll' + liblist.append(l) + break + return liblist + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Find mandatory and optional libs in expected paths. + + Missing optional libraries are silently forgotten. + """ + # First, try to find the mandatory libraries + if is_sequence(lib_dirs): + found_libs, found_dirs = [], [] + for dir_ in lib_dirs: + found_libs1 = self._lib_list(dir_, libs, exts) + # It's possible that we'll find the same library in multiple + # directories. It's also possible that we'll find some + # libraries on in directory, and some in another. So the + # obvious thing would be to use a set instead of a list, but I + # don't know if preserving order matters (does it?). + for found_lib in found_libs1: + if found_lib not in found_libs: + found_libs.append(found_lib) + if dir_ not in found_dirs: + found_dirs.append(dir_) + else: + found_libs = self._lib_list(lib_dirs, libs, exts) + found_dirs = [lib_dirs] + if len(found_libs) > 0 and len(found_libs) == len(libs): + info = {'libraries': found_libs, 'library_dirs': found_dirs} + # Now, check for optional libraries + if is_sequence(lib_dirs): + for dir_ in lib_dirs: + opt_found_libs = self._lib_list(dir_, opt_libs, exts) + if opt_found_libs: + if dir_ not in found_dirs: + found_dirs.extend(dir_) + found_libs.extend(opt_found_libs) + else: + opt_found_libs = self._lib_list(lib_dirs, opt_libs, exts) + if opt_found_libs: + found_libs.extend(opt_found_libs) + return info + else: + return None + + def combine_paths(self, *args): + """Return a list of existing paths composed by all combinations + of items from the arguments. 
+ """ + return combine_paths(*args, **{'verbosity': self.verbosity}) + + +class fft_opt_info(system_info): + + def calc_info(self): + info = {} + fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') + djbfft_info = get_info('djbfft') + if fftw_info: + dict_append(info, **fftw_info) + if djbfft_info: + dict_append(info, **djbfft_info) + self.set_info(**info) + return + + +class fftw_info(system_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + {'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]}] + + def calc_ver_info(self, ver_param): + """Returns True on successful version detection, else False""" + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + incl_dir = None + libs = self.get_libs(self.section + '_libs', ver_param['libs']) + info = self.check_libs(lib_dirs, libs) + if info is not None: + flag = 0 + for d in incl_dirs: + if len(self.combine_paths(d, ver_param['includes'])) \ + == len(ver_param['includes']): + dict_append(info, include_dirs=[d]) + flag = 1 + incl_dirs = [d] + break + if flag: + dict_append(info, define_macros=ver_param['macros']) + else: + info = None + if info is not None: + self.set_info(**info) + return True + else: + log.info(' %s not found' % (ver_param['name'])) + return False + + def calc_info(self): + for i in self.ver_info: + if self.calc_ver_info(i): + break + + +class fftw2_info(fftw_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]} + ] + + +class fftw3_info(fftw_info): + #variables to override + section = 'fftw3' + dir_env_var = 'FFTW3' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + ] + + +class dfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw', + 'libs':['drfftw', 'dfftw'], + 'includes':['dfftw.h', 'drfftw.h'], + 'macros':[('SCIPY_DFFTW_H', None)]}] + + +class sfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw', + 'libs':['srfftw', 'sfftw'], + 'includes':['sfftw.h', 'srfftw.h'], + 'macros':[('SCIPY_SFFTW_H', None)]}] + + +class fftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'fftw threads', + 'libs':['rfftw_threads', 'fftw_threads'], + 'includes':['fftw_threads.h', 'rfftw_threads.h'], + 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] + + +class dfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw threads', + 'libs':['drfftw_threads', 'dfftw_threads'], + 'includes':['dfftw_threads.h', 'drfftw_threads.h'], + 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] + + +class sfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw threads', + 'libs':['srfftw_threads', 'sfftw_threads'], + 'includes':['sfftw_threads.h', 'srfftw_threads.h'], + 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] + + +class djbfft_info(system_info): + section = 'djbfft' + dir_env_var = 'DJBFFT' + notfounderror = DJBFFTNotFoundError + + def get_paths(self, section, key): + pre_dirs = 
system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + info = None + for d in lib_dirs: + p = self.combine_paths(d, ['djbfft.a']) + if p: + info = {'extra_objects': p} + break + p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) + if p: + info = {'libraries': ['djbfft'], 'library_dirs': [d]} + break + if info is None: + return + for d in incl_dirs: + if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: + dict_append(info, include_dirs=[d], + define_macros=[('SCIPY_DJBFFT_H', None)]) + self.set_info(**info) + return + return + + +class mkl_info(system_info): + section = 'mkl' + dir_env_var = 'MKL' + _lib_mkl = ['mkl', 'vml', 'guide'] + + def get_mkl_rootdir(self): + mklroot = os.environ.get('MKLROOT', None) + if mklroot is not None: + return mklroot + paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) + ld_so_conf = '/etc/ld.so.conf' + if os.path.isfile(ld_so_conf): + for d in open(ld_so_conf, 'r'): + d = d.strip() + if d: + paths.append(d) + intel_mkl_dirs = [] + for path in paths: + path_atoms = path.split(os.sep) + for m in path_atoms: + if m.startswith('mkl'): + d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) + intel_mkl_dirs.append(d) + break + for d in paths: + dirs = glob(os.path.join(d, 'mkl', '*')) + dirs += glob(os.path.join(d, 'mkl*')) + for d in dirs: + if os.path.isdir(os.path.join(d, 'lib')): + return d + return None + + def __init__(self): + mklroot = self.get_mkl_rootdir() + if mklroot is None: + system_info.__init__(self) + else: + from .cpuinfo import cpu + l = 'mkl' # use shared library + if cpu.is_Itanium(): + plt = '64' + #l = 'mkl_ipf' + elif cpu.is_Xeon(): + plt = 'em64t' + #l = 'mkl_em64t' + else: + plt = '32' + #l = 'mkl_ia32' + if l not in self._lib_mkl: + self._lib_mkl.insert(0, l) + system_info.__init__( + self, + default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], + default_include_dirs=[os.path.join(mklroot, 'include')]) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + mkl_libs = self.get_libs('mkl_libs', self._lib_mkl) + info = self.check_libs2(lib_dirs, mkl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None)], + include_dirs=incl_dirs) + if sys.platform == 'win32': + pass # win32 has no pthread library + else: + dict_append(info, libraries=['pthread']) + self.set_info(**info) + + +class lapack_mkl_info(mkl_info): + + def calc_info(self): + mkl = get_info('mkl') + if not mkl: + return + if sys.platform == 'win32': + lapack_libs = self.get_libs('lapack_libs', ['mkl_lapack']) + else: + lapack_libs = self.get_libs('lapack_libs', + ['mkl_lapack32', 'mkl_lapack64']) + + info = {'libraries': lapack_libs} + dict_append(info, **mkl) + self.set_info(**info) + + +class blas_mkl_info(mkl_info): + pass + + +class atlas_info(system_info): + section = 'atlas' + dir_env_var = 'ATLAS' + _lib_names = ['f77blas', 'cblas'] + if sys.platform[:7] == 'freebsd': + _lib_atlas = ['atlas_r'] + _lib_lapack = ['alapack_r'] + else: + _lib_atlas = ['atlas'] + _lib_lapack = ['lapack'] + + notfounderror = AtlasNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', + 'sse', '3dnow', 'sse2']) + [d]) + 
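get_mkl_rootdir() above prefers MKLROOT, then scans LD_LIBRARY_PATH and /etc/ld.so.conf; the straightforward overrides, with all paths hypothetical:

# Hypothetical ways to point mkl_info at an installation.
import os
os.environ['MKLROOT'] = '/opt/intel/mkl'   # checked first

# or, equivalently, a site.cfg stanza:
# [mkl]
# library_dirs = /opt/intel/mkl/lib/em64t
# include_dirs = /opt/intel/mkl/include
# mkl_libs = mkl, guide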
return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + atlas_libs = self.get_libs('atlas_libs', + self._lib_names + self._lib_atlas) + lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) + atlas = None + lapack = None + atlas_1 = None + for d in lib_dirs: + atlas = self.check_libs2(d, atlas_libs, []) + lapack_atlas = self.check_libs2(d, ['lapack_atlas'], []) + if atlas is not None: + lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) + lapack = self.check_libs2(lib_dirs2, lapack_libs, []) + if lapack is not None: + break + if atlas: + atlas_1 = atlas + log.info(self.__class__) + if atlas is None: + atlas = atlas_1 + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + if lapack is not None: + dict_append(info, **lapack) + dict_append(info, **atlas) + elif 'lapack_atlas' in atlas['libraries']: + dict_append(info, **atlas) + dict_append(info, + define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) + self.set_info(**info) + return + else: + dict_append(info, **atlas) + dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) + message = """ +********************************************************************* + Could not find lapack library within the ATLAS installation. +********************************************************************* +""" + warnings.warn(message) + self.set_info(**info) + return + + # Check if lapack library is complete, only warn if it is not. + lapack_dir = lapack['library_dirs'][0] + lapack_name = lapack['libraries'][0] + lapack_lib = None + lib_prefixes = ['lib'] + if sys.platform == 'win32': + lib_prefixes.append('') + for e in self.library_extensions(): + for prefix in lib_prefixes: + fn = os.path.join(lapack_dir, prefix + lapack_name + e) + if os.path.exists(fn): + lapack_lib = fn + break + if lapack_lib: + break + if lapack_lib is not None: + sz = os.stat(lapack_lib)[6] + if sz <= 4000 * 1024: + message = """ +********************************************************************* + Lapack library (from ATLAS) is probably incomplete: + size of %s is %sk (expected >4000k) + + Follow the instructions in the KNOWN PROBLEMS section of the file + numpy/INSTALL.txt. 
+********************************************************************* +""" % (lapack_lib, sz / 1024) + warnings.warn(message) + else: + info['language'] = 'f77' + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(info, **atlas_extra_info) + + self.set_info(**info) + + +class atlas_blas_info(atlas_info): + _lib_names = ['f77blas', 'cblas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + atlas_libs = self.get_libs('atlas_libs', + self._lib_names + self._lib_atlas) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_threads_info(atlas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class atlas_blas_threads_info(atlas_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class lapack_atlas_info(atlas_info): + _lib_names = ['lapack_atlas'] + atlas_info._lib_names + + +class lapack_atlas_threads_info(atlas_threads_info): + _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names + + +class lapack_info(system_info): + section = 'lapack' + dir_env_var = 'LAPACK' + _lib_names = ['lapack'] + notfounderror = LapackNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + lapack_libs = self.get_libs('lapack_libs', self._lib_names) + info = self.check_libs(lib_dirs, lapack_libs, []) + if info is None: + return + info['language'] = 'f77' + self.set_info(**info) + + +class lapack_src_info(system_info): + section = 'lapack_src' + dir_env_var = 'LAPACK_SRC' + notfounderror = LapackSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'dgesv.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. May be ask first. + return + # The following is extracted from LAPACK-3.0/SRC/Makefile. + # Added missing names from lapack-lite-3.1.1/SRC/Makefile + # while keeping removed names for Lapack-3.0 compatibility. 
+ allaux = ''' + ilaenv ieeeck lsame lsamen xerbla + iparmq + ''' # *.f + laux = ''' + bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 + laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 + lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre + larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 + lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 + lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf + stebz stedc steqr sterf + + larra larrc larrd larr larrk larrj larrr laneg laisnan isnan + lazq3 lazq4 + ''' # [s|d]*.f + lasrc = ''' + gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak + gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv + gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 + geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd + gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal + gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd + ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein + hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 + lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb + lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp + laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv + lartv larz larzb larzt laswp lasyf latbs latdf latps latrd + latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv + pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 + potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri + pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs + spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv + sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 + tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs + trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs + tzrqf tzrzf + + lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 + ''' # [s|c|d|z]*.f + sd_lasrc = ''' + laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l + org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr + orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 + ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx + sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd + stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd + sygvx sytd2 sytrd + ''' # [s|d]*.f + cz_lasrc = ''' + bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev + heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv + hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd + hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf + hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 + laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe + laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv + spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq + ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 + unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr + ''' # [c|z]*.f + ####### + sclaux = laux + ' econd ' # s*.f + dzlaux = laux + ' secnd ' # d*.f + slasrc = lasrc + sd_lasrc # s*.f + dlasrc = lasrc + sd_lasrc # d*.f + clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f + zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f + oclasrc = ' icmax1 scsum1 ' # *.f + ozlasrc = ' izmax1 dzsum1 ' # *.f + sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ + + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ + + ['c%s.f' % f for f in (clasrc).split()] \ + + ['z%s.f' % f for f in (zlasrc).split()] \ + + ['%s.f' % f for f 
in (allaux + oclasrc + ozlasrc).split()] + sources = [os.path.join(src_dir, f) for f in sources] + # Lapack 3.1: + src_dir2 = os.path.join(src_dir, '..', 'INSTALL') + sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] + # Lapack 3.2.1: + sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] + # Should we check here actual existence of source files? + # Yes, the file listing is different between 3.0 and 3.1 + # versions. + sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + +atlas_version_c_text = r''' +/* This file is generated from numpy/distutils/system_info.py */ +void ATL_buildinfo(void); +int main(void) { + ATL_buildinfo(); + return 0; +} +''' + +_cached_atlas_version = {} + + +def get_atlas_version(**config): + libraries = config.get('libraries', []) + library_dirs = config.get('library_dirs', []) + key = (tuple(libraries), tuple(library_dirs)) + if key in _cached_atlas_version: + return _cached_atlas_version[key] + c = cmd_config(Distribution()) + atlas_version = None + info = {} + try: + s, o = c.get_output(atlas_version_c_text, + libraries=libraries, library_dirs=library_dirs, + use_tee=(system_info.verbosity > 0)) + if s and re.search(r'undefined reference to `_gfortran', o, re.M): + s, o = c.get_output(atlas_version_c_text, + libraries=libraries + ['gfortran'], + library_dirs=library_dirs, + use_tee=(system_info.verbosity > 0)) + if not s: + warnings.warn(""" +***************************************************** +Linkage with ATLAS requires gfortran. Use + + python setup.py config_fc --fcompiler=gnu95 ... + +when building extension libraries that use ATLAS. +Make sure that -lgfortran is used for C++ extensions. 
+*****************************************************
+""")
+                dict_append(info, language='f90',
+                            define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
+    except Exception:  # failed to get version from file -- maybe on Windows
+        # look at directory name
+        for o in library_dirs:
+            m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
+            if m:
+                atlas_version = m.group('version')
+            if atlas_version is not None:
+                break
+
+        # final choice --- look at ATLAS_VERSION environment
+        # variable
+        if atlas_version is None:
+            atlas_version = os.environ.get('ATLAS_VERSION', None)
+        if atlas_version:
+            dict_append(info, define_macros=[(
+                'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
+            ])
+        else:
+            dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
+        return atlas_version or '?.?.?', info
+
+    if not s:
+        m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
+        if m:
+            atlas_version = m.group('version')
+    if atlas_version is None:
+        if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
+            atlas_version = '3.2.1_pre3.3.6'
+        else:
+            log.info('Status: %d', s)
+            log.info('Output: %s', o)
+
+    if atlas_version == '3.2.1_pre3.3.6':
+        dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
+    else:
+        dict_append(info, define_macros=[(
+            'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
+        ])
+    result = _cached_atlas_version[key] = atlas_version, info
+    return result
+
+
+
+class lapack_opt_info(system_info):
+
+    notfounderror = LapackNotFoundError
+
+    def calc_info(self):
+
+        openblas_info = get_info('openblas_lapack')
+        if openblas_info:
+            self.set_info(**openblas_info)
+            return
+
+        lapack_mkl_info = get_info('lapack_mkl')
+        if lapack_mkl_info:
+            self.set_info(**lapack_mkl_info)
+            return
+
+        atlas_info = get_info('atlas_threads')
+        if not atlas_info:
+            atlas_info = get_info('atlas')
+
+        if sys.platform == 'darwin' and not atlas_info:
+            # Use the system lapack from Accelerate or vecLib under OSX
+            args = []
+            link_args = []
+            if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
+               'x86_64' in get_platform() or \
+               'i386' in platform.platform():
+                intel = 1
+            else:
+                intel = 0
+            if os.path.exists('/System/Library/Frameworks'
+                              '/Accelerate.framework/'):
+                if intel:
+                    args.extend(['-msse3'])
+                else:
+                    args.extend(['-faltivec'])
+                link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
+            elif os.path.exists('/System/Library/Frameworks'
+                                '/vecLib.framework/'):
+                if intel:
+                    args.extend(['-msse3'])
+                else:
+                    args.extend(['-faltivec'])
+                link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
+            if args:
+                self.set_info(extra_compile_args=args,
+                              extra_link_args=link_args,
+                              define_macros=[('NO_ATLAS_INFO', 3)])
+                return
+
+        #atlas_info = {} ## uncomment for testing
+        need_lapack = 0
+        need_blas = 0
+        info = {}
+        if atlas_info:
+            l = atlas_info.get('define_macros', [])
+            if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
+                   or ('ATLAS_WITHOUT_LAPACK', None) in l:
+                need_lapack = 1
+            info = atlas_info
+
+        else:
+            warnings.warn(AtlasNotFoundError.__doc__)
+            need_blas = 1
+            need_lapack = 1
+            dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+
+        if need_lapack:
+            lapack_info = get_info('lapack')
+            #lapack_info = {} ## uncomment for testing
+            if lapack_info:
+                dict_append(info, **lapack_info)
+            else:
+                warnings.warn(LapackNotFoundError.__doc__)
+                lapack_src_info = get_info('lapack_src')
+                if not lapack_src_info:
+                    warnings.warn(LapackSrcNotFoundError.__doc__)
+                    return
+                dict_append(info, libraries=[('flapack_src', lapack_src_info)])
+
+        if need_blas:
+            blas_info = get_info('blas')
+            #blas_info = {} ## uncomment for testing
+
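lapack_opt_info above tries complete backends first and only then degrades to building LAPACK from source; the order, summarized as a sketch:

# Summary sketch of the search order implemented above; each get_info()
# returns {} when its backend is unavailable, so the first hit wins.
from numpy.distutils.system_info import get_info

for name in ('openblas_lapack', 'lapack_mkl', 'atlas_threads', 'atlas'):
    if get_info(name):
        break
# then: Accelerate/vecLib on OS X, plain 'lapack', and finally
# 'lapack_src' (Fortran sources) as the last resort.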
if blas_info: + dict_append(info, **blas_info) + else: + warnings.warn(BlasNotFoundError.__doc__) + blas_src_info = get_info('blas_src') + if not blas_src_info: + warnings.warn(BlasSrcNotFoundError.__doc__) + return + dict_append(info, libraries=[('fblas_src', blas_src_info)]) + + self.set_info(**info) + return + + +class blas_opt_info(system_info): + + notfounderror = BlasNotFoundError + + def calc_info(self): + + blas_mkl_info = get_info('blas_mkl') + if blas_mkl_info: + self.set_info(**blas_mkl_info) + return + + openblas_info = get_info('openblas') + if openblas_info: + self.set_info(**openblas_info) + return + + atlas_info = get_info('atlas_blas_threads') + if not atlas_info: + atlas_info = get_info('atlas_blas') + + if sys.platform == 'darwin' and not atlas_info: + # Use the system BLAS from Accelerate or vecLib under OSX + args = [] + link_args = [] + if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ + 'x86_64' in get_platform() or \ + 'i386' in platform.platform(): + intel = 1 + else: + intel = 0 + if os.path.exists('/System/Library/Frameworks' + '/Accelerate.framework/'): + if intel: + args.extend(['-msse3']) + else: + args.extend(['-faltivec']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) + elif os.path.exists('/System/Library/Frameworks' + '/vecLib.framework/'): + if intel: + args.extend(['-msse3']) + else: + args.extend(['-faltivec']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,vecLib']) + if args: + self.set_info(extra_compile_args=args, + extra_link_args=link_args, + define_macros=[('NO_ATLAS_INFO', 3)]) + return + + need_blas = 0 + info = {} + if atlas_info: + info = atlas_info + else: + warnings.warn(AtlasNotFoundError.__doc__) + need_blas = 1 + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + + if need_blas: + blas_info = get_info('blas') + if blas_info: + dict_append(info, **blas_info) + else: + warnings.warn(BlasNotFoundError.__doc__) + blas_src_info = get_info('blas_src') + if not blas_src_info: + warnings.warn(BlasSrcNotFoundError.__doc__) + return + dict_append(info, libraries=[('fblas_src', blas_src_info)]) + + self.set_info(**info) + return + + +class blas_info(system_info): + section = 'blas' + dir_env_var = 'BLAS' + _lib_names = ['blas'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + blas_libs = self.get_libs('blas_libs', self._lib_names) + info = self.check_libs(lib_dirs, blas_libs, []) + if info is None: + return + info['language'] = 'f77' # XXX: is it generally true? + self.set_info(**info) + + +class openblas_info(blas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + notfounderror = BlasNotFoundError + + def check_embedded_lapack(self, info): + return True + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + openblas_libs = self.get_libs('libraries', self._lib_names) + if openblas_libs == self._lib_names: # backward compat with 1.8.0 + openblas_libs = self.get_libs('openblas_libs', self._lib_names) + info = self.check_libs(lib_dirs, openblas_libs, []) + if info is None: + return + + if not self.check_embedded_lapack(info): + return None + + info['language'] = 'f77' # XXX: is it generally true? 
+ self.set_info(**info) + + +class openblas_lapack_info(openblas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + notfounderror = BlasNotFoundError + + def check_embedded_lapack(self, info): + res = False + c = distutils.ccompiler.new_compiler() + tmpdir = tempfile.mkdtemp() + # Declare the Fortran-mangled symbol that is actually called below. + s = """void zungqr_(); + int main(int argc, const char *argv[]) + { + zungqr_(); + return 0; + }""" + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + try: + with open(src, 'wt') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs']) + res = True + except distutils.ccompiler.LinkError: + res = False + finally: + shutil.rmtree(tmpdir) + return res + + +class blas_src_info(system_info): + section = 'blas_src' + dir_env_var = 'BLAS_SRC' + notfounderror = BlasSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['blas'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'daxpy.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. Maybe ask first. + return + blas1 = ''' + caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot + dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 + srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg + dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax + snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap + scabs1 + ''' + blas2 = ''' + cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv + chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv + dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv + sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger + stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc + zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 + ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv + ''' + blas3 = ''' + cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k + dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm + ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm + ''' + sources = [os.path.join(src_dir, f + '.f') \ + for f in (blas1 + blas2 + blas3).split()] + #XXX: should we check here actual existence of source files?
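+ # Yes -- keep only the files that actually exist in this copy of + # the sources, as is done for the LAPACK sources above: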
+ sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + + +class x11_info(system_info): + section = 'x11' + notfounderror = X11NotFoundError + + def __init__(self): + system_info.__init__(self, + default_lib_dirs=default_x11_lib_dirs, + default_include_dirs=default_x11_include_dirs) + + def calc_info(self): + if sys.platform in ['win32']: + return + lib_dirs = self.get_lib_dirs() + include_dirs = self.get_include_dirs() + x11_libs = self.get_libs('x11_libs', ['X11']) + info = self.check_libs(lib_dirs, x11_libs, []) + if info is None: + return + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, 'X11/X.h'): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + self.set_info(**info) + + +class _numpy_info(system_info): + section = 'Numeric' + modulename = 'Numeric' + notfounderror = NumericNotFoundError + + def __init__(self): + include_dirs = [] + try: + module = __import__(self.modulename) + prefix = [] + for name in module.__file__.split(os.sep): + if name == 'lib': + break + prefix.append(name) + + # Ask numpy for its own include path before attempting + # anything else + try: + include_dirs.append(getattr(module, 'get_include')()) + except AttributeError: + pass + + include_dirs.append(distutils.sysconfig.get_python_inc( + prefix=os.sep.join(prefix))) + except ImportError: + pass + py_incl_dir = distutils.sysconfig.get_python_inc() + include_dirs.append(py_incl_dir) + py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) + if py_pincl_dir not in include_dirs: + include_dirs.append(py_pincl_dir) + for d in default_include_dirs: + d = os.path.join(d, os.path.basename(py_incl_dir)) + if d not in include_dirs: + include_dirs.append(d) + system_info.__init__(self, + default_lib_dirs=[], + default_include_dirs=include_dirs) + + def calc_info(self): + try: + module = __import__(self.modulename) + except ImportError: + return + info = {} + macros = [] + for v in ['__version__', 'version']: + vrs = getattr(module, v, None) + if vrs is None: + continue + macros = [(self.modulename.upper() + '_VERSION', + '"\\"%s\\""' % (vrs)), + (self.modulename.upper(), None)] + break +## try: +## macros.append( +## (self.modulename.upper()+'_VERSION_HEX', +## hex(vstr2hex(module.__version__))), +## ) +## except Exception as msg: +## print msg + dict_append(info, define_macros=macros) + include_dirs = self.get_include_dirs() + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, + os.path.join(self.modulename, + 'arrayobject.h')): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + if info: + self.set_info(**info) + return + + +class numarray_info(_numpy_info): + section = 'numarray' + modulename = 'numarray' + + +class Numeric_info(_numpy_info): + section = 'Numeric' + modulename = 'Numeric' + + +class numpy_info(_numpy_info): + section = 'numpy' + modulename = 'numpy' + + +class numerix_info(system_info): + section = 'numerix' + + def calc_info(self): + which = None, None + if os.getenv("NUMERIX"): + which = os.getenv("NUMERIX"), "environment var" + # If all the above fail, default to numpy. 
+ if which[0] is None: + which = "numpy", "defaulted" + try: + import numpy + which = "numpy", "defaulted" + except ImportError: + msg1 = str(get_exception()) + try: + import Numeric + which = "numeric", "defaulted" + except ImportError: + msg2 = str(get_exception()) + try: + import numarray + which = "numarray", "defaulted" + except ImportError: + msg3 = str(get_exception()) + log.info(msg1) + log.info(msg2) + log.info(msg3) + which = which[0].strip().lower(), which[1] + if which[0] not in ["numeric", "numarray", "numpy"]: + raise ValueError("numerix selector must be either 'Numeric' " + "or 'numarray' or 'numpy' but the value obtained" + " from the %s was '%s'." % (which[1], which[0])) + os.environ['NUMERIX'] = which[0] + self.set_info(**get_info(which[0])) + + +class f2py_info(system_info): + def calc_info(self): + try: + import numpy.f2py as f2py + except ImportError: + return + f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') + self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], + include_dirs=[f2py_dir]) + return + + +class boost_python_info(system_info): + section = 'boost_python' + dir_env_var = 'BOOST' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['boost*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', + 'module.cpp')): + src_dir = d + break + if not src_dir: + return + py_incl_dirs = [distutils.sysconfig.get_python_inc()] + py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) + if py_pincl_dir not in py_incl_dirs: + py_incl_dirs.append(py_pincl_dir) + srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') + bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) + bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) + info = {'libraries': [('boost_python_src', + {'include_dirs': [src_dir] + py_incl_dirs, + 'sources':bpl_srcs} + )], + 'include_dirs': [src_dir], + } + if info: + self.set_info(**info) + return + + +class agg2_info(system_info): + section = 'agg2' + dir_env_var = 'AGG2' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['agg2*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): + src_dir = d + break + if not src_dir: + return + if sys.platform == 'win32': + agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', + 'win32', 'agg_win32_bmp.cpp')) + else: + agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) + agg2_srcs += [os.path.join(src_dir, 'src', 'platform', + 'X11', + 'agg_platform_support.cpp')] + + info = {'libraries': + [('agg2_src', + {'sources': agg2_srcs, + 'include_dirs': [os.path.join(src_dir, 'include')], + } + )], + 'include_dirs': [os.path.join(src_dir, 'include')], + } + if info: + self.set_info(**info) + return + + +class _pkg_config_info(system_info): + section = None + config_env_var = 'PKG_CONFIG' + default_config_exe = 'pkg-config' + append_config_exe = '' + version_macro_name = None + release_macro_name = None + version_flag = '--modversion' + cflags_flag = '--cflags' + + def get_config_exe(self): + if self.config_env_var in os.environ: + 
return os.environ[self.config_env_var] + return self.default_config_exe + + def get_config_output(self, config_exe, option): + cmd = config_exe + ' ' + self.append_config_exe + ' ' + option + s, o = exec_command(cmd, use_tee=0) + if not s: + return o + + def calc_info(self): + config_exe = find_executable(self.get_config_exe()) + if not config_exe: + log.warn('File not found: %s. Cannot determine %s info.' \ + % (config_exe, self.section)) + return + info = {} + macros = [] + libraries = [] + library_dirs = [] + include_dirs = [] + extra_link_args = [] + extra_compile_args = [] + version = self.get_config_output(config_exe, self.version_flag) + if version: + macros.append((self.__class__.__name__.split('.')[-1].upper(), + '"\\"%s\\""' % (version))) + if self.version_macro_name: + macros.append((self.version_macro_name + '_%s' + % (version.replace('.', '_')), None)) + if self.release_macro_name: + release = self.get_config_output(config_exe, '--release') + if release: + macros.append((self.release_macro_name + '_%s' + % (release.replace('.', '_')), None)) + opts = self.get_config_output(config_exe, '--libs') + if opts: + for opt in opts.split(): + if opt[:2] == '-l': + libraries.append(opt[2:]) + elif opt[:2] == '-L': + library_dirs.append(opt[2:]) + else: + extra_link_args.append(opt) + opts = self.get_config_output(config_exe, self.cflags_flag) + if opts: + for opt in opts.split(): + if opt[:2] == '-I': + include_dirs.append(opt[2:]) + elif opt[:2] == '-D': + if '=' in opt: + n, v = opt[2:].split('=') + macros.append((n, v)) + else: + macros.append((opt[2:], None)) + else: + extra_compile_args.append(opt) + if macros: + dict_append(info, define_macros=macros) + if libraries: + dict_append(info, libraries=libraries) + if library_dirs: + dict_append(info, library_dirs=library_dirs) + if include_dirs: + dict_append(info, include_dirs=include_dirs) + if extra_link_args: + dict_append(info, extra_link_args=extra_link_args) + if extra_compile_args: + dict_append(info, extra_compile_args=extra_compile_args) + if info: + self.set_info(**info) + return + + +class wx_info(_pkg_config_info): + section = 'wx' + config_env_var = 'WX_CONFIG' + default_config_exe = 'wx-config' + append_config_exe = '' + version_macro_name = 'WX_VERSION' + release_macro_name = 'WX_RELEASE' + version_flag = '--version' + cflags_flag = '--cxxflags' + + +class gdk_pixbuf_xlib_2_info(_pkg_config_info): + section = 'gdk_pixbuf_xlib_2' + append_config_exe = 'gdk-pixbuf-xlib-2.0' + version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' + + +class gdk_pixbuf_2_info(_pkg_config_info): + section = 'gdk_pixbuf_2' + append_config_exe = 'gdk-pixbuf-2.0' + version_macro_name = 'GDK_PIXBUF_VERSION' + + +class gdk_x11_2_info(_pkg_config_info): + section = 'gdk_x11_2' + append_config_exe = 'gdk-x11-2.0' + version_macro_name = 'GDK_X11_VERSION' + + +class gdk_2_info(_pkg_config_info): + section = 'gdk_2' + append_config_exe = 'gdk-2.0' + version_macro_name = 'GDK_VERSION' + + +class gdk_info(_pkg_config_info): + section = 'gdk' + append_config_exe = 'gdk' + version_macro_name = 'GDK_VERSION' + + +class gtkp_x11_2_info(_pkg_config_info): + section = 'gtkp_x11_2' + append_config_exe = 'gtk+-x11-2.0' + version_macro_name = 'GTK_X11_VERSION' + + +class gtkp_2_info(_pkg_config_info): + section = 'gtkp_2' + append_config_exe = 'gtk+-2.0' + version_macro_name = 'GTK_VERSION' + + +class xft_info(_pkg_config_info): + section = 'xft' + append_config_exe = 'xft' + version_macro_name = 'XFT_VERSION' + + +class freetype2_info(_pkg_config_info): + section = 
'freetype2' + append_config_exe = 'freetype2' + version_macro_name = 'FREETYPE2_VERSION' + + +class amd_info(system_info): + section = 'amd' + dir_env_var = 'AMD' + _lib_names = ['amd'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + amd_libs = self.get_libs('amd_libs', self._lib_names) + info = self.check_libs(lib_dirs, amd_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, 'amd.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_AMD_H', None)], + swig_opts=['-I' + inc_dir]) + + self.set_info(**info) + return + + +class umfpack_info(system_info): + section = 'umfpack' + dir_env_var = 'UMFPACK' + notfounderror = UmfpackNotFoundError + _lib_names = ['umfpack'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + umfpack_libs = self.get_libs('umfpack_libs', self._lib_names) + info = self.check_libs(lib_dirs, umfpack_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_UMFPACK_H', None)], + swig_opts=['-I' + inc_dir]) + + amd = get_info('amd') + dict_append(info, **amd) + + self.set_info(**info) + return + +## def vstr2hex(version): +## bits = [] +## n = [24,16,8,4,0] +## r = 0 +## for s in version.split('.'): +## r |= int(s) << n[0] +## del n[0] +## return r + +#-------------------------------------------------------------------- + + +def combine_paths(*args, **kws): + """ Return a list of existing paths composed by all combinations of + items from arguments.
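+ + For illustration (a hypothetical layout; only the combinations + that actually exist on disk are returned):: + + combine_paths('/usr/lib', ['atlas', 'sse2']) + # -> ['/usr/lib/atlas', '/usr/lib/sse2'] if both directories exist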
+ """ + r = [] + for a in args: + if not a: + continue + if is_string(a): + a = [a] + r.append(a) + args = r + if not args: + return [] + if len(args) == 1: + result = reduce(lambda a, b: a + b, map(glob, args[0]), []) + elif len(args) == 2: + result = [] + for a0 in args[0]: + for a1 in args[1]: + result.extend(glob(os.path.join(a0, a1))) + else: + result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) + verbosity = kws.get('verbosity', 1) + log.debug('(paths: %s)', ','.join(result)) + return result + +language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} +inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} + + +def dict_append(d, **kws): + languages = [] + for k, v in kws.items(): + if k == 'language': + languages.append(v) + continue + if k in d: + if k in ['library_dirs', 'include_dirs', 'define_macros']: + [d[k].append(vv) for vv in v if vv not in d[k]] + else: + d[k].extend(v) + else: + d[k] = v + if languages: + l = inv_language_map[max([language_map.get(l, 0) for l in languages])] + d['language'] = l + return + + +def parseCmdLine(argv=(None,)): + import optparse + parser = optparse.OptionParser("usage: %prog [-v] [info objs]") + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', + default=False, + help='be verbose and print more messages') + + opts, args = parser.parse_args(args=argv[1:]) + return opts, args + + +def show_all(argv=None): + import inspect + if argv is None: + argv = sys.argv + opts, args = parseCmdLine(argv) + if opts.verbose: + log.set_threshold(log.DEBUG) + else: + log.set_threshold(log.INFO) + show_only = [] + for n in args: + if n[-5:] != '_info': + n = n + '_info' + show_only.append(n) + show_all = not show_only + _gdict_ = globals().copy() + for name, c in _gdict_.items(): + if not inspect.isclass(c): + continue + if not issubclass(c, system_info) or c is system_info: + continue + if not show_all: + if name not in show_only: + continue + del show_only[show_only.index(name)] + conf = c() + conf.verbosity = 2 + r = conf.get_info() + if show_only: + log.info('Info classes not defined: %s', ','.join(show_only)) + +if __name__ == "__main__": + show_all() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/__init__.py new file mode 100644 index 0000000000000..1d0f69b67d8fa --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/__init__.py @@ -0,0 +1 @@ +from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/setup.py new file mode 100644 index 0000000000000..bb7d4bc1c8c8d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/setup.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('f2py_ext', parent_package, top_path) + config.add_extension('fib2', ['src/fib2.pyf', 'src/fib1.f']) + config.add_data_dir('tests') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib1.f b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib1.f new file mode 100644 
index 0000000000000..cfbb1eea0df7a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib1.f @@ -0,0 +1,18 @@ +C FILE: FIB1.F + SUBROUTINE FIB(A,N) +C +C CALCULATE FIRST N FIBONACCI NUMBERS +C + INTEGER N + REAL*8 A(N) + DO I=1,N + IF (I.EQ.1) THEN + A(I) = 0.0D0 + ELSEIF (I.EQ.2) THEN + A(I) = 1.0D0 + ELSE + A(I) = A(I-1) + A(I-2) + ENDIF + ENDDO + END +C END FILE FIB1.F diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib2.pyf b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib2.pyf new file mode 100644 index 0000000000000..90a8cf00cb47e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib2.pyf @@ -0,0 +1,9 @@ +! -*- f90 -*- +python module fib2 + interface + subroutine fib(a,n) + real*8 dimension(n),intent(out),depend(n) :: a + integer intent(in) :: n + end subroutine fib + end interface +end python module fib2 diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/tests/test_fib2.py new file mode 100644 index 0000000000000..5252db2830d1b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/tests/test_fib2.py @@ -0,0 +1,13 @@ +from __future__ import division, absolute_import, print_function + +import sys +from numpy.testing import * +from f2py_ext import fib2 + +class TestFib2(TestCase): + + def test_fib(self): + assert_array_equal(fib2.fib(6), [0, 1, 1, 2, 3, 5]) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/__init__.py new file mode 100644 index 0000000000000..1d0f69b67d8fa --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/__init__.py @@ -0,0 +1 @@ +from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/include/body.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/include/body.f90 new file mode 100644 index 0000000000000..90b44e29dc850 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/include/body.f90 @@ -0,0 +1,5 @@ + subroutine bar13(a) + !f2py intent(out) a + integer a + a = 13 + end subroutine bar13 diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/setup.py new file mode 100644 index 0000000000000..7cca81637c578 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/setup.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('f2py_f90_ext', parent_package, top_path) + config.add_extension('foo', + ['src/foo_free.f90'], + include_dirs=['include'], + f2py_options=['--include_paths', + config.paths('include')[0]] + ) + config.add_data_dir('tests') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 new file mode 100644 index 0000000000000..c7713be59e169 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 @@ -0,0 +1,6 @@ +module foo_free +contains + +include "body.f90" + +end module foo_free diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py new file mode 100644 index 0000000000000..9653b9023cd2b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py @@ -0,0 +1,12 @@ +from __future__ import division, absolute_import, print_function + +import sys +from numpy.testing import * +from f2py_f90_ext import foo + +class TestFoo(TestCase): + def test_foo_free(self): + assert_equal(foo.foo_free.bar13(), 13) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/__init__.py new file mode 100644 index 0000000000000..1d0f69b67d8fa --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/__init__.py @@ -0,0 +1 @@ +from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/setup.py new file mode 100644 index 0000000000000..de6b941e07f03 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/setup.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +fib3_f = ''' +C FILE: FIB3.F + SUBROUTINE FIB(A,N) +C +C CALCULATE FIRST N FIBONACCI NUMBERS +C + INTEGER N + REAL*8 A(N) +Cf2py intent(in) n +Cf2py intent(out) a +Cf2py depend(n) a + DO I=1,N + IF (I.EQ.1) THEN + A(I) = 0.0D0 + ELSEIF (I.EQ.2) THEN + A(I) = 1.0D0 + ELSE + A(I) = A(I-1) + A(I-2) + ENDIF + ENDDO + END +C END FILE FIB3.F +''' + +def source_func(ext, build_dir): + import os + from distutils.dep_util import newer + target = os.path.join(build_dir, 'fib3.f') + if newer(__file__, target): + f = open(target, 'w') + f.write(fib3_f) + f.close() + return [target] + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('gen_ext', parent_package, top_path) + config.add_extension('fib3', + [source_func] + ) + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/tests/test_fib3.py new file mode 100644 index 0000000000000..5fd9be439485a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/tests/test_fib3.py @@ -0,0 +1,12 @@ +from __future__ import division, absolute_import, print_function + +import sys +from numpy.testing import * +from gen_ext import fib3 + +class TestFib3(TestCase): + def test_fib(self): + assert_array_equal(fib3.fib(6), [0, 1, 1, 2, 3, 5]) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/__init__.py 
new file mode 100644 index 0000000000000..1d0f69b67d8fa --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/__init__.py @@ -0,0 +1 @@ +from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/primes.pyx b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/primes.pyx new file mode 100644 index 0000000000000..2ada0c5a08d4f --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/primes.pyx @@ -0,0 +1,22 @@ +# +# Calculate prime numbers +# + +def primes(int kmax): + cdef int n, k, i + cdef int p[1000] + result = [] + if kmax > 1000: + kmax = 1000 + k = 0 + n = 2 + while k < kmax: + i = 0 + while i < k and n % p[i] <> 0: + i = i + 1 + if i == k: + p[k] = n + k = k + 1 + result.append(n) + n = n + 1 + return result diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/setup.py new file mode 100644 index 0000000000000..819dd3154a11b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/setup.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('pyrex_ext', parent_package, top_path) + config.add_extension('primes', + ['primes.pyx']) + config.add_data_dir('tests') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/tests/test_primes.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/tests/test_primes.py new file mode 100644 index 0000000000000..c9fdd6c6d5c4d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/tests/test_primes.py @@ -0,0 +1,14 @@ +from __future__ import division, absolute_import, print_function + +import sys +from numpy.testing import * +from pyrex_ext.primes import primes + +class TestPrimes(TestCase): + def test_simple(self, level=1): + l = primes(10) + assert_equal(l, [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/setup.py new file mode 100644 index 0000000000000..135de7c470d5c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/setup.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('testnumpydistutils', parent_package, top_path) + config.add_subpackage('pyrex_ext') + config.add_subpackage('f2py_ext') + #config.add_subpackage('f2py_f90_ext') + config.add_subpackage('swig_ext') + config.add_subpackage('gen_ext') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/__init__.py new file mode 100644 index 0000000000000..1d0f69b67d8fa --- /dev/null +++ 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/__init__.py @@ -0,0 +1 @@ +from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/setup.py new file mode 100644 index 0000000000000..f6e07303bea64 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/setup.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('swig_ext', parent_package, top_path) + config.add_extension('_example', + ['src/example.i', 'src/example.c'] + ) + config.add_extension('_example2', + ['src/zoo.i', 'src/zoo.cc'], + depends=['src/zoo.h'], + include_dirs=['src'] + ) + config.add_data_dir('tests') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/example.i b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/example.i new file mode 100644 index 0000000000000..f4fc11e663701 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/example.i @@ -0,0 +1,14 @@ +/* -*- c -*- */ + +/* File : example.i */ +%module example +%{ +/* Put headers and other declarations here */ +extern double My_variable; +extern int fact(int); +extern int my_mod(int n, int m); +%} + +extern double My_variable; +extern int fact(int); +extern int my_mod(int n, int m); diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.cc b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.cc new file mode 100644 index 0000000000000..0a643d1e5d4f2 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.cc @@ -0,0 +1,23 @@ +#include "zoo.h" +#include <cstdio> +#include <cstring> + +Zoo::Zoo() +{ + n = 0; +} + +void Zoo::shut_up(char *animal) +{ + if (n < 10) { + strcpy(animals[n], animal); + n++; + } +} + +void Zoo::display() +{ + int i; + for(i = 0; i < n; i++) + printf("%s\n", animals[i]); +} diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.h new file mode 100644 index 0000000000000..cb26e6ceff5df --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.h @@ -0,0 +1,9 @@ + +class Zoo{ + int n; + char animals[10][50]; +public: + Zoo(); + void shut_up(char *animal); + void display(); +}; diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.i b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.i new file mode 100644 index 0000000000000..a029c03e844b6 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.i @@ -0,0 +1,10 @@ +// -*- c++ -*- +// Example copied from http://linuxgazette.net/issue49/pramode.html + +%module example2 + +%{ +#include "zoo.h" +%} + +%include "zoo.h" diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example.py new file mode 100644 index 0000000000000..e81f98b1de200 --- /dev/null +++
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example.py @@ -0,0 +1,18 @@ +from __future__ import division, absolute_import, print_function + +import sys +from numpy.testing import * +from swig_ext import example + +class TestExample(TestCase): + def test_fact(self): + assert_equal(example.fact(10), 3628800) + + def test_cvar(self): + assert_equal(example.cvar.My_variable, 3.0) + example.cvar.My_variable = 5 + assert_equal(example.cvar.My_variable, 5.0) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example2.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example2.py new file mode 100644 index 0000000000000..82daed72894f9 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example2.py @@ -0,0 +1,16 @@ +from __future__ import division, absolute_import, print_function + +import sys +from numpy.testing import * +from swig_ext import example2 + +class TestExample2(TestCase): + def test_zoo(self): + z = example2.Zoo() + z.shut_up('Tiger') + z.shut_up('Lion') + z.display() + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_exec_command.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_exec_command.py new file mode 100644 index 0000000000000..0931f749b39c2 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_exec_command.py @@ -0,0 +1,92 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +from tempfile import TemporaryFile + +from numpy.distutils import exec_command + +# In python 3 stdout, stderr are text (unicode compliant) devices, so to +# emulate them import StringIO from the io module. +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +class redirect_stdout(object): + """Context manager to redirect stdout for exec_command test.""" + def __init__(self, stdout=None): + self._stdout = stdout or sys.stdout + + def __enter__(self): + self.old_stdout = sys.stdout + sys.stdout = self._stdout + + def __exit__(self, exc_type, exc_value, traceback): + self._stdout.flush() + sys.stdout = self.old_stdout + # note: closing sys.stdout won't close it. + self._stdout.close() + +class redirect_stderr(object): + """Context manager to redirect stderr for exec_command test.""" + def __init__(self, stderr=None): + self._stderr = stderr or sys.stderr + + def __enter__(self): + self.old_stderr = sys.stderr + sys.stderr = self._stderr + + def __exit__(self, exc_type, exc_value, traceback): + self._stderr.flush() + sys.stderr = self.old_stderr + # note: closing sys.stderr won't close it. + self._stderr.close() + +class emulate_nonposix(object): + """Context manager to emulate os.name != 'posix' """ + def __init__(self, osname='non-posix'): + self._new_name = osname + + def __enter__(self): + self._old_name = os.name + os.name = self._new_name + + def __exit__(self, exc_type, exc_value, traceback): + os.name = self._old_name + + +def test_exec_command_stdout(): + # Regression test for gh-2999 and gh-2915. + # There are several packages (nose, scipy.weave.inline, Sage inline + # Fortran) that replace stdout, in which case it doesn't have a fileno + # method. This is tested here, with a do-nothing command that fails if the + # presence of fileno() is assumed in exec_command. 
+ + # The code has a special case for posix systems, so if we are on posix test + # both that the special case works and that the generic code works. + + # Test posix version: + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + exec_command.exec_command("cd '.'") + +def test_exec_command_stderr(): + # Test posix version: + with redirect_stdout(TemporaryFile(mode='w+')): + with redirect_stderr(StringIO()): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(TemporaryFile()): + with redirect_stderr(StringIO()): + exec_command.exec_command("cd '.'") diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_gnu.py new file mode 100644 index 0000000000000..a0d191819cc10 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_gnu.py @@ -0,0 +1,53 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * + +import numpy.distutils.fcompiler + +g77_version_strings = [ + ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), + ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), + ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), + ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), + ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' + ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), +] + +gfortran_version_strings = [ + ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', + '4.0.3'), + ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), + ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), + ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), + ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), +] + +class TestG77Versions(TestCase): + def test_g77_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, version in g77_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_g77(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, _ in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) + +class TestGortranVersions(TestCase): + def test_gfortran_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, version in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_gfortran(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, _ in g77_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) + + +if __name__ == '__main__': + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_intel.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_intel.py new file mode 100644 index 0000000000000..eda209ebe060c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_intel.py @@ -0,0 +1,36 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * + +import numpy.distutils.fcompiler + +intel_32bit_version_strings = [ + ("Intel(R) Fortran 
Intel(R) 32-bit Compiler Professional for applications"\ + "running on Intel(R) 32, Version 11.1", '11.1'), +] + +intel_64bit_version_strings = [ + ("Intel(R) Fortran IA-64 Compiler Professional for applications"\ + "running on IA-64, Version 11.0", '11.0'), + ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"\ + "running on Intel(R) 64, Version 11.1", '11.1') +] + +class TestIntelFCompilerVersions(TestCase): + def test_32bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') + for vs, version in intel_32bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) + + +class TestIntelEM64TFCompilerVersions(TestCase): + def test_64bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') + for vs, version in intel_64bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) + + +if __name__ == '__main__': + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_misc_util.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_misc_util.py new file mode 100644 index 0000000000000..fd6af638fb416 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_misc_util.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy.distutils.misc_util import appendpath, minrelpath, \ + gpaths, get_shared_lib_extension +from os.path import join, sep, dirname + +ajoin = lambda *paths: join(*((sep,)+paths)) + +class TestAppendpath(TestCase): + + def test_1(self): + assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) + assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) + assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) + assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) + + def test_2(self): + assert_equal(appendpath('prefix/sub', 'name'), + join('prefix', 'sub', 'name')) + assert_equal(appendpath('prefix/sub', 'sup/name'), + join('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub', '/prefix/name'), + ajoin('prefix', 'sub', 'name')) + + def test_3(self): + assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), + ajoin('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) + +class TestMinrelpath(TestCase): + + def test_1(self): + n = lambda path: path.replace('/', sep) + assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) + assert_equal(minrelpath('..'), '..') + assert_equal(minrelpath(n('aa/..')), '') + assert_equal(minrelpath(n('aa/../bb')), 'bb') + assert_equal(minrelpath(n('aa/bb/..')), 'aa') + assert_equal(minrelpath(n('aa/bb/../..')), '') + assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) + assert_equal(minrelpath(n('.././..')), n('../..')) + assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) + +class TestGpaths(TestCase): + + def test_gpaths(self): + local_path = minrelpath(join(dirname(__file__), '..')) + ls = gpaths('command/*.py', local_path) + assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) + f = gpaths('system_info.py', local_path) + assert_(join(local_path, 'system_info.py')==f[0], repr(f)) + +class TestSharedExtension(TestCase): + + def test_get_shared_lib_extension(self): + 
import sys + ext = get_shared_lib_extension(is_python_ext=False) + if sys.platform.startswith('linux'): + assert_equal(ext, '.so') + elif sys.platform.startswith('gnukfreebsd'): + assert_equal(ext, '.so') + elif sys.platform.startswith('darwin'): + assert_equal(ext, '.dylib') + elif sys.platform.startswith('win'): + assert_equal(ext, '.dll') + # just check for no crash + assert_(get_shared_lib_extension(is_python_ext=True)) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_npy_pkg_config.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_npy_pkg_config.py new file mode 100644 index 0000000000000..5443ece485b2a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_npy_pkg_config.py @@ -0,0 +1,98 @@ +from __future__ import division, absolute_import, print_function + +import os +from tempfile import mkstemp + +from numpy.testing import * +from numpy.distutils.npy_pkg_config import read_config, parse_flags + +simple = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[default] +cflags = -I/usr/include +libs = -L/usr/lib +""" +simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', + 'version': '0.1', 'name': 'foo'} + +simple_variable = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[variables] +prefix = /foo/bar +libdir = ${prefix}/lib +includedir = ${prefix}/include + +[default] +cflags = -I${includedir} +libs = -L${libdir} +""" +simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', + 'version': '0.1', 'name': 'foo'} + +class TestLibraryInfo(TestCase): + def test_simple(self): + fd, filename = mkstemp('foo.ini') + try: + pkg = os.path.splitext(filename)[0] + try: + os.write(fd, simple.encode('ascii')) + finally: + os.close(fd) + + out = read_config(pkg) + self.assertTrue(out.cflags() == simple_d['cflags']) + self.assertTrue(out.libs() == simple_d['libflags']) + self.assertTrue(out.name == simple_d['name']) + self.assertTrue(out.version == simple_d['version']) + finally: + os.remove(filename) + + def test_simple_variable(self): + fd, filename = mkstemp('foo.ini') + try: + pkg = os.path.splitext(filename)[0] + try: + os.write(fd, simple_variable.encode('ascii')) + finally: + os.close(fd) + + out = read_config(pkg) + self.assertTrue(out.cflags() == simple_variable_d['cflags']) + self.assertTrue(out.libs() == simple_variable_d['libflags']) + self.assertTrue(out.name == simple_variable_d['name']) + self.assertTrue(out.version == simple_variable_d['version']) + + out.vars['prefix'] = '/Users/david' + self.assertTrue(out.cflags() == '-I/Users/david/include') + finally: + os.remove(filename) + +class TestParseFlags(TestCase): + def test_simple_cflags(self): + d = parse_flags("-I/usr/include") + self.assertTrue(d['include_dirs'] == ['/usr/include']) + + d = parse_flags("-I/usr/include -DFOO") + self.assertTrue(d['include_dirs'] == ['/usr/include']) + self.assertTrue(d['macros'] == ['FOO']) + + d = parse_flags("-I /usr/include -DFOO") + self.assertTrue(d['include_dirs'] == ['/usr/include']) + self.assertTrue(d['macros'] == ['FOO']) + + def test_simple_lflags(self): + d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") + self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + self.assertTrue(d['libraries'] == ['foo', 'bar']) + + d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") + self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + self.assertTrue(d['libraries'] == ['foo', 'bar']) diff 
--git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/unixccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/unixccompiler.py new file mode 100644 index 0000000000000..955407aa0384f --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/unixccompiler.py @@ -0,0 +1,113 @@ +""" +unixccompiler - can handle very long argument lists for ar. + +""" +from __future__ import division, absolute_import, print_function + +import os + +from distutils.errors import DistutilsExecError, CompileError +from distutils.unixccompiler import * +from numpy.distutils.ccompiler import replace_method +from numpy.distutils.compat import get_exception + +if sys.version_info[0] < 3: + from . import log +else: + from numpy.distutils import log + +# Note that UnixCCompiler._compile appeared in Python 2.3 +def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile a single source files with a Unix-style compiler.""" + # HP ad-hoc fix, see ticket 1383 + ccomp = self.compiler_so + if ccomp[0] == 'aCC': + # remove flags that will trigger ANSI-C mode for aCC + if '-Ae' in ccomp: + ccomp.remove('-Ae') + if '-Aa' in ccomp: + ccomp.remove('-Aa') + # add flags for (almost) sane C++ handling + ccomp += ['-AA'] + self.compiler_so = ccomp + + display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) + try: + self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + + extra_postargs, display = display) + except DistutilsExecError: + msg = str(get_exception()) + raise CompileError(msg) + +replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) + + +def UnixCCompiler_create_static_lib(self, objects, output_libname, + output_dir=None, debug=0, target_lang=None): + """ + Build a static library in a separate sub-process. + + Parameters + ---------- + objects : list or tuple of str + List of paths to object files used to build the static library. + output_libname : str + The library name as an absolute or relative (if `output_dir` is used) + path. + output_dir : str, optional + The path to the output directory. Default is None, in which case + the ``output_dir`` attribute of the UnixCCompiler instance. + debug : bool, optional + This parameter is not used. + target_lang : str, optional + This parameter is not used. + + Returns + ------- + None + + """ + objects, output_dir = self._fix_object_args(objects, output_dir) + + output_filename = \ + self.library_filename(output_libname, output_dir=output_dir) + + if self._need_link(objects, output_filename): + try: + # previous .a may be screwed up; best to remove it first + # and recreate. + # Also, ar on OS X doesn't handle updating universal archives + os.unlink(output_filename) + except (IOError, OSError): + pass + self.mkpath(os.path.dirname(output_filename)) + tmp_objects = objects + self.objects + while tmp_objects: + objects = tmp_objects[:50] + tmp_objects = tmp_objects[50:] + display = '%s: adding %d object files to %s' % ( + os.path.basename(self.archiver[0]), + len(objects), output_filename) + self.spawn(self.archiver + [output_filename] + objects, + display = display) + + # Not many Unices required ranlib anymore -- SunOS 4.x is, I + # think the only major Unix that does. Maybe we need some + # platform intelligence here to skip ranlib if it's not + # needed -- or maybe Python's configure script took care of + # it for us, hence the check for leading colon. 
+ if self.ranlib: + display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), + output_filename) + try: + self.spawn(self.ranlib + [output_filename], + display = display) + except DistutilsExecError: + msg = str(get_exception()) + raise LibError(msg) + else: + log.debug("skipping %s (up-to-date)", output_filename) + return + +replace_method(UnixCCompiler, 'create_static_lib', + UnixCCompiler_create_static_lib) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/__init__.py new file mode 100644 index 0000000000000..b6f1fa71c54a1 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/__init__.py @@ -0,0 +1,28 @@ +from __future__ import division, absolute_import, print_function + +import os + +ref_dir = os.path.join(os.path.dirname(__file__)) + +__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and + not f.startswith('__')) + +for f in __all__: + __import__(__name__ + '.' + f) + +del f, ref_dir + +__doc__ = """\ +Topical documentation +===================== + +The following topics are available: +%s + +You can view them by + +>>> help(np.doc.TOPIC) #doctest: +SKIP + +""" % '\n- '.join([''] + __all__) + +__all__.extend(['__doc__']) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/basics.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/basics.py new file mode 100644 index 0000000000000..86a3984c27e23 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/basics.py @@ -0,0 +1,146 @@ +""" +============ +Array basics +============ + +Array types and conversions between types +========================================= + +Numpy supports a much greater variety of numerical types than Python does. +This section shows which are available, and how to modify an array's data-type. + +========== ========================================================== +Data type Description +========== ========================================================== +bool_ Boolean (True or False) stored as a byte +int_ Default integer type (same as C ``long``; normally either + ``int64`` or ``int32``) +intc Identical to C ``int`` (normally ``int32`` or ``int64``) +intp Integer used for indexing (same as C ``ssize_t``; normally + either ``int32`` or ``int64``) +int8 Byte (-128 to 127) +int16 Integer (-32768 to 32767) +int32 Integer (-2147483648 to 2147483647) +int64 Integer (-9223372036854775808 to 9223372036854775807) +uint8 Unsigned integer (0 to 255) +uint16 Unsigned integer (0 to 65535) +uint32 Unsigned integer (0 to 4294967295) +uint64 Unsigned integer (0 to 18446744073709551615) +float_ Shorthand for ``float64``. +float16 Half precision float: sign bit, 5 bits exponent, + 10 bits mantissa +float32 Single precision float: sign bit, 8 bits exponent, + 23 bits mantissa +float64 Double precision float: sign bit, 11 bits exponent, + 52 bits mantissa +complex_ Shorthand for ``complex128``. +complex64 Complex number, represented by two 32-bit floats (real + and imaginary components) +complex128 Complex number, represented by two 64-bit floats (real + and imaginary components) +========== ========================================================== + +Additionally to ``intc`` the platform dependent C integer types ``short``, +``long``, ``longlong`` and their unsigned versions are defined. + +Numpy numerical types are instances of ``dtype`` (data-type) objects, each +having unique characteristics. 
Once you have imported NumPy using + + :: + + >>> import numpy as np + +the dtypes are available as ``np.bool_``, ``np.float32``, etc. + +Advanced types, not listed in the table above, are explored in +section :ref:`structured_arrays`. + +There are 5 basic numerical types representing booleans (bool), integers (int), +unsigned integers (uint) floating point (float) and complex. Those with numbers +in their name indicate the bitsize of the type (i.e. how many bits are needed +to represent a single value in memory). Some types, such as ``int`` and +``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit +vs. 64-bit machines). This should be taken into account when interfacing +with low-level code (such as C or Fortran) where the raw memory is addressed. + +Data-types can be used as functions to convert python numbers to array scalars +(see the array scalar section for an explanation), python sequences of numbers +to arrays of that type, or as arguments to the dtype keyword that many numpy +functions or methods accept. Some examples:: + + >>> import numpy as np + >>> x = np.float32(1.0) + >>> x + 1.0 + >>> y = np.int_([1,2,4]) + >>> y + array([1, 2, 4]) + >>> z = np.arange(3, dtype=np.uint8) + >>> z + array([0, 1, 2], dtype=uint8) + +Array types can also be referred to by character codes, mostly to retain +backward compatibility with older packages such as Numeric. Some +documentation may still refer to these, for example:: + + >>> np.array([1, 2, 3], dtype='f') + array([ 1., 2., 3.], dtype=float32) + +We recommend using dtype objects instead. + +To convert the type of an array, use the .astype() method (preferred) or +the type itself as a function. For example: :: + + >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE + array([ 0., 1., 2.]) + >>> np.int8(z) + array([0, 1, 2], dtype=int8) + +Note that, above, we use the *Python* float object as a dtype. NumPy knows +that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``, +that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``. +The other data-types do not have Python equivalents. + +To determine the type of an array, look at the dtype attribute:: + + >>> z.dtype + dtype('uint8') + +dtype objects also contain information about the type, such as its bit-width +and its byte-order. The data type can also be used indirectly to query +properties of the type, such as whether it is an integer:: + + >>> d = np.dtype(int) + >>> d + dtype('int32') + + >>> np.issubdtype(d, int) + True + + >>> np.issubdtype(d, float) + False + + +Array Scalars +============= + +Numpy generally returns elements of arrays as array scalars (a scalar +with an associated dtype). Array scalars differ from Python scalars, but +for the most part they can be used interchangeably (the primary +exception is for versions of Python older than v2.x, where integer array +scalars cannot act as indices for lists and tuples). There are some +exceptions, such as when code requires very specific attributes of a scalar +or when it checks specifically whether a value is a Python scalar. Generally, +problems are easily fixed by explicitly converting array scalars +to Python scalars, using the corresponding Python type function +(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``). + +The primary advantage of using array scalars is that +they preserve the array type (Python may not have a matching scalar type +available, e.g. ``int16``). 
Therefore, the use of array scalars ensures +identical behaviour between arrays and scalars, irrespective of whether the +value is inside an array or not. NumPy scalars also have many of the same +methods arrays do. + +""" +from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/broadcasting.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/broadcasting.py new file mode 100644 index 0000000000000..717914cda28c5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/broadcasting.py @@ -0,0 +1,178 @@ +""" +======================== +Broadcasting over arrays +======================== + +The term broadcasting describes how numpy treats arrays with different +shapes during arithmetic operations. Subject to certain constraints, +the smaller array is "broadcast" across the larger array so that they +have compatible shapes. Broadcasting provides a means of vectorizing +array operations so that looping occurs in C instead of Python. It does +this without making needless copies of data and usually leads to +efficient algorithm implementations. There are, however, cases where +broadcasting is a bad idea because it leads to inefficient use of memory +that slows computation. + +NumPy operations are usually done on pairs of arrays on an +element-by-element basis. In the simplest case, the two arrays must +have exactly the same shape, as in the following example: + + >>> a = np.array([1.0, 2.0, 3.0]) + >>> b = np.array([2.0, 2.0, 2.0]) + >>> a * b + array([ 2., 4., 6.]) + +NumPy's broadcasting rule relaxes this constraint when the arrays' +shapes meet certain constraints. The simplest broadcasting example occurs +when an array and a scalar value are combined in an operation: + +>>> a = np.array([1.0, 2.0, 3.0]) +>>> b = 2.0 +>>> a * b +array([ 2., 4., 6.]) + +The result is equivalent to the previous example where ``b`` was an array. +We can think of the scalar ``b`` being *stretched* during the arithmetic +operation into an array with the same shape as ``a``. The new elements in +``b`` are simply copies of the original scalar. The stretching analogy is +only conceptual. NumPy is smart enough to use the original scalar value +without actually making copies, so that broadcasting operations are as +memory and computationally efficient as possible. + +The code in the second example is more efficient than that in the first +because broadcasting moves less memory around during the multiplication +(``b`` is a scalar rather than an array). + +General Broadcasting Rules +========================== +When operating on two arrays, NumPy compares their shapes element-wise. +It starts with the trailing dimensions, and works its way forward. Two +dimensions are compatible when + +1) they are equal, or +2) one of them is 1 + +If these conditions are not met, a +``ValueError: frames are not aligned`` exception is thrown, indicating that +the arrays have incompatible shapes. The size of the resulting array +is the maximum size along each dimension of the input arrays. + +Arrays do not need to have the same *number* of dimensions. For example, +if you have a ``256x256x3`` array of RGB values, and you want to scale +each color in the image by a different value, you can multiply the image +by a one-dimensional array with 3 values. 
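+
+Concretely, a small sketch of that image-scaling case (the names here are
+only illustrative)::
+
+    >>> image = np.ones((256, 256, 3))
+    >>> scale = np.array([0.5, 1.5, 2.0])
+    >>> (image * scale).shape
+    (256, 256, 3)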
Lining up the sizes of the
+trailing axes of these arrays according to the broadcast rules shows that
+they are compatible::
+
+  Image  (3d array): 256 x 256 x 3
+  Scale  (1d array):             3
+  Result (3d array): 256 x 256 x 3
+
+When either of the dimensions compared is one, the other is
+used.  In other words, dimensions with size 1 are stretched or "copied"
+to match the other.
+
+In the following example, both the ``A`` and ``B`` arrays have axes with
+length one that are expanded to a larger size during the broadcast
+operation::
+
+  A      (4d array):  8 x 1 x 6 x 1
+  B      (3d array):      7 x 1 x 5
+  Result (4d array):  8 x 7 x 6 x 5
+
+Here are some more examples::
+
+  A      (2d array):  5 x 4
+  B      (1d array):      1
+  Result (2d array):  5 x 4
+
+  A      (2d array):  5 x 4
+  B      (1d array):      4
+  Result (2d array):  5 x 4
+
+  A      (3d array):  15 x 3 x 5
+  B      (3d array):  15 x 1 x 5
+  Result (3d array):  15 x 3 x 5
+
+  A      (3d array):  15 x 3 x 5
+  B      (2d array):       3 x 5
+  Result (3d array):  15 x 3 x 5
+
+  A      (3d array):  15 x 3 x 5
+  B      (2d array):       3 x 1
+  Result (3d array):  15 x 3 x 5
+
+Here are examples of shapes that do not broadcast::
+
+  A      (1d array):  3
+  B      (1d array):  4 # trailing dimensions do not match
+
+  A      (2d array):      2 x 1
+  B      (3d array):  8 x 4 x 3 # second from last dimensions mismatched
+
+An example of broadcasting in practice::
+
+ >>> x = np.arange(4)
+ >>> xx = x.reshape(4,1)
+ >>> y = np.ones(5)
+ >>> z = np.ones((3,4))
+
+ >>> x.shape
+ (4,)
+
+ >>> y.shape
+ (5,)
+
+ >>> x + y
+ <type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape
+
+ >>> xx.shape
+ (4, 1)
+
+ >>> y.shape
+ (5,)
+
+ >>> (xx + y).shape
+ (4, 5)
+
+ >>> xx + y
+ array([[ 1.,  1.,  1.,  1.,  1.],
+        [ 2.,  2.,  2.,  2.,  2.],
+        [ 3.,  3.,  3.,  3.,  3.],
+        [ 4.,  4.,  4.,  4.,  4.]])
+
+ >>> x.shape
+ (4,)
+
+ >>> z.shape
+ (3, 4)
+
+ >>> (x + z).shape
+ (3, 4)
+
+ >>> x + z
+ array([[ 1.,  2.,  3.,  4.],
+        [ 1.,  2.,  3.,  4.],
+        [ 1.,  2.,  3.,  4.]])
+
+Broadcasting provides a convenient way of taking the outer product (or
+any other outer operation) of two arrays. The following example shows an
+outer addition operation of two 1-d arrays::
+
+  >>> a = np.array([0.0, 10.0, 20.0, 30.0])
+  >>> b = np.array([1.0, 2.0, 3.0])
+  >>> a[:, np.newaxis] + b
+  array([[  1.,   2.,   3.],
+         [ 11.,  12.,  13.],
+         [ 21.,  22.,  23.],
+         [ 31.,  32.,  33.]])
+
+Here the ``newaxis`` index operator inserts a new axis into ``a``,
+making it a two-dimensional ``4x1`` array.  Combining the ``4x1`` array
+with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
+
+See `this article <http://wiki.scipy.org/EricsBroadcastingDoc>`_
+for illustrations of broadcasting concepts.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/byteswapping.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/byteswapping.py
new file mode 100644
index 0000000000000..430683d308d4a
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/byteswapping.py
@@ -0,0 +1,147 @@
+"""
+
+=============================
+ Byteswapping and byte order
+=============================
+
+Introduction to byte ordering and ndarrays
+==========================================
+
+The ``ndarray`` is an object that provides a Python array interface to data
+in memory.
+
+It often happens that the memory that you want to view with an array is
+not of the same byte ordering as the computer on which you are running
+Python.
+
+For example, I might be working on a computer with a little-endian CPU -
+such as an Intel Pentium, but I have loaded some data from a file
+written by a computer that is big-endian.
Let's say I have loaded 4
+bytes from a file written by a Sun (big-endian) computer.  I know that
+these 4 bytes represent two 16-bit integers.  On a big-endian machine, a
+two-byte integer is stored with the Most Significant Byte (MSB) first,
+and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
+
+#. MSB integer 1
+#. LSB integer 1
+#. MSB integer 2
+#. LSB integer 2
+
+Let's say the two integers were in fact 1 and 770.  Because 770 = 256 *
+3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
+The bytes I have loaded from the file would have these contents:
+
+>>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2)
+>>> big_end_str
+'\\x00\\x01\\x03\\x02'
+
+We might want to use an ``ndarray`` to access these integers.  In that
+case, we can create an array around this memory, and tell numpy that
+there are two integers, and that they are 16 bit and big-endian:
+
+>>> import numpy as np
+>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
+>>> big_end_arr[0]
+1
+>>> big_end_arr[1]
+770
+
+Note the array ``dtype`` above of ``>i2``.  The ``>`` means 'big-endian'
+(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'.  For
+example, if our data represented a single unsigned 4-byte little-endian
+integer, the dtype string would be ``<u4``.
+
+>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str)
+>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
+True
+
+Returning to our ``big_end_arr`` - in this case our underlying data is
+big-endian (data endianness) and we've set the dtype to match (the dtype
+is also big-endian).  However, sometimes you need to flip these around.
+
+Changing byte ordering
+======================
+
+As you can imagine from the introduction, there are two ways you can
+affect the relationship between the byte ordering of the array and the
+underlying memory it is looking at:
+
+* Change the byte-ordering information in the array dtype so that it
+  interprets the underlying data as being in a different byte order.
+  This is the role of ``arr.newbyteorder()``.
+* Change the byte-ordering of the underlying data, leaving the dtype
+  interpretation as it was.  This is what ``arr.byteswap()`` does.
+
+The common situations in which you need to change byte ordering are:
+
+#. Your data and dtype endianness don't match, and you want to change
+   the dtype so that it matches the data.
+#. Your data and dtype endianness don't match, and you want to swap the
+   data so that they match the dtype.
+#. Your data and dtype endianness match, but you want the data swapped
+   and the dtype to reflect this.
+
+Data and dtype endianness don't match, change dtype to match data
+------------------------------------------------------------------
+
+We make something where they don't match:
+
+>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
+>>> wrong_end_dtype_arr[0]
+256
+
+The obvious fix for this situation is to change the dtype so it gives
+the correct endianness:
+
+>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
+>>> fixed_end_dtype_arr[0]
+1
+
+Note that the array has not changed in memory:
+
+>>> fixed_end_dtype_arr.tobytes() == big_end_str
+True
+
+Data and dtype endianness don't match, change data to match dtype
+------------------------------------------------------------------
+
+You might want to do this if you need the data in memory to be a certain
+ordering.  For example you might be writing the memory out to a file
+that needs a certain byte ordering.
+
+>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
+>>> fixed_end_mem_arr[0]
+1
+
+Now the array *has* changed in memory:
+
+>>> fixed_end_mem_arr.tobytes() == big_end_str
+False
+
+Data and dtype endianness match, swap data and dtype
+----------------------------------------------------
+
+You may have a correctly specified array dtype, but you need the array
+to have the opposite byte order in memory, and you want the dtype to
+match so the array values make sense.  In this case you just do both of
+the previous operations:
+
+>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
+>>> swapped_end_arr[0]
+1
+>>> swapped_end_arr.tobytes() == big_end_str
+False
+
+An easier way to cast the data to a specific dtype and byte ordering
+is the ndarray astype method:
+
+>>> swapped_end_arr = big_end_arr.astype('<i2')
+>>> swapped_end_arr[0]
+1
+>>> swapped_end_arr.tobytes() == big_end_str
+False
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/constants.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/constants.py
new file mode 100644
index 0000000000000..36f94d3070517
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/constants.py
@@ -0,0 +1,393 @@
+"""
+=========
+Constants
+=========
+
+Numpy includes several constants:
+
+%(constant_list)s
+"""
+#
+# Note: the docstring is autogenerated.
+#
+from __future__ import division, absolute_import, print_function
+
+import textwrap, re
+
+# Maintain same format as in numpy.add_newdocs
+constants = []
+def add_newdoc(module, name, doc):
+    constants.append((name, doc))
+
+add_newdoc('numpy', 'Inf',
+    """
+    IEEE 754 floating point representation of (positive) infinity.
+
+    Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+    `inf`. For more details, see `inf`.
+
+    See Also
+    --------
+    inf
+
+    """)
+
+add_newdoc('numpy', 'Infinity',
+    """
+    IEEE 754 floating point representation of (positive) infinity.
+
+    Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+    `inf`. For more details, see `inf`.
+
+    See Also
+    --------
+    inf
+
+    """)
+
+add_newdoc('numpy', 'NAN',
+    """
+    IEEE 754 floating point representation of Not a Number (NaN).
+
+    `NaN` and `NAN` are equivalent definitions of `nan`. Please use
+    `nan` instead of `NAN`.
+
+    See Also
+    --------
+    nan
+
+    """)
+
+add_newdoc('numpy', 'NINF',
+    """
+    IEEE 754 floating point representation of negative infinity.
+
+    Returns
+    -------
+    y : float
+        A floating point representation of negative infinity.
+
+    See Also
+    --------
+    isinf : Shows which elements are positive or negative infinity
+
+    isposinf : Shows which elements are positive infinity
+
+    isneginf : Shows which elements are negative infinity
+
+    isnan : Shows which elements are Not a Number
+
+    isfinite : Shows which elements are finite (not one of Not a Number,
+               positive infinity and negative infinity)
+
+    Notes
+    -----
+    Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Also that positive infinity is not equivalent to negative infinity. But
+    infinity is equivalent to positive infinity.
+
+    Examples
+    --------
+    >>> np.NINF
+    -inf
+    >>> np.log(0)
+    -inf
+
+    """)
+
+add_newdoc('numpy', 'NZERO',
+    """
+    IEEE 754 floating point representation of negative zero.
+
+    Returns
+    -------
+    y : float
+        A floating point representation of negative zero.
+ + See Also + -------- + PZERO : Defines positive zero. + + isinf : Shows which elements are positive or negative infinity. + + isposinf : Shows which elements are positive infinity. + + isneginf : Shows which elements are negative infinity. + + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite - not one of + Not a Number, positive infinity and negative infinity. + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). Negative zero is considered to be a finite number. + + Examples + -------- + >>> np.NZERO + -0.0 + >>> np.PZERO + 0.0 + + >>> np.isfinite([np.NZERO]) + array([ True], dtype=bool) + >>> np.isnan([np.NZERO]) + array([False], dtype=bool) + >>> np.isinf([np.NZERO]) + array([False], dtype=bool) + + """) + +add_newdoc('numpy', 'NaN', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + `NaN` and `NAN` are equivalent definitions of `nan`. Please use + `nan` instead of `NaN`. + + See Also + -------- + nan + + """) + +add_newdoc('numpy', 'PINF', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'PZERO', + """ + IEEE 754 floating point representation of positive zero. + + Returns + ------- + y : float + A floating point representation of positive zero. + + See Also + -------- + NZERO : Defines negative zero. + + isinf : Shows which elements are positive or negative infinity. + + isposinf : Shows which elements are positive infinity. + + isneginf : Shows which elements are negative infinity. + + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite - not one of + Not a Number, positive infinity and negative infinity. + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). Positive zero is considered to be a finite number. + + Examples + -------- + >>> np.PZERO + 0.0 + >>> np.NZERO + -0.0 + + >>> np.isfinite([np.PZERO]) + array([ True], dtype=bool) + >>> np.isnan([np.PZERO]) + array([False], dtype=bool) + >>> np.isinf([np.PZERO]) + array([False], dtype=bool) + + """) + +add_newdoc('numpy', 'e', + """ + Euler's constant, base of natural logarithms, Napier's constant. + + ``e = 2.71828182845904523536028747135266249775724709369995...`` + + See Also + -------- + exp : Exponential function + log : Natural logarithm + + References + ---------- + .. [1] http://en.wikipedia.org/wiki/Napier_constant + + """) + +add_newdoc('numpy', 'inf', + """ + IEEE 754 floating point representation of (positive) infinity. + + Returns + ------- + y : float + A floating point representation of positive infinity. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity + + isposinf : Shows which elements are positive infinity + + isneginf : Shows which elements are negative infinity + + isnan : Shows which elements are Not a Number + + isfinite : Shows which elements are finite (not one of Not a Number, + positive infinity and negative infinity) + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Also that positive infinity is not equivalent to negative infinity. But + infinity is equivalent to positive infinity. + + `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. 
+ + Examples + -------- + >>> np.inf + inf + >>> np.array([1]) / 0. + array([ Inf]) + + """) + +add_newdoc('numpy', 'infty', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'nan', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + Returns + ------- + y : A floating point representation of Not a Number. + + See Also + -------- + isnan : Shows which elements are Not a Number. + isfinite : Shows which elements are finite (not one of + Not a Number, positive infinity and negative infinity) + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + + `NaN` and `NAN` are aliases of `nan`. + + Examples + -------- + >>> np.nan + nan + >>> np.log(-1) + nan + >>> np.log([-1, 1, 2]) + array([ NaN, 0. , 0.69314718]) + + """) + +add_newdoc('numpy', 'newaxis', + """ + A convenient alias for None, useful for indexing arrays. + + See Also + -------- + `numpy.doc.indexing` + + Examples + -------- + >>> newaxis is None + True + >>> x = np.arange(3) + >>> x + array([0, 1, 2]) + >>> x[:, newaxis] + array([[0], + [1], + [2]]) + >>> x[:, newaxis, newaxis] + array([[[0]], + [[1]], + [[2]]]) + >>> x[:, newaxis] * x + array([[0, 0, 0], + [0, 1, 2], + [0, 2, 4]]) + + Outer product, same as ``outer(x, y)``: + + >>> y = np.arange(3, 6) + >>> x[:, newaxis] * y + array([[ 0, 0, 0], + [ 3, 4, 5], + [ 6, 8, 10]]) + + ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: + + >>> x[newaxis, :].shape + (1, 3) + >>> x[newaxis].shape + (1, 3) + >>> x[None].shape + (1, 3) + >>> x[:, newaxis].shape + (3, 1) + + """) + +if __doc__: + constants_str = [] + constants.sort() + for name, doc in constants: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) + new_lines.append('') + else: + new_lines.append(line) + s = "\n".join(new_lines) + + # Done. + constants_str.append(""".. const:: %s\n %s""" % (name, s)) + constants_str = "\n".join(constants_str) + + __doc__ = __doc__ % dict(constant_list=constants_str) + del constants_str, name, doc + del line, lines, new_lines, m, s, prev + +del constants, add_newdoc diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/creation.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/creation.py new file mode 100644 index 0000000000000..7979b51aabdc7 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/creation.py @@ -0,0 +1,144 @@ +""" +============== +Array Creation +============== + +Introduction +============ + +There are 5 general mechanisms for creating arrays: + +1) Conversion from other Python structures (e.g., lists, tuples) +2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros, + etc.) +3) Reading arrays from disk, either from standard or custom formats +4) Creating arrays from raw bytes through the use of strings or buffers +5) Use of special library functions (e.g., random) + +This section will not cover means of replicating, joining, or otherwise +expanding or mutating existing arrays. Nor will it cover creating object +arrays or record arrays. 
Both of those are covered in their own sections.
+
+Converting Python array_like Objects to Numpy Arrays
+====================================================
+
+In general, numerical data arranged in an array-like structure in Python can
+be converted to arrays through the use of the array() function. The most
+obvious examples are lists and tuples. See the documentation for array() for
+details on its use. Some objects may support the array-protocol and allow
+conversion to arrays this way. A simple way to find out if the object can be
+converted to a numpy array using array() is simply to try it interactively and
+see if it works! (The Python Way).
+
+Examples: ::
+
+ >>> x = np.array([2,3,1,0])
+ >>> x = np.array([2, 3, 1, 0])
+ >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
+ >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
+
+Intrinsic Numpy Array Creation
+==============================
+
+Numpy has built-in functions for creating arrays from scratch:
+
+zeros(shape) will create an array filled with 0 values with the specified
+shape. The default dtype is float64. ::
+
+ >>> np.zeros((2, 3))
+ array([[ 0., 0., 0.],
+        [ 0., 0., 0.]])
+
+ones(shape) will create an array filled with 1 values. It is identical to
+zeros in all other respects.
+
+arange() will create arrays with regularly incrementing values. Check the
+docstring for complete information on the various ways it can be used. A few
+examples will be given here: ::
+
+ >>> np.arange(10)
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.arange(2, 10, dtype=np.float)
+ array([ 2., 3., 4., 5., 6., 7., 8., 9.])
+ >>> np.arange(2, 3, 0.1)
+ array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
+
+Note that there are some subtleties regarding the last usage that the user
+should be aware of that are described in the arange docstring.
+
+linspace() will create arrays with a specified number of elements, and
+spaced equally between the specified beginning and end values. For
+example: ::
+
+ >>> np.linspace(1., 4., 6)
+ array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
+
+The advantage of this creation function is that one can guarantee the
+number of elements and the starting and end point, which arange()
+generally will not do for arbitrary start, stop, and step values.
+
+indices() will create a set of arrays (stacked as a one-higher dimensioned
+array), one per dimension with each representing variation in that dimension.
+An example illustrates much better than a verbal description: ::
+
+ >>> np.indices((3,3))
+ array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
+
+This is particularly useful for evaluating functions of multiple dimensions on
+a regular grid.
+
+Reading Arrays From Disk
+========================
+
+This is presumably the most common case of large array creation. The details,
+of course, depend greatly on the format of data on disk and so this section
+can only give general pointers on how to handle various formats.
+
+Standard Binary Formats
+-----------------------
+
+Various fields have standard formats for array data.
The following lists the
+ones with known Python libraries to read them and return numpy arrays (there
+may be others for which it is possible to read and convert to numpy arrays so
+check the last section as well)
+::
+
+ HDF5: PyTables
+ FITS: PyFITS
+
+Examples of formats that cannot be read directly, but for which conversion is
+not hard, are those supported by libraries like PIL (which is able to read and
+write many image formats such as jpg, png, etc.).
+
+Common ASCII Formats
+--------------------
+
+Comma Separated Value files (CSV) are widely used (and an export and import
+option for programs like Excel). There are a number of ways of reading these
+files in Python. There are CSV functions in Python and functions in pylab
+(part of matplotlib).
+
+More generic ASCII files can be read using the io package in scipy.
+
+Custom Binary Formats
+---------------------
+
+There are a variety of approaches one can use. If the file has a relatively
+simple format then one can write a simple I/O library and use the numpy
+fromfile() function and .tofile() method to read and write numpy arrays
+directly (mind your byteorder though!). If a good C or C++ library exists that
+reads the data, one can wrap that library with a variety of techniques, though
+that certainly is much more work and requires significantly more advanced
+knowledge to interface with C or C++.
+
+Use of Special Libraries
+------------------------
+
+There are libraries that can be used to generate arrays for special purposes
+and it isn't possible to enumerate all of them. The most common use is the
+many array generation functions in random that can generate arrays of
+random values, and some utility functions to generate special matrices (e.g.
+diagonal).
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/glossary.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/glossary.py
new file mode 100644
index 0000000000000..3770f5761f2b4
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/glossary.py
@@ -0,0 +1,418 @@
+"""
+========
+Glossary
+========
+
+.. glossary::
+
+   along an axis
+      Axes are defined for arrays with more than one dimension.  A
+      2-dimensional array has two corresponding axes: the first running
+      vertically downwards across rows (axis 0), and the second running
+      horizontally across columns (axis 1).
+
+      Many operations can take place along one of these axes.  For example,
+      we can sum each row of an array, in which case we operate along
+      columns, or axis 1::
+
+        >>> x = np.arange(12).reshape((3,4))
+
+        >>> x
+        array([[ 0,  1,  2,  3],
+               [ 4,  5,  6,  7],
+               [ 8,  9, 10, 11]])
+
+        >>> x.sum(axis=1)
+        array([ 6, 22, 38])
+
+   array
+      A homogeneous container of numerical elements.  Each element in the
+      array occupies a fixed amount of memory (hence homogeneous), and
+      can be a numerical element of a single type (such as float, int
+      or complex) or a combination (such as ``(float, int, float)``).
Each
+      array has an associated data-type (or ``dtype``), which describes
+      the numerical type of its elements::
+
+        >>> x = np.array([1, 2, 3], float)
+
+        >>> x
+        array([ 1.,  2.,  3.])
+
+        >>> x.dtype # floating point number, 64 bits of memory per element
+        dtype('float64')
+
+        # More complicated data type: each array element is a combination of
+        # an integer and a floating point number
+        >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
+        array([(1, 2.0), (3, 4.0)],
+              dtype=[('x', '<i4'), ('y', '<f8')])
+
+   attribute
+      A property of an object that can be accessed using ``obj.attribute``,
+      e.g., ``shape`` is an attribute of an array::
+
+        >>> x = np.array([1, 2, 3])
+        >>> x.shape
+        (3,)
+
+   BLAS
+      `Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
+
+   broadcast
+      NumPy can do operations on arrays whose shapes are mismatched::
+
+        >>> x = np.array([1, 2])
+        >>> y = np.array([[3], [4]])
+
+        >>> x
+        array([1, 2])
+
+        >>> y
+        array([[3],
+               [4]])
+
+        >>> x + y
+        array([[4, 5],
+               [5, 6]])
+
+      See `doc.broadcasting`_ for more information.
+
+   C order
+      See `row-major`
+
+   column-major
+      A way to represent items in an N-dimensional array in the 1-dimensional
+      computer memory.  In column-major order, the leftmost index "varies the
+      fastest": for example the array::
+
+           [[1, 2, 3],
+            [4, 5, 6]]
+
+      is represented in column-major order as::
+
+           [1, 4, 2, 5, 3, 6]
+
+      Column-major order is also known as the Fortran order, as the Fortran
+      programming language uses it.
+
+   decorator
+      An operator that transforms a function.  For example, a ``log``
+      decorator may be defined to print debugging information upon
+      function execution::
+
+        >>> def log(f):
+        ...     def new_logging_func(*args, **kwargs):
+        ...         print "Logging call with parameters:", args, kwargs
+        ...         return f(*args, **kwargs)
+        ...
+        ...     return new_logging_func
+
+      Now, when we define a function, we can "decorate" it using ``log``::
+
+        >>> @log
+        ... def add(a, b):
+        ...     return a + b
+
+      Calling ``add`` then yields:
+
+      >>> add(1, 2)
+      Logging call with parameters: (1, 2) {}
+      3
+
+   dictionary
+      Resembling a language dictionary, which provides a mapping between
+      words and descriptions thereof, a Python dictionary is a mapping
+      between two objects::
+
+        >>> x = {1: 'one', 'two': [1, 2]}
+
+      Here, `x` is a dictionary mapping keys to values, in this case
+      the integer 1 to the string "one", and the string "two" to
+      the list ``[1, 2]``.  The values may be accessed using their
+      corresponding keys::
+
+        >>> x[1]
+        'one'
+
+        >>> x['two']
+        [1, 2]
+
+      Note that dictionaries are not stored in any specific order.  Also,
+      most mutable (see *immutable* below) objects, such as lists, may not
+      be used as keys.
+
+      For more information on dictionaries, read the
+      `Python tutorial <http://docs.python.org/tutorial/>`_.
+
+   Fortran order
+      See `column-major`
+
+   flattened
+      Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
+
+   immutable
+      An object that cannot be modified after execution is called
+      immutable.  Two common examples are strings and tuples.
+
+   instance
+      A class definition gives the blueprint for constructing an object::
+
+        >>> class House(object):
+        ...     wall_colour = 'white'
+
+      Yet, we have to *build* a house before it exists::
+
+        >>> h = House() # build a house
+
+      Now, ``h`` is called a ``House`` instance.  An instance is therefore
+      a specific realisation of a class.
+
+   iterable
+      A sequence that allows "walking" (iterating) over items, typically
+      using a loop such as::
+
+        >>> x = [1, 2, 3]
+        >>> [item**2 for item in x]
+        [1, 4, 9]
+
+      It is often used in combination with ``enumerate``::
+
+        >>> keys = ['a','b','c']
+        >>> for n, k in enumerate(keys):
+        ...     print "Key %d: %s" % (n, k)
+        ...
+        Key 0: a
+        Key 1: b
+        Key 2: c
+
+   list
+      A Python container that can hold any number of objects or items.
+      The items do not have to be of the same type, and can even be
+      lists themselves::
+
+        >>> x = [2, 2.0, "two", [2, 2.0]]
+
+      The list `x` contains 4 items, each of which can be accessed
+      individually::
+
+        >>> x[2] # the string 'two'
+        'two'
+
+        >>> x[3] # a list, containing an integer 2 and a float 2.0
+        [2, 2.0]
+
+      It is also possible to select more than one item at a time,
+      using *slicing*::
+
+        >>> x[0:2] # or, equivalently, x[:2]
+        [2, 2.0]
+
+      In code, arrays are often conveniently expressed as nested lists::
+
+        >>> np.array([[1, 2], [3, 4]])
+        array([[1, 2],
+               [3, 4]])
+
+      For more information, read the section on lists in the `Python
+      tutorial <http://docs.python.org/tutorial/>`_.  For a mapping
+      type (key-value), see *dictionary*.
+
+   mask
+      A boolean array, used to select only certain elements for an operation::
+
+        >>> x = np.arange(5)
+        >>> x
+        array([0, 1, 2, 3, 4])
+
+        >>> mask = (x > 2)
+        >>> mask
+        array([False, False, False,  True,  True], dtype=bool)
+
+        >>> x[mask] = -1
+        >>> x
+        array([ 0,  1,  2, -1, -1])
+
+   masked array
+      An array that suppresses the values indicated by a mask::
+
+        >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
+        >>> x
+        masked_array(data = [-- 2.0 --],
+                     mask = [ True False  True],
+               fill_value = 1e+20)
+
+        >>> x + [1, 2, 3]
+        masked_array(data = [-- 4.0 --],
+                     mask = [ True False  True],
+               fill_value = 1e+20)
+
+      Masked arrays are often used when operating on arrays containing
+      missing or invalid entries.
+
+   matrix
+      A 2-dimensional ndarray that preserves its two-dimensional nature
+      throughout operations.  It has certain special operations, such as ``*``
+      (matrix multiplication) and ``**`` (matrix power), defined::
+
+        >>> x = np.mat([[1, 2], [3, 4]])
+
+        >>> x
+        matrix([[1, 2],
+                [3, 4]])
+
+        >>> x**2
+        matrix([[ 7, 10],
+                [15, 22]])
+
+   method
+      A function associated with an object.  For example, each ndarray has a
+      method called ``repeat``::
+
+        >>> x = np.array([1, 2, 3])
+
+        >>> x.repeat(2)
+        array([1, 1, 2, 2, 3, 3])
+
+   ndarray
+      See *array*.
+
+   reference
+      If ``a`` is a reference to ``b``, then ``(a is b) == True``.  Therefore,
+      ``a`` and ``b`` are different names for the same Python object.
+
+   row-major
+      A way to represent items in an N-dimensional array in the 1-dimensional
+      computer memory.  In row-major order, the rightmost index "varies
+      the fastest": for example the array::
+
+           [[1, 2, 3],
+            [4, 5, 6]]
+
+      is represented in row-major order as::
+
+           [1, 2, 3, 4, 5, 6]
+
+      Row-major order is also known as the C order, as the C programming
+      language uses it.  New Numpy arrays are by default in row-major order.
+
+   self
+      Often seen in method signatures, ``self`` refers to the instance
+      of the associated class.  For example:
+
+        >>> class Paintbrush(object):
+        ...     color = 'blue'
+        ...
+        ...     def paint(self):
+        ...         print "Painting the city %s!" % self.color
+        ...
+        >>> p = Paintbrush()
+        >>> p.color = 'red'
+        >>> p.paint() # self refers to 'p'
+        Painting the city red!
+
+   slice
+      Used to select only certain elements from a sequence::
+
+        >>> x = range(5)
+        >>> x
+        [0, 1, 2, 3, 4]
+
+        >>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
+        [1, 2]
+
+        >>> x[1:5:2] # slice from 1 to 5, but skipping every second element
+        [1, 3]
+
+        >>> x[::-1] # slice a sequence in reverse
+        [4, 3, 2, 1, 0]
+
+      Arrays may have more than one dimension, each of which can be sliced
+      individually::
+
+        >>> x = np.array([[1, 2], [3, 4]])
+        >>> x
+        array([[1, 2],
+               [3, 4]])
+
+        >>> x[:, 1]
+        array([2, 4])
+
+   tuple
+      A sequence that may contain a variable number of items of any
+      kind.  A tuple is immutable, i.e., once constructed it cannot be
+      changed.  Similar to a list, it can be indexed and sliced::
+
+        >>> x = (1, 'one', [1, 2])
+
+        >>> x
+        (1, 'one', [1, 2])
+
+        >>> x[0]
+        1
+
+        >>> x[:2]
+        (1, 'one')
+
+      A useful concept is "tuple unpacking", which allows variables to
+      be assigned to the contents of a tuple::
+
+        >>> x, y = (1, 2)
+        >>> x, y = 1, 2
+
+      This is often used when a function returns multiple values:
+
+        >>> def return_many():
+        ...     return 1, 'alpha', None
+
+        >>> a, b, c = return_many()
+        >>> a, b, c
+        (1, 'alpha', None)
+
+        >>> a
+        1
+        >>> b
+        'alpha'
+
+   ufunc
+      Universal function.  A fast element-wise array operation.  Examples include
+      ``add``, ``sin`` and ``logical_or``.
+
+   view
+      An array that does not own its data, but refers to another array's
+      data instead.  For example, we may create a view that only shows
+      every second element of another array::
+
+        >>> x = np.arange(5)
+        >>> x
+        array([0, 1, 2, 3, 4])
+
+        >>> y = x[::2]
+        >>> y
+        array([0, 2, 4])
+
+        >>> x[0] = 3 # changing x changes y as well, since y is a view on x
+        >>> y
+        array([3, 2, 4])
+
+   wrapper
+      Python is a high-level (highly abstracted, or English-like) language.
+      This abstraction comes at a price in execution speed, and sometimes
+      it becomes necessary to use lower level languages to do fast
+      computations.  A wrapper is code that provides a bridge between
+      high- and low-level languages, allowing, e.g., Python to execute
+      code written in C or Fortran.
+
+      Examples include ctypes, SWIG and Cython (which wraps C and C++)
+      and f2py (which wraps Fortran).
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/howtofind.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/howtofind.py
new file mode 100644
index 0000000000000..e080d263a2791
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/howtofind.py
@@ -0,0 +1,10 @@
+"""
+
+=================
+How to Find Stuff
+=================
+
+How to find things in NumPy.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/indexing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/indexing.py
new file mode 100644
index 0000000000000..d3f442c212e1b
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/indexing.py
@@ -0,0 +1,437 @@
+"""
+==============
+Array indexing
+==============
+
+Array indexing refers to any use of the square brackets ([]) to index
+array values.  There are many options to indexing, which give numpy
+indexing great power, but with power comes some complexity and the
+potential for confusion.  This section is just an overview of the
+various options and issues related to indexing.  Aside from single
+element indexing, the details on most of these options are to be
+found in related sections.
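+
+As a quick preview (a minimal sketch; each form is covered in detail
+below), all of these options share the same bracket syntax::
+
+    >>> x = np.arange(10)
+    >>> x[2]                     # single element
+    2
+    >>> x[2:5]                   # slice
+    array([2, 3, 4])
+    >>> x[np.array([0, 3, 4])]   # index array
+    array([0, 3, 4])
+    >>> x[x > 7]                 # boolean "mask" array
+    array([8, 9])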
+
+Assignment vs referencing
+=========================
+
+Most of the following examples show the use of indexing when
+referencing data in an array. The examples work just as well
+when assigning to an array. See the section at the end for
+specific examples and explanations on how assignments work.
+
+Single element indexing
+=======================
+
+Single element indexing for a 1-D array is what one expects. It works
+exactly like that for other standard Python sequences. It is 0-based,
+and accepts negative indices for indexing from the end of the array. ::
+
+    >>> x = np.arange(10)
+    >>> x[2]
+    2
+    >>> x[-2]
+    8
+
+Unlike lists and tuples, numpy arrays support multidimensional indexing
+for multidimensional arrays. That means that it is not necessary to
+separate each dimension's index into its own set of square brackets. ::
+
+    >>> x.shape = (2,5) # now x is 2-dimensional
+    >>> x[1,3]
+    8
+    >>> x[1,-1]
+    9
+
+Note that if one indexes a multidimensional array with fewer indices
+than dimensions, one gets a subdimensional array. For example: ::
+
+    >>> x[0]
+    array([0, 1, 2, 3, 4])
+
+That is, each index specified selects the array corresponding to the
+rest of the dimensions selected. In the above example, choosing 0
+means that the remaining dimension of length 5 is being left unspecified,
+and that what is returned is an array of that dimensionality and size.
+It must be noted that the returned array is not a copy of the original,
+but points to the same values in memory as does the original array.
+In this case, the 1-D array at the first position (0) is returned.
+So using a single index on the returned array results in a single
+element being returned. That is: ::
+
+    >>> x[0][2]
+    2
+
+So note that ``x[0,2] == x[0][2]``, though the second case is less
+efficient: a new temporary array is created after the first index,
+and that temporary is subsequently indexed by 2.
+
+A note to those used to IDL or Fortran memory order as it relates to
+indexing: NumPy uses C-order indexing. That means that the last
+index usually represents the most rapidly changing memory location,
+unlike Fortran or IDL, where the first index represents the most
+rapidly changing location in memory. This difference represents a
+great potential for confusion.
+
+Other indexing options
+======================
+
+It is possible to slice and stride arrays to extract arrays of the
+same number of dimensions, but of different sizes than the original.
+The slicing and striding works exactly the same way it does for lists
+and tuples except that they can be applied to multiple dimensions as
+well. A few examples illustrate this best: ::
+
+    >>> x = np.arange(10)
+    >>> x[2:5]
+    array([2, 3, 4])
+    >>> x[:-7]
+    array([0, 1, 2])
+    >>> x[1:7:2]
+    array([1, 3, 5])
+    >>> y = np.arange(35).reshape(5,7)
+    >>> y[1:5:2,::3]
+    array([[ 7, 10, 13],
+           [21, 24, 27]])
+
+Note that slices of arrays do not copy the internal array data; they
+only produce new views of the original data.
+
+It is possible to index arrays with other arrays for the purposes of
+selecting lists of values out of arrays into new arrays. There are
+two different ways of accomplishing this. One uses one or more arrays
+of index values. The other involves giving a boolean array of the proper
+shape to indicate the values to be selected. Index arrays are a very
+powerful tool that allow one to avoid looping over individual elements in
+arrays and thus greatly improve performance.
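+
+As a rough illustration of that performance point (a minimal sketch;
+exact timings vary by machine and array size), compare an explicit
+Python-level loop with a single index-array operation::
+
+    >>> a = np.arange(1000000)
+    >>> idx = np.array([0, 10, 100])
+    >>> [a[i] for i in idx]   # one Python-level indexing call per element
+    [0, 10, 100]
+    >>> a[idx]                # a single selection, with the loop done in C
+    array([  0,  10, 100])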
+
+It is possible to use special features to effectively increase the
+number of dimensions in an array through indexing so the resulting
+array acquires the shape needed for use in an expression or with a
+specific function.
+
+Index arrays
+============
+
+Numpy arrays may be indexed with other arrays (or any other sequence-
+like object that can be converted to an array, such as lists, with the
+exception of tuples; see the end of this document for why this is). The
+use of index arrays ranges from simple, straightforward cases to
+complex, hard-to-understand cases. For all cases of index arrays, what
+is returned is a copy of the original data, not a view as one gets for
+slices.
+
+Index arrays must be of integer type. Each value in the index array
+indicates which value in the array being indexed to use in place of the
+index. To illustrate: ::
+
+    >>> x = np.arange(10,1,-1)
+    >>> x
+    array([10,  9,  8,  7,  6,  5,  4,  3,  2])
+    >>> x[np.array([3, 3, 1, 8])]
+    array([7, 7, 9, 2])
+
+The index array consisting of the values 3, 3, 1 and 8 creates an array
+of length 4 (the same as the index array) where each index is replaced by
+the value the index array has in the array being indexed.
+
+Negative values are permitted and work as they do with single indices
+or slices: ::
+
+    >>> x[np.array([3,3,-3,8])]
+    array([7, 7, 4, 2])
+
+It is an error to have index values out of bounds: ::
+
+    >>> x[np.array([3, 3, 20, 8])]
+    <type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
+
+Generally speaking, what is returned when index arrays are used is
+an array with the same shape as the index array, but with the type
+and values of the array being indexed. As an example, we can use a
+multidimensional index array instead: ::
+
+    >>> x[np.array([[1,1],[2,3]])]
+    array([[9, 9],
+           [8, 7]])
+
+Indexing Multi-dimensional arrays
+=================================
+
+Things become more complex when multidimensional arrays are indexed,
+particularly with multidimensional index arrays. These tend to be
+more unusual uses, but they are permitted, and they are useful for some
+problems. We'll start with the simplest multidimensional case (using
+the array y from the previous examples): ::
+
+    >>> y[np.array([0,2,4]), np.array([0,1,2])]
+    array([ 0, 15, 30])
+
+In this case, if the index arrays have a matching shape, and there is
+an index array for each dimension of the array being indexed, the
+resultant array has the same shape as the index arrays, and the values
+correspond to the index set for each position in the index arrays. In
+this example, the first index value is 0 for both index arrays, and
+thus the first value of the resultant array is y[0,0]. The next value
+is y[2,1], and the last is y[4,2].
+
+If the index arrays do not have the same shape, there is an attempt to
+broadcast them to the same shape. If they cannot be broadcast to the
+same shape, an exception is raised: ::
+
+    >>> y[np.array([0,2,4]), np.array([0,1])]
+    <type 'exceptions.ValueError'>: shape mismatch: objects cannot be
+    broadcast to a single shape
+
+The broadcasting mechanism permits index arrays to be combined with
+scalars for other indices. The effect is that the scalar value is used
+for all the corresponding values of the index arrays: ::
+
+    >>> y[np.array([0,2,4]), 1]
+    array([ 1, 15, 29])
+
+Jumping to the next level of complexity, it is possible to only
+partially index an array with index arrays. It takes a bit of thought
+to understand what happens in such cases.
For example, if we just use
+one index array with y: ::
+
+    >>> y[np.array([0,2,4])]
+    array([[ 0,  1,  2,  3,  4,  5,  6],
+           [14, 15, 16, 17, 18, 19, 20],
+           [28, 29, 30, 31, 32, 33, 34]])
+
+What results is the construction of a new array where each value of
+the index array selects one row from the array being indexed and the
+resultant array has the resulting shape (size of row, number of index
+elements).
+
+An example of where this may be useful is for a color lookup table
+where we want to map the values of an image into RGB triples for
+display. The lookup table could have a shape (nlookup, 3). Indexing
+such an array with an image with shape (ny, nx) with dtype=np.uint8
+(or any integer type so long as values are within the bounds of the
+lookup table) will result in an array of shape (ny, nx, 3) where a
+triple of RGB values is associated with each pixel location.
+
+In general, the shape of the resultant array will be the concatenation
+of the shape of the index array (or the shape that all the index arrays
+were broadcast to) with the shape of any unused dimensions (those not
+indexed) in the array being indexed.
+
+Boolean or "mask" index arrays
+==============================
+
+Boolean arrays used as indices are treated in a different manner
+entirely than index arrays. Boolean arrays must be of the same shape
+as the initial dimensions of the array being indexed. In the
+most straightforward case, the boolean array has the same shape: ::
+
+    >>> b = y>20
+    >>> y[b]
+    array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
+
+The result is a 1-D array containing all the elements in the indexed
+array corresponding to all the true elements in the boolean array. As
+with index arrays, what is returned is a copy of the data, not a view
+as one gets with slices.
+
+The result will be multidimensional if y has more dimensions than b.
+For example: ::
+
+    >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
+    array([False, False, False,  True,  True], dtype=bool)
+    >>> y[b[:,5]]
+    array([[21, 22, 23, 24, 25, 26, 27],
+           [28, 29, 30, 31, 32, 33, 34]])
+
+Here the 4th and 5th rows are selected from the indexed array and
+combined to make a 2-D array.
+
+In general, when the boolean array has fewer dimensions than the array
+being indexed, this is equivalent to y[b, ...], which means
+y is indexed by b followed by as many : as are needed to fill
+out the rank of y.
+Thus the shape of the result is one dimension containing the number
+of True elements of the boolean array, followed by the remaining
+dimensions of the array being indexed.
+
+For example, using a 2-D boolean array of shape (2,3)
+with four True elements to select rows from a 3-D array of shape
+(2,3,5) results in a 2-D result of shape (4,5): ::
+
+    >>> x = np.arange(30).reshape(2,3,5)
+    >>> x
+    array([[[ 0,  1,  2,  3,  4],
+            [ 5,  6,  7,  8,  9],
+            [10, 11, 12, 13, 14]],
+           [[15, 16, 17, 18, 19],
+            [20, 21, 22, 23, 24],
+            [25, 26, 27, 28, 29]]])
+    >>> b = np.array([[True, True, False], [False, True, True]])
+    >>> x[b]
+    array([[ 0,  1,  2,  3,  4],
+           [ 5,  6,  7,  8,  9],
+           [20, 21, 22, 23, 24],
+           [25, 26, 27, 28, 29]])
+
+For further details, consult the numpy reference documentation on array indexing.
+
+Combining index arrays with slices
+==================================
+
+Index arrays may be combined with slices.
For example: ::
+
+    >>> y[np.array([0,2,4]),1:3]
+    array([[ 1,  2],
+           [15, 16],
+           [29, 30]])
+
+In effect, the slice is converted to an index array
+np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
+to produce a resultant array of shape (3,2).
+
+Likewise, slicing can be combined with broadcasted boolean indices: ::
+
+    >>> y[b[:,5],1:3]
+    array([[22, 23],
+           [29, 30]])
+
+Structural indexing tools
+=========================
+
+To facilitate easy matching of array shapes with expressions and in
+assignments, the np.newaxis object can be used within array indices
+to add new dimensions with a size of 1. For example: ::
+
+    >>> y.shape
+    (5, 7)
+    >>> y[:,np.newaxis,:].shape
+    (5, 1, 7)
+
+Note that there are no new elements in the array, just that the
+dimensionality is increased. This can be handy to combine two
+arrays in a way that otherwise would require explicit reshaping
+operations. For example: ::
+
+    >>> x = np.arange(5)
+    >>> x[:,np.newaxis] + x[np.newaxis,:]
+    array([[0, 1, 2, 3, 4],
+           [1, 2, 3, 4, 5],
+           [2, 3, 4, 5, 6],
+           [3, 4, 5, 6, 7],
+           [4, 5, 6, 7, 8]])
+
+The ellipsis syntax may be used to indicate selecting in full any
+remaining unspecified dimensions. For example: ::
+
+    >>> z = np.arange(81).reshape(3,3,3,3)
+    >>> z[1,...,2]
+    array([[29, 32, 35],
+           [38, 41, 44],
+           [47, 50, 53]])
+
+This is equivalent to: ::
+
+    >>> z[1,:,:,2]
+    array([[29, 32, 35],
+           [38, 41, 44],
+           [47, 50, 53]])
+
+Assigning values to indexed arrays
+==================================
+
+As mentioned, one can select a subset of an array to assign to using
+a single index, slices, and index and mask arrays. The value being
+assigned to the indexed array must be shape consistent (the same shape
+or broadcastable to the shape the index produces). For example, it is
+permitted to assign a constant to a slice: ::
+
+    >>> x = np.arange(10)
+    >>> x[2:7] = 1
+
+or an array of the right size: ::
+
+    >>> x[2:7] = np.arange(5)
+
+Note that assignments may result in changes if assigning
+higher types to lower types (like floats to ints) or even
+exceptions (assigning complex to floats or ints): ::
+
+    >>> x[1] = 1.2
+    >>> x[1]
+    1
+    >>> x[1] = 1.2j
+    <type 'exceptions.TypeError'>: can't convert complex to long; use
+    long(abs(z))
+
+Unlike some of the references (such as array and mask indices),
+assignments are always made to the original data in the array
+(indeed, nothing else would make sense!). Note though, that some
+actions may not work as one may naively expect. This particular
+example is often surprising to people: ::
+
+    >>> x = np.arange(0, 50, 10)
+    >>> x
+    array([ 0, 10, 20, 30, 40])
+    >>> x[np.array([1, 1, 3, 1])] += 1
+    >>> x
+    array([ 0, 11, 20, 31, 40])
+
+People expect that the location at index 1 will be incremented by 3;
+in fact, it is only incremented by 1. The reason is that
+a new array is extracted from the original (as a temporary) containing
+the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
+and then the temporary is assigned back to the original array. Thus
+the value of x[1]+1 is assigned to x[1] three times,
+rather than being incremented 3 times.
+
+Dealing with variable numbers of indices within programs
+========================================================
+
+The index syntax is very powerful but limiting when dealing with
+a variable number of indices.
For example, if you want to write
+a function that can handle arguments with various numbers of
+dimensions without having to write special case code for each
+number of possible dimensions, how can that be done? If one
+supplies to the index a tuple, the tuple will be interpreted
+as a list of indices. For example (using the previous definition
+for the array z): ::
+
+    >>> indices = (1,1,1,1)
+    >>> z[indices]
+    40
+
+So one can use code to construct tuples of any number of indices
+and then use these within an index.
+
+Slices can be specified within programs by using the slice() function
+in Python. For example: ::
+
+    >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
+    >>> z[indices]
+    array([39, 40])
+
+Likewise, ellipsis can be specified by code by using the Ellipsis
+object: ::
+
+    >>> indices = (1, Ellipsis, 1) # same as [1,...,1]
+    >>> z[indices]
+    array([[28, 31, 34],
+           [37, 40, 43],
+           [46, 49, 52]])
+
+For this reason it is possible to use the output from the np.where()
+function directly as an index since it always returns a tuple of index
+arrays.
+
+Because of the special treatment of tuples, they are not automatically
+converted to an array as a list would be. As an example: ::
+
+    >>> z[[1,1,1,1]] # produces a large array
+    array([[[[27, 28, 29],
+             [30, 31, 32], ...
+    >>> z[(1,1,1,1)] # returns a single value
+    40
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/internals.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/internals.py
new file mode 100644
index 0000000000000..6bd6b1ae9474e
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/internals.py
@@ -0,0 +1,163 @@
+"""
+===============
+Array Internals
+===============
+
+Internal organization of numpy arrays
+=====================================
+
+It helps to understand a bit about how numpy arrays are handled under the
+covers; this section will not go into great detail. Those wishing to
+understand the full details are referred to Travis Oliphant's book "Guide
+to Numpy".
+
+Numpy arrays consist of two major components, the raw array data (from now on,
+referred to as the data buffer), and the information about the raw array data.
+The data buffer is typically what people think of as arrays in C or Fortran,
+a contiguous (and fixed) block of memory containing fixed sized data items.
+Numpy also contains a significant set of data that describes how to interpret
+the data in the data buffer. This extra information contains (among other things):
+
+ 1) The basic data element's size in bytes.
+ 2) The start of the data within the data buffer (an offset relative to the
+    beginning of the data buffer).
+ 3) The number of dimensions and the size of each dimension.
+ 4) The separation between elements for each dimension (the 'stride'). This
+    does not have to be a multiple of the element size.
+ 5) The byte order of the data (which may not be the native byte order).
+ 6) Whether the buffer is read-only.
+ 7) Information (via the dtype object) about the interpretation of the basic
+    data element. The basic data element may be as simple as an int or a float,
+    or it may be a compound object (e.g., struct-like), a fixed character field,
+    or Python object pointers.
+ 8) Whether the array is to be interpreted as C-order or Fortran-order.
+
+This arrangement allows for very flexible use of arrays. One thing that it
+allows is simple changes of the metadata to change the interpretation of the
+array buffer.
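+
+For instance (a minimal sketch), slicing and reshaping both produce a new
+array object over the same data buffer; the new array's ``base`` attribute
+points back at the array owning that buffer::
+
+    >>> x = np.arange(6)
+    >>> y = x[::2]           # new shape/strides metadata, same buffer
+    >>> y.base is x
+    True
+    >>> z = x.reshape(2, 3)  # reshape also only rewrites the metadata
+    >>> z.base is x
+    True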
+Changing the byteorder of the array is a simple change involving no
+rearrangement of the data. The shape of the array can be changed very easily
+without changing anything in the data buffer or any data copying at all.
+
+Among other things, one can create a new array metadata object that uses the
+same data buffer: a new view of that data buffer that has a different
+interpretation of the buffer (e.g., different shape, offset, byte order,
+strides, etc.) but shares the same data bytes. Many operations in numpy do
+just this, such as slicing. Other operations, such as transpose, don't move
+data elements around in the array, but rather change the information about
+the shape and strides so that the indexing of the array changes, but the
+data in the buffer doesn't move.
+
+Typically these combinations of new array metadata with the same data buffer
+are new 'views' into the data buffer. There is a different ndarray object, but
+it uses the same data buffer. This is why it is necessary to force copies through
+use of the .copy() method if one really wants to make a new and independent
+copy of the data buffer.
+
+New views into arrays mean that the object reference counts for the data buffer
+increase. Simply doing away with the original array object will not remove the
+data buffer if other views of it still exist.
+
+Multidimensional Array Indexing Order Issues
+============================================
+
+What is the right way to index
+multi-dimensional arrays? Before you jump to conclusions about the one and
+true way to index multi-dimensional arrays, it pays to understand why this is
+a confusing issue. This section will try to explain in detail how numpy
+indexing works and why we adopt the convention we do for images, and when it
+may be appropriate to adopt other conventions.
+
+The first thing to understand is
+that there are two conflicting conventions for indexing 2-dimensional arrays.
+Matrix notation uses the first index to indicate which row is being selected and
+the second index to indicate which column is selected. This is opposite to the
+geometrically oriented convention for images, where people generally think of the
+first index as representing x position (i.e., column) and the second as
+representing y position (i.e., row). This alone is the source of much confusion;
+matrix-oriented users and image-oriented users expect two different things with
+regard to indexing.
+
+The second issue to understand is how indices correspond
+to the order in which the array is stored in memory. In Fortran the first index is
+the most rapidly varying index when moving through the elements of a two
+dimensional array as it is stored in memory. If you adopt the matrix
+convention for indexing, then this means the matrix is stored one column at a
+time (since the first index moves to the next row as it changes). Thus Fortran
+is considered a Column-major language. C has just the opposite convention. In
+C, the last index changes most rapidly as one moves through the array as
+stored in memory. Thus C is a Row-major language. The matrix is stored by
+rows. Note that both cases presume that the matrix convention for
+indexing is being used, i.e., for both Fortran and C, the first index is the
+row. Note this convention implies that the indexing convention is invariant
+and that the data order changes to keep that so.
+
+But that's not the only way
+to look at it. Suppose one has large two-dimensional arrays (images or
+matrices) stored in data files.
+But that's not the only way
+to look at it. Suppose one has large two-dimensional arrays (images or
+matrices) stored in data files, and suppose the data are stored by rows rather
+than by columns. If we are to preserve our index convention (whether matrix or
+image) that means that, depending on the language we use, we may be forced to
+reorder the data when it is read into memory. For example, if we read
+row-ordered data into memory without reordering, it will match the matrix
+indexing convention for C, but not for Fortran. Conversely, it will match the
+image indexing convention for Fortran, but not for C. For C, if one is using
+data stored in row order, and one wants to preserve the image index convention,
+the data must be reordered when reading into memory.
+
+In the end, which you do for Fortran or C depends on
+which is more important: not reordering data, or preserving the indexing
+convention. For large images, reordering data is potentially expensive, and
+often the indexing convention is inverted to avoid that.
+
+The situation with
+numpy makes this issue yet more complicated. The internal machinery of numpy
+arrays is flexible enough to accept any ordering of indices. One can simply
+reorder indices by manipulating the internal stride information for arrays
+without reordering the data at all. Numpy will know how to map the new index
+order to the data without moving the data.
+
+So if this is true, why not choose
+the index order that matches what you most expect? In particular, why not define
+row-ordered images to use the image convention? (This is sometimes referred
+to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
+order options for array ordering in numpy.) The drawback of doing this is
+potential performance penalties. It's common to access the data sequentially,
+either implicitly in array operations or explicitly by looping over rows of an
+image. When that is done, the data will be accessed in non-optimal order.
+As the first index is incremented, what is actually happening is that elements
+spaced far apart in memory are being sequentially accessed, with usually poor
+memory access speeds. Consider, for example, a two-dimensional image 'im'
+defined so that im[0, 10] represents the value at x=0, y=10. To be consistent
+with usual Python behavior, im[0] would then represent a column at x=0. Yet
+that data would be spread over the whole array, since the data are stored in
+row order. Despite the flexibility of numpy's indexing, it can't really paper
+over the fact that basic operations are rendered inefficient because of data
+order, or that getting contiguous subarrays is still awkward (e.g., im[:,0]
+for the first row, vs im[0] for the first column). One can't use an idiom such
+as 'for row in im'; 'for col in im' does work, but doesn't yield contiguous
+column data.
+
+As it turns out, numpy is
+smart enough when dealing with ufuncs to determine which index is the most
+rapidly varying one in memory and uses that for the innermost loop. Thus for
+ufuncs there is no large intrinsic advantage to either approach in most cases.
+On the other hand, use of .flat with a FORTRAN-ordered array will lead to
+non-optimal memory access, as adjacent elements in the flattened array
+(iterator, actually) are not contiguous in memory.
+
+Indeed, the fact is that Python
+indexing on lists and other sequences naturally leads to an outside-to-inside
+ordering (the first index gets the largest grouping, the next the next largest,
+and the last gets the smallest element). Since image data are normally stored
+by rows, this corresponds to position within rows being the last item indexed.
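+
+To make the point concrete (a small sketch): ::
+
+ >>> im = np.arange(12).reshape(3, 4) # C order: rows are contiguous
+ >>> im[0] # the first index gives a whole row
+ array([0, 1, 2, 3])
+ >>> im[0].flags['C_CONTIGUOUS']
+ True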
+
+If you do want to use Fortran ordering, realize that
+there are two approaches to consider: 1) accept that the first index is just not
+the most rapidly changing in memory and have all your I/O routines reorder
+your data when going from memory to disk or vice versa, or 2) use numpy's
+mechanism for mapping the first index to the most rapidly varying data. We
+recommend the former if possible. The disadvantage of the latter is that many
+of numpy's functions will yield arrays without Fortran ordering unless you are
+careful to use the 'order' keyword, which would be highly inconvenient.
+
+Otherwise we recommend simply learning to reverse the usual order of indices
+when accessing elements of an array. Granted, it goes against the grain, but
+it is more in line with Python semantics and the natural order of the data.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/io.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/io.py
new file mode 100644
index 0000000000000..e45bfc9b32110
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/io.py
@@ -0,0 +1,10 @@
+"""
+
+=========
+Array I/O
+=========
+
+Placeholder for array I/O documentation.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/jargon.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/jargon.py
new file mode 100644
index 0000000000000..3fcbc7d23f2f8
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/jargon.py
@@ -0,0 +1,10 @@
+"""
+
+======
+Jargon
+======
+
+Placeholder for computer science, engineering and other jargon.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/methods_vs_functions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/methods_vs_functions.py
new file mode 100644
index 0000000000000..4149000bc80ac
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/methods_vs_functions.py
@@ -0,0 +1,10 @@
+"""
+
+=====================
+Methods vs. Functions
+=====================
+
+Placeholder for Methods vs. Functions documentation.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/misc.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/misc.py
new file mode 100644
index 0000000000000..1709ad66da7a8
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/misc.py
@@ -0,0 +1,226 @@
+"""
+=============
+Miscellaneous
+=============
+
+IEEE 754 Floating Point Special Values
+--------------------------------------
+
+Special values defined in numpy: nan, inf.
+
+NaNs can be used as a poor-man's mask (if you don't care what the
+original value was).
+
+Note: cannot use equality to test NaNs. E.g.: ::
+
+ >>> myarr = np.array([1., 0., np.nan, 3.])
+ >>> np.where(myarr == np.nan)
+ (array([], dtype=int64),)
+ >>> np.nan == np.nan # is always False! Use special numpy functions instead.
+ False
+ >>> myarr[myarr == np.nan] = 0. # doesn't work
+ >>> myarr
+ array([ 1., 0., NaN, 3.])
+ >>> myarr[np.isnan(myarr)] = 0. # use this instead
+ >>> myarr
+ array([ 1., 0., 0., 3.])
+
+Other related special value functions: ::
+
+ isinf(): True if value is inf
+ isfinite(): True if not nan or inf
+ nan_to_num(): Map nan to 0, inf to max float, -inf to min float
+
+The following correspond to the usual functions except that nans are excluded
+from the results: ::
+
+ nansum()
+ nanmax()
+ nanmin()
+ nanargmax()
+ nanargmin()
+
+ >>> x = np.arange(10.)
+ >>> x[3] = np.nan
+ >>> x.sum()
+ nan
+ >>> np.nansum(x)
+ 42.0
+
+How numpy handles numerical exceptions
+--------------------------------------
+
+The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
+and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
+set individually for different kinds of exceptions. The different behaviors
+are:
+
+ - 'ignore' : Take no action when the exception occurs.
+ - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module).
+ - 'raise' : Raise a `FloatingPointError`.
+ - 'call' : Call a function specified using the `seterrcall` function.
+ - 'print' : Print a warning directly to ``stdout``.
+ - 'log' : Record the error in a Log object specified by `seterrcall`.
+
+These behaviors can be set for all kinds of errors or specific ones:
+
+ - all : apply to all numeric exceptions
+ - invalid : when NaNs are generated
+ - divide : divide by zero (for integers as well!)
+ - overflow : floating point overflows
+ - underflow : floating point underflows
+
+Note that integer divide-by-zero is handled by the same machinery.
+These behaviors are set on a per-thread basis.
+
+Examples
+--------
+
+::
+
+ >>> oldsettings = np.seterr(all='warn')
+ >>> np.zeros(5,dtype=np.float32)/0.
+ invalid value encountered in divide
+ >>> j = np.seterr(under='ignore')
+ >>> np.array([1.e-100])**10
+ >>> j = np.seterr(invalid='raise')
+ >>> np.sqrt(np.array([-1.]))
+ FloatingPointError: invalid value encountered in sqrt
+ >>> def errorhandler(errstr, errflag):
+ ...     print("saw stupid error!")
+ >>> np.seterrcall(errorhandler)
+
+ >>> j = np.seterr(all='call')
+ >>> np.zeros(5, dtype=np.int32)/0
+ FloatingPointError: invalid value encountered in divide
+ saw stupid error!
+ >>> j = np.seterr(**oldsettings) # restore previous
+ ...                              # error-handling settings
+
+Interfacing to C
+----------------
+Only a survey of the choices is given here, with little detail on how each
+works.
+
+1) Bare metal, wrap your own C-code manually.
+
+ - Plusses:
+
+   - Efficient
+   - No dependencies on other tools
+
+ - Minuses:
+
+   - Lots of learning overhead:
+
+     - need to learn basics of Python C API
+     - need to learn basics of numpy C API
+     - need to learn how to handle reference counting and love it.
+
+   - Reference counting often difficult to get right.
+
+     - getting it wrong leads to memory leaks, and worse, segfaults
+
+   - API will change for Python 3.0!
+
+2) Cython
+
+ - Plusses:
+
+   - avoid learning C APIs
+   - no dealing with reference counting
+   - can code in pseudo python and generate C code
+   - can also interface to existing C code
+   - should shield you from changes to the Python C API
+   - has become the de-facto standard within the scientific Python community
+   - fast indexing support for arrays
+
+ - Minuses:
+
+   - Can write code in non-standard form which may become obsolete
+   - Not as flexible as manual wrapping
+
+3) ctypes
+
+ - Plusses:
+
+   - part of Python standard library
+   - good for interfacing to existing shared libraries, particularly
+     Windows DLLs
+   - avoids API/reference counting issues
+   - good numpy support: arrays have all these in their ctypes
+     attribute: ::
+
+       a.ctypes.data              a.ctypes.get_strides
+       a.ctypes.data_as           a.ctypes.shape
+       a.ctypes.get_as_parameter  a.ctypes.shape_as
+       a.ctypes.get_data          a.ctypes.strides
+       a.ctypes.get_shape         a.ctypes.strides_as
+
+ - Minuses:
+
+   - can't use for writing code to be turned into C extensions, only a wrapper
+     tool.
+
+4) SWIG (automatic wrapper generator)
+
+ - Plusses:
+
+   - around a long time
+   - multiple scripting language support
+   - C++ support
+   - Good for wrapping large (many functions) existing C libraries
+
+ - Minuses:
+
+   - generates lots of code between Python and the C code
+   - can cause performance problems that are nearly impossible to optimize
+     out
+   - interface files can be hard to write
+   - doesn't necessarily avoid reference counting issues or needing to know
+     APIs
+
+5) scipy.weave
+
+ - Plusses:
+
+   - can turn many numpy expressions into C code
+   - dynamic compiling and loading of generated C code
+   - can embed pure C code in Python module and have weave extract, generate
+     interfaces and compile, etc.
+
+ - Minuses:
+
+   - Future very uncertain: it's the only part of Scipy not ported to Python 3
+     and is effectively deprecated in favor of Cython.
+
+6) Psyco
+
+ - Plusses:
+
+   - Turns pure python into efficient machine code through jit-like
+     optimizations
+   - very fast when it optimizes well
+
+ - Minuses:
+
+   - Only on intel (windows?)
+   - Doesn't do much for numpy?
+
+Interfacing to Fortran:
+-----------------------
+The clear choice to wrap Fortran code is f2py.
+
+Pyfort is an older alternative, but not supported any longer.
+Fwrap is a newer project that looked promising but isn't being developed any
+longer.
+
+Interfacing to C++:
+-------------------
+ 1) Cython
+ 2) CXX
+ 3) Boost.Python
+ 4) SWIG
+ 5) SIP (used mainly in PyQt)
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/performance.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/performance.py
new file mode 100644
index 0000000000000..b0c158bf33c20
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/performance.py
@@ -0,0 +1,10 @@
+"""
+
+===========
+Performance
+===========
+
+Placeholder for Improving Performance documentation.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/structured_arrays.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/structured_arrays.py
new file mode 100644
index 0000000000000..0444bdf90c0c2
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/structured_arrays.py
@@ -0,0 +1,223 @@
+"""
+=====================================
+Structured Arrays (and Record Arrays)
+=====================================
+
+Introduction
+============
+
+Numpy provides powerful capabilities to create arrays of structs or records.
+These arrays permit one to manipulate the data by the structs or by fields of
+the struct. A simple example will show what is meant: ::
+
+ >>> x = np.zeros((2,),dtype=('i4,f4,a10'))
+ >>> x[:] = [(1,2.,'Hello'),(2,3.,"World")]
+ >>> x
+ array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
+       dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
+
+Here we have created a one-dimensional array of length 2. Each element of
+this array is a record that contains three items, a 32-bit integer, a 32-bit
+float, and a string of length 10 or less. If we index this array at the second
+position we get the second record: ::
+
+ >>> x[1]
+ (2, 3.0, 'World')
+
+Conveniently, one can access any field of the array by indexing using the
+string that names that field. In this case the fields have received the
+default names 'f0', 'f1' and 'f2'. ::
+
+ >>> y = x['f1']
+ >>> y
+ array([ 2., 3.], dtype=float32)
+ >>> y[:] = 2*y
+ >>> y
+ array([ 4., 6.], dtype=float32)
+ >>> x
+ array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
+       dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
+
+In these examples, y is a simple float array consisting of the 2nd field
+in the record. But, rather than being a copy of the data in the structured
+array, it is a view, i.e., it shares exactly the same memory locations.
+Thus, when we updated this array by doubling its values, the structured
+array shows the corresponding values as doubled as well. Likewise, if one
+changes the record, the field view also changes: ::
+
+ >>> x[1] = (-1,-1.,"Master")
+ >>> x
+ array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
+       dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
+ >>> y
+ array([ 4., -1.], dtype=float32)
+
+Defining Structured Arrays
+==========================
+
+One defines a structured array through the dtype object. There are
+**several** alternative ways to define the fields of a record. Some of
+these variants provide backward compatibility with Numeric, numarray, or
+another module, and should not be used except for such purposes. These
+will be so noted. One specifies record structure in
+one of four alternative ways, using an argument (as supplied to a dtype
+function keyword or a dtype object constructor itself). This
+argument must be one of the following: 1) string, 2) tuple, 3) list, or
+4) dictionary. Each of these is briefly described below.
+
+1) String argument (as used in the above examples).
+In this case, the constructor expects a comma-separated list of type
+specifiers, optionally with extra shape information.
+The type specifiers can take 4 different forms: ::
+
+ a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
+    (representing bytes, ints, unsigned ints, floats, complex and
+    fixed length strings of specified byte lengths)
+ b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
+    (this time with bit sizes)
+ c) older Numeric/numarray type specifications (e.g. Float32).
    Don't use these in new code!
+ d) Single character type specifiers (e.g. H for unsigned short ints).
+    Avoid using these unless you must. Details can be found in the
+    Numpy book.
+
+These different styles can be mixed within the same string (but why would you
+want to do that?). Furthermore, each type specifier can be prefixed
+with a repetition number, or a shape. In these cases an array
+element is created, i.e., an array within a record. That array
+is still referred to as a single field. An example: ::
+
+ >>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
+ >>> x
+ array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
+        ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
+        ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
+       dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
+
+Using strings to define the record structure precludes naming the fields
+in the original definition. The names can be changed as shown later, however.
+
+2) Tuple argument: The only relevant tuple case that applies to record
+structures is when a structure is mapped to an existing data type. This
+is done by pairing, in a tuple, the existing data type with a matching
+dtype definition (using any of the variants being described here). As
+an example (using a definition based on a list; see 3) below for further
+details): ::
+
+ >>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
+ >>> x
+ array([0, 0, 0])
+ >>> x['r']
+ array([0, 0, 0], dtype=uint8)
+
+In this case, an array is produced that looks and acts like a simple int32 array,
+but also has definitions for fields that use only one byte of the int32 (a bit
+like Fortran equivalencing).
+
+3) List argument: In this case the record structure is defined with a list of
+tuples. Each tuple has 2 or 3 elements specifying: 1) the name of the field
+('' is permitted), 2) the type of the field, and 3) the shape (optional).
+For example: ::
+
+ >>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
+ >>> x
+ array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
+        (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
+        (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
+       dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
+
+4) Dictionary argument: two different forms are permitted. The first consists
+of a dictionary with two required keys ('names' and 'formats'), each having an
+equal-sized list of values. The format list contains any type/shape specifier
+allowed in other contexts. The names must be strings. There are two optional
+keys, 'offsets' and 'titles', each of which must be a list of the same length
+as the required two: 'offsets' contains integer offsets for each field, and
+'titles' contains objects with metadata for each field (these do not have to
+be strings, and the value None is permitted). As an example: ::
+
+ >>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
+ >>> x
+ array([(0, 0.0), (0, 0.0), (0, 0.0)],
+       dtype=[('col1', '>i4'), ('col2', '>f4')])
+
+The other dictionary form permitted is a dictionary of name keys with tuple
+values specifying type, offset, and an optional title. ::
+
+ >>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
+ >>> x
+ array([(0, 0.0), (0, 0.0), (0, 0.0)],
+       dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
+
+Accessing and modifying field names
+===================================
+
+The field names are an attribute of the dtype object defining the record structure.
+For the last example: ::
+
+ >>> x.dtype.names
+ ('col1', 'col2')
+ >>> x.dtype.names = ('x', 'y')
+ >>> x
+ array([(0, 0.0), (0, 0.0), (0, 0.0)],
+       dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
+ >>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
+ ValueError: must replace all names at once with a sequence of length 2
+
+Accessing field titles
+======================
+
+The field titles provide a standard place to put associated info for fields.
+They do not have to be strings. ::
+
+ >>> x.dtype.fields['x'][2]
+ 'title 1'
+
+Accessing multiple fields at once
+=================================
+
+You can access multiple fields at once using a list of field names: ::
+
+ >>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
+ ...              dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
+
+Notice that `x` is created with a list of tuples. ::
+
+ >>> x[['x','y']]
+ array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
+       dtype=[('x', '<f4'), ('y', '<f4')])
+ >>> x[['x','value']]
+ array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
+        (1.0, [[2.0, 6.0], [2.0, 6.0]])],
+       dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
+
+The fields are returned in the order they are asked for: ::
+
+ >>> x[['y','x']]
+ array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
+       dtype=[('y', '<f4'), ('x', '<f4')])
+
+Filling structured arrays
+=========================
+
+Structured arrays can be filled by field or row by row. ::
+
+ >>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
+ >>> arr['var1'] = np.arange(5)
+
+If you fill it in row by row, it takes a tuple
+(but not a list or array!): ::
+
+ >>> arr[0] = (10,20)
+ >>> arr
+ array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
+       dtype=[('var1', '<f8'), ('var2', '<f8')])
+
+More information
+================
+
+You can find some more information on recarrays and structured arrays
+(including the difference between the two) in the SciPy Cookbook entry on
+record arrays.
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/subclassing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/subclassing.py
new file mode 100644
index 0000000000000..a62fc2d6de922
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/subclassing.py
@@ -0,0 +1,560 @@
+"""
+=============================
+Subclassing ndarray in python
+=============================
+
+Credits
+-------
+
+This page is based with thanks on the wiki page on subclassing by Pierre
+Gerard-Marchant - http://www.scipy.org/Subclasses.
+
+Introduction
+------------
+
+Subclassing ndarray is relatively simple, but it has some complications
+compared to other Python objects. On this page we explain the machinery
+that allows you to subclass ndarray, and the implications for
+implementing a subclass.
+
+ndarrays and object creation
+============================
+
+Subclassing ndarray is complicated by the fact that new instances of
+ndarray classes can come about in three different ways. These are:
+
+#. Explicit constructor call - as in ``MySubClass(params)``. This is
+   the usual route to Python instance creation.
+#. View casting - casting an existing ndarray as a given subclass
+#. New from template - creating a new instance from a template
+   instance. Examples include returning slices from a subclassed array,
+   creating return types from ufuncs, and copying arrays. See
+   :ref:`new-from-template` for more details
+
+The last two are characteristics of ndarrays - in order to support
+things like array slicing. The complications of subclassing ndarray are
+due to the mechanisms numpy has to support these latter two routes of
+instance creation.
+
+.. _view-casting:
+
+View casting
+------------
+
+*View casting* is the standard ndarray mechanism by which you take an
+ndarray of any subclass, and return a view of the array as another
+(specified) subclass:
+
+>>> import numpy as np
+>>> # create a completely useless ndarray subclass
+>>> class C(np.ndarray): pass
+>>> # create a standard ndarray
+>>> arr = np.zeros((3,))
+>>> # take a view of it, as our useless subclass
+>>> c_arr = arr.view(C)
+>>> type(c_arr)
+<class 'C'>
+
+.. _new-from-template:
+
+Creating new from template
+--------------------------
+
+New instances of an ndarray subclass can also come about by a very
+similar mechanism to :ref:`view-casting`, when numpy finds it needs to
+create a new instance from a template instance. The most obvious place
+this has to happen is when you are taking slices of subclassed arrays.
+For example:
+
+>>> v = c_arr[1:]
+>>> type(v) # the view is of type 'C'
+<class 'C'>
+>>> v is c_arr # but it's a new instance
+False
+
+The slice is a *view* onto the original ``c_arr`` data. So, when we
+take a view from the ndarray, we return a new ndarray, of the same
+class, that points to the data in the original.
+
+There are other points in the use of ndarrays where we need such views,
+such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
+(see also :ref:`array-wrap`), and reducing methods (like
+``c_arr.mean()``).
+
+Relationship of view casting and new-from-template
+--------------------------------------------------
+
+These paths both use the same machinery. We make the distinction here,
+because they result in different input to your methods. Specifically,
+:ref:`view-casting` means you have created a new instance of your array
+type from any potential subclass of ndarray. :ref:`new-from-template`
+means you have created a new instance of your class from a pre-existing
+instance, allowing you - for example - to copy across attributes that
+are particular to your subclass.
+
+Implications for subclassing
+----------------------------
+
+If we subclass ndarray, we need to deal not only with explicit
+construction of our array type, but also :ref:`view-casting` or
+:ref:`new-from-template`. Numpy has the machinery to do this, and it is
+this machinery that makes subclassing slightly non-standard.
+
+There are two aspects to the machinery that ndarray uses to support
+views and new-from-template in subclasses.
+
+The first is the use of the ``ndarray.__new__`` method for the main work
+of object initialization, rather than the more usual ``__init__``
+method. The second is the use of the ``__array_finalize__`` method to
+allow subclasses to clean up after the creation of views and new
+instances from templates.
+
+A brief Python primer on ``__new__`` and ``__init__``
+=====================================================
+
+``__new__`` is a standard Python method, and, if present, is called
+before ``__init__`` when we create a class instance. See the Python
+documentation on ``__new__`` for more detail.
+
+For example, consider the following Python code:
+
+.. testcode::
+
+  class C(object):
+      def __new__(cls, *args):
+          print('Cls in __new__:', cls)
+          print('Args in __new__:', args)
+          # object.__new__ takes no extra arguments in Python 3
+          return object.__new__(cls)
+
+      def __init__(self, *args):
+          print('type(self) in __init__:', type(self))
+          print('Args in __init__:', args)
+
+meaning that we get:
+
+>>> c = C('hello')
+Cls in __new__: <class 'C'>
+Args in __new__: ('hello',)
+type(self) in __init__: <class 'C'>
+Args in __init__: ('hello',)
+
+When we call ``C('hello')``, the ``__new__`` method gets its own class
+as first argument, and the passed argument, which is the string
+``'hello'``. After python calls ``__new__``, it usually (see below)
+calls our ``__init__`` method, with the output of ``__new__`` as the
+first argument (now a class instance), and the passed arguments
+following.
+
+As you can see, the object can be initialized in the ``__new__``
+method or the ``__init__`` method, or both, and in fact ndarray does
+not have an ``__init__`` method, because all the initialization is
+done in the ``__new__`` method.
+
+Why use ``__new__`` rather than just the usual ``__init__``? Because
+in some cases, as for ndarray, we want to be able to return an object
+of some other class. Consider the following:
+
+.. testcode::
+
+  class D(C):
+      def __new__(cls, *args):
+          print('D cls is:', cls)
+          print('D args in __new__:', args)
+          return C.__new__(C, *args)
+
+      def __init__(self, *args):
+          # we never get here
+          print('In D __init__')
+
+meaning that:
+
+>>> obj = D('hello')
+D cls is: <class 'D'>
+D args in __new__: ('hello',)
+Cls in __new__: <class 'C'>
+Args in __new__: ('hello',)
+>>> type(obj)
+<class 'C'>
+
+The definition of ``C`` is the same as before, but for ``D``, the
+``__new__`` method returns an instance of class ``C`` rather than
+``D``. Note that the ``__init__`` method of ``D`` does not get
+called. In general, when the ``__new__`` method returns an object of
+class other than the class in which it is defined, the ``__init__``
+method of that class is not called.
+
+This is how subclasses of the ndarray class are able to return views
+that preserve the class type. When taking a view, the standard
+ndarray machinery creates the new ndarray object with something
+like::
+
+  obj = ndarray.__new__(subtype, shape, ...
+
+where ``subtype`` is the subclass. Thus the returned view is of the
+same class as the subclass, rather than being of class ``ndarray``.
+
+That solves the problem of returning views of the same type, but now
+we have a new problem. The machinery of ndarray can set the class
+this way, in its standard methods for taking views, but the ndarray
+``__new__`` method knows nothing of what we have done in our own
+``__new__`` method in order to set attributes, and so on. (Aside -
+why not call ``obj = subtype.__new__(...`` then? Because we may not
+have a ``__new__`` method with the same call signature).
+
+The role of ``__array_finalize__``
+==================================
+
+``__array_finalize__`` is the mechanism that numpy provides to allow
+subclasses to handle the various ways that new instances get created.
+
+Remember that subclass instances can come about in these three ways:
+
+#. explicit constructor call (``obj = MySubClass(params)``). This will
+   call the usual sequence of ``MySubClass.__new__`` then (if it exists)
+   ``MySubClass.__init__``.
+#. :ref:`view-casting`
+#. :ref:`new-from-template`
+
+Our ``MySubClass.__new__`` method only gets called in the case of the
+explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
+``MySubClass.__init__`` to deal with the view casting and
+new-from-template.
It turns out that ``MySubClass.__array_finalize__``
+*does* get called for all three methods of object creation, so this is
+where our object creation housekeeping usually goes.
+
+* For the explicit constructor call, our subclass will need to create a
+  new ndarray instance of its own class. In practice this means that
+  we, the authors of the code, will need to make a call to
+  ``ndarray.__new__(MySubClass,...)``, or do view casting of an existing
+  array (see below)
+* For view casting and new-from-template, the equivalent of
+  ``ndarray.__new__(MySubClass,...`` is called, at the C level.
+
+The arguments that ``__array_finalize__`` receives differ for the three
+methods of instance creation above.
+
+The following code allows us to look at the call sequences and arguments:
+
+.. testcode::
+
+  import numpy as np
+
+  class C(np.ndarray):
+      def __new__(cls, *args, **kwargs):
+          print('In __new__ with class %s' % cls)
+          return np.ndarray.__new__(cls, *args, **kwargs)
+
+      def __init__(self, *args, **kwargs):
+          # in practice you probably will not need or want an __init__
+          # method for your subclass
+          print('In __init__ with class %s' % self.__class__)
+
+      def __array_finalize__(self, obj):
+          print('In array_finalize:')
+          print('   self type is %s' % type(self))
+          print('   obj type is %s' % type(obj))
+
+
+Now:
+
+>>> # Explicit constructor
+>>> c = C((10,))
+In __new__ with class <class 'C'>
+In array_finalize:
+   self type is <class 'C'>
+   obj type is <class 'NoneType'>
+In __init__ with class <class 'C'>
+>>> # View casting
+>>> a = np.arange(10)
+>>> cast_a = a.view(C)
+In array_finalize:
+   self type is <class 'C'>
+   obj type is <class 'numpy.ndarray'>
+>>> # Slicing (example of new-from-template)
+>>> cv = c[:1]
+In array_finalize:
+   self type is <class 'C'>
+   obj type is <class 'C'>
+
+The signature of ``__array_finalize__`` is::
+
+    def __array_finalize__(self, obj):
+
+``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our
+own class (``self``) as well as the object from which the view has been
+taken (``obj``). As you can see from the output above, the ``self`` is
+always a newly created instance of our subclass, and the type of ``obj``
+differs for the three instance creation methods:
+
+* When called from the explicit constructor, ``obj`` is ``None``
+* When called from view casting, ``obj`` can be an instance of any
+  subclass of ndarray, including our own.
+* When called in new-from-template, ``obj`` is another instance of our
+  own subclass, that we might use to update the new ``self`` instance.
+
+Because ``__array_finalize__`` is the only method that always sees new
+instances being created, it is the sensible place to fill in instance
+defaults for new object attributes, among other tasks.
+
+This may be clearer with an example.
+
+Simple example - adding an extra attribute to ndarray
+-----------------------------------------------------
+
+.. testcode::
+
+  import numpy as np
+
+  class InfoArray(np.ndarray):
+
+      def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
+                  strides=None, order=None, info=None):
+          # Create the ndarray instance of our type, given the usual
+          # ndarray input arguments. This will call the standard
+          # ndarray constructor, but return an object of our type.
+          # It also triggers a call to InfoArray.__array_finalize__
+          obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset,
+                                   strides, order)
+          # set the new 'info' attribute to the value passed
+          obj.info = info
+          # Finally, we must return the newly created object:
+          return obj
+
+      def __array_finalize__(self, obj):
+          # ``self`` is a new object resulting from
+          # ndarray.__new__(InfoArray, ...), therefore it only has
+          # attributes that the ndarray.__new__ constructor gave it -
+          # i.e. those of a standard ndarray.
+          #
+          # We could have got to the ndarray.__new__ call in 3 ways:
+          # From an explicit constructor - e.g. InfoArray():
+          #    obj is None
+          #    (we're in the middle of the InfoArray.__new__
+          #    constructor, and self.info will be set when we return to
+          #    InfoArray.__new__)
+          if obj is None: return
+          # From view casting - e.g arr.view(InfoArray):
+          #    obj is arr
+          #    (type(obj) can be InfoArray)
+          # From new-from-template - e.g infoarr[:3]
+          #    type(obj) is InfoArray
+          #
+          # Note that it is here, rather than in the __new__ method,
+          # that we set the default value for 'info', because this
+          # method sees all creation of default objects - with the
+          # InfoArray.__new__ constructor, but also with
+          # arr.view(InfoArray).
+          self.info = getattr(obj, 'info', None)
+          # We do not need to return anything
+
+
+Using the object looks like this:
+
+  >>> obj = InfoArray(shape=(3,)) # explicit constructor
+  >>> type(obj)
+  <class 'InfoArray'>
+  >>> obj.info is None
+  True
+  >>> obj = InfoArray(shape=(3,), info='information')
+  >>> obj.info
+  'information'
+  >>> v = obj[1:] # new-from-template - here - slicing
+  >>> type(v)
+  <class 'InfoArray'>
+  >>> v.info
+  'information'
+  >>> arr = np.arange(10)
+  >>> cast_arr = arr.view(InfoArray) # view casting
+  >>> type(cast_arr)
+  <class 'InfoArray'>
+  >>> cast_arr.info is None
+  True
+
+This class isn't very useful, because it has the same constructor as the
+bare ndarray object, including passing in buffers and shapes and so on.
+We would probably prefer the constructor to be able to take an already
+formed ndarray from the usual numpy calls to ``np.array`` and return an
+object.
+
+Slightly more realistic example - attribute added to existing array
+-------------------------------------------------------------------
+
+Here is a class that takes a standard ndarray that already exists, casts
+it as our type, and adds an extra attribute.
+
+.. testcode::
+
+  import numpy as np
+
+  class RealisticInfoArray(np.ndarray):
+
+      def __new__(cls, input_array, info=None):
+          # Input array is an already formed ndarray instance
+          # We first cast to be our class type
+          obj = np.asarray(input_array).view(cls)
+          # add the new attribute to the created instance
+          obj.info = info
+          # Finally, we must return the newly created object:
+          return obj
+
+      def __array_finalize__(self, obj):
+          # see InfoArray.__array_finalize__ for comments
+          if obj is None: return
+          self.info = getattr(obj, 'info', None)
+
+
+So:
+
+  >>> arr = np.arange(5)
+  >>> obj = RealisticInfoArray(arr, info='information')
+  >>> type(obj)
+  <class 'RealisticInfoArray'>
+  >>> obj.info
+  'information'
+  >>> v = obj[1:]
+  >>> type(v)
+  <class 'RealisticInfoArray'>
+  >>> v.info
+  'information'
+
+.. _array-wrap:
+
+``__array_wrap__`` for ufuncs
+-----------------------------
+
+``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy
+functions, to allow a subclass to set the type of the return value
+and update attributes and metadata. Let's show how this works with an example.
+First we make the same subclass as above, but with a different name and
+some print statements:
+
+.. testcode::
+
+  import numpy as np
+
+  class MySubClass(np.ndarray):
+
+      def __new__(cls, input_array, info=None):
+          obj = np.asarray(input_array).view(cls)
+          obj.info = info
+          return obj
+
+      def __array_finalize__(self, obj):
+          print('In __array_finalize__:')
+          print('   self is %s' % repr(self))
+          print('   obj is %s' % repr(obj))
+          if obj is None: return
+          self.info = getattr(obj, 'info', None)
+
+      def __array_wrap__(self, out_arr, context=None):
+          print('In __array_wrap__:')
+          print('   self is %s' % repr(self))
+          print('   arr is %s' % repr(out_arr))
+          # then just call the parent
+          return np.ndarray.__array_wrap__(self, out_arr, context)
+
+We run a ufunc on an instance of our new array:
+
+>>> obj = MySubClass(np.arange(5), info='spam')
+In __array_finalize__:
+   self is MySubClass([0, 1, 2, 3, 4])
+   obj is array([0, 1, 2, 3, 4])
+>>> arr2 = np.arange(5)+1
+>>> ret = np.add(arr2, obj)
+In __array_wrap__:
+   self is MySubClass([0, 1, 2, 3, 4])
+   arr is array([1, 3, 5, 7, 9])
+In __array_finalize__:
+   self is MySubClass([1, 3, 5, 7, 9])
+   obj is MySubClass([0, 1, 2, 3, 4])
+>>> ret
+MySubClass([1, 3, 5, 7, 9])
+>>> ret.info
+'spam'
+
+Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the
+input with the highest ``__array_priority__`` value, in this case
+``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and
+``out_arr`` as the (ndarray) result of the addition. In turn, the
+default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the
+result to class ``MySubClass``, and called ``__array_finalize__`` -
+hence the copying of the ``info`` attribute. This has all happened at the C level.
+
+But, we could do anything we wanted:
+
+.. testcode::
+
+  class SillySubClass(np.ndarray):
+
+      def __array_wrap__(self, arr, context=None):
+          return 'I lost your data'
+
+>>> arr1 = np.arange(5)
+>>> obj = arr1.view(SillySubClass)
+>>> arr2 = np.arange(5)
+>>> ret = np.multiply(obj, arr2)
+>>> ret
+'I lost your data'
+
+So, by defining a specific ``__array_wrap__`` method for our subclass,
+we can tweak the output from ufuncs. The ``__array_wrap__`` method
+requires ``self``, then an argument - which is the result of the ufunc -
+and an optional parameter *context*. This parameter is passed by some
+ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc,
+domain of the ufunc). ``__array_wrap__`` should return an instance of
+its containing class. See the masked array subclass for an
+implementation.
+
+In addition to ``__array_wrap__``, which is called on the way out of the
+ufunc, there is also an ``__array_prepare__`` method which is called on
+the way into the ufunc, after the output arrays are created but before any
+computation has been performed. The default implementation does nothing
+but pass through the array. ``__array_prepare__`` should not attempt to
+access the array data or resize the array; it is intended for setting the
+output array type, updating attributes and metadata, and performing any
+checks based on the input that may be desired before computation begins.
+Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
+subclass thereof or raise an error.
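+
+For illustration, here is a minimal sketch (the class name ``WithPrepare``
+is ours, invented for this example):
+
+.. testcode::
+
+  import numpy as np
+
+  class WithPrepare(np.ndarray):
+
+      def __array_prepare__(self, out_arr, context=None):
+          # called before the ufunc loop runs; out_arr is the freshly
+          # allocated (still uninitialized) output array, so only its
+          # type, attributes and metadata should be touched here
+          print('In __array_prepare__:', out_arr.shape)
+          return np.ndarray.__array_prepare__(self, out_arr, context)
+
+>>> obj = np.arange(3).view(WithPrepare)
+>>> ret = np.add(obj, 1)
+In __array_prepare__: (3,)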
+
+Extra gotchas - custom ``__del__`` methods and ndarray.base
+-----------------------------------------------------------
+
+One of the problems that ndarray solves is keeping track of memory
+ownership of ndarrays and their views. Consider the case where we have
+created an ndarray, ``arr``, and have taken a slice with ``v = arr[1:]``.
+The two objects are looking at the same memory. Numpy keeps track of
+where the data came from for a particular array or view, with the
+``base`` attribute:
+
+>>> # A normal ndarray, that owns its own data
+>>> arr = np.zeros((4,))
+>>> # In this case, base is None
+>>> arr.base is None
+True
+>>> # We take a view
+>>> v1 = arr[1:]
+>>> # base now points to the array that it derived from
+>>> v1.base is arr
+True
+>>> # Take a view of a view
+>>> v2 = v1[1:]
+>>> # base points to the view it derived from
+>>> v2.base is v1
+True
+
+In general, if the array owns its own memory, as for ``arr`` in this
+case, then ``arr.base`` will be None - there are some exceptions to this
+- see the numpy book for more details.
+
+The ``base`` attribute is useful in being able to tell whether we have
+a view or the original array. This in turn can be useful if we need
+to know whether or not to do some specific cleanup when the subclassed
+array is deleted. For example, we may only want to do the cleanup if
+the original array is deleted, but not the views. For an example of
+how this can work, have a look at the ``memmap`` class in
+``numpy.core``.
+
+
+"""
+from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/ufuncs.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/ufuncs.py
new file mode 100644
index 0000000000000..0132202adc55c
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/ufuncs.py
@@ -0,0 +1,138 @@
+"""
+===================
+Universal Functions
+===================
+
+Ufuncs are, generally speaking, mathematical functions or operations that are
+applied element-by-element to the contents of an array. That is, the result
+in each output array element only depends on the value in the corresponding
+input array (or arrays) and on no other array elements. Numpy comes with a
+large suite of ufuncs, and scipy extends that suite substantially. The simplest
+example is the addition operator: ::
+
+ >>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
+ array([1, 3, 2, 6])
+
+The ufunc module lists all the available ufuncs in numpy. Documentation on
+the specific ufuncs may be found in those modules. This documentation is
+intended to address the more general aspects of ufuncs common to most of
+them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
+have equivalent functions defined (e.g. add() for +).
+
+Type coercion
+=============
+
+What happens when a binary operator (e.g., +, -, \\*, /, etc.) deals with arrays of
+two different types? What is the type of the result? Typically, the result is
+the higher of the two types. For example: ::
+
+ float32 + float64 -> float64
+ int8 + int32 -> int32
+ int16 + float32 -> float32
+ float32 + complex64 -> complex64
+
+There are some less obvious cases, generally involving mixes of types
+(e.g. uints, ints and floats), where equal bit sizes for each are not
+capable of saving all the information in a different type of equivalent
+bit size. Some examples are int32 vs float32 or uint32 vs int32.
+Generally, the result is the higher type of larger size than both
+(if available). So: ::
+
+ int32 + float32 -> float64
+ uint32 + int32 -> int64
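+
+These rules are easy to check directly (a quick sketch): ::
+
+ >>> (np.zeros(2, np.int16) + np.zeros(2, np.float32)).dtype
+ dtype('float32')
+ >>> (np.zeros(2, np.int32) + np.zeros(2, np.float32)).dtype
+ dtype('float64')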
+
+Finally, the type coercion behavior when expressions involve Python
+scalars is different than that seen for arrays. Since Python has a
+limited number of types, combining a Python int with a dtype=np.int8
+array does not coerce to the higher type but instead, the type of the
+array prevails. So the rule for Python scalars combined with arrays is:
+if the Python scalar is of a higher 'kind' than the array (e.g., float
+vs. int), the result is the array type corresponding to that kind;
+otherwise the resultant type is that of the array.
+For example: ::
+
+ Python int + int8 -> int8
+ Python float + int8 -> float64
+
+ufunc methods
+=============
+
+Binary ufuncs support 4 methods.
+
+**.reduce(arr)** applies the binary operator to elements of the array in
+  sequence. For example: ::
+
+ >>> np.add.reduce(np.arange(10)) # adds all elements of array
+ 45
+
+For multidimensional arrays, the first dimension is reduced by default: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5))
+ array([ 5, 7, 9, 11, 13])
+
+The axis keyword can be used to specify different axes to reduce: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
+ array([10, 35])
+
+**.accumulate(arr)** applies the binary operator and generates an
+equivalently shaped array that includes the accumulated amount for each
+element of the array. A couple examples: ::
+
+ >>> np.add.accumulate(np.arange(10))
+ array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
+ >>> np.multiply.accumulate(np.arange(1,9))
+ array([ 1, 2, 6, 24, 120, 720, 5040, 40320])
+
+The behavior for multidimensional arrays is the same as for .reduce(),
+as is the use of the axis keyword.
+
+**.reduceat(arr,indices)** allows one to apply reduce to selected parts
+  of an array. It is a difficult method to understand. See the
+  ufunc.reduceat documentation for details.
+
+**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
+  arr2. It will work on multidimensional arrays (the shape of the result is
+  the concatenation of the two input shapes): ::
+
+ >>> np.multiply.outer(np.arange(3),np.arange(4))
+ array([[0, 0, 0, 0],
+        [0, 1, 2, 3],
+        [0, 2, 4, 6]])
+
+Output arguments
+================
+
+All ufuncs accept an optional output array. The array must be of the expected
+output shape. Beware that if the type of the output array is of a different
+(and lower) type than the output result, the results may be silently truncated
+or otherwise corrupted in the downcast to the lower type. This usage is useful
+when one wants to avoid creating large temporary arrays and instead allows one
+to reuse the same array memory repeatedly (at the expense of not being able to
+use more convenient operator notation in expressions). Note that when the
+output argument is used, the ufunc still returns a reference to the result. ::
+
+ >>> x = np.arange(2)
+ >>> np.add(np.arange(2),np.arange(2.),x)
+ array([0, 2])
+ >>> x
+ array([0, 2])
+
+and & or as ufuncs
+==================
+
+Invariably people try to use the python 'and' and 'or' as logical operators
+(and quite understandably). But these operators do not behave as normal
+operators since Python treats them quite differently. They cannot be
+overloaded with array equivalents. Thus using 'and' or 'or' with an array
+results in an error. There are two alternatives:
+
+ 1) use the ufunc functions logical_and() and logical_or().
+ 2) use the bitwise operators & and \\|. The drawback of these is that if
+    the arguments to these operators are not boolean arrays, the result is
+    likely incorrect. On the other hand, most usages of logical_and and
+    logical_or are with boolean arrays. As long as one is careful, this is
+    a convenient way to apply these operators (see the example below).
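+
+For example (a quick sketch): ::
+
+ >>> a = np.array([True, True, False])
+ >>> b = np.array([True, False, False])
+ >>> np.logical_and(a, b)
+ array([ True, False, False], dtype=bool)
+ >>> a & b # equivalent for boolean arrays
+ array([ True, False, False], dtype=bool)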
+ +""" +from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/dual.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/dual.py new file mode 100644 index 0000000000000..1517d8421345c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/dual.py @@ -0,0 +1,71 @@ +""" +Aliases for functions which may be accelerated by Scipy. + +Scipy_ can be built to use accelerated or otherwise improved libraries +for FFTs, linear algebra, and special functions. This module allows +developers to transparently support these accelerated functions when +scipy is available but still support users who have only installed +Numpy. + +.. _Scipy : http://www.scipy.org + +""" +from __future__ import division, absolute_import, print_function + +# This module should be used for functions both in numpy and scipy if +# you want to use the numpy version if available but the scipy version +# otherwise. +# Usage --- from numpy.dual import fft, inv + +__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2', + 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals', + 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0'] + +import numpy.linalg as linpkg +import numpy.fft as fftpkg +from numpy.lib import i0 +import sys + + +fft = fftpkg.fft +ifft = fftpkg.ifft +fftn = fftpkg.fftn +ifftn = fftpkg.ifftn +fft2 = fftpkg.fft2 +ifft2 = fftpkg.ifft2 + +norm = linpkg.norm +inv = linpkg.inv +svd = linpkg.svd +solve = linpkg.solve +det = linpkg.det +eig = linpkg.eig +eigvals = linpkg.eigvals +eigh = linpkg.eigh +eigvalsh = linpkg.eigvalsh +lstsq = linpkg.lstsq +pinv = linpkg.pinv +cholesky = linpkg.cholesky + +_restore_dict = {} + +def register_func(name, func): + if name not in __all__: + raise ValueError("%s not a dual function." % name) + f = sys._getframe(0).f_globals + _restore_dict[name] = f[name] + f[name] = func + +def restore_func(name): + if name not in __all__: + raise ValueError("%s not a dual function." % name) + try: + val = _restore_dict[name] + except KeyError: + return + else: + sys._getframe(0).f_globals[name] = val + +def restore_all(): + for name in _restore_dict.keys(): + restore_func(name) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__init__.py new file mode 100644 index 0000000000000..fcfd1853e2392 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__init__.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +from __future__ import division, absolute_import, print_function + +__all__ = ['run_main', 'compile', 'f2py_testing'] + +import os +import sys +import subprocess + +from . import f2py2e +from . import f2py_testing +from . import diagnose + +from .info import __doc__ + +run_main = f2py2e.run_main +main = f2py2e.main + +def compile(source, + modulename = 'untitled', + extra_args = '', + verbose = 1, + source_fn = None + ): + ''' Build extension module from processing source with f2py. + Read the source of this function for more information. 
+ ''' + from numpy.distutils.exec_command import exec_command + import tempfile + if source_fn is None: + f = tempfile.NamedTemporaryFile(suffix='.f') + else: + f = open(source_fn, 'w') + + try: + f.write(source) + f.flush() + + args = ' -c -m %s %s %s'%(modulename, f.name, extra_args) + c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' % \ + (sys.executable, args) + s, o = exec_command(c) + finally: + f.close() + return s + +from numpy.testing import Tester +test = Tester().test +bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__version__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__version__.py new file mode 100644 index 0000000000000..49a2199bf38b0 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__version__.py @@ -0,0 +1,10 @@ +from __future__ import division, absolute_import, print_function + +major = 2 + +try: + from __svn_version__ import version + version_info = (major, version) + version = '%s_%s' % version_info +except (ImportError, ValueError): + version = str(major) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/auxfuncs.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/auxfuncs.py new file mode 100644 index 0000000000000..2e016e18656ac --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/auxfuncs.py @@ -0,0 +1,711 @@ +#!/usr/bin/env python +""" + +Auxiliary functions for f2py2e. + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) LICENSE. + + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/07/24 19:01:55 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +import pprint +import sys +import types +from functools import reduce + +from . import __version__ +from . import cfuncs + +f2py_version = __version__.version + + +errmess=sys.stderr.write +#outmess=sys.stdout.write +show=pprint.pprint + +options={} +debugoptions=[] +wrapfuncs = 1 + + +def outmess(t): + if options.get('verbose', 1): + sys.stdout.write(t) + +def debugcapi(var): + return 'capi' in debugoptions + +def _isstring(var): + return 'typespec' in var and var['typespec']=='character' and (not isexternal(var)) + +def isstring(var): + return _isstring(var) and not isarray(var) + +def ischaracter(var): + return isstring(var) and 'charselector' not in var + +def isstringarray(var): + return isarray(var) and _isstring(var) + +def isarrayofstrings(var): + # leaving out '*' for now so that + # `character*(*) a(m)` and `character a(m,*)` + # are treated differently. Luckily `character**` is illegal. 
+ return isstringarray(var) and var['dimension'][-1]=='(*)' + +def isarray(var): + return 'dimension' in var and (not isexternal(var)) + +def isscalar(var): + return not (isarray(var) or isstring(var) or isexternal(var)) + +def iscomplex(var): + return isscalar(var) and var.get('typespec') in ['complex', 'double complex'] + +def islogical(var): + return isscalar(var) and var.get('typespec')=='logical' + +def isinteger(var): + return isscalar(var) and var.get('typespec')=='integer' + +def isreal(var): + return isscalar(var) and var.get('typespec')=='real' + +def get_kind(var): + try: + return var['kindselector']['*'] + except KeyError: + try: + return var['kindselector']['kind'] + except KeyError: + pass + +def islong_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') not in ['integer', 'logical']: + return 0 + return get_kind(var)=='8' + +def isunsigned_char(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var)=='-1' + +def isunsigned_short(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var)=='-2' + +def isunsigned(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var)=='-4' + +def isunsigned_long_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var)=='-8' + +def isdouble(var): + if not isscalar(var): + return 0 + if not var.get('typespec')=='real': + return 0 + return get_kind(var)=='8' + +def islong_double(var): + if not isscalar(var): + return 0 + if not var.get('typespec')=='real': + return 0 + return get_kind(var)=='16' + +def islong_complex(var): + if not iscomplex(var): + return 0 + return get_kind(var)=='32' + +def iscomplexarray(var): + return isarray(var) and var.get('typespec') in ['complex', 'double complex'] + +def isint1array(var): + return isarray(var) and var.get('typespec')=='integer' \ + and get_kind(var)=='1' + +def isunsigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var)=='-1' + +def isunsigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var)=='-2' + +def isunsignedarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var)=='-4' + +def isunsigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var)=='-8' + +def issigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var)=='1' + +def issigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var)=='2' + +def issigned_array(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var)=='4' + +def issigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var)=='8' + +def isallocatable(var): + return 'attrspec' in var and 'allocatable' in var['attrspec'] + +def ismutable(var): + return not (not 'dimension' in var or isstring(var)) + +def ismoduleroutine(rout): + return 'modulename' in rout + +def ismodule(rout): + return ('block' in rout and 'module'==rout['block']) + +def isfunction(rout): + return ('block' in rout and 'function'==rout['block']) + +#def isfunction_wrap(rout): +# return wrapfuncs and (iscomplexfunction(rout) or 
isstringfunction(rout)) and (not isexternal(rout)) + +def isfunction_wrap(rout): + if isintent_c(rout): + return 0 + return wrapfuncs and isfunction(rout) and (not isexternal(rout)) + +def issubroutine(rout): + return ('block' in rout and 'subroutine'==rout['block']) + +def issubroutine_wrap(rout): + if isintent_c(rout): + return 0 + return issubroutine(rout) and hasassumedshape(rout) + +def hasassumedshape(rout): + if rout.get('hasassumedshape'): + return True + for a in rout['args']: + for d in rout['vars'].get(a, {}).get('dimension', []): + if d==':': + rout['hasassumedshape'] = True + return True + return False + +def isroutine(rout): + return isfunction(rout) or issubroutine(rout) + +def islogicalfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a=rout['result'] + else: + a=rout['name'] + if a in rout['vars']: + return islogical(rout['vars'][a]) + return 0 + +def islong_longfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a=rout['result'] + else: + a=rout['name'] + if a in rout['vars']: + return islong_long(rout['vars'][a]) + return 0 + +def islong_doublefunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a=rout['result'] + else: + a=rout['name'] + if a in rout['vars']: + return islong_double(rout['vars'][a]) + return 0 + +def iscomplexfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a=rout['result'] + else: + a=rout['name'] + if a in rout['vars']: + return iscomplex(rout['vars'][a]) + return 0 + +def iscomplexfunction_warn(rout): + if iscomplexfunction(rout): + outmess("""\ + ************************************************************** + Warning: code with a function returning complex value + may not work correctly with your Fortran compiler. + Run the following test before using it in your applications: + $(f2py install dir)/test-site/{b/runme_scalar,e/runme} + When using GNU gcc/g77 compilers, codes should work correctly. 
+ **************************************************************\n""") + return 1 + return 0 + +def isstringfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a=rout['result'] + else: + a=rout['name'] + if a in rout['vars']: + return isstring(rout['vars'][a]) + return 0 + +def hasexternals(rout): + return 'externals' in rout and rout['externals'] + +def isthreadsafe(rout): + return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements'] + +def hasvariables(rout): + return 'vars' in rout and rout['vars'] + +def isoptional(var): + return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var) + +def isexternal(var): + return ('attrspec' in var and 'external' in var['attrspec']) + +def isrequired(var): + return not isoptional(var) and isintent_nothide(var) + +def isintent_in(var): + if 'intent' not in var: + return 1 + if 'hide' in var['intent']: + return 0 + if 'inplace' in var['intent']: + return 0 + if 'in' in var['intent']: + return 1 + if 'out' in var['intent']: + return 0 + if 'inout' in var['intent']: + return 0 + if 'outin' in var['intent']: + return 0 + return 1 + +def isintent_inout(var): + return 'intent' in var and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent'] + +def isintent_out(var): + return 'out' in var.get('intent', []) + +def isintent_hide(var): + return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout, isintent_inplace)(var))))) + +def isintent_nothide(var): + return not isintent_hide(var) + +def isintent_c(var): + return 'c' in var.get('intent', []) + +# def isintent_f(var): +# return not isintent_c(var) + +def isintent_cache(var): + return 'cache' in var.get('intent', []) + +def isintent_copy(var): + return 'copy' in var.get('intent', []) + +def isintent_overwrite(var): + return 'overwrite' in var.get('intent', []) + +def isintent_callback(var): + return 'callback' in var.get('intent', []) + +def isintent_inplace(var): + return 'inplace' in var.get('intent', []) + +def isintent_aux(var): + return 'aux' in var.get('intent', []) + +def isintent_aligned4(var): + return 'aligned4' in var.get('intent', []) +def isintent_aligned8(var): + return 'aligned8' in var.get('intent', []) +def isintent_aligned16(var): + return 'aligned16' in var.get('intent', []) + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', + } + +def isprivate(var): + return 'attrspec' in var and 'private' in var['attrspec'] + +def hasinitvalue(var): + return '=' in var + +def hasinitvalueasstring(var): + if not hasinitvalue(var): + return 0 + return var['='][0] in ['"', "'"] + +def hasnote(var): + return 'note' in var + +def hasresultnote(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a=rout['result'] + else: + a=rout['name'] + if a in rout['vars']: + return hasnote(rout['vars'][a]) + return 0 + +def hascommon(rout): + return 'common' in rout + +def containscommon(rout): + if hascommon(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if containscommon(b): + 
return 1 + return 0 + +def containsmodule(block): + if ismodule(block): + return 1 + if not hasbody(block): + return 0 + for b in block['body']: + if containsmodule(b): + return 1 + return 0 + +def hasbody(rout): + return 'body' in rout + +def hascallstatement(rout): + return getcallstatement(rout) is not None + +def istrue(var): + return 1 + +def isfalse(var): + return 0 + +class F2PYError(Exception): + pass + +class throw_error: + def __init__(self, mess): + self.mess = mess + def __call__(self, var): + mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) + raise F2PYError(mess) + +def l_and(*f): + l, l2='lambda v', [] + for i in range(len(f)): + l='%s,f%d=f[%d]'%(l, i, i) + l2.append('f%d(v)'%(i)) + return eval('%s:%s'%(l, ' and '.join(l2))) + +def l_or(*f): + l, l2='lambda v', [] + for i in range(len(f)): + l='%s,f%d=f[%d]'%(l, i, i) + l2.append('f%d(v)'%(i)) + return eval('%s:%s'%(l, ' or '.join(l2))) + +def l_not(f): + return eval('lambda v,f=f:not f(v)') + +def isdummyroutine(rout): + try: + return rout['f2pyenhancements']['fortranname']=='' + except KeyError: + return 0 + +def getfortranname(rout): + try: + name = rout['f2pyenhancements']['fortranname'] + if name=='': + raise KeyError + if not name: + errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements'])) + raise KeyError + except KeyError: + name = rout['name'] + return name + +def getmultilineblock(rout,blockname,comment=1,counter=0): + try: + r = rout['f2pyenhancements'].get(blockname) + except KeyError: + return + if not r: return + if counter > 0 and isinstance(r, str): + return + if isinstance(r, list): + if counter>=len(r): return + r = r[counter] + if r[:3]=="'''": + if comment: + r = '\t/* start ' + blockname + ' multiline ('+repr(counter)+') */\n' + r[3:] + else: + r = r[3:] + if r[-3:]=="'''": + if comment: + r = r[:-3] + '\n\t/* end multiline ('+repr(counter)+')*/' + else: + r = r[:-3] + else: + errmess("%s multiline block should end with `'''`: %s\n" \ + % (blockname, repr(r))) + return r + +def getcallstatement(rout): + return getmultilineblock(rout, 'callstatement') + +def getcallprotoargument(rout,cb_map={}): + r = getmultilineblock(rout, 'callprotoargument', comment=0) + if r: return r + if hascallstatement(rout): + outmess('warning: callstatement is defined without callprotoargument\n') + return + from .capi_maps import getctype + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) + for n in rout['args']: + var = rout['vars'][n] + if isintent_callback(var): + continue + if n in cb_map: + ctype = cb_map[n]+'_typedef' + else: + ctype = getctype(var) + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): + pass + elif isstring(var): + pass + #ctype = 'void*' + else: + ctype = ctype+'*' + if isstring(var) or isarrayofstrings(var): + arg_types2.append('size_t') + arg_types.append(ctype) + + proto_args = ','.join(arg_types+arg_types2) + if not proto_args: + proto_args = 'void' + #print proto_args + return proto_args + +def getusercode(rout): + return getmultilineblock(rout, 'usercode') + +def getusercode1(rout): + return getmultilineblock(rout, 'usercode', counter=1) + +def getpymethoddef(rout): + return getmultilineblock(rout, 'pymethoddef') + +def getargs(rout): + sortargs, args=[], [] + if 'args' in rout: + args=rout['args'] + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: sortargs=rout['args'] + return 
args, sortargs + +def getargs2(rout): + sortargs, args=[], rout.get('args', []) + auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\ + and a not in args] + args = auxvars + args + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: sortargs=auxvars + rout['args'] + return args, sortargs + +def getrestdoc(rout): + if 'f2pymultilines' not in rout: + return None + k = None + if rout['block']=='python module': + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) + +def gentitle(name): + l=(80-len(name)-6)//2 + return '/*%s %s %s*/'%(l*'*', name, l*'*') + +def flatlist(l): + if isinstance(l, list): + return reduce(lambda x,y,f=flatlist:x+f(y), l, []) + return [l] + +def stripcomma(s): + if s and s[-1]==',': return s[:-1] + return s + +def replace(str,d,defaultsep=''): + if isinstance(d, list): + return [replace(str, _m, defaultsep) for _m in d] + if isinstance(str, list): + return [replace(_m, d, defaultsep) for _m in str] + for k in 2*list(d.keys()): + if k=='separatorsfor': + continue + if 'separatorsfor' in d and k in d['separatorsfor']: + sep=d['separatorsfor'][k] + else: + sep=defaultsep + if isinstance(d[k], list): + str=str.replace('#%s#'%(k), sep.join(flatlist(d[k]))) + else: + str=str.replace('#%s#'%(k), d[k]) + return str + +def dictappend(rd, ar): + if isinstance(ar, list): + for a in ar: + rd=dictappend(rd, a) + return rd + for k in ar.keys(): + if k[0]=='_': + continue + if k in rd: + if isinstance(rd[k], str): + rd[k]=[rd[k]] + if isinstance(rd[k], list): + if isinstance(ar[k], list): + rd[k]=rd[k]+ar[k] + else: + rd[k].append(ar[k]) + elif isinstance(rd[k], dict): + if isinstance(ar[k], dict): + if k=='separatorsfor': + for k1 in ar[k].keys(): + if k1 not in rd[k]: + rd[k][k1]=ar[k][k1] + else: + rd[k]=dictappend(rd[k], ar[k]) + else: + rd[k]=ar[k] + return rd + +def applyrules(rules,d,var={}): + ret={} + if isinstance(rules, list): + for r in rules: + rr=applyrules(r, d, var) + ret=dictappend(ret, rr) + if '_break' in rr: + break + return ret + if '_check' in rules and (not rules['_check'](var)): + return ret + if 'need' in rules: + res = applyrules({'needs':rules['need']}, d, var) + if 'needs' in res: + cfuncs.append_needs(res['needs']) + + for k in rules.keys(): + if k=='separatorsfor': + ret[k]=rules[k]; continue + if isinstance(rules[k], str): + ret[k]=replace(rules[k], d) + elif isinstance(rules[k], list): + ret[k]=[] + for i in rules[k]: + ar=applyrules({k:i}, d, var) + if k in ar: + ret[k].append(ar[k]) + elif k[0]=='_': + continue + elif isinstance(rules[k], dict): + ret[k]=[] + for k1 in rules[k].keys(): + if isinstance(k1, types.FunctionType) and k1(var): + if isinstance(rules[k][k1], list): + for i in rules[k][k1]: + if isinstance(i, dict): + res=applyrules({'supertext':i}, d, var) + if 'supertext' in res: + i=res['supertext'] + else: i='' + ret[k].append(replace(i, d)) + else: + i=rules[k][k1] + if isinstance(i, dict): + res=applyrules({'supertext':i}, d) + if 'supertext' in res: + i=res['supertext'] + else: i='' + ret[k].append(replace(i, d)) + else: + errmess('applyrules: ignoring rule %s.\n'%repr(rules[k])) + if isinstance(ret[k], list): + if len(ret[k])==1: + ret[k]=ret[k][0] + if ret[k]==[]: + del ret[k] + return ret diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/capi_maps.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/capi_maps.py new file mode 100644 index 0000000000000..536a576dd101a --- 
/dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/capi_maps.py
@@ -0,0 +1,773 @@
+#!/usr/bin/env python
+"""
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 10:57:33 $
+Pearu Peterson
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__version__ = "$Revision: 1.60 $"[10:-1]
+
+from . import __version__
+f2py_version = __version__.version
+
+import copy
+import re
+import os
+import sys
+from .auxfuncs import *
+from .crackfortran import markoutercomma
+from . import cb_rules
+
+# Numarray and Numeric users should set this to False
+using_newcore = True
+
+depargs=[]
+lcb_map={}
+lcb2_map={}
+# forced casting: mainly caused by the fact that Python or Numeric
+# C/APIs do not support the corresponding C types.
+c2py_map={'double': 'float',
+          'float': 'float',                  # forced casting
+          'long_double': 'float',            # forced casting
+          'char': 'int',                     # forced casting
+          'signed_char': 'int',              # forced casting
+          'unsigned_char': 'int',            # forced casting
+          'short': 'int',                    # forced casting
+          'unsigned_short': 'int',           # forced casting
+          'int': 'int',                      # (forced casting)
+          'long': 'int',
+          'long_long': 'long',
+          'unsigned': 'int',                 # forced casting
+          'complex_float': 'complex',        # forced casting
+          'complex_double': 'complex',
+          'complex_long_double': 'complex',  # forced casting
+          'string': 'string',
+          }
+c2capi_map={'double': 'NPY_DOUBLE',
+            'float': 'NPY_FLOAT',
+            'long_double': 'NPY_DOUBLE',           # forced casting
+            'char': 'NPY_CHAR',
+            'unsigned_char': 'NPY_UBYTE',
+            'signed_char': 'NPY_BYTE',
+            'short': 'NPY_SHORT',
+            'unsigned_short': 'NPY_USHORT',
+            'int': 'NPY_INT',
+            'unsigned': 'NPY_UINT',
+            'long': 'NPY_LONG',
+            'long_long': 'NPY_LONG',               # forced casting
+            'complex_float': 'NPY_CFLOAT',
+            'complex_double': 'NPY_CDOUBLE',
+            'complex_long_double': 'NPY_CDOUBLE',  # forced casting
+            'string': 'NPY_CHAR'}
+
+# These new maps aren't used anywhere yet, but should be by default
+# unless building numeric or numarray extensions.
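+# Illustrative lookups (a sketch, not part of the module; the values shown
+# follow from the default tables defined above):
+#
+#     c2py_map['long_long']    # -> 'long'        (Python-level type in docs)
+#     c2capi_map['double']     # -> 'NPY_DOUBLE'  (NumPy C-API type enum name)
+#
+# A Fortran declaration is first resolved to one of these C type names via
+# f2cmap_all (defined below); that C name then keys all of the c2*_map tables.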
+if using_newcore:
+    c2capi_map={'double': 'NPY_DOUBLE',
+                'float': 'NPY_FLOAT',
+                'long_double': 'NPY_LONGDOUBLE',
+                'char': 'NPY_BYTE',
+                'unsigned_char': 'NPY_UBYTE',
+                'signed_char': 'NPY_BYTE',
+                'short': 'NPY_SHORT',
+                'unsigned_short': 'NPY_USHORT',
+                'int': 'NPY_INT',
+                'unsigned': 'NPY_UINT',
+                'long': 'NPY_LONG',
+                'unsigned_long': 'NPY_ULONG',
+                'long_long': 'NPY_LONGLONG',
+                'unsigned_long_long': 'NPY_ULONGLONG',
+                'complex_float': 'NPY_CFLOAT',
+                'complex_double': 'NPY_CDOUBLE',
+                'complex_long_double': 'NPY_CDOUBLE',
+                'string': 'NPY_CHAR',  # f2py 2e is not ready for NPY_STRING (must set itemsize etc)
+                #'string':'NPY_STRING'
+
+                }
+c2pycode_map={'double': 'd',
+              'float': 'f',
+              'long_double': 'd',          # forced casting
+              'char': '1',
+              'signed_char': '1',
+              'unsigned_char': 'b',
+              'short': 's',
+              'unsigned_short': 'w',
+              'int': 'i',
+              'unsigned': 'u',
+              'long': 'l',
+              'long_long': 'L',
+              'complex_float': 'F',
+              'complex_double': 'D',
+              'complex_long_double': 'D',  # forced casting
+              'string': 'c'
+              }
+if using_newcore:
+    c2pycode_map={'double': 'd',
+                  'float': 'f',
+                  'long_double': 'g',
+                  'char': 'b',
+                  'unsigned_char': 'B',
+                  'signed_char': 'b',
+                  'short': 'h',
+                  'unsigned_short': 'H',
+                  'int': 'i',
+                  'unsigned': 'I',
+                  'long': 'l',
+                  'unsigned_long': 'L',
+                  'long_long': 'q',
+                  'unsigned_long_long': 'Q',
+                  'complex_float': 'F',
+                  'complex_double': 'D',
+                  'complex_long_double': 'G',
+                  'string': 'S'}
+c2buildvalue_map={'double': 'd',
+                  'float': 'f',
+                  'char': 'b',
+                  'signed_char': 'b',
+                  'short': 'h',
+                  'int': 'i',
+                  'long': 'l',
+                  'long_long': 'L',
+                  'complex_float': 'N',
+                  'complex_double': 'N',
+                  'complex_long_double': 'N',
+                  'string': 'z'}
+
+if sys.version_info[0] >= 3:
+    # Bytes, not Unicode strings
+    c2buildvalue_map['string'] = 'y'
+
+if using_newcore:
+    #c2buildvalue_map=???
+    pass
+
+f2cmap_all={'real': {'': 'float', '4': 'float', '8': 'double', '12': 'long_double', '16': 'long_double'},
+            'integer': {'': 'int', '1': 'signed_char', '2': 'short', '4': 'int', '8': 'long_long',
+                        '-1': 'unsigned_char', '-2': 'unsigned_short', '-4': 'unsigned',
+                        '-8': 'unsigned_long_long'},
+            'complex': {'': 'complex_float', '8': 'complex_float',
+                        '16': 'complex_double', '24': 'complex_long_double',
+                        '32': 'complex_long_double'},
+            'complexkind': {'': 'complex_float', '4': 'complex_float',
+                            '8': 'complex_double', '12': 'complex_long_double',
+                            '16': 'complex_long_double'},
+            'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', '8': 'long_long'},
+            'double complex': {'': 'complex_double'},
+            'double precision': {'': 'double'},
+            'byte': {'': 'char'},
+            'character': {'': 'string'}
+            }
+
+if os.path.isfile('.f2py_f2cmap'):
+    # User defined additions to f2cmap_all.
+    # .f2py_f2cmap must contain a dictionary of dictionaries, only.
+    # For example, {'real':{'low':'float'}} means that Fortran 'real(low)' is
+    # interpreted as C 'float'.
+    # This feature is useful for F90/95 users if they use PARAMETERs
+    # in type specifications.
+    try:
+        outmess('Reading .f2py_f2cmap ...\n')
+        f = open('.f2py_f2cmap', 'r')
+        d = eval(f.read(), {}, {})
+        f.close()
+        for k, d1 in d.items():
+            for k1 in d1.keys():
+                d1[k1.lower()] = d1[k1]
+            d[k.lower()] = d[k]
+        for k in d.keys():
+            if k not in f2cmap_all:
+                f2cmap_all[k]={}
+            for k1 in d[k].keys():
+                if d[k][k1] in c2py_map:
+                    if k1 in f2cmap_all[k]:
+                        outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k, k1, f2cmap_all[k][k1], d[k][k1]))
+                    f2cmap_all[k][k1] = d[k][k1]
+                    outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, d[k][k1]))
+                else:
+                    errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k, k1, d[k][k1], d[k][k1], list(c2py_map.keys())))
+        outmess('Successfully applied user defined changes from .f2py_f2cmap\n')
+    except Exception as msg:
+        errmess('Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg))
+cformat_map={'double': '%g',
+             'float': '%g',
+             'long_double': '%Lg',
+             'char': '%d',
+             'signed_char': '%d',
+             'unsigned_char': '%hhu',
+             'short': '%hd',
+             'unsigned_short': '%hu',
+             'int': '%d',
+             'unsigned': '%u',
+             'long': '%ld',
+             'unsigned_long': '%lu',
+             'long_long': '%ld',
+             'complex_float': '(%g,%g)',
+             'complex_double': '(%g,%g)',
+             'complex_long_double': '(%Lg,%Lg)',
+             'string': '%s',
+             }
+
+############### Auxiliary functions
+def getctype(var):
+    """
+    Determines C type
+    """
+    ctype='void'
+    if isfunction(var):
+        if 'result' in var:
+            a=var['result']
+        else:
+            a=var['name']
+        if a in var['vars']:
+            return getctype(var['vars'][a])
+        else:
+            errmess('getctype: function %s has no return value?!\n'%a)
+    elif issubroutine(var):
+        return ctype
+    elif 'typespec' in var and var['typespec'].lower() in f2cmap_all:
+        typespec = var['typespec'].lower()
+        f2cmap=f2cmap_all[typespec]
+        ctype=f2cmap['']  # default type
+        if 'kindselector' in var:
+            if '*' in var['kindselector']:
+                try:
+                    ctype=f2cmap[var['kindselector']['*']]
+                except KeyError:
+                    errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'], '*', var['kindselector']['*']))
+            elif 'kind' in var['kindselector']:
+                if typespec+'kind' in f2cmap_all:
+                    f2cmap=f2cmap_all[typespec+'kind']
+                try:
+                    ctype=f2cmap[var['kindselector']['kind']]
+                except KeyError:
+                    if typespec in f2cmap_all:
+                        f2cmap=f2cmap_all[typespec]
+                    try:
+                        ctype=f2cmap[str(var['kindselector']['kind'])]
+                    except KeyError:
+                        errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n'\
+                                %(typespec, var['kindselector']['kind'], ctype,
+                                  typespec, var['kindselector']['kind'], os.getcwd()))
+
+    else:
+        if not isexternal(var):
+            errmess('getctype: No C-type found in "%s", assuming void.\n'%var)
+    return ctype
+
+def getstrlength(var):
+    if isstringfunction(var):
+        if 'result' in var:
+            a=var['result']
+        else:
+            a=var['name']
+        if a in var['vars']:
+            return getstrlength(var['vars'][a])
+        else:
+            errmess('getstrlength: function %s has no return value?!\n'%a)
+    if not isstring(var):
+        errmess('getstrlength: expected a signature of a string but got: %s\n'%(repr(var)))
+    len='1'
+    if 'charselector' in var:
+        a=var['charselector']
+        if '*' in a:
+            len=a['*']
+        elif 'len' in a:
+            len=a['len']
+    if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len):
+        #if len in ['(*)','*','(:)',':']:
+        if isintent_hide(var):
+            errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(repr(var)))
+        len='-1'
+    return len
+
+def getarrdims(a,var,verbose=0):
+    global depargs
+    ret={}
+    if isstring(var) and not isarray(var):
+        ret['dims']=getstrlength(var)
+        ret['size']=ret['dims']
+        ret['rank']='1'
+    elif isscalar(var):
+        ret['size']='1'
+        ret['rank']='0'
+        ret['dims']=''
+    elif isarray(var):
+#        if not isintent_c(var):
+#            var['dimension'].reverse()
+        dim=copy.copy(var['dimension'])
+        ret['size']='*'.join(dim)
+        try: ret['size']=repr(eval(ret['size']))
+        except: pass
+        ret['dims']=','.join(dim)
+        ret['rank']=repr(len(dim))
+        ret['rank*[-1]']=repr(len(dim)*[-1])[1:-1]
+        for i in range(len(dim)):  # solve dim for dependencies
+            v=[]
+            if dim[i] in depargs: v=[dim[i]]
+            else:
+                for va in depargs:
+                    if re.match(r'.*?\b%s\b.*'%va, dim[i]):
+                        v.append(va)
+            for va in v:
+                if depargs.index(va)>depargs.index(a):
+                    dim[i]='*'
+                    break
+        ret['setdims'], i='', -1
+        for d in dim:
+            i=i+1
+            if d not in ['*', ':', '(*)', '(:)']:
+                ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'], i, d)
+        if ret['setdims']: ret['setdims']=ret['setdims'][:-1]
+        ret['cbsetdims'], i='', -1
+        for d in var['dimension']:
+            i=i+1
+            if d not in ['*', ':', '(*)', '(:)']:
+                ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, d)
+            elif isintent_in(var):
+                outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' \
+                        % (d))
+                ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, 0)
+            elif verbose:
+                errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(repr(a), repr(d)))
+        if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1]
+#    if not isintent_c(var):
+#        var['dimension'].reverse()
+    return ret
+
+def getpydocsign(a, var):
+    global lcb_map
+    if isfunction(var):
+        if 'result' in var:
+            af=var['result']
+        else:
+            af=var['name']
+        if af in var['vars']:
+            return getpydocsign(af, var['vars'][af])
+        else:
+            errmess('getctype: function %s has no return value?!\n'%af)
+        return '', ''
+    sig, sigout=a, a
+    opt=''
+    if isintent_in(var): opt='input'
+    elif isintent_inout(var): opt='in/output'
+    out_a = a
+    if isintent_out(var):
+        for k in var['intent']:
+            if k[:4]=='out=':
+                out_a = k[4:]
+                break
+    init=''
+    ctype=getctype(var)
+
+    if hasinitvalue(var):
+        init, showinit=getinit(a, var)
+        init = ', optional\\n Default: %s' % showinit
+    if isscalar(var):
+        if isintent_inout(var):
+            sig='%s : %s rank-0 array(%s,\'%s\')%s'%(a, opt, c2py_map[ctype],
+                                                     c2pycode_map[ctype], init)
+        else:
+            sig='%s : %s %s%s'%(a, opt, c2py_map[ctype], init)
+        sigout='%s : %s'%(out_a, c2py_map[ctype])
+    elif isstring(var):
+        if isintent_inout(var):
+            sig='%s : %s rank-0 array(string(len=%s),\'c\')%s'%(a, opt, getstrlength(var), init)
+        else:
+            sig='%s : %s string(len=%s)%s'%(a, opt, getstrlength(var), init)
+        sigout='%s : string(len=%s)'%(out_a, getstrlength(var))
+    elif isarray(var):
+        dim=var['dimension']
+        rank=repr(len(dim))
+        sig='%s : %s rank-%s array(\'%s\') with bounds (%s)%s'%(a, opt, rank,
+                                                                c2pycode_map[ctype],
+                                                                ','.join(dim), init)
+        if a==out_a:
+            sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\
+                   %(a, rank, c2pycode_map[ctype], ','.join(dim))
+        else:
+            sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\
+                   %(out_a, rank, c2pycode_map[ctype], ','.join(dim), a)
+    elif isexternal(var):
+        ua=''
+        if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]:
+            ua=lcb2_map[lcb_map[a]]['argname']
+            if not ua==a: ua=' => %s'%ua
+            else: ua=''
+        sig='%s : call-back function%s'%(a, ua)
+        sigout=sig
+    else:
+        errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a)
+    return sig, sigout
+
+def getarrdocsign(a,
var): + ctype=getctype(var) + if isstring(var) and (not isarray(var)): + sig='%s : rank-0 array(string(len=%s),\'c\')'%(a, getstrlength(var)) + elif isscalar(var): + sig='%s : rank-0 array(%s,\'%s\')'%(a, c2py_map[ctype], + c2pycode_map[ctype],) + elif isarray(var): + dim=var['dimension'] + rank=repr(len(dim)) + sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a, rank, + c2pycode_map[ctype], + ','.join(dim)) + return sig + +def getinit(a, var): + if isstring(var): init, showinit='""', "''" + else: init, showinit='', '' + if hasinitvalue(var): + init=var['='] + showinit=init + if iscomplex(var) or iscomplexarray(var): + ret={} + + try: + v = var["="] + if ',' in v: + ret['init.r'], ret['init.i']=markoutercomma(v[1:-1]).split('@,@') + else: + v = eval(v, {}, {}) + ret['init.r'], ret['init.i']=str(v.real), str(v.imag) + except: + raise ValueError('getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) + if isarray(var): + init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'], ret['init.i']) + elif isstring(var): + if not init: init, showinit='""', "''" + if init[0]=="'": + init='"%s"'%(init[1:-1].replace('"', '\\"')) + if init[0]=='"': showinit="'%s'"%(init[1:-1]) + return init, showinit + +def sign2map(a, var): + """ + varname,ctype,atype + init,init.r,init.i,pytype + vardebuginfo,vardebugshowvalue,varshowvalue + varrfromat + intent + """ + global lcb_map, cb_map + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4]=='out=': + out_a = k[4:] + break + ret={'varname':a,'outvarname':out_a} + ret['ctype']=getctype(var) + intent_flags = [] + for f, s in isintent_dict.items(): + if f(var): intent_flags.append('F2PY_%s'%s) + if intent_flags: + #XXX: Evaluate intent_flags here. + ret['intent'] = '|'.join(intent_flags) + else: + ret['intent'] = 'F2PY_INTENT_IN' + if isarray(var): ret['varrformat']='N' + elif ret['ctype'] in c2buildvalue_map: + ret['varrformat']=c2buildvalue_map[ret['ctype']] + else: ret['varrformat']='O' + ret['init'], ret['showinit']=getinit(a, var) + if hasinitvalue(var) and iscomplex(var) and not isarray(var): + ret['init.r'], ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@') + if isexternal(var): + ret['cbnamekey']=a + if a in lcb_map: + ret['cbname']=lcb_map[a] + ret['maxnofargs']=lcb2_map[lcb_map[a]]['maxnofargs'] + ret['nofoptargs']=lcb2_map[lcb_map[a]]['nofoptargs'] + ret['cbdocstr']=lcb2_map[lcb_map[a]]['docstr'] + ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr'] + else: + ret['cbname']=a + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a, list(lcb_map.keys()))) + if isstring(var): + ret['length']=getstrlength(var) + if isarray(var): + ret=dictappend(ret, getarrdims(a, var)) + dim=copy.copy(var['dimension']) + if ret['ctype'] in c2capi_map: + ret['atype']=c2capi_map[ret['ctype']] + # Debug info + if debugcapi(var): + il=[isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', + #ismutable,'mutable',l_not(ismutable),'immutable', + ] + rl=[] + for i in range(0, len(il), 2): + if il[i](var): rl.append(il[i+1]) + if 
isstring(var): + rl.append('slen(%s)=%s'%(a, ret['length'])) + if isarray(var): +# if not isintent_c(var): +# var['dimension'].reverse() + ddim=','.join(map(lambda x, y:'%s|%s'%(x, y), var['dimension'], dim)) + rl.append('dims(%s)'%ddim) +# if not isintent_c(var): +# var['dimension'].reverse() + if isexternal(var): + ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a, ret['cbname'], ','.join(rl)) + else: + ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'], a, ret['showinit'], ','.join(rl)) + if isscalar(var): + if ret['ctype'] in cformat_map: + ret['vardebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']]) + if isstring(var): + ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) + if isexternal(var): + ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a) + if ret['ctype'] in cformat_map: + ret['varshowvalue']='#name#:%s=%s'%(a, cformat_map[ret['ctype']]) + ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) + if isstring(var): + ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) + if hasnote(var): + ret['note']=var['note'] + return ret + +def routsign2map(rout): + """ + name,NAME,begintitle,endtitle + rname,ctype,rformat + routdebugshowvalue + """ + global lcb_map + name = rout['name'] + fname = getfortranname(rout) + ret={'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle('end of %s'%name), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', + } + if '_' in fname: + ret['F_FUNC'] = 'F_FUNC_US' + else: + ret['F_FUNC'] = 'F_FUNC' + if '_' in name: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' + else: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' + lcb_map={} + if 'use' in rout: + for u in rout['use'].keys(): + if u in cb_rules.cb_map: + for un in cb_rules.cb_map[u]: + ln=un[0] + if 'map' in rout['use'][u]: + for k in rout['use'][u]['map'].keys(): + if rout['use'][u]['map'][k]==un[0]: ln=k;break + lcb_map[ln]=un[1] + #else: + # errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u)) + elif 'externals' in rout and rout['externals']: + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' + if isfunction(rout): + if 'result' in rout: + a=rout['result'] + else: + a=rout['name'] + ret['rname']=a + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout) + ret['ctype']=getctype(rout['vars'][a]) + if hasresultnote(rout): + ret['resultnote']=rout['vars'][a]['note'] + rout['vars'][a]['note']=['See elsewhere.'] + if ret['ctype'] in c2buildvalue_map: + ret['rformat']=c2buildvalue_map[ret['ctype']] + else: + ret['rformat']='O' + errmess('routsign2map: no c2buildvalue key for type %s\n'%(repr(ret['ctype']))) + if debugcapi(rout): + if ret['ctype'] in cformat_map: + ret['routdebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) + if isstringfunction(rout): + ret['rlength']=getstrlength(rout['vars'][a]) + if ret['rlength']=='-1': + errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 
10.\n'%(repr(rout['name']))) + ret['rlength']='10' + if hasnote(rout): + ret['note']=rout['note'] + rout['note']=['See elsewhere.'] + return ret + +def modsign2map(m): + """ + modulename + """ + if ismodule(m): + ret={'f90modulename':m['name'], + 'F90MODULENAME':m['name'].upper(), + 'texf90modulename':m['name'].replace('_', '\\_')} + else: + ret={'modulename':m['name'], + 'MODULENAME':m['name'].upper(), + 'texmodulename':m['name'].replace('_', '\\_')} + ret['restdoc'] = getrestdoc(m) or [] + if hasnote(m): + ret['note']=m['note'] + #m['note']=['See elsewhere.'] + ret['usercode'] = getusercode(m) or '' + ret['usercode1'] = getusercode1(m) or '' + if m['body']: + ret['interface_usercode'] = getusercode(m['body'][0]) or '' + else: + ret['interface_usercode'] = '' + ret['pymethoddef'] = getpymethoddef(m) or '' + if 'coutput' in m: + ret['coutput'] = m['coutput'] + if 'f2py_wrapper_output' in m: + ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] + return ret + +def cb_sign2map(a,var,index=None): + ret={'varname':a} + if index is None or 1: # disable 7712 patch + ret['varname_i'] = ret['varname'] + else: + ret['varname_i'] = ret['varname'] + '_' + str(index) + ret['ctype']=getctype(var) + if ret['ctype'] in c2capi_map: + ret['atype']=c2capi_map[ret['ctype']] + if ret['ctype'] in cformat_map: + ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) + if isarray(var): + ret=dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) + if hasnote(var): + ret['note']=var['note'] + var['note']=['See elsewhere.'] + return ret + +def cb_routsign2map(rout, um): + """ + name,begintitle,endtitle,argname + ctype,rctype,maxnofargs,nofoptargs,returncptr + """ + ret={'name':'cb_%s_in_%s'%(rout['name'], um), + 'returncptr':''} + if isintent_callback(rout): + if '_' in rout['name']: + F_FUNC='F_FUNC_US' + else: + F_FUNC='F_FUNC' + ret['callbackname'] = '%s(%s,%s)' \ + % (F_FUNC, + rout['name'].lower(), + rout['name'].upper(), + ) + ret['static'] = 'extern' + else: + ret['callbackname'] = ret['name'] + ret['static'] = 'static' + ret['argname']=rout['name'] + ret['begintitle']=gentitle(ret['name']) + ret['endtitle']=gentitle('end of %s'%ret['name']) + ret['ctype']=getctype(rout) + ret['rctype']='void' + if ret['ctype']=='string': ret['rctype']='void' + else: + ret['rctype']=ret['ctype'] + if ret['rctype']!='void': + if iscomplexfunction(rout): + ret['returncptr'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +return_value= +#endif +""" + else: + ret['returncptr'] = 'return_value=' + if ret['ctype'] in cformat_map: + ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['strlength']=getstrlength(rout) + if isfunction(rout): + if 'result' in rout: + a=rout['result'] + else: + a=rout['name'] + if hasnote(rout['vars'][a]): + ret['note']=rout['vars'][a]['note'] + rout['vars'][a]['note']=['See elsewhere.'] + ret['rname']=a + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout) + if iscomplexfunction(rout): + ret['rctype']=""" +#ifdef F2PY_CB_RETURNCOMPLEX +#ctype# +#else +void +#endif +""" + else: + if hasnote(rout): + ret['note']=rout['note'] + rout['note']=['See elsewhere.'] + nofargs=0 + nofoptargs=0 + if 'args' in rout and 'vars' in rout: + for a in rout['args']: + var=rout['vars'][a] + if l_or(isintent_in, isintent_inout)(var): + nofargs=nofargs+1 + if isoptional(var): + nofoptargs=nofoptargs+1 + ret['maxnofargs']=repr(nofargs) + ret['nofoptargs']=repr(nofoptargs) + if hasnote(rout) and isfunction(rout) and 'result' in rout: + 
ret['routnote']=rout['note']
+        rout['note']=['See elsewhere.']
+    return ret
+
+def common_sign2map(a, var):  # obsolete
+    ret={'varname': a}
+    ret['ctype']=getctype(var)
+    if isstringarray(var):
+        ret['ctype']='char'
+    if ret['ctype'] in c2capi_map:
+        ret['atype']=c2capi_map[ret['ctype']]
+    if ret['ctype'] in cformat_map:
+        ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
+    if isarray(var):
+        ret=dictappend(ret, getarrdims(a, var))
+    elif isstring(var):
+        ret['size']=getstrlength(var)
+        ret['rank']='1'
+    ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var)
+    if hasnote(var):
+        ret['note']=var['note']
+        var['note']=['See elsewhere.']
+    ret['arrdocstr']=getarrdocsign(a, var)  # for strings this returns 0-rank but actually is 1-rank
+    return ret
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cb_rules.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cb_rules.py
new file mode 100644
index 0000000000000..f3bf848a74b2e
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cb_rules.py
@@ -0,0 +1,539 @@
+#!/usr/bin/env python
+"""
+
+Build call-back mechanism for f2py2e.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/07/20 11:27:58 $
+Pearu Peterson
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import pprint
+import sys
+
+from . import __version__
+from .auxfuncs import *
+from . import cfuncs
+
+f2py_version = __version__.version
+
+errmess=sys.stderr.write
+outmess=sys.stdout.write
+show=pprint.pprint
+
+
+################## Rules for callback function ##############
+
+cb_routine_rules={
+    'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);',
+    'body': """
+#begintitle#
+PyObject *#name#_capi = NULL;/*was Py_None*/
+PyTupleObject *#name#_args_capi = NULL;
+int #name#_nofargs = 0;
+jmp_buf #name#_jmpbuf;
+/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/
+#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) {
+\tPyTupleObject *capi_arglist = #name#_args_capi;
+\tPyObject *capi_return = NULL;
+\tPyObject *capi_tmp = NULL;
+\tint capi_j,capi_i = 0;
+\tint capi_longjmp_ok = 1;
+#decl#
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_start_clock();
+#endif
+\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\");
+\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi);
+\tif (#name#_capi==NULL) {
+\t\tcapi_longjmp_ok = 0;
+\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\");
+\t}
+\tif (#name#_capi==NULL) {
+\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\");
+\t\tgoto capi_fail;
+\t}
+\tif (F2PyCapsule_Check(#name#_capi)) {
+\t#name#_typedef #name#_cptr;
+\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi);
+\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#);
+\t#return#
+\t}
+\tif (capi_arglist==NULL) {
+\t\tcapi_longjmp_ok = 0;
+\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
+\t\tif (capi_tmp) {
+\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
+\t\t\tif (capi_arglist==NULL) {
+\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
+\t\t\t\tgoto capi_fail;
+\t\t\t}
+\t\t} else {
+\t\t\tPyErr_Clear(); +\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); +\t\t} +\t} +\tif (capi_arglist == NULL) { +\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); +\t\tgoto capi_fail; +\t} +#setdims# +#pyobjfrom# +\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_call_clock(); +#endif +\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_call_clock(); +#endif +\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return); +\tif (capi_return == NULL) { +\t\tfprintf(stderr,\"capi_return is NULL\\n\"); +\t\tgoto capi_fail; +\t} +\tif (capi_return == Py_None) { +\t\tPy_DECREF(capi_return); +\t\tcapi_return = Py_BuildValue(\"()\"); +\t} +\telse if (!PyTuple_Check(capi_return)) { +\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return); +\t} +\tcapi_j = PyTuple_Size(capi_return); +\tcapi_i = 0; +#frompyobj# +\tCFUNCSMESS(\"cb:#name#:successful\\n\"); +\tPy_DECREF(capi_return); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_clock(); +#endif +\tgoto capi_return_pt; +capi_fail: +\tfprintf(stderr,\"Call-back #name# failed.\\n\"); +\tPy_XDECREF(capi_return); +\tif (capi_longjmp_ok) +\t\tlongjmp(#name#_jmpbuf,-1); +capi_return_pt: +\t; +#return# +} +#endtitle# +""", + 'need':['setjmp.h', 'CFUNCSMESS'], + 'maxnofargs':'#maxnofargs#', + 'nofoptargs':'#nofoptargs#', + 'docstr':"""\ +\tdef #argname#(#docsignature#): return #docreturn#\\n\\ +#docstrsigns#""", + 'latexdocstr':""" +{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} +#routnote# + +#latexdocstrsigns#""", + 'docstrshort':'def #argname#(#docsignature#): return #docreturn#' + } +cb_rout_rules=[ + {# Init + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\tRequired arguments:', + 'docstropt': '\tOptional arguments:', + 'docstrout': '\tReturn objects:', + 'docstrcbs': '\tCall-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote:'--- #note#',l_not(hasnote):''}, + }, { # Function + 'decl':'\t#ctype# return_value;', + 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, + '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', + {debugcapi:'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} + ], + 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], + 'return':'\treturn return_value;', + 
'_check':l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) + }, + {# String function + 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, + 'args':'#ctype# return_value,int return_value_len', + 'args_nm':'return_value,&return_value_len', + 'args_td':'#ctype# ,int', + 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->\\"");'}, + """\tif (capi_j>capi_i) +\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", + {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} + ], + 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], + 'return':'return;', + '_check':isstringfunction + }, + {# Complex function + 'optargs':""" +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# *return_value +#endif +""", + 'optargs_nm':""" +#ifndef F2PY_CB_RETURNCOMPLEX +return_value +#endif +""", + 'optargs_td':""" +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# * +#endif +""", + 'decl':""" +#ifdef F2PY_CB_RETURNCOMPLEX +\t#ctype# return_value; +#endif +""", + 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, + """\ +\tif (capi_j>capi_i) +#ifdef F2PY_CB_RETURNCOMPLEX +\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); +#else +\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); +#endif +""", + {debugcapi:""" +#ifdef F2PY_CB_RETURNCOMPLEX +\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); +#else +\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); +#endif + +"""} + ], + 'return':""" +#ifdef F2PY_CB_RETURNCOMPLEX +\treturn return_value; +#else +\treturn; +#endif +""", + 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], + '_check':iscomplexfunction + }, + {'docstrout':'\t\t#pydocsignout#', + 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasnote:'--- #note#'}], + 'docreturn':'#rname#,', + '_check':isfunction}, + {'_check':issubroutine,'return':'return;'} + ] + +cb_arg_rules=[ + { # Doc + 'docstropt':{l_and(isoptional, isintent_nothide):'\t\t#pydocsign#'}, + 'docstrreq':{l_and(isrequired, isintent_nothide):'\t\t#pydocsign#'}, + 'docstrout':{isintent_out:'\t\t#pydocsignout#'}, + 'latexdocstropt':{l_and(isoptional, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote:'--- #note#'}]}, + 'latexdocstrreq':{l_and(isrequired, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote:'--- #note#'}]}, + 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide):'--- #note#', + l_and(hasnote, isintent_nothide):'--- See above.'}]}, + 'docsign':{l_and(isrequired, isintent_nothide):'#varname#,'}, + 'docsignopt':{l_and(isoptional, isintent_nothide):'#varname#,'}, + 'depend':'' + }, + { + 'args': { + l_and (isscalar, isintent_c):'#ctype# #varname_i#', + l_and (isscalar, l_not(isintent_c)):'#ctype# *#varname_i#_cb_capi', + isarray:'#ctype# *#varname_i#', + isstring:'#ctype# #varname_i#' + }, + 'args_nm': { + l_and (isscalar, isintent_c):'#varname_i#', + l_and (isscalar, l_not(isintent_c)):'#varname_i#_cb_capi', + isarray:'#varname_i#', + isstring:'#varname_i#' + }, + 'args_td': { + l_and (isscalar, isintent_c):'#ctype#', + l_and (isscalar, 
l_not(isintent_c)):'#ctype# *', + isarray:'#ctype# *', + isstring:'#ctype#' + }, + 'strarglens': {isstring:',int #varname_i#_cb_len'}, # untested with multiple args + 'strarglens_td': {isstring:',int'}, # untested with multiple args + 'strarglens_nm': {isstring:',#varname_i#_cb_len'}, # untested with multiple args + }, + { # Scalars + 'decl':{l_not(isintent_c):'\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'}, + 'error': {l_and(isintent_c, isintent_out, + throw_error('intent(c,out) is forbidden for callback scalar arguments')):\ + ''}, + 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, + {isintent_out:'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + ], + 'need':[{isintent_out:['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, + {debugcapi:'CFUNCSMESS'}], + '_check':isscalar + }, { + 'pyobjfrom':[{isintent_in:"""\ +\tif (#name#_nofargs>capi_i) +\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#))) +\t\t\tgoto capi_fail;"""}, + {isintent_inout:"""\ +\tif (#name#_nofargs>capi_i) +\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) +\t\t\tgoto capi_fail;"""}], + 'need':[{isintent_in:'pyobj_from_#ctype#1'}, + {isintent_inout:'pyarr_from_p_#ctype#1'}, + {iscomplex:'#ctype#'}], + '_check':l_and(isscalar, isintent_nothide), + '_optional':'' + }, {# String + 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, + """\tif (capi_j>capi_i) +\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", + {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, + ], + 'need':['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi:'CFUNCSMESS'}, 'string.h'], + '_check':l_and(isstring, isintent_out) + }, { + 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, + {isintent_in:"""\ +\tif (#name#_nofargs>capi_i) +\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) +\t\t\tgoto capi_fail;"""}, + {isintent_inout:"""\ +\tif (#name#_nofargs>capi_i) { +\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len}; +\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) +\t\t\tgoto capi_fail; +\t}"""}], + 'need':[{isintent_in:'pyobj_from_#ctype#1size'}, + {isintent_inout:'pyarr_from_p_#ctype#1'}], + '_check':l_and(isstring, isintent_nothide), + '_optional':'' + }, +# Array ... 
+ { + 'decl':'\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', + 'setdims':'\t#cbsetdims#;', + '_check':isarray, + '_depend':'' + }, + { + 'pyobjfrom': [{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ +\tif (#name#_nofargs>capi_i) { +\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ +""", + l_not(isintent_c): """\ +\tif (#name#_nofargs>capi_i) { +\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ +""", + }, + """ +\t\tif (tmp_arr==NULL) +\t\t\tgoto capi_fail; +\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr)) +\t\t\tgoto capi_fail; +}"""], + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { + 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, + """\tif (capi_j>capi_i) { +\t\tPyArrayObject *rv_cb_arr = NULL; +\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; +\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", + {isintent_c:'|F2PY_INTENT_C'}, + """,capi_tmp); +\t\tif (rv_cb_arr == NULL) { +\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\"); +\t\t\tgoto capi_fail; +\t\t} +\t\tMEMCOPY(#varname_i#,rv_cb_arr->data,PyArray_NBYTES(rv_cb_arr)); +\t\tif (capi_tmp != (PyObject *)rv_cb_arr) { +\t\t\tPy_DECREF(rv_cb_arr); +\t\t} +\t}""", + {debugcapi:'\tfprintf(stderr,"<-.\\n");'}, + ], + 'need':['MEMCOPY', {iscomplexarray:'#ctype#'}], + '_check':l_and(isarray, isintent_out) + }, { + 'docreturn':'#varname#,', + '_check':isintent_out + } + ] + +################## Build call-back module ############# +cb_map={} +def buildcallbacks(m): + global cb_map + cb_map[m['name']]=[] + for bi in m['body']: + if bi['block']=='interface': + for b in bi['body']: + if b: + buildcallback(b, m['name']) + else: + errmess('warning: empty body for %s\n' % (m['name'])) + +def buildcallback(rout, um): + global cb_map + from . 
import capi_maps + + outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'], um)) + args, depargs=getargs(rout) + capi_maps.depargs=depargs + var=rout['vars'] + vrd=capi_maps.cb_routsign2map(rout, um) + rd=dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) + for r in cb_rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar=applyrules(r, vrd, rout) + rd=dictappend(rd, ar) + savevrd={} + for i, a in enumerate(args): + vrd=capi_maps.cb_sign2map(a, var[a], index=i) + savevrd[a]=vrd + for r in cb_arg_rules: + if '_depend' in r: + continue + if '_optional' in r and isoptional(var[a]): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) + if '_break' in r: + break + for a in args: + vrd=savevrd[a] + for r in cb_arg_rules: + if '_depend' in r: + continue + if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + vrd=savevrd[a] + for r in cb_arg_rules: + if '_depend' not in r: + continue + if '_optional' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) + if '_break' in r: + break + if 'args' in rd and 'optargs' in rd: + if isinstance(rd['optargs'], list): + rd['optargs']=rd['optargs']+[""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_nm']=rd['optargs_nm']+[""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_td']=rd['optargs_td']+[""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + if isinstance(rd['docreturn'], list): + rd['docreturn']=stripcomma(replace('#docreturn#', {'docreturn':rd['docreturn']})) + optargs=stripcomma(replace('#docsignopt#', + {'docsignopt':rd['docsignopt']} + )) + if optargs=='': + rd['docsignature']=stripcomma(replace('#docsign#', {'docsign':rd['docsign']})) + else: + rd['docsignature']=replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignature']=rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature']=rd['latexdocsignature'].replace(',', ', ') + rd['docstrsigns']=[] + rd['latexdocstrsigns']=[] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns']=rd['docstrsigns']+rd[k] + k='latex'+k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ + ['\\begin{description}']+rd[k][1:]+\ + ['\\end{description}'] + if 'args' not in rd: + rd['args']='' + rd['args_td']='' + rd['args_nm']='' + if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): + rd['noargs'] = 'void' + + ar=applyrules(cb_routine_rules, rd) + cfuncs.callbacks[rd['name']]=ar['body'] + if isinstance(ar['need'], str): + ar['need']=[ar['need']] + + if 'need' in rd: + for t in cfuncs.typedefs.keys(): + if t in rd['need']: + ar['need'].append(t) + + cfuncs.typedefs_generated[rd['name']+'_typedef'] = ar['cbtypedefs'] + ar['need'].append(rd['name']+'_typedef') + cfuncs.needs[rd['name']]=ar['need'] + + capi_maps.lcb2_map[rd['name']]={'maxnofargs':ar['maxnofargs'], + 'nofoptargs':ar['nofoptargs'], + 'docstr':ar['docstr'], + 'latexdocstr':ar['latexdocstr'], + 'argname':rd['argname'] + } + outmess('\t %s\n'%(ar['docstrshort'])) + #print ar['body'] + return 
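+# Usage sketch (hypothetical driver code, not part of this module; in f2py the
+# rules machinery is the actual caller). Building the callbacks of a parsed
+# module block populates cb_map and registers the generated C text with cfuncs:
+#
+#     buildcallbacks(module_block)   # module_block comes from crackfortran
+#     # -> cb_map[module_block['name']] lists [fortran_name, cb_name] pairs
+#     # -> cfuncs.callbacks[cb_name] holds the generated C function body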
+################## Build call-back function #############
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cfuncs.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cfuncs.py
new file mode 100644
index 0000000000000..7fb630697fe9b
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cfuncs.py
@@ -0,0 +1,1224 @@
+#!/usr/bin/env python
+"""
+
+C declarations, CPP macros, and C functions for f2py2e.
+Only required declarations/macros/functions will be used.
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 11:42:34 $
+Pearu Peterson
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import copy
+
+from . import __version__
+
+f2py_version = __version__.version
+errmess = sys.stderr.write
+
+##################### Definitions ##################
+
+outneeds={'includes0':[],'includes':[],'typedefs':[],'typedefs_generated':[],
+          'userincludes':[],
+          'cppmacros':[],'cfuncs':[],'callbacks':[],'f90modhooks':[],
+          'commonhooks':[]}
+needs={}
+includes0={'includes0':'/*need_includes0*/'}
+includes={'includes':'/*need_includes*/'}
+userincludes={'userincludes':'/*need_userincludes*/'}
+typedefs={'typedefs':'/*need_typedefs*/'}
+typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'}
+cppmacros={'cppmacros':'/*need_cppmacros*/'}
+cfuncs={'cfuncs':'/*need_cfuncs*/'}
+callbacks={'callbacks':'/*need_callbacks*/'}
+f90modhooks={'f90modhooks': '/*need_f90modhooks*/',
+             'initf90modhooksstatic': '/*initf90modhooksstatic*/',
+             'initf90modhooksdynamic': '/*initf90modhooksdynamic*/',
+             }
+commonhooks={'commonhooks': '/*need_commonhooks*/',
+             'initcommonhooks': '/*need_initcommonhooks*/',
+             }
+
+############ Includes ###################
+
+includes0['math.h']='#include <math.h>'
+includes0['string.h']='#include <string.h>'
+includes0['setjmp.h']='#include <setjmp.h>'
+
+includes['Python.h']='#include "Python.h"'
+needs['arrayobject.h']=['Python.h']
+includes['arrayobject.h']='''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
+#include "arrayobject.h"'''
+
+includes['arrayobject.h']='#include "fortranobject.h"'
+includes['stdarg.h']='#include <stdarg.h>'
+
+############# Type definitions ###############
+
+typedefs['unsigned_char']='typedef unsigned char unsigned_char;'
+typedefs['unsigned_short']='typedef unsigned short unsigned_short;'
+typedefs['unsigned_long']='typedef unsigned long unsigned_long;'
+typedefs['signed_char']='typedef signed char signed_char;'
+typedefs['long_long']="""\
+#ifdef _WIN32
+typedef __int64 long_long;
+#else
+typedef long long long_long;
+typedef unsigned long long unsigned_long_long;
+#endif
+"""
+typedefs['unsigned_long_long']="""\
+#ifdef _WIN32
+typedef __uint64 unsigned_long_long;
+#else
+typedef unsigned long long unsigned_long_long;
+#endif
+"""
+typedefs['long_double']="""\
+#ifndef _LONG_DOUBLE
+typedef long double long_double;
+#endif
+"""
+typedefs['complex_long_double']='typedef struct {long double r,i;} complex_long_double;'
+typedefs['complex_float']='typedef struct {float r,i;} complex_float;'
+typedefs['complex_double']='typedef struct {double r,i;} complex_double;'
+typedefs['string']="""typedef char * string;"""
+
+
+############### CPP macros ####################
+cppmacros['CFUNCSMESS']="""\
+#ifdef DEBUGCFUNCS
+#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
+#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
+\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ +\tfprintf(stderr,\"\\n\"); +#else +#define CFUNCSMESS(mess) +#define CFUNCSMESSPY(mess,obj) +#endif +""" +cppmacros['F_FUNC']="""\ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F +#else +#define F_FUNC(f,F) _##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F##_ +#else +#define F_FUNC(f,F) _##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F +#else +#define F_FUNC(f,F) f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F##_ +#else +#define F_FUNC(f,F) f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) +#else +#define F_FUNC_US(f,F) F_FUNC(f,F) +#endif +""" +cppmacros['F_WRAPPEDFUNC']="""\ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) +#else +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) +#endif +""" +cppmacros['F_MODFUNC']="""\ +#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f +#else +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f +#else +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) f ## .in. ## m +#else +#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ +#endif +#endif +/* +#if defined(UPPERCASE_FORTRAN) +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) +#else +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) +#endif +*/ + +#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) +""" +cppmacros['SWAPUNSAFE']="""\ +#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) +""" +cppmacros['SWAP']="""\ +#define SWAP(a,b,t) {\\ +\tt *c;\\ +\tc = a;\\ +\ta = b;\\ +\tb = c;} +""" +#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)' +cppmacros['PRINTPYOBJERR']="""\ +#define PRINTPYOBJERR(obj)\\ +\tfprintf(stderr,\"#modulename#.error is related to \");\\ +\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ +\tfprintf(stderr,\"\\n\"); +""" +cppmacros['MINMAX']="""\ +#ifndef max +#define max(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef min +#define min(a,b) ((a < b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) ((a < b) ? 
(a) : (b)) +#endif +""" +needs['len..']=['f2py_size'] +cppmacros['len..']="""\ +#define rank(var) var ## _Rank +#define shape(var,dim) var ## _Dims[dim] +#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd) +#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim]) +#define fshape(var,dim) shape(var,rank(var)-dim-1) +#define len(var) shape(var,0) +#define flen(var) fshape(var,0) +#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) +/* #define index(i) capi_i ## i */ +#define slen(var) capi_ ## var ## _len +#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) +""" +needs['f2py_size']=['stdarg.h'] +cfuncs['f2py_size']="""\ +static int f2py_size(PyArrayObject* var, ...) +{ + npy_int sz = 0; + npy_int dim; + npy_int rank; + va_list argp; + va_start(argp, var); + dim = va_arg(argp, npy_int); + if (dim==-1) + { + sz = PyArray_SIZE(var); + } + else + { + rank = PyArray_NDIM(var); + if (dim>=1 && dim<=rank) + sz = PyArray_DIM(var, dim-1); + else + fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); + } + va_end(argp); + return sz; +} +""" + +cppmacros['pyobj_from_char1']='#define pyobj_from_char1(v) (PyInt_FromLong(v))' +cppmacros['pyobj_from_short1']='#define pyobj_from_short1(v) (PyInt_FromLong(v))' +needs['pyobj_from_int1']=['signed_char'] +cppmacros['pyobj_from_int1']='#define pyobj_from_int1(v) (PyInt_FromLong(v))' +cppmacros['pyobj_from_long1']='#define pyobj_from_long1(v) (PyLong_FromLong(v))' +needs['pyobj_from_long_long1']=['long_long'] +cppmacros['pyobj_from_long_long1']="""\ +#ifdef HAVE_LONG_LONG +#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) +#else +#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. 
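+/* Note: with no long long support, pyobj_from_long_long1 falls back to PyLong_FromLong below, so values wider than a C long may be truncated. */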
+#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) +#endif +""" +needs['pyobj_from_long_double1']=['long_double'] +cppmacros['pyobj_from_long_double1']='#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' +cppmacros['pyobj_from_double1']='#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' +cppmacros['pyobj_from_float1']='#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' +needs['pyobj_from_complex_long_double1']=['complex_long_double'] +cppmacros['pyobj_from_complex_long_double1']='#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_complex_double1']=['complex_double'] +cppmacros['pyobj_from_complex_double1']='#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_complex_float1']=['complex_float'] +cppmacros['pyobj_from_complex_float1']='#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_string1']=['string'] +cppmacros['pyobj_from_string1']='#define pyobj_from_string1(v) (PyString_FromString((char *)v))' +needs['pyobj_from_string1size']=['string'] +cppmacros['pyobj_from_string1size']='#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))' +needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR'] +cppmacros['TRYPYARRAYTEMPLATE']="""\ +/* New SciPy */ +#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(arr->data)=*v; break; +#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(arr->data)=*v; break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break; + +#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\ + switch (arr->descr->type_num) {\\ + case NPY_DOUBLE: *(double *)(arr->data)=*v; break;\\ + case NPY_INT: *(int *)(arr->data)=*v; break;\\ + case NPY_LONG: *(long *)(arr->data)=*v; break;\\ + case NPY_FLOAT: *(float *)(arr->data)=*v; break;\\ + case NPY_CDOUBLE: *(double *)(arr->data)=*v; break;\\ + case NPY_CFLOAT: *(float *)(arr->data)=*v; break;\\ + case NPY_BOOL: *(npy_bool *)(arr->data)=(*v!=0); break;\\ + case NPY_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\ + case NPY_BYTE: *(signed char *)(arr->data)=*v; break;\\ + case NPY_SHORT: *(short *)(arr->data)=*v; break;\\ + case NPY_USHORT: *(npy_ushort *)(arr->data)=*v; break;\\ + case NPY_UINT: *(npy_uint *)(arr->data)=*v; break;\\ + case NPY_ULONG: *(npy_ulong *)(arr->data)=*v; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(arr->data)=*v; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(arr->data)=*v; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\ + case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\ + default: return -2;\\ + };\\ + return 1 +""" + +needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR'] +cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\ +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break; +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) 
{fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (arr->descr->type==typecode) {\\ + *(ctype *)(arr->data)=(*v).r;\\ + *(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\ + return 1;\\ + }\\ + switch (arr->descr->type_num) {\\ + case NPY_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\ + case NPY_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\ + case NPY_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\ + case NPY_LONG: *(long *)(arr->data)=(*v).r; break;\\ + case NPY_FLOAT: *(float *)(arr->data)=(*v).r; break;\\ + case NPY_INT: *(int *)(arr->data)=(*v).r; break;\\ + case NPY_SHORT: *(short *)(arr->data)=(*v).r; break;\\ + case NPY_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\ + case NPY_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\ + case NPY_BOOL: *(npy_bool *)(arr->data)=((*v).r!=0 && (*v).i!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(arr->data)=(*v).r; break;\\ + case NPY_UINT: *(npy_uint *)(arr->data)=(*v).r; break;\\ + case NPY_ULONG: *(npy_ulong *)(arr->data)=(*v).r; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(arr->data)=(*v).r; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(arr->data)=(*v).r; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r;*(npy_longdouble *)(arr->data+sizeof(npy_longdouble))=(*v).i;break;\\ + case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\ + default: return -2;\\ + };\\ + return -1; +""" +## cppmacros['NUMFROMARROBJ']="""\ +## #define NUMFROMARROBJ(typenum,ctype) \\ +## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +## \tif (arr) {\\ +## \t\tif (arr->descr->type_num==NPY_OBJECT) {\\ +## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\ +## \t\t\tgoto capi_fail;\\ +## \t\t} else {\\ +## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\ +## \t\t}\\ +## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +## \t\treturn 1;\\ +## \t} +## """ +## #XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ +## cppmacros['CNUMFROMARROBJ']="""\ +## #define CNUMFROMARROBJ(typenum,ctype) \\ +## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +## \tif (arr) {\\ +## \t\tif (arr->descr->type_num==NPY_OBJECT) {\\ +## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\ +## \t\t\tgoto capi_fail;\\ +## \t\t} else {\\ +## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\ +## \t\t}\\ +## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +## \t\treturn 1;\\ +## \t} +## """ + + +needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN', 'PRINTPYOBJERR'] +cppmacros['GETSTRFROMPYTUPLE']="""\ +#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ +\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ +\t\tif (rv_cb_str == NULL)\\ +\t\t\tgoto capi_fail;\\ +\t\tif (PyString_Check(rv_cb_str)) {\\ +\t\t\tstr[len-1]='\\0';\\ +\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ +\t\t} else {\\ +\t\t\tPRINTPYOBJERR(rv_cb_str);\\ +\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\ +\t\t\tgoto capi_fail;\\ +\t\t}\\ +\t} +""" +cppmacros['GETSCALARFROMPYTUPLE']="""\ +#define 
GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
+\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
+\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
+\t\t\tgoto capi_fail;\\
+\t}
+"""
+
+cppmacros['FAILNULL']="""\\
+#define FAILNULL(p) do { \\
+    if ((p) == NULL) { \\
+        PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\
+        goto capi_fail; \\
+    } \\
+} while (0)
+"""
+needs['MEMCOPY']=['string.h', 'FAILNULL']
+cppmacros['MEMCOPY']="""\
+#define MEMCOPY(to,from,n)\\
+    do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0)
+"""
+cppmacros['STRINGMALLOC']="""\
+#define STRINGMALLOC(str,len)\\
+\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
+\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
+\t\tgoto capi_fail;\\
+\t} else {\\
+\t\t(str)[len] = '\\0';\\
+\t}
+"""
+cppmacros['STRINGFREE']="""\
+#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
+"""
+needs['STRINGCOPYN']=['string.h', 'FAILNULL']
+cppmacros['STRINGCOPYN']="""\
+#define STRINGCOPYN(to,from,buf_size) \\
+    do { \\
+        int _m = (buf_size); \\
+        char *_to = (to); \\
+        char *_from = (from); \\
+        FAILNULL(_to); FAILNULL(_from); \\
+        (void)strncpy(_to, _from, sizeof(char)*_m); \\
+        _to[_m-1] = '\\0'; \\
+        /* Padding with spaces instead of nulls */ \\
+        for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\
+            _to[_m] = ' '; \\
+        } \\
+    } while (0)
+"""
+needs['STRINGCOPY']=['string.h', 'FAILNULL']
+cppmacros['STRINGCOPY']="""\
+#define STRINGCOPY(to,from)\\
+    do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0)
+"""
+cppmacros['CHECKGENERIC']="""\
+#define CHECKGENERIC(check,tcheck,name) \\
+\tif (!(check)) {\\
+\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+\t\t/*goto capi_fail;*/\\
+\t} else """
+cppmacros['CHECKARRAY']="""\
+#define CHECKARRAY(check,tcheck,name) \\
+\tif (!(check)) {\\
+\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+\t\t/*goto capi_fail;*/\\
+\t} else """
+cppmacros['CHECKSTRING']="""\
+#define CHECKSTRING(check,tcheck,name,show,var)\\
+\tif (!(check)) {\\
+\t\tchar errstring[256];\\
+\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
+\t\tPyErr_SetString(#modulename#_error, errstring);\\
+\t\t/*goto capi_fail;*/\\
+\t} else """
+cppmacros['CHECKSCALAR']="""\
+#define CHECKSCALAR(check,tcheck,name,show,var)\\
+\tif (!(check)) {\\
+\t\tchar errstring[256];\\
+\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
+\t\tPyErr_SetString(#modulename#_error,errstring);\\
+\t\t/*goto capi_fail;*/\\
+\t} else """
+## cppmacros['CHECKDIMS']="""\
+## #define CHECKDIMS(dims,rank) \\
+## \tfor (int i=0;i<(rank);i++)\\
+## \t\tif (dims[i]<0) {\\
+## \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
+## \t\t\tgoto capi_fail;\\
+## \t\t}
+## """
+cppmacros['ARRSIZE']='#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
+cppmacros['OLDPYNUM']="""\
+#ifdef OLDPYNUM
+#error You need to install Numeric Python version 13 or higher. Get it from http://sourceforge.net/project/?group_id=1369
+#endif
+"""
+################# C functions ###############
+
+cfuncs['calcarrindex']="""\
+static int calcarrindex(int *i,PyArrayObject *arr) {
+\tint k,ii = i[0];
+\tfor (k=1; k < arr->nd; k++)
+\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */
+\treturn ii;
+}"""
+cfuncs['calcarrindextr']="""\
+static int calcarrindextr(int *i,PyArrayObject *arr) {
+\tint k,ii = i[arr->nd-1];
+\tfor (k=1; k < arr->nd; k++)
+\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */
+\treturn ii;
+}"""
+cfuncs['forcomb']="""\
+static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
+static int initforcomb(npy_intp *dims,int nd,int tr) {
+  int k;
+  if (dims==NULL) return 0;
+  if (nd<0) return 0;
+  forcombcache.nd = nd;
+  forcombcache.d = dims;
+  forcombcache.tr = tr;
+  if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
+  if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
+  for (k=1;k<nd;k++) {
+    forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
+  }
+  forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
+  return 1;
+}
+static int *nextforcomb(void) {
+  int j,*i,*i_tr,k;
+  int nd=forcombcache.nd;
+  if ((i=forcombcache.i) == NULL) return NULL;
+  if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
+  if (forcombcache.d == NULL) return NULL;
+  i[0]++;
+  if (i[0]==forcombcache.d[0]) {
+    j=1;
+    while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
+    if (j==nd) {
+      free(i);
+      free(i_tr);
+      return NULL;
+    }
+    for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
+    i[j]++;
+    i_tr[nd-j-1] = i[j];
+  } else
+    i_tr[nd-1] = i[0];
+  if (forcombcache.tr) return i_tr;
+  return i;
+}
+"""
+needs['try_pyarr_from_string']=['STRINGCOPYN']
+cfuncs['try_pyarr_from_string']="""\
+static int try_pyarr_from_string(PyObject *obj,const string str) {
+\tPyArrayObject *arr = NULL;
+\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
+\t\t{ STRINGCOPYN(arr->data,str,PyArray_NBYTES(arr)); }
+\treturn 1;
+capi_fail:
+\tPRINTPYOBJERR(obj);
+\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
+\treturn 0;
+}
+"""
+needs['string_from_pyobj']=['string', 'STRINGMALLOC', 'STRINGCOPYN']
+cfuncs['string_from_pyobj']="""\
+static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) {
+\tPyArrayObject *arr = NULL;
+\tPyObject *tmp = NULL;
+#ifdef DEBUGCFUNCS
+fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj);
+#endif
+\tif (obj == Py_None) {
+\t\tif (*len == -1)
+\t\t\t*len = strlen(inistr); /* Will this cause problems? */
+\t\tSTRINGMALLOC(*str,*len);
+\t\tSTRINGCOPYN(*str,inistr,*len+1);
+\t\treturn 1;
+\t}
+\tif (PyArray_Check(obj)) {
+\t\tif ((arr = (PyArrayObject *)obj) == NULL)
+\t\t\tgoto capi_fail;
+\t\tif (!ISCONTIGUOUS(arr)) {
+\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
+\t\t\tgoto capi_fail;
+\t\t}
+\t\tif (*len == -1)
+\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr);
+\t\tSTRINGMALLOC(*str,*len);
+\t\tSTRINGCOPYN(*str,arr->data,*len+1);
+\t\treturn 1;
+\t}
+\tif (PyString_Check(obj)) {
+\t\ttmp = obj;
+\t\tPy_INCREF(tmp);
+\t}
+#if PY_VERSION_HEX >= 0x03000000
+\telse if (PyUnicode_Check(obj)) {
+\t\ttmp = PyUnicode_AsASCIIString(obj);
+\t}
+\telse {
+\t\tPyObject *tmp2;
+\t\ttmp2 = PyObject_Str(obj);
+\t\tif (tmp2) {
+\t\t\ttmp = PyUnicode_AsASCIIString(tmp2);
+\t\t\tPy_DECREF(tmp2);
+\t\t}
+\t\telse {
+\t\t\ttmp = NULL;
+\t\t}
+\t}
+#else
+\telse {
+\t\ttmp = PyObject_Str(obj);
+\t}
+#endif
+\tif (tmp == NULL) goto capi_fail;
+\tif (*len == -1)
+\t\t*len = PyString_GET_SIZE(tmp);
+\tSTRINGMALLOC(*str,*len);
+\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1);
+\tPy_DECREF(tmp);
+\treturn 1;
+capi_fail:
+\tPy_XDECREF(tmp);
+\t{
+\t\tPyObject* err = PyErr_Occurred();
+\t\tif (err==NULL) err = #modulename#_error;
+\t\tPyErr_SetString(err,errmess);
+\t}
+\treturn 0;
+}
+"""
+needs['char_from_pyobj']=['int_from_pyobj']
+cfuncs['char_from_pyobj']="""\
+static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) {
+\tint i=0;
+\tif (int_from_pyobj(&i,obj,errmess)) {
+\t\t*v = (char)i;
+\t\treturn 1;
+\t}
+\treturn 0;
+}
+"""
+needs['signed_char_from_pyobj']=['int_from_pyobj', 'signed_char']
+cfuncs['signed_char_from_pyobj']="""\
+static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) {
+\tint i=0;
+\tif
(int_from_pyobj(&i,obj,errmess)) { +\t\t*v = (signed_char)i; +\t\treturn 1; +\t} +\treturn 0; +} +""" +needs['short_from_pyobj']=['int_from_pyobj'] +cfuncs['short_from_pyobj']="""\ +static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { +\tint i=0; +\tif (int_from_pyobj(&i,obj,errmess)) { +\t\t*v = (short)i; +\t\treturn 1; +\t} +\treturn 0; +} +""" +cfuncs['int_from_pyobj']="""\ +static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { +\tPyObject* tmp = NULL; +\tif (PyInt_Check(obj)) { +\t\t*v = (int)PyInt_AS_LONG(obj); +\t\treturn 1; +\t} +\ttmp = PyNumber_Int(obj); +\tif (tmp) { +\t\t*v = PyInt_AS_LONG(tmp); +\t\tPy_DECREF(tmp); +\t\treturn 1; +\t} +\tif (PyComplex_Check(obj)) +\t\ttmp = PyObject_GetAttrString(obj,\"real\"); +\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) +\t\t/*pass*/; +\telse if (PySequence_Check(obj)) +\t\ttmp = PySequence_GetItem(obj,0); +\tif (tmp) { +\t\tPyErr_Clear(); +\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} +\t\tPy_DECREF(tmp); +\t} +\t{ +\t\tPyObject* err = PyErr_Occurred(); +\t\tif (err==NULL) err = #modulename#_error; +\t\tPyErr_SetString(err,errmess); +\t} +\treturn 0; +} +""" +cfuncs['long_from_pyobj']="""\ +static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { +\tPyObject* tmp = NULL; +\tif (PyInt_Check(obj)) { +\t\t*v = PyInt_AS_LONG(obj); +\t\treturn 1; +\t} +\ttmp = PyNumber_Int(obj); +\tif (tmp) { +\t\t*v = PyInt_AS_LONG(tmp); +\t\tPy_DECREF(tmp); +\t\treturn 1; +\t} +\tif (PyComplex_Check(obj)) +\t\ttmp = PyObject_GetAttrString(obj,\"real\"); +\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) +\t\t/*pass*/; +\telse if (PySequence_Check(obj)) +\t\ttmp = PySequence_GetItem(obj,0); +\tif (tmp) { +\t\tPyErr_Clear(); +\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} +\t\tPy_DECREF(tmp); +\t} +\t{ +\t\tPyObject* err = PyErr_Occurred(); +\t\tif (err==NULL) err = #modulename#_error; +\t\tPyErr_SetString(err,errmess); +\t} +\treturn 0; +} +""" +needs['long_long_from_pyobj']=['long_long'] +cfuncs['long_long_from_pyobj']="""\ +static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { +\tPyObject* tmp = NULL; +\tif (PyLong_Check(obj)) { +\t\t*v = PyLong_AsLongLong(obj); +\t\treturn (!PyErr_Occurred()); +\t} +\tif (PyInt_Check(obj)) { +\t\t*v = (long_long)PyInt_AS_LONG(obj); +\t\treturn 1; +\t} +\ttmp = PyNumber_Long(obj); +\tif (tmp) { +\t\t*v = PyLong_AsLongLong(tmp); +\t\tPy_DECREF(tmp); +\t\treturn (!PyErr_Occurred()); +\t} +\tif (PyComplex_Check(obj)) +\t\ttmp = PyObject_GetAttrString(obj,\"real\"); +\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) +\t\t/*pass*/; +\telse if (PySequence_Check(obj)) +\t\ttmp = PySequence_GetItem(obj,0); +\tif (tmp) { +\t\tPyErr_Clear(); +\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} +\t\tPy_DECREF(tmp); +\t} +\t{ +\t\tPyObject* err = PyErr_Occurred(); +\t\tif (err==NULL) err = #modulename#_error; +\t\tPyErr_SetString(err,errmess); +\t} +\treturn 0; +} +""" +needs['long_double_from_pyobj']=['double_from_pyobj', 'long_double'] +cfuncs['long_double_from_pyobj']="""\ +static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { +\tdouble d=0; +\tif (PyArray_CheckScalar(obj)){ +\t\tif PyArray_IsScalar(obj, LongDouble) { +\t\t\tPyArray_ScalarAsCtype(obj, v); +\t\t\treturn 1; +\t\t} +\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) { +\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj)); +\t\t\treturn 1; +\t\t} +\t} +\tif 
(double_from_pyobj(&d,obj,errmess)) { +\t\t*v = (long_double)d; +\t\treturn 1; +\t} +\treturn 0; +} +""" +cfuncs['double_from_pyobj']="""\ +static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { +\tPyObject* tmp = NULL; +\tif (PyFloat_Check(obj)) { +#ifdef __sgi +\t\t*v = PyFloat_AsDouble(obj); +#else +\t\t*v = PyFloat_AS_DOUBLE(obj); +#endif +\t\treturn 1; +\t} +\ttmp = PyNumber_Float(obj); +\tif (tmp) { +#ifdef __sgi +\t\t*v = PyFloat_AsDouble(tmp); +#else +\t\t*v = PyFloat_AS_DOUBLE(tmp); +#endif +\t\tPy_DECREF(tmp); +\t\treturn 1; +\t} +\tif (PyComplex_Check(obj)) +\t\ttmp = PyObject_GetAttrString(obj,\"real\"); +\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) +\t\t/*pass*/; +\telse if (PySequence_Check(obj)) +\t\ttmp = PySequence_GetItem(obj,0); +\tif (tmp) { +\t\tPyErr_Clear(); +\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} +\t\tPy_DECREF(tmp); +\t} +\t{ +\t\tPyObject* err = PyErr_Occurred(); +\t\tif (err==NULL) err = #modulename#_error; +\t\tPyErr_SetString(err,errmess); +\t} +\treturn 0; +} +""" +needs['float_from_pyobj']=['double_from_pyobj'] +cfuncs['float_from_pyobj']="""\ +static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { +\tdouble d=0.0; +\tif (double_from_pyobj(&d,obj,errmess)) { +\t\t*v = (float)d; +\t\treturn 1; +\t} +\treturn 0; +} +""" +needs['complex_long_double_from_pyobj']=['complex_long_double', 'long_double', + 'complex_double_from_pyobj'] +cfuncs['complex_long_double_from_pyobj']="""\ +static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { +\tcomplex_double cd={0.0,0.0}; +\tif (PyArray_CheckScalar(obj)){ +\t\tif PyArray_IsScalar(obj, CLongDouble) { +\t\t\tPyArray_ScalarAsCtype(obj, v); +\t\t\treturn 1; +\t\t} +\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { +\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; +\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; +\t\t\treturn 1; +\t\t} +\t} +\tif (complex_double_from_pyobj(&cd,obj,errmess)) { +\t\t(*v).r = (long_double)cd.r; +\t\t(*v).i = (long_double)cd.i; +\t\treturn 1; +\t} +\treturn 0; +} +""" +needs['complex_double_from_pyobj']=['complex_double'] +cfuncs['complex_double_from_pyobj']="""\ +static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { +\tPy_complex c; +\tif (PyComplex_Check(obj)) { +\t\tc=PyComplex_AsCComplex(obj); +\t\t(*v).r=c.real, (*v).i=c.imag; +\t\treturn 1; +\t} +\tif (PyArray_IsScalar(obj, ComplexFloating)) { +\t\tif (PyArray_IsScalar(obj, CFloat)) { +\t\t\tnpy_cfloat new; +\t\t\tPyArray_ScalarAsCtype(obj, &new); +\t\t\t(*v).r = (double)new.real; +\t\t\t(*v).i = (double)new.imag; +\t\t} +\t\telse if (PyArray_IsScalar(obj, CLongDouble)) { +\t\t\tnpy_clongdouble new; +\t\t\tPyArray_ScalarAsCtype(obj, &new); +\t\t\t(*v).r = (double)new.real; +\t\t\t(*v).i = (double)new.imag; +\t\t} +\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */ +\t\t\tPyArray_ScalarAsCtype(obj, v); +\t\t} +\t\treturn 1; +\t} +\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ +\t\tPyObject *arr; +\t\tif (PyArray_Check(obj)) { +\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); +\t\t} +\t\telse { +\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); +\t\t} +\t\tif (arr==NULL) return 0; +\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; +\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; +\t\treturn 1; +\t} +\t/* Python does not provide PyNumber_Complex function :-( 
*/ +\t(*v).i=0.0; +\tif (PyFloat_Check(obj)) { +#ifdef __sgi +\t\t(*v).r = PyFloat_AsDouble(obj); +#else +\t\t(*v).r = PyFloat_AS_DOUBLE(obj); +#endif +\t\treturn 1; +\t} +\tif (PyInt_Check(obj)) { +\t\t(*v).r = (double)PyInt_AS_LONG(obj); +\t\treturn 1; +\t} +\tif (PyLong_Check(obj)) { +\t\t(*v).r = PyLong_AsDouble(obj); +\t\treturn (!PyErr_Occurred()); +\t} +\tif (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { +\t\tPyObject *tmp = PySequence_GetItem(obj,0); +\t\tif (tmp) { +\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) { +\t\t\t\tPy_DECREF(tmp); +\t\t\t\treturn 1; +\t\t\t} +\t\t\tPy_DECREF(tmp); +\t\t} +\t} +\t{ +\t\tPyObject* err = PyErr_Occurred(); +\t\tif (err==NULL) +\t\t\terr = PyExc_TypeError; +\t\tPyErr_SetString(err,errmess); +\t} +\treturn 0; +} +""" +needs['complex_float_from_pyobj']=['complex_float', 'complex_double_from_pyobj'] +cfuncs['complex_float_from_pyobj']="""\ +static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { +\tcomplex_double cd={0.0,0.0}; +\tif (complex_double_from_pyobj(&cd,obj,errmess)) { +\t\t(*v).r = (float)cd.r; +\t\t(*v).i = (float)cd.i; +\t\treturn 1; +\t} +\treturn 0; +} +""" +needs['try_pyarr_from_char']=['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] +cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n' +needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE', 'unsigned_char'] +cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' +needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE', 'signed_char'] +cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' +needs['try_pyarr_from_short']=['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] +cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n' +needs['try_pyarr_from_int']=['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] +cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n' +needs['try_pyarr_from_long']=['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] +cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n' +needs['try_pyarr_from_long_long']=['pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] +cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' +needs['try_pyarr_from_float']=['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] +cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n' +needs['try_pyarr_from_double']=['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] +cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n' +needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] +cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' +needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] 
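+# Each entry in `cfuncs`/`cppmacros` lists its prerequisites in `needs`; the
+# generator resolves these lists transitively (see the get_needs machinery
+# below), so requesting 'try_pyarr_from_complex_double' also pulls in the
+# TRYCOMPLEXPYARRAYTEMPLATE macro and the complex_double typedef.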
+cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + +needs['create_cb_arglist']=['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] +cfuncs['create_cb_arglist']="""\ +static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { +\tPyObject *tmp = NULL; +\tPyObject *tmp_fun = NULL; +\tint tot,opt,ext,siz,i,di=0; +\tCFUNCSMESS(\"create_cb_arglist\\n\"); +\ttot=opt=ext=siz=0; +\t/* Get the total number of arguments */ +\tif (PyFunction_Check(fun)) +\t\ttmp_fun = fun; +\telse { +\t\tdi = 1; +\t\tif (PyObject_HasAttrString(fun,\"im_func\")) { +\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\"); +\t\t} +\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) { +\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\"); +\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\")) +\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); +\t\t\telse { +\t\t\t\ttmp_fun = fun; /* built-in function */ +\t\t\t\ttot = maxnofargs; +\t\t\t\tif (xa != NULL) +\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa); +\t\t\t} +\t\t\tPy_XDECREF(tmp); +\t\t} +\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { +\t\t\ttot = maxnofargs; +\t\t\tif (xa != NULL) +\t\t\t\ttot += PyTuple_Size((PyObject *)xa); +\t\t\ttmp_fun = fun; +\t\t} +\t\telse if (F2PyCapsule_Check(fun)) { +\t\t\ttot = maxnofargs; +\t\t\tif (xa != NULL) +\t\t\t\text = PyTuple_Size((PyObject *)xa); +\t\t\tif(ext>0) { +\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); +\t\t\t\tgoto capi_fail; +\t\t\t} +\t\t\ttmp_fun = fun; +\t\t} +\t} +if (tmp_fun==NULL) { +fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); +goto capi_fail; +} +#if PY_VERSION_HEX >= 0x03000000 +\tif (PyObject_HasAttrString(tmp_fun,\"__code__\")) { +\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) +#else +\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) { +\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) +#endif +\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; +\t\tPy_XDECREF(tmp); +\t} +\t/* Get the number of optional arguments */ +#if PY_VERSION_HEX >= 0x03000000 +\tif (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { +\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) +#else +\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { +\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) +#endif +\t\t\topt = PyTuple_Size(tmp); +\t\tPy_XDECREF(tmp); +\t} +\t/* Get the number of extra arguments */ +\tif (xa != NULL) +\t\text = PyTuple_Size((PyObject *)xa); +\t/* Calculate the size of call-backs argument list */ +\tsiz = MIN(maxnofargs+ext,tot); +\t*nofargs = MAX(0,siz-ext); +#ifdef DEBUGCFUNCS +\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); +#endif +\tif (siz0: + if outneeds[n][0] not in needs: + out.append(outneeds[n][0]) + del outneeds[n][0] + else: + flag=0 + for k in outneeds[n][1:]: + if k in needs[outneeds[n][0]]: + flag=1 + break + if flag: + outneeds[n]=outneeds[n][1:]+[outneeds[n][0]] + else: + out.append(outneeds[n][0]) 
+ del outneeds[n][0] + if saveout and (0 not in map(lambda x, y:x==y, saveout, outneeds[n])) \ + and outneeds[n] != []: + print(n, saveout) + errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') + out=out+saveout + break + saveout=copy.copy(outneeds[n]) + if out==[]: + out=[n] + res[n]=out + return res diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/common_rules.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/common_rules.py new file mode 100644 index 0000000000000..d3b7f6dc2ae8e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/common_rules.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +""" + +Build common block mechanism for f2py2e. + +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 10:57:33 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.19 $"[10:-1] + +from . import __version__ +f2py_version = __version__.version + +import pprint +import sys +errmess=sys.stderr.write +outmess=sys.stdout.write +show=pprint.pprint + +from .auxfuncs import * +from . import capi_maps +from . import func2subr +from .crackfortran import rmbadname +############## + +def findcommonblocks(block,top=1): + ret = [] + if hascommon(block): + for n in block['common'].keys(): + vars={} + for v in block['common'][n]: + vars[v]=block['vars'][v] + ret.append((n, block['common'][n], vars)) + elif hasbody(block): + for b in block['body']: + ret=ret+findcommonblocks(b, 0) + if top: + tret=[] + names=[] + for t in ret: + if t[0] not in names: + names.append(t[0]) + tret.append(t) + return tret + return ret + +def buildhooks(m): + ret = {'commonhooks':[],'initcommonhooks':[],'docs':['"COMMON blocks:\\n"']} + fwrap = [''] + def fadd(line,s=fwrap): s[0] = '%s\n %s'%(s[0], line) + chooks = [''] + def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0], line) + ihooks = [''] + def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0], line) + doc = [''] + def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0], line) + for (name, vnames, vars) in findcommonblocks(m): + lower_name = name.lower() + hnames, inames = [], [] + for n in vnames: + if isintent_hide(vars[n]): hnames.append(n) + else: inames.append(n) + if hnames: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n'%(name, ','.join(inames), ','.join(hnames))) + else: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n'%(name, ','.join(inames))) + fadd('subroutine f2pyinit%s(setupfunc)'%name) + fadd('external setupfunc') + for n in vnames: + fadd(func2subr.var2fixfortran(vars, n)) + if name=='_BLNK_': + fadd('common %s'%(','.join(vnames))) + else: + fadd('common /%s/ %s'%(name, ','.join(vnames))) + fadd('call setupfunc(%s)'%(','.join(inames))) + fadd('end\n') + cadd('static FortranDataDef f2py_%s_def[] = {'%(name)) + idims=[] + for n in inames: + ct = capi_maps.getctype(vars[n]) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, vars[n]) + if dm['dims']: idims.append('(%s)'%(dm['dims'])) + else: idims.append('') + dms=dm['dims'].strip() + if not dms: dms='-1' + cadd('\t{\"%s\",%s,{{%s}},%s},'%(n, dm['rank'], dms, at)) + cadd('\t{NULL}\n};') + inames1 = rmbadname(inames) + inames1_tps = ','.join(['char *'+s for s in inames1]) + cadd('static void f2py_setup_%s(%s) {'%(name, inames1_tps)) + 
cadd('\tint i_f2py=0;') + for n in inames1: + cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name, n)) + cadd('}') + if '_' in lower_name: + F_FUNC='F_FUNC_US' + else: + F_FUNC='F_FUNC' + cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\ + %(F_FUNC, lower_name, name.upper(), + ','.join(['char*']*len(inames1)))) + cadd('static void f2py_init_%s(void) {'%name) + cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ + %(F_FUNC, lower_name, name.upper(), name)) + cadd('}\n') + iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name, name, name)) + tname = name.replace('_', '\\_') + dadd('\\subsection{Common block \\texttt{%s}}\n'%(tname)) + dadd('\\begin{description}') + for n in inames: + dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n, vars[n]))) + if hasnote(vars[n]): + note = vars[n]['note'] + if isinstance(note, list): note='\n'.join(note) + dadd('--- %s'%(note)) + dadd('\\end{description}') + ret['docs'].append('"\t/%s/ %s\\n"'%(name, ','.join(map(lambda v, d:v+d, inames, idims)))) + ret['commonhooks']=chooks + ret['initcommonhooks']=ihooks + ret['latexdoc']=doc[0] + if len(ret['docs'])<=1: ret['docs']='' + return ret, fwrap[0] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/crackfortran.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/crackfortran.py new file mode 100644 index 0000000000000..8930811269c9b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/crackfortran.py @@ -0,0 +1,2868 @@ +#!/usr/bin/env python +""" +crackfortran --- read fortran (77,90) code and extract declaration information. + +Copyright 1999-2004 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/09/27 07:13:49 $ +Pearu Peterson + + +Usage of crackfortran: +====================== +Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h + -m ,--ignore-contains +Functions: crackfortran, crack2fortran +The following Fortran statements/constructions are supported +(or will be if needed): + block data,byte,call,character,common,complex,contains,data, + dimension,double complex,double precision,end,external,function, + implicit,integer,intent,interface,intrinsic, + logical,module,optional,parameter,private,public, + program,real,(sequence?),subroutine,type,use,virtual, + include,pythonmodule +Note: 'virtual' is mapped to 'dimension'. +Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). +Note: code after 'contains' will be ignored until its scope ends. +Note: 'common' statement is extended: dimensions are moved to variable definitions +Note: f2py directive: f2py is read as +Note: pythonmodule is introduced to represent Python module + +Usage: + `postlist=crackfortran(files,funcs)` + `postlist` contains declaration information read from the list of files `files`. 
+ `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file + + `postlist` has the following structure: + *** it is a list of dictionaries containing `blocks': + B = {'block','body','vars','parent_block'[,'name','prefix','args','result', + 'implicit','externals','interfaced','common','sortvars', + 'commonvars','note']} + B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | + 'program' | 'block data' | 'type' | 'pythonmodule' + B['body'] --- list containing `subblocks' with the same structure as `blocks' + B['parent_block'] --- dictionary of a parent block: + C['body'][]['parent_block'] is C + B['vars'] --- dictionary of variable definitions + B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) + B['name'] --- name of the block (not if B['block']=='interface') + B['prefix'] --- prefix string (only if B['block']=='function') + B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' + B['result'] --- name of the return value (only if B['block']=='function') + B['implicit'] --- dictionary {'a':,'b':...} | None + B['externals'] --- list of variables being external + B['interfaced'] --- list of variables being external and defined + B['common'] --- dictionary of common blocks (list of objects) + B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) + B['from'] --- string showing the 'parents' of the current block + B['use'] --- dictionary of modules used in current block: + {:{['only':<0|1>],['map':{:,...}]}} + B['note'] --- list of LaTeX comments on the block + B['f2pyenhancements'] --- optional dictionary + {'threadsafe':'','fortranname':, + 'callstatement':|, + 'callprotoargument':, + 'usercode':|, + 'pymethoddef:' + } + B['entry'] --- dictionary {entryname:argslist,..} + B['varnames'] --- list of variable names given in the order of reading the + Fortran code, useful for derived types. + B['saved_interface'] --- a string of scanned routine signature, defines explicit interface + *** Variable definition is a dictionary + D = B['vars'][] = + {'typespec'[,'attrspec','kindselector','charselector','=','typename']} + D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' | + 'double precision' | 'integer' | 'logical' | 'real' | 'type' + D['attrspec'] --- list of attributes (e.g. 
'dimension()', + 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)', + 'optional','required', etc) + K = D['kindselector'] = {['*','kind']} (only if D['typespec'] = + 'complex' | 'integer' | 'logical' | 'real' ) + C = D['charselector'] = {['*','len','kind']} + (only if D['typespec']=='character') + D['='] --- initialization expression string + D['typename'] --- name of the type if D['typespec']=='type' + D['dimension'] --- list of dimension bounds + D['intent'] --- list of intent specifications + D['depend'] --- list of variable names on which current variable depends on + D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised + D['note'] --- list of LaTeX comments on the variable + *** Meaning of kind/char selectors (few examples): + D['typespec>']*K['*'] + D['typespec'](kind=K['kind']) + character*C['*'] + character(len=C['len'],kind=C['kind']) + (see also fortran type declaration statement formats below) + +Fortran 90 type declaration statement format (F77 is subset of F90) +==================================================================== +(Main source: IBM XL Fortran 5.1 Language Reference Manual) +type declaration = [[]::] + = byte | + character[] | + complex[] | + double complex | + double precision | + integer[] | + logical[] | + real[] | + type() + = * | + ([len=][,[kind=]]) | + (kind=[,len=]) + = * | + ([kind=]) + = comma separated list of attributes. + Only the following attributes are used in + building up the interface: + external + (parameter --- affects '=' key) + optional + intent + Other attributes are ignored. + = in | out | inout + = comma separated list of dimension bounds. + = [[*][()] | [()]*] + [// | =] [,] + +In addition, the following attributes are used: check,depend,note + +TODO: + * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)' + -> 'real x(2)') + The above may be solved by creating appropriate preprocessor program, for example. + +""" +from __future__ import division, absolute_import, print_function + +import sys +import string +import fileinput +import re +import pprint +import os +import copy +import platform + +from . import __version__ +from .auxfuncs import * + +f2py_version = __version__.version + +# Global flags: +strictf77=1 # Ignore `!' comments unless line[0]=='!' +sourcecodeform='fix' # 'fix','free' +quiet=0 # Be verbose if 0 (Obsolete: not used any more) +verbose=1 # Be quiet if 0, extra verbose if > 1. 
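+# These flags, like the parser state below, are restored to their defaults by
+# reset_global_f2py_vars() (defined below).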
+tabchar=4*' ' +pyffilename='' +f77modulename='' +skipemptyends=0 # for old F77 programs without 'program' statement +ignorecontains=1 +dolowercase=1 +debug=[] + +# Global variables +groupcounter=0 +grouplist={groupcounter:[]} +neededmodule=-1 +expectbegin=1 +skipblocksuntil=-1 +usermodules=[] +f90modulevars={} +gotnextfile=1 +filepositiontext='' +currentfilename='' +skipfunctions=[] +skipfuncs=[] +onlyfuncs=[] +include_paths=[] +previous_context = None + + +def reset_global_f2py_vars(): + global groupcounter, grouplist, neededmodule, expectbegin, \ + skipblocksuntil, usermodules, f90modulevars, gotnextfile, \ + filepositiontext, currentfilename, skipfunctions, skipfuncs, \ + onlyfuncs, include_paths, previous_context, \ + strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename, \ + f77modulename, skipemptyends, ignorecontains, dolowercase, debug + + # flags + strictf77 = 1 + sourcecodeform = 'fix' + quiet = 0 + verbose = 1 + tabchar = 4*' ' + pyffilename = '' + f77modulename = '' + skipemptyends = 0 + ignorecontains = 1 + dolowercase = 1 + debug = [] + # variables + groupcounter = 0 + grouplist = {groupcounter:[]} + neededmodule =-1 + expectbegin = 1 + skipblocksuntil = -1 + usermodules = [] + f90modulevars = {} + gotnextfile = 1 + filepositiontext = '' + currentfilename = '' + skipfunctions = [] + skipfuncs = [] + onlyfuncs = [] + include_paths = [] + previous_context = None + + +###### Some helper functions +def show(o,f=0):pprint.pprint(o) +errmess=sys.stderr.write +def outmess(line,flag=1): + global filepositiontext + if not verbose: return + if not quiet: + if flag:sys.stdout.write(filepositiontext) + sys.stdout.write(line) +re._MAXCACHE=50 +defaultimplicitrules={} +for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'} +for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'} +del c +badnames={} +invbadnames={} +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', + 'max', 'min', + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: + badnames[n]=n+'_bn' + invbadnames[n+'_bn']=n + +def rmbadname1(name): + if name in badnames: + errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name, badnames[name])) + return badnames[name] + return name + +def rmbadname(names): return [rmbadname1(_m) for _m in names] + +def undo_rmbadname1(name): + if name in invbadnames: + errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\ + %(name, invbadnames[name])) + return invbadnames[name] + return name + +def undo_rmbadname(names): return [undo_rmbadname1(_m) for _m in names] + +def getextension(name): + i=name.rfind('.') + if i==-1: return '' + if '\\' in name[i:]: return '' + if '/' in name[i:]: return '' + return name[i+1:] + +is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search +_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search +_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match +def is_free_format(file): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # 
signs of free format are detected. + result = 0 + f = open(file, 'r') + line = f.readline() + n = 15 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = 1 + while n>0 and line: + if line[0]!='!' and line.strip(): + n -= 1 + if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-2:-1]=='&': + result = 1 + break + line = f.readline() + f.close() + return result + + +####### Read fortran (77,90) code +def readfortrancode(ffile,dowithline=show,istop=1): + """ + Read fortran codes from files and + 1) Get rid of comments, line continuations, and empty lines; lower cases. + 2) Call dowithline(line) on every line. + 3) Recursively call itself when statement \"include ''\" is met. + """ + global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase, include_paths + if not istop: + saveglobals=gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase + if ffile==[]: return + localdolowercase = dolowercase + cont=0 + finalline='' + ll='' + commentline=re.compile(r'(?P([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P.*)') + includeline=re.compile(r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) + cont1=re.compile(r'(?P.*)&\s*\Z') + cont2=re.compile(r'(\s*&|)(?P.*)') + mline_mark = re.compile(r".*?'''") + if istop: dowithline('', -1) + ll, l1='', '' + spacedigits=[' '] + [str(_m) for _m in range(10)] + filepositiontext='' + fin=fileinput.FileInput(ffile) + while True: + l=fin.readline() + if not l: break + if fin.isfirstline(): + filepositiontext='' + currentfilename=fin.filename() + gotnextfile=1 + l1=l + strictf77=0 + sourcecodeform='fix' + ext = os.path.splitext(currentfilename)[1] + if is_f_file(currentfilename) and \ + not (_has_f90_header(l) or _has_fix_header(l)): + strictf77=1 + elif is_free_format(currentfilename) and not _has_fix_header(l): + sourcecodeform='free' + if strictf77: beginpattern=beginpattern77 + else: beginpattern=beginpattern90 + outmess('\tReading file %s (format:%s%s)\n'\ + %(repr(currentfilename), sourcecodeform, + strictf77 and ',strict' or '')) + + l=l.expandtabs().replace('\xa0', ' ') + while not l=='': # Get rid of newline characters + if l[-1] not in "\n\r\f": break + l=l[:-1] + if not strictf77: + r=commentline.match(l) + if r: + l=r.group('line')+' ' # Strip comments starting with `!' + rl=r.group('rest') + if rl[:4].lower()=='f2py': # f2py directive + l = l + 4*' ' + r=commentline.match(rl[4:]) + if r: l=l+r.group('line') + else: l = l + rl[4:] + if l.strip()=='': # Skip empty line + cont=0 + continue + if sourcecodeform=='fix': + if l[0] in ['*', 'c', '!', 'C', '#']: + if l[1:5].lower()=='f2py': # f2py directive + l=' '+l[5:] + else: # Skip comment line + cont=0 + continue + elif strictf77: + if len(l)>72: l=l[:72] + if not (l[0] in spacedigits): + raise Exception('readfortrancode: Found non-(space,digit) char ' + 'in the first column.\n\tAre you sure that ' + 'this code is in fix form?\n\tline=%s' % repr(l)) + + if (not cont or strictf77) and (len(l)>5 and not l[5]==' '): + # Continuation of a previous line + ll=ll+l[6:] + finalline='' + origfinalline='' + else: + if not strictf77: + # F90 continuation + r=cont1.match(l) + if r: l=r.group('line') # Continuation follows .. + if cont: + ll=ll+cont2.match(l).group('line') + finalline='' + origfinalline='' + else: + l=' '+l[5:] # clean up line beginning from possible digits. 
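+                        # Flush the statement gathered so far: finalline is the
+                        # (by default lowercased) text handed to dowithline(),
+                        # while origfinalline keeps the original case so that
+                        # include file names are matched verbatim.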
+ if localdolowercase: finalline=ll.lower() + else: finalline=ll + origfinalline=ll + ll=l + cont=(r is not None) + else: + l=' '+l[5:] # clean up line beginning from possible digits. + if localdolowercase: finalline=ll.lower() + else: finalline=ll + origfinalline =ll + ll=l + + elif sourcecodeform=='free': + if not cont and ext=='.pyf' and mline_mark.match(l): + l = l + '\n' + while True: + lc = fin.readline() + if not lc: + errmess('Unexpected end of file when reading multiline\n') + break + l = l + lc + if mline_mark.match(lc): + break + l = l.rstrip() + r=cont1.match(l) + if r: l=r.group('line') # Continuation follows .. + if cont: + ll=ll+cont2.match(l).group('line') + finalline='' + origfinalline='' + else: + if localdolowercase: finalline=ll.lower() + else: finalline=ll + origfinalline =ll + ll=l + cont=(r is not None) + else: + raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%repr(sourcecodeform)) + filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1) + m=includeline.match(origfinalline) + if m: + fn=m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + l1=ll + if localdolowercase: + finalline=ll.lower() + else: finalline=ll + origfinalline = ll + filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1) + m=includeline.match(origfinalline) + if m: + fn=m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs)))
+    else:
+        dowithline(finalline)
+    filepositiontext=''
+    fin.close()
+    if istop: dowithline('', 1)
+    else:
+        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+           beginpattern, quiet, verbose, dolowercase=saveglobals
+
+########### Crack line
+beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \
+                r'\s*(?P<this>(\b(%s)\b))'+ \
+                r'\s*(?P<after>%s)\s*\Z'
+##
+fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
+typespattern=re.compile(beforethisafter%('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
+typespattern4implicit=re.compile(beforethisafter%('', fortrantypes+'|static|automatic|undefined', fortrantypes+'|static|automatic|undefined', '.*'), re.I)
+#
+functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
+subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
+#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
+#
+groupbegins77=r'program|block\s*data'
+beginpattern77=re.compile(beforethisafter%('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
+groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
+beginpattern90=re.compile(beforethisafter%('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
+groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
+endpattern=re.compile(beforethisafter%('', groupends, groupends, '[\w\s]*'), re.I), 'end'
+#endifs='end\s*(if|do|where|select|while|forall)'
+endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
+endifpattern=re.compile(beforethisafter%('[\w]*?', endifs, endifs, '[\w\s]*'), re.I), 'endif'
+#
+implicitpattern=re.compile(beforethisafter%('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
+dimensionpattern=re.compile(beforethisafter%('', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
+externalpattern=re.compile(beforethisafter%('', 'external', 'external', '.*'), re.I), 'external'
+optionalpattern=re.compile(beforethisafter%('', 'optional', 'optional', '.*'), re.I), 'optional'
+requiredpattern=re.compile(beforethisafter%('', 'required', 'required', '.*'), re.I), 'required'
+publicpattern=re.compile(beforethisafter%('', 'public', 'public', '.*'), re.I), 'public'
+privatepattern=re.compile(beforethisafter%('', 'private', 'private', '.*'), re.I), 'private'
+intrisicpattern=re.compile(beforethisafter%('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic'
+intentpattern=re.compile(beforethisafter%('', 'intent|depend|note|check', 'intent|depend|note|check', '\s*\(.*?\).*'), re.I), 'intent'
+parameterpattern=re.compile(beforethisafter%('', 'parameter', 'parameter', '\s*\(.*'), re.I), 'parameter'
+datapattern=re.compile(beforethisafter%('', 'data', 'data', '.*'), re.I), 'data'
+callpattern=re.compile(beforethisafter%('', 'call', 'call', '.*'), re.I), 'call'
+entrypattern=re.compile(beforethisafter%('', 'entry', 'entry', '.*'), re.I), 'entry'
+callfunpattern=re.compile(beforethisafter%('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
+commonpattern=re.compile(beforethisafter%('', 'common', 'common', '.*'), re.I), 'common'
+usepattern=re.compile(beforethisafter%('', 'use', 'use', '.*'), re.I), 'use'
+containspattern=re.compile(beforethisafter%('', 'contains', 'contains', ''), re.I), 'contains'
+formatpattern=re.compile(beforethisafter%('', 'format', 'format', '.*'), re.I), 'format'
+## Non-fortran and f2py-specific statements
+f2pyenhancementspattern=re.compile(beforethisafter%('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I|re.S), 'f2pyenhancements'
+multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
+##
+
+def _simplifyargs(argsline):
+    a = []
+    for n in markoutercomma(argsline).split('@,@'):
+        for r in '(),':
+            n = n.replace(r, '_')
+        a.append(n)
+    return ','.join(a)
+
+crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
+def crackline(line,reset=0):
+    """
+    reset=-1 --- initialize
+    reset=0 --- crack the line
+    reset=1 --- final check if mismatch of blocks occurred
+
+    Cracked data is saved in grouplist[0].
+    """
+    global beginpattern, groupcounter, groupname, groupcache, grouplist, gotnextfile,\
+       filepositiontext, currentfilename, neededmodule, expectbegin, skipblocksuntil,\
+       skipemptyends, previous_context
+    if ';' in line and not (f2pyenhancementspattern[0].match(line) or
+                            multilinepattern[0].match(line)):
+        for l in line.split(';'):
+            assert reset==0, repr(reset) # XXX: non-zero reset values need testing
+            crackline(l, reset)
+        return
+    if reset<0:
+        groupcounter=0
+        groupname={groupcounter:''}
+        groupcache={groupcounter:{}}
+        grouplist={groupcounter:[]}
+        groupcache[groupcounter]['body']=[]
+        groupcache[groupcounter]['vars']={}
+        groupcache[groupcounter]['block']=''
+        groupcache[groupcounter]['name']=''
+        neededmodule=-1
+        skipblocksuntil=-1
+        return
+    if reset>0:
+        fl=0
+        if f77modulename and neededmodule==groupcounter: fl=2
+        while groupcounter>fl:
+            outmess('crackline: groupcounter=%s groupname=%s\n'%(repr(groupcounter), repr(groupname)))
+            outmess('crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
+            grouplist[groupcounter-1].append(groupcache[groupcounter])
+            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter=groupcounter-1
+        if f77modulename and neededmodule==groupcounter:
+            grouplist[groupcounter-1].append(groupcache[groupcounter])
+            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter=groupcounter-1 # end interface
+            grouplist[groupcounter-1].append(groupcache[groupcounter])
+            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter=groupcounter-1 # end module
+            neededmodule=-1
+        return
+    if line=='': return
+    flag=0
+    for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
+                requiredpattern,
+                parameterpattern, datapattern, publicpattern, privatepattern,
+                intrisicpattern,
+                endifpattern, endpattern,
+                formatpattern,
+                beginpattern, functionpattern, subroutinepattern,
+                implicitpattern, typespattern, commonpattern,
+                callpattern, usepattern, containspattern,
+                entrypattern,
+                f2pyenhancementspattern,
+                multilinepattern
+                ]:
+        m = pat[0].match(line)
+        if m:
+            break
+        flag=flag+1
+    if not m:
+        re_1 = crackline_re_1
+        if 0<=skipblocksuntil<=groupcounter:return
+        if 'externals' in groupcache[groupcounter]:
+            for name in groupcache[groupcounter]['externals']:
+                if name in invbadnames:
+                    name=invbadnames[name]
+                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
+                    continue
+                m1=re.match(r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z'%name, markouterparen(line), re.I)
+                if m1:
+                    m2 = re_1.match(m1.group('before'))
+                    a = _simplifyargs(m1.group('args'))
+                    if m2:
+                        line='callfun %s(%s) result (%s)'%(name, a, m2.group('result'))
+                    else: line='callfun %s(%s)'%(name, a)
+                    m = callfunpattern[0].match(line)
+                    if not m:
+                        outmess('crackline: could not resolve function call for line=%s.\n'%repr(line))
+                        return
+                    analyzeline(m, 'callfun', line)
+                    return
+        if verbose>1 or (verbose==1 and currentfilename.lower().endswith('.pyf')):
+            previous_context = None
+            outmess('crackline:%d: No pattern for line\n'%(groupcounter))
+        return
+    elif pat[1]=='end':
+        if 0<=skipblocksuntil(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P.*)\Z', re.I)
+nameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
+callnameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
+real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
+real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
+
+_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
+def _is_intent_callback(vdecl):
+    for a in vdecl.get('attrspec', []):
+        if _intentcallbackpattern.match(a):
+            return 1
+    return 0
+
+def _resolvenameargspattern(line):
+    line = markouterparen(line)
+    m1=nameargspattern.match(line)
+    if m1:
+        return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
+    m1=callnameargspattern.match(line)
+    if m1:
+        return m1.group('name'), m1.group('args'), None, None
+    return None, [], None, None
+
+def analyzeline(m, case, line):
+    global groupcounter, groupname, groupcache, grouplist, filepositiontext,\
+       currentfilename, f77modulename, neededinterface, neededmodule, expectbegin,\
+       gotnextfile, previous_context
+    block=m.group('this')
+    if
case != 'multiline': + previous_context = None + if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ + and not skipemptyends and groupcounter<1: + newname=os.path.basename(currentfilename).split('.')[0] + outmess('analyzeline: no group yet. Creating program group with name "%s".\n'%newname) + gotnextfile=0 + groupcounter=groupcounter+1 + groupname[groupcounter]='program' + groupcache[groupcounter]={} + grouplist[groupcounter]=[] + groupcache[groupcounter]['body']=[] + groupcache[groupcounter]['vars']={} + groupcache[groupcounter]['block']='program' + groupcache[groupcounter]['name']=newname + groupcache[groupcounter]['from']='fromsky' + expectbegin=0 + if case in ['begin', 'call', 'callfun']: + # Crack line => block,name,args,result + block = block.lower() + if re.match(r'block\s*data', block, re.I): block='block data' + if re.match(r'python\s*module', block, re.I): block='python module' + name, args, result, bind = _resolvenameargspattern(m.group('after')) + if name is None: + if block=='block data': + name = '_BLOCK_DATA_' + else: + name = '' + if block not in ['interface', 'block data']: + outmess('analyzeline: No name/args pattern found for line.\n') + + previous_context = (block, name, groupcounter) + if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) + else: args=[] + if '' in args: + while '' in args: + args.remove('') + outmess('analyzeline: argument list is malformed (missing argument).\n') + + # end of crack line => block,name,args,result + needmodule=0 + needinterface=0 + + if case in ['call', 'callfun']: + needinterface=1 + if 'args' not in groupcache[groupcounter]: + return + if name not in groupcache[groupcounter]['args']: + return + for it in grouplist[groupcounter]: + if it['name']==name: + return + if name in groupcache[groupcounter]['interfaced']: + return + block={'call':'subroutine','callfun':'function'}[case] + if f77modulename and neededmodule==-1 and groupcounter<=1: + neededmodule=groupcounter+2 + needmodule=1 + if block != 'interface': + needinterface=1 + # Create new block(s) + groupcounter=groupcounter+1 + groupcache[groupcounter]={} + grouplist[groupcounter]=[] + if needmodule: + if verbose>1: + outmess('analyzeline: Creating module block %s\n'%repr(f77modulename), 0) + groupname[groupcounter]='module' + groupcache[groupcounter]['block']='python module' + groupcache[groupcounter]['name']=f77modulename + groupcache[groupcounter]['from']='' + groupcache[groupcounter]['body']=[] + groupcache[groupcounter]['externals']=[] + groupcache[groupcounter]['interfaced']=[] + groupcache[groupcounter]['vars']={} + groupcounter=groupcounter+1 + groupcache[groupcounter]={} + grouplist[groupcounter]=[] + if needinterface: + if verbose>1: + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter), 0) + groupname[groupcounter]='interface' + groupcache[groupcounter]['block']='interface' + groupcache[groupcounter]['name']='unknown_interface' + groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name']) + groupcache[groupcounter]['body']=[] + groupcache[groupcounter]['externals']=[] + groupcache[groupcounter]['interfaced']=[] + groupcache[groupcounter]['vars']={} + groupcounter=groupcounter+1 + groupcache[groupcounter]={} + grouplist[groupcounter]=[] + groupname[groupcounter]=block + groupcache[groupcounter]['block']=block + if not name: name='unknown_'+block + groupcache[groupcounter]['prefix']=m.group('before') + 
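# Note (illustrative comment, not in the upstream source): at this point one
+ # level has been pushed per created block -- groupcache[groupcounter] holds
+ # the new block's dict and grouplist[groupcounter] collects its children --
+ # and the matching end statement later pops it into the parent's 'body', as
+ # in the grouplist[groupcounter-1].append(...) bookkeeping in crackline above. +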
groupcache[groupcounter]['name']=rmbadname1(name) + groupcache[groupcounter]['result']=result + if groupcounter==1: + groupcache[groupcounter]['from']=currentfilename + else: + if f77modulename and groupcounter==3: + groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], currentfilename) + else: + groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name']) + for k in list(groupcache[groupcounter].keys()): + if not groupcache[groupcounter][k]: + del groupcache[groupcounter][k] + + groupcache[groupcounter]['args']=args + groupcache[groupcounter]['body']=[] + groupcache[groupcounter]['externals']=[] + groupcache[groupcounter]['interfaced']=[] + groupcache[groupcounter]['vars']={} + groupcache[groupcounter]['entry']={} + # end of creation + if block=='type': + groupcache[groupcounter]['varnames'] = [] + + if case in ['call', 'callfun']: # set parents variables + if name not in groupcache[groupcounter-2]['externals']: + groupcache[groupcounter-2]['externals'].append(name) + groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars']) + #try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']] + #except: pass + try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] + except: pass + if block in ['function', 'subroutine']: # set global attributes + try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter-2]['vars']['']) + except: pass + if case=='callfun': # return type + if result and result in groupcache[groupcounter]['vars']: + if not name==result: + groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) + #if groupcounter>1: # name is interfaced + try: groupcache[groupcounter-2]['interfaced'].append(name) + except: pass + if block=='function': + t=typespattern[0].match(m.group('before')+' '+name) + if t: + typespec, selector, attr, edecl=cracktypespec0(t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) + + if case in ['call', 'callfun']: + grouplist[groupcounter-1].append(groupcache[groupcounter]) + grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter=groupcounter-1 # end routine + grouplist[groupcounter-1].append(groupcache[groupcounter]) + grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter=groupcounter-1 # end interface + + elif case=='entry': + name, args, result, bind=_resolvenameargspattern(m.group('after')) + if name is not None: + if args: + args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) + else: args=[] + assert result is None, repr(result) + groupcache[groupcounter]['entry'][name] = args + previous_context = ('entry', name, groupcounter) + elif case=='type': + typespec, selector, attr, edecl=cracktypespec0(block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']: + edecl=groupcache[groupcounter]['vars'] + ll=m.group('after').strip() + i=ll.find('::') + if i<0 and case=='intent': + i=markouterparen(ll).find('@)@')-2 + ll=ll[:i+1]+'::'+ll[i+1:] + i=ll.find('::') + if ll[i:]=='::' and 'args' in 
groupcache[groupcounter]: + outmess('All arguments will have attribute %s%s\n'%(m.group('this'), ll[:i])) + ll = ll + ','.join(groupcache[groupcounter]['args']) + if i<0:i=0;pl='' + else: pl=ll[:i].strip();ll=ll[i+2:] + ch = markoutercomma(pl).split('@,@') + if len(ch)>1: + pl = ch[0] + outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:]))) + last_name = None + + for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: + m1=namepattern.match(e) + if not m1: + if case in ['public', 'private']: k='' + else: + print(m.groupdict()) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case, repr(e))) + continue + else: + k=rmbadname1(m1.group('name')) + if k not in edecl: + edecl[k]={} + if case=='dimension': + ap=case+m1.group('after') + if case=='intent': + ap=m.group('this')+pl + if _intentcallbackpattern.match(ap): + if k not in groupcache[groupcounter]['args']: + if groupcounter>1: + if '__user__' not in groupcache[groupcounter-2]['name']: + outmess('analyzeline: missing __user__ module (could be nothing)\n') + if k!=groupcache[groupcounter]['name']: # fixes ticket 1693 + outmess('analyzeline: appending intent(callback) %s'\ + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + groupcache[groupcounter]['args'].append(k) + else: + errmess('analyzeline: intent(callback) %s is ignored' % (k)) + else: + errmess('analyzeline: intent(callback) %s is already'\ + ' in argument list' % (k)) + if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']: + ap=case + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append(ap) + else: + edecl[k]['attrspec']=[ap] + if case=='external': + if groupcache[groupcounter]['block']=='program': + outmess('analyzeline: ignoring program arguments\n') + continue + if k not in groupcache[groupcounter]['args']: + #outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`)) + continue + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals']=[] + groupcache[groupcounter]['externals'].append(k) + last_name = k + groupcache[groupcounter]['vars']=edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case=='parameter': + edecl=groupcache[groupcounter]['vars'] + ll=m.group('after').strip()[1:-1] + last_name = None + for e in markoutercomma(ll).split('@,@'): + try: + k, initexpr=[x.strip() for x in e.split('=')] + except: + outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e, ll));continue + params = get_parameters(edecl) + k=rmbadname1(k) + if k not in edecl: + edecl[k]={} + if '=' in edecl[k] and (not edecl[k]['=']==initexpr): + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k, edecl[k]['='], initexpr)) + t = determineexprtype(initexpr, params) + if t: + if t.get('typespec')=='real': + tt = list(initexpr) + for m in real16pattern.finditer(initexpr): + tt[m.start():m.end()] = list(\ + initexpr[m.start():m.end()].lower().replace('d', 'e')) + initexpr = ''.join(tt) + elif t.get('typespec')=='complex': + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') + try: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: + errmess('analyzeline: Failed to evaluate %r. 
Ignoring: %s\n'\ + % (initexpr, msg)) + continue + edecl[k]['='] = repr(v) + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append('parameter') + else: edecl[k]['attrspec']=['parameter'] + last_name = k + groupcache[groupcounter]['vars']=edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case=='implicit': + if m.group('after').strip().lower()=='none': + groupcache[groupcounter]['implicit']=None + elif m.group('after'): + if 'implicit' in groupcache[groupcounter]: + impl=groupcache[groupcounter]['implicit'] + else: impl={} + if impl is None: + outmess('analyzeline: Overwriting earlier "implicit none" statement.\n') + impl={} + for e in markoutercomma(m.group('after')).split('@,@'): + decl={} + m1=re.match(r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) + if not m1: + outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue + m2=typespattern4implicit.match(m1.group('this')) + if not m2: + outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue + typespec, selector, attr, edecl=cracktypespec0(m2.group('this'), m2.group('after')) + kindselect, charselect, typename=cracktypespec(typespec, selector) + decl['typespec']=typespec + decl['kindselector']=kindselect + decl['charselector']=charselect + decl['typename']=typename + for k in list(decl.keys()): + if not decl[k]: del decl[k] + for r in markoutercomma(m1.group('after')).split('@,@'): + if '-' in r: + try: begc, endc=[x.strip() for x in r.split('-')] + except: + outmess('analyzeline: expected "-" instead of "%s" in range list of implicit statement\n'%r);continue + else: begc=endc=r.strip() + if not len(begc)==len(endc)==1: + outmess('analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n'%r);continue + for o in range(ord(begc), ord(endc)+1): + impl[chr(o)]=decl + groupcache[groupcounter]['implicit']=impl + elif case=='data': + ll=[] + dl='';il='';f=0;fc=1;inp=0 + for c in m.group('after'): + if not inp: + if c=="'": fc=not fc + if c=='/' and fc: f=f+1;continue + if c=='(': inp = inp + 1 + elif c==')': inp = inp - 1 + if f==0: dl=dl+c + elif f==1: il=il+c + elif f==2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + dl=c;il='';f=0 + if f==2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + vars={} + if 'vars' in groupcache[groupcounter]: + vars=groupcache[groupcounter]['vars'] + last_name = None + for l in ll: + l=[x.strip() for x in l] + if l[0][0]==',':l[0]=l[0][1:] + if l[0][0]=='(': + outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0]) + continue + #if '(' in l[0]: + # #outmess('analyzeline: ignoring this data statement.\n') + # continue + i=0;j=0;llen=len(l[1]) + for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): + if v[0]=='(': + outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%v) + # XXX: subsequent init expressions may get wrong values. + # Ignoring since data statements are irrelevant for wrapping. 
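+ # Illustrative example (comment only, not in the upstream source): a
+ # statement like DATA (a(i),i=1,10)/10*0.0/ takes this branch, so no
+ # initializer is recorded for 'a'.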
+ continue + fc=0 + while (i=3: + bn = bn.strip() + if not bn: bn='_BLNK_' + cl.append([bn, ol]) + f=f-2;bn='';ol='' + if f%2: bn=bn+c + else: ol=ol+c + bn = bn.strip() + if not bn: bn='_BLNK_' + cl.append([bn, ol]) + commonkey={} + if 'common' in groupcache[groupcounter]: + commonkey=groupcache[groupcounter]['common'] + for c in cl: + if c[0] in commonkey: + outmess('analyzeline: previously defined common block encountered. Skipping.\n') + continue + commonkey[c[0]]=[] + for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: + if i: commonkey[c[0]].append(i) + groupcache[groupcounter]['common']=commonkey + previous_context = ('common', bn, groupcounter) + elif case=='use': + m1=re.match(r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I) + if m1: + mm=m1.groupdict() + if 'use' not in groupcache[groupcounter]: + groupcache[groupcounter]['use']={} + name=m1.group('name') + groupcache[groupcounter]['use'][name]={} + isonly=0 + if 'list' in mm and mm['list'] is not None: + if 'notonly' in mm and mm['notonly'] is None: + isonly=1 + groupcache[groupcounter]['use'][name]['only']=isonly + ll=[x.strip() for x in mm['list'].split(',')] + rl={} + for l in ll: + if '=' in l: + m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I) + if m2: rl[m2.group('local').strip()]=m2.group('use').strip() + else: + outmess('analyzeline: Not local=>use pattern found in %s\n'%repr(l)) + else: + rl[l]=l + groupcache[groupcounter]['use'][name]['map']=rl + else: + pass + else: + print(m.groupdict()) + outmess('analyzeline: Could not crack the use statement.\n') + elif case in ['f2pyenhancements']: + if 'f2pyenhancements' not in groupcache[groupcounter]: + groupcache[groupcounter]['f2pyenhancements'] = {} + d = groupcache[groupcounter]['f2pyenhancements'] + if m.group('this')=='usercode' and 'usercode' in d: + if isinstance(d['usercode'], str): + d['usercode'] = [d['usercode']] + d['usercode'].append(m.group('after')) + else: + d[m.group('this')] = m.group('after') + elif case=='multiline': + if previous_context is None: + if verbose: + outmess('analyzeline: No context for multiline block.\n') + return + gc = groupcounter + #gc = previous_context[2] + appendmultiline(groupcache[gc], + previous_context[:2], + m.group('this')) + else: + if verbose>1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') + +def appendmultiline(group, context_name, ml): + if 'f2pymultilines' not in group: + group['f2pymultilines'] = {} + d = group['f2pymultilines'] + if context_name not in d: + d[context_name] = [] + d[context_name].append(ml) + return + +def cracktypespec0(typespec, ll): + selector=None + attr=None + if re.match(r'double\s*complex', typespec, re.I): typespec='double complex' + elif re.match(r'double\s*precision', typespec, re.I): typespec='double precision' + else: typespec=typespec.strip().lower() + m1=selectpattern.match(markouterparen(ll)) + if not m1: + outmess('cracktypespec0: no kind/char_selector pattern found for line.\n') + return + d=m1.groupdict() + for k in list(d.keys()): d[k]=unmarkouterparen(d[k]) + if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: + selector=d['this'] + ll=d['after'] + i=ll.find('::') + if i>=0: + attr=ll[:i].strip() + ll=ll[i+2:] + return typespec, selector, attr, ll +##### +namepattern=re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I) +kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
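+# Illustrative examples (comments only, not in the upstream source) of what
+# the selector patterns above capture once their named groups are intact:
+#     kindselector.match('(kind=8)').groupdict() -> {'kind': '8', 'kind2': None}
+#     kindselector.match('*4').groupdict()       -> {'kind': None, 'kind2': '4'}
+#     namepattern.match('n = 2').group('name', 'after') -> ('n', '= 2')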
+charselector=re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I) +lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I) +lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I) +def removespaces(expr): + expr=expr.strip() + if len(expr)<=1: return expr + expr2=expr[0] + for i in range(1, len(expr)-1): + if expr[i]==' ' and \ + ((expr[i+1] in "()[]{}=+-/* ") or (expr[i-1] in "()[]{}=+-/* ")): continue + expr2=expr2+expr[i] + expr2=expr2+expr[-1] + return expr2 +def markinnerspaces(line): + l='';f=0 + cc='\'' + cc1='"' + cb='' + for c in line: + if cb=='\\' and c in ['\\', '\'', '"']: + l=l+c; + cb=c + continue + if f==0 and c in ['\'', '"']: cc=c; cc1={'\'':'"','"':'\''}[c] + if c==cc:f=f+1 + elif c==cc:f=f-1 + elif c==' ' and f==1: l=l+'@_@'; continue + l=l+c;cb=c + return l +def updatevars(typespec, selector, attrspec, entitydecl): + global groupcache, groupcounter + last_name = None + kindselect, charselect, typename=cracktypespec(typespec, selector) + if attrspec: + attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')] + l = [] + c = re.compile(r'(?P<start>[a-zA-Z]+)') + for a in attrspec: + if not a: + continue + m = c.match(a) + if m: + s = m.group('start').lower() + a = s + a[len(s):] + l.append(a) + attrspec = l + el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')] + el1=[] + for e in el: + for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: + if e1: el1.append(e1.replace('@_@', ' ')) + for e in el1: + m=namepattern.match(e) + if not m: + outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(repr(e))) + continue + ename=rmbadname1(m.group('name')) + edecl={} + if ename in groupcache[groupcounter]['vars']: + edecl=groupcache[groupcounter]['vars'][ename].copy() + not_has_typespec = 'typespec' not in edecl + if not_has_typespec: + edecl['typespec']=typespec + elif typespec and (not typespec==edecl['typespec']): + outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typespec'], typespec)) + if 'kindselector' not in edecl: + edecl['kindselector']=copy.copy(kindselect) + elif kindselect: + for k in list(kindselect.keys()): + if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]): + outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['kindselector'][k], kindselect[k])) + else: edecl['kindselector'][k]=copy.copy(kindselect[k]) + if 'charselector' not in edecl and charselect: + if not_has_typespec: + edecl['charselector']=charselect + else: + errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \ + %(ename, charselect)) + elif charselect: + for k in list(charselect.keys()): + if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]): + outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['charselector'][k], charselect[k])) + else: edecl['charselector'][k]=copy.copy(charselect[k]) + if 'typename' not in edecl: + edecl['typename']=typename + elif typename and (not edecl['typename']==typename): + outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % (ename, edecl['typename'], typename)) + if 'attrspec' not in edecl: + edecl['attrspec']=copy.copy(attrspec) + elif attrspec: + for a in attrspec: + if a not in edecl['attrspec']: + edecl['attrspec'].append(a) + else: + edecl['typespec']=copy.copy(typespec) + edecl['kindselector']=copy.copy(kindselect) + edecl['charselector']=copy.copy(charselect) + edecl['typename']=typename + edecl['attrspec']=copy.copy(attrspec) + if m.group('after'): + m1=lenarraypattern.match(markouterparen(m.group('after'))) + if m1: + d1=m1.groupdict() + for lk in ['len', 'array', 'init']: + if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2'] + for k in list(d1.keys()): + if d1[k] is not None: d1[k]=unmarkouterparen(d1[k]) + else: del d1[k] + if 'len' in d1 and 'array' in d1: + if d1['len']=='': + d1['len']=d1['array'] + del d1['array'] + else: + d1['array']=d1['array']+','+d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec, e, typespec, ename, d1['array'])) + if 'array' in d1: + dm = 'dimension(%s)'%d1['array'] + if 'attrspec' not in edecl or (not edecl['attrspec']): + edecl['attrspec']=[dm] + else: + edecl['attrspec'].append(dm) + for dm1 in edecl['attrspec']: + if dm1[:9]=='dimension' and dm1!=dm: + del edecl['attrspec'][-1] + errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \ + % (ename, dm1, dm)) + break + + if 'len' in d1: + if typespec in ['complex', 'integer', 'logical', 'real']: + if ('kindselector' not in edecl) or (not edecl['kindselector']): + edecl['kindselector']={} + edecl['kindselector']['*']=d1['len'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector']={} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*']=d1['len'] + if 'init' in d1: + if '=' in edecl and (not edecl['=']==d1['init']): + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['='], d1['init'])) + else: + edecl['=']=d1['init'] + else: + outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n'%(ename+m.group('after'))) + for k in list(edecl.keys()): + if not edecl[k]: + del edecl[k] + groupcache[groupcounter]['vars'][ename]=edecl + if 'varnames' in groupcache[groupcounter]: + groupcache[groupcounter]['varnames'].append(ename) + last_name = ename + return last_name + +def cracktypespec(typespec, selector): + kindselect=None + charselect=None + typename=None + if selector: + if typespec in ['complex', 'integer', 'logical', 'real']: + kindselect=kindselector.match(selector) + if not kindselect: + outmess('cracktypespec: no kindselector pattern found for %s\n'%(repr(selector))) + return + kindselect=kindselect.groupdict() + kindselect['*']=kindselect['kind2'] + del kindselect['kind2'] + for k in list(kindselect.keys()): + if not kindselect[k]: del kindselect[k] + for k, i in list(kindselect.items()): + kindselect[k] = rmbadname1(i) + elif typespec=='character': + charselect=charselector.match(selector) + if not charselect: + outmess('cracktypespec: no charselector pattern found for %s\n'%(repr(selector))) + return + charselect=charselect.groupdict() + charselect['*']=charselect['charlen'] + del charselect['charlen'] + if charselect['lenkind']: + lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind'])) + lenkind=lenkind.groupdict() + for lk in ['len', 'kind']: + if lenkind[lk+'2']: + lenkind[lk]=lenkind[lk+'2'] + charselect[lk]=lenkind[lk] + del lenkind[lk+'2'] + del charselect['lenkind'] + for k in list(charselect.keys()): + if not charselect[k]: del charselect[k] + for k, i in list(charselect.items()): + charselect[k] = rmbadname1(i) + elif typespec=='type': + typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I) + if typename: typename=typename.group('name') + else: outmess('cracktypespec: no typename found in %s\n'%(repr(typespec+selector))) + else: + outmess('cracktypespec: no selector used for %s\n'%(repr(selector))) + return kindselect, charselect, typename +###### +def setattrspec(decl,attr,force=0): + if not decl: + decl={} + if not attr: + return decl + if 'attrspec' not in decl: + decl['attrspec']=[attr] + return decl + if force: decl['attrspec'].append(attr) + if attr in decl['attrspec']: return decl + if attr=='static' and 'automatic' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr=='automatic' and 'static' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr=='public' and 'private' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr=='private' and 'public' not in decl['attrspec']: + decl['attrspec'].append(attr) + else: + decl['attrspec'].append(attr) + return decl + +def setkindselector(decl,sel,force=0): + if not decl: + decl={} + if not sel: + return decl + if 'kindselector' not in decl: + decl['kindselector']=sel + return decl + for k in list(sel.keys()): + if force or k not in decl['kindselector']: + decl['kindselector'][k]=sel[k] + return decl + +def setcharselector(decl,sel,force=0): + if not decl: + decl={} + if not sel: + return decl + if 'charselector' not in decl: + decl['charselector']=sel + return decl + for k in list(sel.keys()): + if force or k not in decl['charselector']: + decl['charselector'][k]=sel[k] + return decl + +def getblockname(block,unknown='unknown'): + if 'name' in block: + return block['name'] + return unknown + +###### post processing + +def setmesstext(block): + global filepositiontext + try: + filepositiontext='In: %s:%s\n'%(block['from'], block['name']) + except: + pass + +def get_usedict(block): + usedict = {} + if 'parent_block' in block: + usedict = 
get_usedict(block['parent_block']) + if 'use' in block: + usedict.update(block['use']) + return usedict + +def get_useparameters(block, param_map=None): + global f90modulevars + if param_map is None: + param_map = {} + usedict = get_usedict(block) + if not usedict: + return param_map + for usename, mapping in list(usedict.items()): + usename = usename.lower() + if usename not in f90modulevars: + outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name'))) + continue + mvars = f90modulevars[usename] + params = get_parameters(mvars) + if not params: + continue + # XXX: apply mapping + if mapping: + errmess('get_useparameters: mapping for %s not impl.' % (mapping)) + for k, v in list(params.items()): + if k in param_map: + outmess('get_useparameters: overriding parameter %s with'\ + ' value from module %s' % (repr(k), repr(usename))) + param_map[k] = v + + return param_map + +def postcrack2(block,tab='',param_map=None): + global f90modulevars + if not f90modulevars: + return block + if isinstance(block, list): + ret = [] + for g in block: + g = postcrack2(g, tab=tab+'\t', param_map=param_map) + ret.append(g) + return ret + setmesstext(block) + outmess('%sBlock: %s\n'%(tab, block['name']), 0) + + if param_map is None: + param_map = get_useparameters(block) + + if param_map is not None and 'vars' in block: + vars = block['vars'] + for n in list(vars.keys()): + var = vars[n] + if 'kindselector' in var: + kind = var['kindselector'] + if 'kind' in kind: + val = kind['kind'] + if val in param_map: + kind['kind'] = param_map[val] + new_body = [] + for b in block['body']: + b = postcrack2(b, tab=tab+'\t', param_map=param_map) + new_body.append(b) + block['body'] = new_body + + return block + +def postcrack(block,args=None,tab=''): + """ + TODO: + function return values + determine expression types if in argument list + """ + global usermodules, onlyfunctions + if isinstance(block, list): + gret=[] + uret=[] + for g in block: + setmesstext(g) + g=postcrack(g, tab=tab+'\t') + if 'name' in g and '__user__' in g['name']: # sort user routines to appear first + uret.append(g) + else: + gret.append(g) + return uret+gret + setmesstext(block) + if not isinstance(block, dict) and 'block' not in block: + raise Exception('postcrack: Expected block dictionary instead of ' + \ + str(block)) + if 'name' in block and not block['name']=='unknown_interface': + outmess('%sBlock: %s\n'%(tab, block['name']), 0) + blocktype=block['block'] + block=analyzeargs(block) + block=analyzecommon(block) + block['vars']=analyzevars(block) + block['sortvars']=sortvarnames(block['vars']) + if 'args' in block and block['args']: + args=block['args'] + block['body']=analyzebody(block, args, tab=tab) + + userisdefined=[] +## fromuser = [] + if 'use' in block: + useblock=block['use'] + for k in list(useblock.keys()): + if '__user__' in k: + userisdefined.append(k) +## if 'map' in useblock[k]: +## for n in useblock[k]['map'].itervalues(): +## if n not in fromuser: fromuser.append(n) + else: useblock={} + name='' + if 'name' in block: + name=block['name'] + if 'externals' in block and block['externals']:# and not userisdefined: # Build a __user__ module + interfaced=[] + if 'interfaced' in block: + interfaced=block['interfaced'] + mvars=copy.copy(block['vars']) + if name: + mname=name+'__user__routines' + else: + mname='unknown__user__routines' + if mname in userisdefined: + i=1 + while '%s_%i'%(mname, i) in userisdefined: i=i+1 + mname='%s_%i'%(mname, i) + 
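# Illustrative note (comment only, not in the upstream source): for a block
+ # named 'foo' this yields mname == 'foo__user__routines', the synthetic
+ # python module that collects interfaces for its external/callback arguments. +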
interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'} + for e in block['externals']: +## if e in fromuser: +## outmess(' Skipping %s that is defined explicitly in another use statement\n'%(`e`)) +## continue + if e in interfaced: + edef=[] + j=-1 + for b in block['body']: + j=j+1 + if b['block']=='interface': + i=-1 + for bb in b['body']: + i=i+1 + if 'name' in bb and bb['name']==e: + edef=copy.copy(bb) + del b['body'][i] + break + if edef: + if not b['body']: del block['body'][j] + del interfaced[interfaced.index(e)] + break + interface['body'].append(edef) + else: + if e in mvars and not isexternal(mvars[e]): + interface['vars'][e]=mvars[e] + if interface['vars'] or interface['body']: + block['interfaced']=interfaced + mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']} + useblock[mname]={} + usermodules.append(mblock) + if useblock: + block['use']=useblock + return block + +def sortvarnames(vars): + indep = [] + dep = [] + for v in list(vars.keys()): + if 'depend' in vars[v] and vars[v]['depend']: + dep.append(v) + #print '%s depends on %s'%(v,vars[v]['depend']) + else: indep.append(v) + n = len(dep) + i = 0 + while dep: #XXX: How to catch dependence cycles correctly? + v = dep[0] + fl = 0 + for w in dep[1:]: + if w in vars[v]['depend']: + fl = 1 + break + if fl: + dep = dep[1:]+[v] + i = i + 1 + if i>n: + errmess('sortvarnames: failed to compute dependencies because' + ' of cyclic dependencies between ' + +', '.join(dep)+'\n') + indep = indep + dep + break + else: + indep.append(v) + dep = dep[1:] + n = len(dep) + i = 0 + #print indep + return indep + +def analyzecommon(block): + if not hascommon(block): return block + commonvars=[] + for k in list(block['common'].keys()): + comvars=[] + for e in block['common'][k]: + m=re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I) + if m: + dims=[] + if m.group('dims'): + dims=[x.strip() for x in markoutercomma(m.group('dims')).split('@,@')] + n=m.group('name').strip() + if n in block['vars']: + if 'attrspec' in block['vars'][n]: + block['vars'][n]['attrspec'].append('dimension(%s)'%(','.join(dims))) + else: + block['vars'][n]['attrspec']=['dimension(%s)'%(','.join(dims))] + else: + if dims: + block['vars'][n]={'attrspec':['dimension(%s)'%(','.join(dims))]} + else: block['vars'][n]={} + if n not in commonvars: commonvars.append(n) + else: + n=e + errmess('analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n'%(e, k)) + comvars.append(n) + block['common'][k]=comvars + if 'commonvars' not in block: + block['commonvars']=commonvars + else: + block['commonvars']=block['commonvars']+commonvars + return block + +def analyzebody(block,args,tab=''): + global usermodules, skipfuncs, onlyfuncs, f90modulevars + setmesstext(block) + body=[] + for b in block['body']: + b['parent_block'] = block + if b['block'] in ['function', 'subroutine']: + if args is not None and b['name'] not in args: + continue + else: + as_=b['args'] + if b['name'] in skipfuncs: + continue + if onlyfuncs and b['name'] not in onlyfuncs: + continue + b['saved_interface'] = crack2fortrangen(b, '\n'+' '*6, as_interface=True) + + else: as_=args + b=postcrack(b, as_, tab=tab+'\t') + if b['block']=='interface' and not b['body']: + if 'f2pyenhancements' not in b: + continue + if b['block'].replace(' ', '')=='pythonmodule': + usermodules.append(b) + else: + if b['block']=='module': + f90modulevars[b['name']] = b['vars'] + body.append(b) + return body + +def buildimplicitrules(block): + 
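# Illustrative note (comment only, not in the upstream source): implicit
+ # typing keys on a name's first letter; after e.g. IMPLICIT REAL*8 (A-H,O-Z)
+ # the 'implicit' case of analyzeline maps 'a'..'h' and 'o'..'z' to that
+ # declaration, so implicitrules['a']['typespec'] == 'real' below. +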
setmesstext(block) + implicitrules=defaultimplicitrules + attrrules={} + if 'implicit' in block: + if block['implicit'] is None: + implicitrules=None + if verbose>1: + outmess('buildimplicitrules: no implicit rules for routine %s.\n'%repr(block['name'])) + else: + for k in list(block['implicit'].keys()): + if block['implicit'][k].get('typespec') not in ['static', 'automatic']: + implicitrules[k]=block['implicit'][k] + else: + attrrules[k]=block['implicit'][k]['typespec'] + return implicitrules, attrrules + +def myeval(e,g=None,l=None): + r = eval(e, g, l) + if type(r) in [type(0), type(0.0)]: + return r + raise ValueError('r=%r' % (r)) + +getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) +def getlincoef(e, xset): # e = a*x+b ; x in xset + try: + c = int(myeval(e, {}, {})) + return 0, c, None + except: pass + if getlincoef_re_1.match(e): + return 1, 0, e + len_e = len(e) + for x in xset: + if len(x)>len_e: continue + if re.search(r'\w\s*\([^)]*\b'+x+r'\b', e): + # skip function calls having x as an argument, e.g max(1, x) + continue + re_1 = re.compile(r'(?P<before>.*?)\b'+x+r'\b(?P<after>.*)', re.I) + m = re_1.match(e) + if m: + try: + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s'%(m1.group('before'), 0, m1.group('after')) + m1 = re_1.match(ee) + b = myeval(ee, {}, {}) + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s'%(m1.group('before'), 1, m1.group('after')) + m1 = re_1.match(ee) + a = myeval(ee, {}, {}) - b + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s'%(m1.group('before'), 0.5, m1.group('after')) + m1 = re_1.match(ee) + c = myeval(ee, {}, {}) + # computing another point to be sure that expression is linear + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s'%(m1.group('before'), 1.5, m1.group('after')) + m1 = re_1.match(ee) + c2 = myeval(ee, {}, {}) + if (a*0.5+b==c and a*1.5+b==c2): + return a, b, x + except: pass + break + return None, None, None + +_varname_match = re.compile(r'\A[a-z]\w*\Z').match +def getarrlen(dl,args,star='*'): + edl = [] + try: edl.append(myeval(dl[0], {}, {})) + except: edl.append(dl[0]) + try: edl.append(myeval(dl[1], {}, {})) + except: edl.append(dl[1]) + if isinstance(edl[0], int): + p1 = 1-edl[0] + if p1==0: d = str(dl[1]) + elif p1<0: d = '%s-%s'%(dl[1], -p1) + else: d = '%s+%s'%(dl[1], p1) + elif isinstance(edl[1], int): + p1 = 1+edl[1] + if p1==0: d='-(%s)' % (dl[0]) + else: d='%s-(%s)' % (p1, dl[0]) + else: d = '%s-(%s)+1'%(dl[1], dl[0]) + try: return repr(myeval(d, {}, {})), None, None + except: pass + d1, d2=getlincoef(dl[0], args), getlincoef(dl[1], args) + if None not in [d1[0], d2[0]]: + if (d1[0], d2[0])==(0, 0): + return repr(d2[1]-d1[1]+1), None, None + b = d2[1] - d1[1] + 1 + d1 = (d1[0], 0, d1[2]) + d2 = (d2[0], b, d2[2]) + if d1[0]==0 and d2[2] in args: + if b<0: return '%s * %s - %s'%(d2[0], d2[2], -b), d2[2], '+%s)/(%s)'%(-b, d2[0]) + elif b: return '%s * %s + %s'%(d2[0], d2[2], b), d2[2], '-%s)/(%s)'%(b, d2[0]) + else: return '%s * %s'%(d2[0], d2[2]), d2[2], ')/(%s)'%(d2[0]) + if d2[0]==0 and d1[2] in args: + + if b<0: return '%s * %s - %s'%(-d1[0], d1[2], -b), d1[2], '+%s)/(%s)'%(-b, -d1[0]) + elif b: return '%s * %s + %s'%(-d1[0], d1[2], b), d1[2], '-%s)/(%s)'%(b, -d1[0]) + else: return '%s * %s'%(-d1[0], d1[2]), d1[2], ')/(%s)'%(-d1[0]) + if d1[2]==d2[2] and d1[2] in args: + a = d2[0] - d1[0] + if not a: return repr(b), None, None + if b<0: return '%s * %s - %s'%(a, d1[2], -b), d2[2], '+%s)/(%s)'%(-b, a) + elif b: return '%s * %s + %s'%(a, d1[2], b), d2[2], '-%s)/(%s)'%(b, a) + else: return '%s * %s'%(a, d1[2]), d2[2], ')/(%s)'%(a) + if 
d1[0]==d2[0]==1: + c = str(d1[2]) + if c not in args: + if _varname_match(c): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) + c = '(%s)'%c + if b==0: d='%s-%s' % (d2[2], c) + elif b<0: d='%s-%s-%s' % (d2[2], c, -b) + else: d='%s-%s+%s' % (d2[2], c, b) + elif d1[0]==0: + c2 = str(d2[2]) + if c2 not in args: + if _varname_match(c2): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) + c2 = '(%s)'%c2 + if d2[0]==1: pass + elif d2[0]==-1: c2='-%s' %c2 + else: c2='%s*%s'%(d2[0], c2) + + if b==0: d=c2 + elif b<0: d='%s-%s' % (c2, -b) + else: d='%s+%s' % (c2, b) + elif d2[0]==0: + c1 = str(d1[2]) + if c1 not in args: + if _varname_match(c1): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) + c1 = '(%s)'%c1 + if d1[0]==1: c1='-%s'%c1 + elif d1[0]==-1: c1='+%s'%c1 + elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1) + else: c1 = '-%s*%s' % (d1[0], c1) + + if b==0: d=c1 + elif b<0: d='%s-%s' % (c1, -b) + else: d='%s+%s' % (c1, b) + else: + c1 = str(d1[2]) + if c1 not in args: + if _varname_match(c1): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) + c1 = '(%s)'%c1 + if d1[0]==1: c1='-%s'%c1 + elif d1[0]==-1: c1='+%s'%c1 + elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1) + else: c1 = '-%s*%s' % (d1[0], c1) + + c2 = str(d2[2]) + if c2 not in args: + if _varname_match(c2): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) + c2 = '(%s)'%c2 + if d2[0]==1: pass + elif d2[0]==-1: c2='-%s' %c2 + else: c2='%s*%s'%(d2[0], c2) + + if b==0: d='%s%s' % (c2, c1) + elif b<0: d='%s%s-%s' % (c2, c1, -b) + else: d='%s%s+%s' % (c2, c1, b) + return d, None, None + +word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) + +def _get_depend_dict(name, vars, deps): + if name in vars: + words = vars[name].get('depend', []) + + if '=' in vars[name] and not isstring(vars[name]): + for word in word_pattern.findall(vars[name]['=']): + if word not in words and word in vars: + words.append(word) + for word in words[:]: + for w in deps.get(word, []) \ + or _get_depend_dict(word, vars, deps): + if w not in words: + words.append(w) + else: + outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) + words = [] + deps[name] = words + return words + +def _calc_depend_dict(vars): + names = list(vars.keys()) + depend_dict = {} + for n in names: + _get_depend_dict(n, vars, depend_dict) + return depend_dict + +def get_sorted_names(vars): + """ + """ + depend_dict = _calc_depend_dict(vars) + names = [] + for name in list(depend_dict.keys()): + if not depend_dict[name]: + names.append(name) + del depend_dict[name] + while depend_dict: + for name, lst in list(depend_dict.items()): + new_lst = [n for n in lst if n in depend_dict] + if not new_lst: + names.append(name) + del depend_dict[name] + else: + depend_dict[name] = new_lst + return [name for name in names if name in vars] + +def _kind_func(string): + #XXX: return something sensible. 
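+ # Illustrative behaviour (comment only, not in the upstream source):
+ #     _kind_func('1.0d0') -> 8 (d-exponent literal, via real16pattern)
+ #     _kind_func('1.0e0') -> 4 (via real8pattern)
+ #     _kind_func('x')     -> 'kind(x)'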
+ if string[0] in "'\"": + string = string[1:-1] + if real16pattern.match(string): + return 8 + elif real8pattern.match(string): + return 4 + return 'kind('+string+')' + +def _selected_int_kind_func(r): + #XXX: This should be processor dependent + m = 10**r + if m<=2**8: return 1 + if m<=2**16: return 2 + if m<=2**32: return 4 + if m<=2**63: return 8 + if m<=2**128: return 16 + return -1 + +def _selected_real_kind_func(p, r=0, radix=0): + #XXX: This should be processor dependent + # This is only good for 0 <= p <= 20 + if p < 7: return 4 + if p < 16: return 8 + if platform.machine().lower().startswith('power'): + if p <= 20: + return 16 + else: + if p < 19: + return 10 + elif p <= 20: + return 16 + return -1 + +def get_parameters(vars, global_params={}): + params = copy.copy(global_params) + g_params = copy.copy(global_params) + for name, func in [('kind', _kind_func), + ('selected_int_kind', _selected_int_kind_func), + ('selected_real_kind', _selected_real_kind_func), + ]: + if name not in g_params: + g_params[name] = func + param_names = [] + for n in get_sorted_names(vars): + if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: + param_names.append(n) + kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I) + selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) + selected_kind_re = re.compile(r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) + for n in param_names: + if '=' in vars[n]: + v = vars[n]['='] + if islogical(vars[n]): + v = v.lower() + for repl in [ + ('.false.', 'False'), + ('.true.', 'True'), + #TODO: test .eq., .neq., etc replacements. + ]: + v = v.replace(*repl) + v = kind_re.sub(r'kind("\1")', v) + v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) + if isinteger(vars[n]) and not selected_kind_re.match(v): + v = v.split('_')[0] + if isdouble(vars[n]): + tt = list(v) + for m in real16pattern.finditer(v): + tt[m.start():m.end()] = list(\ + v[m.start():m.end()].lower().replace('d', 'e')) + v = ''.join(tt) + if iscomplex(vars[n]): + if v[0]=='(' and v[-1]==')': + l = markoutercomma(v[1:-1]).split('@,@') + try: + params[n] = eval(v, g_params, params) + except Exception as msg: + params[n] = v + #print params + outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v))) + if isstring(vars[n]) and isinstance(params[n], int): + params[n] = chr(params[n]) + nl = n.lower() + if nl!=n: + params[nl] = params[n] + else: + print(vars[n]) + outmess('get_parameters:parameter %s does not have value?!\n'%(repr(n))) + return params + +def _eval_length(length, params): + if length in ['(:)', '(*)', '*']: + return '(*)' + return _eval_scalar(length, params) + +_is_kind_number = re.compile(r'\d+_').match + +def _eval_scalar(value, params): + if _is_kind_number(value): + value = value.split('_')[0] + try: + value = str(eval(value, {}, params)) + except (NameError, SyntaxError): + return value + except Exception as msg: + errmess('"%s" in evaluating %r '\ + '(available names: %s)\n' \ + % (msg, value, list(params.keys()))) + return value + +def analyzevars(block): + global f90modulevars + setmesstext(block) + implicitrules, attrrules=buildimplicitrules(block) + vars=copy.copy(block['vars']) + if block['block']=='function' and block['name'] not in vars: + vars[block['name']]={} + if '' in block['vars']: + del vars[''] + if 'attrspec' in block['vars']['']: + gen=block['vars']['']['attrspec'] + for n in list(vars.keys()): + for k in ['public', 'private']: + if k in gen: + vars[n]=setattrspec(vars[n], k) + svars=[] + args = block['args'] + 
for a in args: + try: + vars[a] + svars.append(a) + except KeyError: + pass + for n in list(vars.keys()): + if n not in args: svars.append(n) + + params = get_parameters(vars, get_useparameters(block)) + + dep_matches = {} + name_match = re.compile(r'\w[\w\d_$]*').match + for v in list(vars.keys()): + m = name_match(v) + if m: + n = v[m.start():m.end()] + try: + dep_matches[n] + except KeyError: + dep_matches[n] = re.compile(r'.*\b%s\b'%(v), re.I).match + for n in svars: + if n[0] in list(attrrules.keys()): + vars[n]=setattrspec(vars[n], attrrules[n[0]]) + if 'typespec' not in vars[n]: + if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if implicitrules: + ln0 = n[0].lower() + for k in list(implicitrules[ln0].keys()): + if k=='typespec' and implicitrules[ln0][k]=='undefined': + continue + if k not in vars[n]: + vars[n][k]=implicitrules[ln0][k] + elif k=='attrspec': + for l in implicitrules[ln0][k]: + vars[n]=setattrspec(vars[n], l) + elif n in block['args']: + outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(repr(n), block['name'])) + + if 'charselector' in vars[n]: + if 'len' in vars[n]['charselector']: + l = vars[n]['charselector']['len'] + try: + l = str(eval(l, {}, params)) + except: + pass + vars[n]['charselector']['len'] = l + + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + l = vars[n]['kindselector']['kind'] + try: + l = str(eval(l, {}, params)) + except: + pass + vars[n]['kindselector']['kind'] = l + + savelindims = {} + if 'attrspec' in vars[n]: + attr=vars[n]['attrspec'] + attr.reverse() + vars[n]['attrspec']=[] + dim, intent, depend, check, note=None, None, None, None, None + for a in attr: + if a[:9]=='dimension': dim=(a[9:].strip())[1:-1] + elif a[:6]=='intent': intent=(a[6:].strip())[1:-1] + elif a[:6]=='depend': depend=(a[6:].strip())[1:-1] + elif a[:5]=='check': check=(a[5:].strip())[1:-1] + elif a[:4]=='note': note=(a[4:].strip())[1:-1] + else: vars[n]=setattrspec(vars[n], a) + if intent: + if 'intent' not in vars[n]: + vars[n]['intent']=[] + for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: + if not c in vars[n]['intent']: + vars[n]['intent'].append(c) + intent=None + if note: + note=note.replace('\\n\\n', '\n\n') + note=note.replace('\\n ', '\n') + if 'note' not in vars[n]: + vars[n]['note']=[note] + else: + vars[n]['note'].append(note) + note=None + if depend is not None: + if 'depend' not in vars[n]: + vars[n]['depend']=[] + for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): + if c not in vars[n]['depend']: + vars[n]['depend'].append(c) + depend=None + if check is not None: + if 'check' not in vars[n]: + vars[n]['check']=[] + for c in [x.strip() for x in markoutercomma(check).split('@,@')]: + if not c in vars[n]['check']: + vars[n]['check'].append(c) + check=None + if dim and 'dimension' not in vars[n]: + vars[n]['dimension']=[] + for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): + star = '*' + if d==':': + star=':' + if d in params: + d = str(params[d]) + for p in list(params.keys()): + m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)', d, re.I) + if m: + #outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`)) + d = m.group('before')+str(params[p])+m.group('after') + if d==star: + dl = [star] + else: + dl=markoutercomma(d, ':').split('@:@') + if len(dl)==2 and '*' in dl: # e.g. 
dimension(5:*) + dl = ['*'] + d = '*' + if len(dl)==1 and not dl[0]==star: dl = ['1', dl[0]] + if len(dl)==2: + d, v, di = getarrlen(dl, list(block['vars'].keys())) + if d[:4] == '1 * ': d = d[4:] + if di and di[-4:] == '/(1)': di = di[:-4] + if v: savelindims[d] = v, di + vars[n]['dimension'].append(d) + if 'dimension' in vars[n]: + if isintent_c(vars[n]): + shape_macro = 'shape' + else: + shape_macro = 'shape'#'fshape' + if isstringarray(vars[n]): + if 'charselector' in vars[n]: + d = vars[n]['charselector'] + if '*' in d: + d = d['*'] + errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\ + %(d, n, + ','.join(vars[n]['dimension']), + n, ','.join(vars[n]['dimension']+[d]))) + vars[n]['dimension'].append(d) + del vars[n]['charselector'] + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + if 'c' not in vars[n]['intent']: + vars[n]['intent'].append('c') + else: + errmess("analyzevars: charselector=%r unhandled." % (d)) + if 'check' not in vars[n] and 'args' in block and n in block['args']: + flag = 'depend' not in vars[n] + if flag: + vars[n]['depend']=[] + vars[n]['check']=[] + if 'dimension' in vars[n]: + #/----< no check + #vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension']))) + i=-1; ni=len(vars[n]['dimension']) + for d in vars[n]['dimension']: + ddeps=[] # dependecies of 'd' + ad='' + pd='' + #origd = d + if d not in vars: + if d in savelindims: + pd, ad='(', savelindims[d][1] + d = savelindims[d][0] + else: + for r in block['args']: + #for r in block['vars'].iterkeys(): + if r not in vars: + continue + if re.match(r'.*?\b'+r+r'\b', d, re.I): + ddeps.append(r) + if d in vars: + if 'attrspec' in vars[d]: + for aa in vars[d]['attrspec']: + if aa[:6]=='depend': + ddeps += aa[6:].strip()[1:-1].split(',') + if 'depend' in vars[d]: + ddeps=ddeps+vars[d]['depend'] + i=i+1 + if d in vars and ('depend' not in vars[d]) \ + and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ + and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): + vars[d]['depend']=[n] + if ni>1: + vars[d]['=']='%s%s(%s,%s)%s'% (pd, shape_macro, n, i, ad) + else: + vars[d]['=']='%slen(%s)%s'% (pd, n, ad) + # /---< no check + if 1 and 'check' not in vars[d]: + if ni>1: + vars[d]['check']=['%s%s(%s,%i)%s==%s'\ + %(pd, shape_macro, n, i, ad, d)] + else: + vars[d]['check']=['%slen(%s)%s>=%s'%(pd, n, ad, d)] + if 'attrspec' not in vars[d]: + vars[d]['attrspec']=['optional'] + if ('optional' not in vars[d]['attrspec']) and\ + ('required' not in vars[d]['attrspec']): + vars[d]['attrspec'].append('optional') + elif d not in ['*', ':']: + #/----< no check + #if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d)) + #else: vars[n]['check'].append('len(%s)>=%s'%(n,d)) + if flag: + if d in vars: + if n not in ddeps: + vars[n]['depend'].append(d) + else: + vars[n]['depend'] = vars[n]['depend'] + ddeps + elif isstring(vars[n]): + length='1' + if 'charselector' in vars[n]: + if '*' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['*'], + params) + vars[n]['charselector']['*']=length + elif 'len' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['len'], + params) + del vars[n]['charselector']['len'] + vars[n]['charselector']['*']=length + + if not vars[n]['check']: + del vars[n]['check'] + if flag and not vars[n]['depend']: + del vars[n]['depend'] + if '=' in vars[n]: + if 'attrspec' not in vars[n]: + vars[n]['attrspec']=[] + if ('optional' not in vars[n]['attrspec']) and \ + 
('required' not in vars[n]['attrspec']): + vars[n]['attrspec'].append('optional') + if 'depend' not in vars[n]: + vars[n]['depend']=[] + for v, m in list(dep_matches.items()): + if m(vars[n]['=']): vars[n]['depend'].append(v) + if not vars[n]['depend']: del vars[n]['depend'] + if isscalar(vars[n]): + vars[n]['='] = _eval_scalar(vars[n]['='], params) + + for n in list(vars.keys()): + if n==block['name']: # n is block name + if 'note' in vars[n]: + block['note']=vars[n]['note'] + if block['block']=='function': + if 'result' in block and block['result'] in vars: + vars[n]=appenddecl(vars[n], vars[block['result']]) + if 'prefix' in block: + pr=block['prefix']; ispure=0; isrec=1 + pr1=pr.replace('pure', '') + ispure=(not pr==pr1) + pr=pr1.replace('recursive', '') + isrec=(not pr==pr1) + m=typespattern[0].match(pr) + if m: + typespec, selector, attr, edecl=cracktypespec0(m.group('this'), m.group('after')) + kindselect, charselect, typename=cracktypespec(typespec, selector) + vars[n]['typespec']=typespec + if kindselect: + if 'kind' in kindselect: + try: + kindselect['kind'] = eval(kindselect['kind'], {}, params) + except: + pass + vars[n]['kindselector']=kindselect + if charselect: vars[n]['charselector']=charselect + if typename: vars[n]['typename']=typename + if ispure: vars[n]=setattrspec(vars[n], 'pure') + if isrec: vars[n]=setattrspec(vars[n], 'recursive') + else: + outmess('analyzevars: prefix (%s) were not used\n'%repr(block['prefix'])) + if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: + if 'commonvars' in block: + neededvars=copy.copy(block['args']+block['commonvars']) + else: + neededvars=copy.copy(block['args']) + for n in list(vars.keys()): + if l_or(isintent_callback, isintent_aux)(vars[n]): + neededvars.append(n) + if 'entry' in block: + neededvars.extend(list(block['entry'].keys())) + for k in list(block['entry'].keys()): + for n in block['entry'][k]: + if n not in neededvars: + neededvars.append(n) + if block['block']=='function': + if 'result' in block: + neededvars.append(block['result']) + else: + neededvars.append(block['name']) + if block['block'] in ['subroutine', 'function']: + name = block['name'] + if name in vars and 'intent' in vars[name]: + block['intent'] = vars[name]['intent'] + if block['block'] == 'type': + neededvars.extend(list(vars.keys())) + for n in list(vars.keys()): + if n not in neededvars: + del vars[n] + return vars + +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) +def expr2name(a, block, args=[]): + orig_a = a + a_is_expr = not analyzeargs_re_1.match(a) + if a_is_expr: # `a` is an expression + implicitrules, attrrules=buildimplicitrules(block) + at=determineexprtype(a, block['vars'], implicitrules) + na='e_' + for c in a: + c = c.lower() + if c not in string.ascii_lowercase+string.digits: c='_' + na=na+c + if na[-1]=='_': na=na+'e' + else: na=na+'_e' + a=na + while a in block['vars'] or a in block['args']: + a=a+'r' + if a in args: + k = 1 + while a + str(k) in args: + k = k + 1 + a = a + str(k) + if a_is_expr: + block['vars'][a]=at + else: + if a not in block['vars']: + if orig_a in block['vars']: + block['vars'][a] = block['vars'][orig_a] + else: + block['vars'][a]={} + if 'externals' in block and orig_a in block['externals']+block['interfaced']: + block['vars'][a]=setattrspec(block['vars'][a], 'external') + return a + +def analyzeargs(block): + setmesstext(block) + implicitrules, attrrules=buildimplicitrules(block) + if 'args' not in block: + block['args']=[] + args=[] + for a in block['args']: + a = 
expr2name(a, block, args) + args.append(a) + block['args']=args + if 'entry' in block: + for k, args1 in list(block['entry'].items()): + for a in args1: + if a not in block['vars']: + block['vars'][a]={} + + for b in block['body']: + if b['name'] in args: + if 'externals' not in block: + block['externals']=[] + if b['name'] not in block['externals']: + block['externals'].append(b['name']) + if 'result' in block and block['result'] not in block['vars']: + block['vars'][block['result']]={} + return block + +determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I) +determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(?P<name>[\w]+)|)\Z', re.I) +determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) +determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I) +def _ensure_exprdict(r): + if isinstance(r, int): + return {'typespec':'integer'} + if isinstance(r, float): + return {'typespec':'real'} + if isinstance(r, complex): + return {'typespec':'complex'} + if isinstance(r, dict): + return r + raise AssertionError(repr(r)) + +def determineexprtype(expr,vars,rules={}): + if expr in vars: + return _ensure_exprdict(vars[expr]) + expr=expr.strip() + if determineexprtype_re_1.match(expr): + return {'typespec':'complex'} + m=determineexprtype_re_2.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr)) + return {'typespec':'integer'} + m = determineexprtype_re_3.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr)) + return {'typespec':'real'} + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@'+op+'@')]: + if e in vars: + return _ensure_exprdict(vars[e]) + t={} + if determineexprtype_re_4.match(expr): # in parenthesis + t=determineexprtype(expr[1:-1], vars, rules) + else: + m = determineexprtype_re_5.match(expr) + if m: + rn=m.group('name') + t=determineexprtype(m.group('name'), vars, rules) + if t and 'attrspec' in t: + del t['attrspec'] + if not t: + if rn[0] in rules: + return _ensure_exprdict(rules[rn[0]]) + if expr[0] in '\'"': + return {'typespec':'character','charselector':{'*':'*'}} + if not t: + outmess('determineexprtype: could not determine expressions (%s) type.\n'%(repr(expr))) + return t + +###### +def crack2fortrangen(block,tab='\n', as_interface=False): + global skipfuncs, onlyfuncs + setmesstext(block) + ret='' + if isinstance(block, list): + for g in block: + if g and g['block'] in ['function', 'subroutine']: + if g['name'] in skipfuncs: + continue + if onlyfuncs and g['name'] not in onlyfuncs: + continue + ret=ret+crack2fortrangen(g, tab, as_interface=as_interface) + return ret + prefix='' + name='' + args='' + blocktype=block['block'] + if blocktype=='program': return '' + argsl = [] + if 'name' in block: + name=block['name'] + if 'args' in block: + vars = block['vars'] + for a in block['args']: + a = expr2name(a, block, argsl) + if not isintent_callback(vars[a]): + argsl.append(a) + if block['block']=='function' or argsl: + args='(%s)'%','.join(argsl) + f2pyenhancements = '' + if 'f2pyenhancements' in block: + for k in list(block['f2pyenhancements'].keys()): + f2pyenhancements = '%s%s%s %s'%(f2pyenhancements, tab+tabchar, k, block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] + if blocktype=='function' and 'callback' 
in intent_lst: + intent_lst.remove('callback') + if intent_lst: + f2pyenhancements = '%s%sintent(%s) %s'%\ + (f2pyenhancements, tab+tabchar, + ','.join(intent_lst), name) + use='' + if 'use' in block: + use=use2fortran(block['use'], tab+tabchar) + common='' + if 'common' in block: + common=common2fortran(block['common'], tab+tabchar) + if name=='unknown_interface': name='' + result='' + if 'result' in block: + result=' result (%s)'%block['result'] + if block['result'] not in argsl: + argsl.append(block['result']) + #if 'prefix' in block: + # prefix=block['prefix']+' ' + body=crack2fortrangen(block['body'], tab+tabchar) + vars=vars2fortran(block, block['vars'], argsl, tab+tabchar, as_interface=as_interface) + mess='' + if 'from' in block and not as_interface: + mess='! in %s'%block['from'] + if 'entry' in block: + entry_stmts = '' + for k, i in list(block['entry'].items()): + entry_stmts = '%s%sentry %s(%s)' \ + % (entry_stmts, tab+tabchar, k, ','.join(i)) + body = body + entry_stmts + if blocktype=='block data' and name=='_BLOCK_DATA_': + name = '' + ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + return ret + +def common2fortran(common,tab=''): + ret='' + for k in list(common.keys()): + if k=='_BLNK_': + ret='%s%scommon %s'%(ret, tab, ','.join(common[k])) + else: + ret='%s%scommon /%s/ %s'%(ret, tab, k, ','.join(common[k])) + return ret + +def use2fortran(use,tab=''): + ret='' + for m in list(use.keys()): + ret='%s%suse %s,'%(ret, tab, m) + if use[m]=={}: + if ret and ret[-1]==',': ret=ret[:-1] + continue + if 'only' in use[m] and use[m]['only']: + ret='%s only:'%(ret) + if 'map' in use[m] and use[m]['map']: + c=' ' + for k in list(use[m]['map'].keys()): + if k==use[m]['map'][k]: + ret='%s%s%s'%(ret, c, k); c=',' + else: + ret='%s%s%s=>%s'%(ret, c, k, use[m]['map'][k]); c=',' + if ret and ret[-1]==',': ret=ret[:-1] + return ret + +def true_intent_list(var): + lst = var['intent'] + ret = [] + for intent in lst: + try: + c = eval('isintent_%s(var)' % intent) + except NameError: + c = 0 + if c: + ret.append(intent) + return ret + +def vars2fortran(block,vars,args,tab='', as_interface=False): + """ + TODO: + public sub + ... 
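+ Renders the entries of vars as Fortran type declarations. A sketch of + the mapping (the variable name 'a' and the dicts are illustrative): + {'typespec': 'integer'} -> 'integer :: a' + {'typespec': 'real', 'dimension': ['3'], 'intent': ['in']} + -> 'real dimension(3),intent(in) :: a'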
+ """ + setmesstext(block) + ret='' + nout=[] + for a in args: + if a in block['vars']: + nout.append(a) + if 'commonvars' in block: + for a in block['commonvars']: + if a in vars: + if a not in nout: + nout.append(a) + else: + errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a) + if 'varnames' in block: + nout.extend(block['varnames']) + if not as_interface: + for a in list(vars.keys()): + if a not in nout: + nout.append(a) + for a in nout: + if 'depend' in vars[a]: + for d in vars[a]['depend']: + if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: + errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a, d)) + if 'externals' in block and a in block['externals']: + if isintent_callback(vars[a]): + ret='%s%sintent(callback) %s'%(ret, tab, a) + ret='%s%sexternal %s'%(ret, tab, a) + if isoptional(vars[a]): + ret='%s%soptional %s'%(ret, tab, a) + if a in vars and 'typespec' not in vars[a]: + continue + cont=1 + for b in block['body']: + if a==b['name'] and b['block']=='function': + cont=0;break + if cont: + continue + if a not in vars: + show(vars) + outmess('vars2fortran: No definition for argument "%s".\n'%a) + continue + if a==block['name'] and not block['block']=='function': + continue + if 'typespec' not in vars[a]: + if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: + if a in args: + ret='%s%sexternal %s'%(ret, tab, a) + continue + show(vars[a]) + outmess('vars2fortran: No typespec for argument "%s".\n'%a) + continue + vardef=vars[a]['typespec'] + if vardef=='type' and 'typename' in vars[a]: + vardef='%s(%s)'%(vardef, vars[a]['typename']) + selector={} + if 'kindselector' in vars[a]: + selector=vars[a]['kindselector'] + elif 'charselector' in vars[a]: + selector=vars[a]['charselector'] + if '*' in selector: + if selector['*'] in ['*', ':']: + vardef='%s*(%s)'%(vardef, selector['*']) + else: + vardef='%s*%s'%(vardef, selector['*']) + else: + if 'len' in selector: + vardef='%s(len=%s'%(vardef, selector['len']) + if 'kind' in selector: + vardef='%s,kind=%s)'%(vardef, selector['kind']) + else: + vardef='%s)'%(vardef) + elif 'kind' in selector: + vardef='%s(kind=%s)'%(vardef, selector['kind']) + c=' ' + if 'attrspec' in vars[a]: + attr=[] + for l in vars[a]['attrspec']: + if l not in ['external']: + attr.append(l) + if attr: + vardef='%s, %s'%(vardef, ','.join(attr)) + c=',' + if 'dimension' in vars[a]: +# if not isintent_c(vars[a]): +# vars[a]['dimension'].reverse() + vardef='%s%sdimension(%s)'%(vardef, c, ','.join(vars[a]['dimension'])) + c=',' + if 'intent' in vars[a]: + lst = true_intent_list(vars[a]) + if lst: + vardef='%s%sintent(%s)'%(vardef, c, ','.join(lst)) + c=',' + if 'check' in vars[a]: + vardef='%s%scheck(%s)'%(vardef, c, ','.join(vars[a]['check'])) + c=',' + if 'depend' in vars[a]: + vardef='%s%sdepend(%s)'%(vardef, c, ','.join(vars[a]['depend'])) + c=',' + if '=' in vars[a]: + v = vars[a]['='] + if vars[a]['typespec'] in ['complex', 'double complex']: + try: + v = eval(v) + v = '(%s,%s)' % (v.real, v.imag) + except: + pass + vardef='%s :: %s=%s'%(vardef, a, v) + else: + vardef='%s :: %s'%(vardef, a) + ret='%s%s%s'%(ret, tab, vardef) + return ret +###### + +def crackfortran(files): + global usermodules + outmess('Reading fortran codes...\n', 0) + readfortrancode(files, crackline) + outmess('Post-processing...\n', 0) + usermodules=[] + postlist=postcrack(grouplist[0]) + outmess('Post-processing (stage 2)...\n', 0) + postlist=postcrack2(postlist) + return usermodules+postlist + +def 
crack2fortran(block): + global f2py_version + pyf=crack2fortrangen(block)+'\n' + header="""! -*- f90 -*- +! Note: the context of this file is case sensitive. +""" + footer=""" +! This file was auto-generated with f2py (version:%s). +! See http://cens.ioc.ee/projects/f2py2e/ +"""%(f2py_version) + return header+pyf+footer + +if __name__ == "__main__": + files=[] + funcs=[] + f=1;f2=0;f3=0 + showblocklist=0 + for l in sys.argv[1:]: + if l=='': pass + elif l[0]==':': + f=0 + elif l=='-quiet': + quiet=1 + verbose=0 + elif l=='-verbose': + verbose=2 + quiet=0 + elif l=='-fix': + if strictf77: + outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) + skipemptyends=1 + sourcecodeform='fix' + elif l=='-skipemptyends': + skipemptyends=1 + elif l=='--ignore-contains': + ignorecontains=1 + elif l=='-f77': + strictf77=1 + sourcecodeform='fix' + elif l=='-f90': + strictf77=0 + sourcecodeform='free' + skipemptyends=1 + elif l=='-h': + f2=1 + elif l=='-show': + showblocklist=1 + elif l=='-m': + f3=1 + elif l[0]=='-': + errmess('Unknown option %s\n'%repr(l)) + elif f2: + f2=0 + pyffilename=l + elif f3: + f3=0 + f77modulename=l + elif f: + try: + open(l).close() + files.append(l) + except IOError as detail: + errmess('IOError: %s\n'%str(detail)) + else: + funcs.append(l) + if not strictf77 and f77modulename and not skipemptyends: + outmess("""\ + Warning: You have specified a module name for non Fortran 77 code + that should not need one (except if you are scanning F90 code + for non module blocks but then you should use flag -skipemptyends + and also be sure that the files do not contain programs without program statement). +""", 0) + + postlist=crackfortran(files) + if pyffilename: + outmess('Writing fortran code to file %s\n'%repr(pyffilename), 0) + pyf=crack2fortran(postlist) + f=open(pyffilename, 'w') + f.write(pyf) + f.close() + if showblocklist: + show(postlist) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/diagnose.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/diagnose.py new file mode 100644 index 0000000000000..68d7e48d29755 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/diagnose.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +from __future__ import division, absolute_import, print_function + +import os +import sys +import tempfile + +def run_command(cmd): + print('Running %r:' % (cmd)) + s = os.system(cmd) + print('------') +def run(): + _path = os.getcwd() + os.chdir(tempfile.gettempdir()) + print('------') + print('os.name=%r' % (os.name)) + print('------') + print('sys.platform=%r' % (sys.platform)) + print('------') + print('sys.version:') + print(sys.version) + print('------') + print('sys.prefix:') + print(sys.prefix) + print('------') + print('sys.path=%r' % (':'.join(sys.path))) + print('------') + + try: + import numpy + has_newnumpy = 1 + except ImportError: + print('Failed to import new numpy:', sys.exc_info()[1]) + has_newnumpy = 0 + + try: + from numpy.f2py import f2py2e + has_f2py2e = 1 + except ImportError: + print('Failed to import f2py2e:', sys.exc_info()[1]) + has_f2py2e = 0 + + try: + import numpy.distutils + has_numpy_distutils = 2 + except ImportError: + try: + import numpy_distutils + has_numpy_distutils = 1 + except ImportError: + print('Failed to import numpy_distutils:', sys.exc_info()[1]) + has_numpy_distutils = 0 + + if has_newnumpy: + try: + print('Found new numpy version %r in %s' % \ + (numpy.__version__, numpy.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if 
has_f2py2e: + try: + print('Found f2py2e version %r in %s' % \ + (f2py2e.__version__.version, f2py2e.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_numpy_distutils: + try: + if has_numpy_distutils == 2: + print('Found numpy.distutils version %r in %r' % (\ + numpy.distutils.__version__, + numpy.distutils.__file__)) + else: + print('Found numpy_distutils version %r in %r' % (\ + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 1: + print('Importing numpy_distutils.command.build_flib ...', end=' ') + import numpy_distutils.command.build_flib as build_flib + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + for compiler_class in build_flib.all_compilers: + compiler_class(verbose=1).is_available() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print('error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.fcompiler ...', end=' ') + import numpy.distutils.fcompiler as fcompiler + else: + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + fcompiler.show_fcompilers() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.cpuinfo ...', end=' ') + from numpy.distutils.cpuinfo import cpuinfo + print('ok') + print('------') + else: + try: + print('Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing numpy_distutils.cpuinfo ...', end=' ') + from numpy_distutils.cpuinfo import cpuinfo + print('ok') + print('------') + cpu = cpuinfo() + print('CPU information:', end=' ') + for name in dir(cpuinfo): + if name[0]=='_' and name[1]!='_' and getattr(cpu, name[1:])(): + print(name[1:], end=' ') + print('------') + except Exception as msg: + print('error:', msg) + print('------') + os.chdir(_path) +if __name__ == "__main__": + run() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py2e.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py2e.py new file mode 100644 index 0000000000000..25407d42163a1 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py2e.py @@ -0,0 +1,598 @@ +#!/usr/bin/env python +""" + +f2py2e - Fortran to Python C/API generator. 2nd Edition. + See __usage__ below. + +Copyright 1999--2011 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 08:31:19 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +import sys +import os +import pprint +import re + +from . import crackfortran +from . import rules +from . import cb_rules +from . import auxfuncs +from . import cfuncs +from . 
import f90mod_rules +from . import __version__ + +f2py_version = __version__.version +errmess = sys.stderr.write +#outmess=sys.stdout.write +show = pprint.pprint +outmess = auxfuncs.outmess + +try: + from numpy import __version__ as numpy_version +except ImportError: + numpy_version = 'N/A' + +__usage__ = """\ +Usage: + +1) To construct extension module sources: + + f2py [<options>] <fortran files> [[[only:]||[skip:]] \\ + <fortran functions> ] \\ + [: <fortran files> ...] + +2) To compile fortran files and build extension modules: + + f2py -c [<options>, <build_flib options>, <extra options>] <fortran files> + +3) To generate signature files: + + f2py -h <filename.pyf> ...< same options as in (1) > + +Description: This program generates a Python C/API file (<modulename>module.c) + that contains wrappers for given fortran functions so that they + can be called from Python. With the -c option the corresponding + extension modules are built. + +Options: + + --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] + --2d-numeric Use f2py2e tool with Numeric support. + --2d-numarray Use f2py2e tool with Numarray support. + --g3-numpy Use 3rd generation f2py from the separate f2py package. + [NOT AVAILABLE YET] + + -h <filename> Write signatures of the fortran routines to file <filename> + and exit. You can then edit <filename> and use it instead + of <fortran files>. If <filename>==stdout then the + signatures are printed to stdout. + <fortran functions> Names of fortran routines for which Python C/API + functions will be generated. Default is all that are found + in <fortran files>. + <fortran files> Paths to fortran/signature files that will be scanned for + <fortran functions> in order to determine their signatures. + skip: Ignore fortran functions that follow until `:'. + only: Use only fortran functions that follow until `:'. + : Get back to <fortran files> mode. + + -m <modulename> Name of the module; f2py generates a Python/C API + file <modulename>module.c or extension module <modulename>. + Default is 'untitled'. + + --[no-]lower Do [not] lower the cases in <fortran files>. By default, + --lower is assumed with -h key, and --no-lower without -h key. + + --build-dir <dirname> All f2py generated files are created in <dirname>. + Default is tempfile.mkdtemp(). + + --overwrite-signature Overwrite existing signature file. + + --[no-]latex-doc Create (or not) <modulename>module.tex. + Default is --no-latex-doc. + --short-latex Create 'incomplete' LaTeX document (without commands + \\documentclass, \\tableofcontents, and \\begin{document}, + \\end{document}). + + --[no-]rest-doc Create (or not) <modulename>module.rst. + Default is --no-rest-doc. + + --debug-capi Create C/API code that reports the state of the wrappers + during runtime. Useful for debugging. + + --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 + functions. --wrap-functions is default because it ensures + maximum portability/compiler independence. + + --include-paths <path1>:<path2>:... Search include files from the given + directories. + + --help-link [..] List system resources found by system_info.py. See also + --link-<resource> switch below. [..] is optional list + of resources names. E.g. try 'f2py --help-link lapack_opt'. + + --quiet Run quietly. + --verbose Run with extra verbosity. + -v Print f2py version ID and exit. 
 + +numpy.distutils options (only effective with -c): + + --fcompiler= Specify Fortran compiler type by vendor + --compiler= Specify C compiler type (as defined by distutils) + + --help-fcompiler List available Fortran compilers and exit + --f77exec= Specify the path to F77 compiler + --f90exec= Specify the path to F90 compiler + --f77flags= Specify F77 compiler flags + --f90flags= Specify F90 compiler flags + --opt= Specify optimization flags + --arch= Specify architecture specific optimization flags + --noopt Compile without optimization + --noarch Compile without arch-dependent optimization + --debug Compile with debugging information + +Extra options (only effective with -c): + + --link-<resource> Link extension module with <resource> as defined + by numpy.distutils/system_info.py. E.g. to link + with optimized LAPACK libraries (vecLib on MacOSX, + ATLAS elsewhere), use --link-lapack_opt. + See also --help-link switch. + + -L/path/to/lib/ -l<libname> + -D<macro> -U<macro> + -I/path/to/include/ + <filename>.o <filename>.so <filename>.a + + Using the following macros may be required with non-gcc Fortran + compilers: + -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN + -DUNDERSCORE_G77 + + When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY + interface is printed out at exit (platforms: Linux). + + When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is + sent to stderr whenever F2PY interface makes a copy of an + array. Integer <int> sets the threshold for array sizes when + a message should be shown. + +Version: %s +numpy Version: %s +Requires: Python 2.3 or higher. +License: NumPy license (see LICENSE.txt in the NumPy source code) +Copyright 1999 - 2011 Pearu Peterson all rights reserved. +http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, numpy_version) + +def scaninputline(inputline): + files, funcs, skipfuncs, onlyfuncs, debug=[], [], [], [], [] + f, f2, f3, f4, f5, f6, f7, f8, f9=1, 0, 0, 0, 0, 0, 0, 0, 0 + verbose = 1 + dolc=-1 + dolatexdoc = 0 + dorestdoc = 0 + wrapfuncs = 1 + buildpath = '.' 
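+ # The one-shot flags f2..f9 initialised above mark options that take a + # value: seeing '-h' sets f2 so that the next bare token is consumed as + # the signature file name; likewise f3 -> module name (-m), f6 -> build + # directory (--build-dir), f7 -> include paths, and f8/f9 -> the output + # overrides --coutput and --f2py-wrapper-output.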
+ include_paths = [] + signsfile, modulename=None, None + options = {'buildpath':buildpath, + 'coutput': None, + 'f2py_wrapper_output': None} + for l in inputline: + if l=='': pass + elif l=='only:': f=0 + elif l=='skip:': f=-1 + elif l==':': f=1;f4=0 + elif l[:8]=='--debug-': debug.append(l[8:]) + elif l=='--lower': dolc=1 + elif l=='--build-dir': f6=1 + elif l=='--no-lower': dolc=0 + elif l=='--quiet': verbose = 0 + elif l=='--verbose': verbose += 1 + elif l=='--latex-doc': dolatexdoc=1 + elif l=='--no-latex-doc': dolatexdoc=0 + elif l=='--rest-doc': dorestdoc=1 + elif l=='--no-rest-doc': dorestdoc=0 + elif l=='--wrap-functions': wrapfuncs=1 + elif l=='--no-wrap-functions': wrapfuncs=0 + elif l=='--short-latex': options['shortlatex']=1 + elif l=='--coutput': f8=1 + elif l=='--f2py-wrapper-output': f9=1 + elif l=='--overwrite-signature': options['h-overwrite']=1 + elif l=='-h': f2=1 + elif l=='-m': f3=1 + elif l[:2]=='-v': + print(f2py_version) + sys.exit() + elif l=='--show-compilers': + f5=1 + elif l[:8]=='-include': + cfuncs.outneeds['userincludes'].append(l[9:-1]) + cfuncs.userincludes[l[9:-1]]='#include '+l[8:] + elif l[:15] in '--include_paths': + outmess('f2py option --include_paths is deprecated, use --include-paths instead.\n') + f7=1 + elif l[:15] in '--include-paths': + f7=1 + elif l[0]=='-': + errmess('Unknown option %s\n'%repr(l)) + sys.exit() + elif f2: f2=0;signsfile=l + elif f3: f3=0;modulename=l + elif f6: f6=0;buildpath=l + elif f7: f7=0;include_paths.extend(l.split(os.pathsep)) + elif f8: f8=0;options["coutput"]=l + elif f9: f9=0;options["f2py_wrapper_output"]=l + elif f==1: + try: + open(l).close() + files.append(l) + except IOError as detail: + errmess('IOError: %s. Skipping file "%s".\n'%(str(detail), l)) + elif f==-1: skipfuncs.append(l) + elif f==0: onlyfuncs.append(l) + if not f5 and not files and not modulename: + print(__usage__) + sys.exit() + if not os.path.isdir(buildpath): + if not verbose: + outmess('Creating build directory %s'%(buildpath)) + os.mkdir(buildpath) + if signsfile: + signsfile = os.path.join(buildpath, signsfile) + if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: + errmess('Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n'%(signsfile)) + sys.exit() + + options['debug']=debug + options['verbose']=verbose + if dolc==-1 and not signsfile: options['do-lower']=0 + else: options['do-lower']=dolc + if modulename: options['module']=modulename + if signsfile: options['signsfile']=signsfile + if onlyfuncs: options['onlyfuncs']=onlyfuncs + if skipfuncs: options['skipfuncs']=skipfuncs + options['dolatexdoc'] = dolatexdoc + options['dorestdoc'] = dorestdoc + options['wrapfuncs'] = wrapfuncs + options['buildpath']=buildpath + options['include_paths']=include_paths + return files, options + +def callcrackfortran(files, options): + rules.options=options + funcs=[] + crackfortran.debug=options['debug'] + crackfortran.verbose=options['verbose'] + if 'module' in options: + crackfortran.f77modulename=options['module'] + if 'skipfuncs' in options: + crackfortran.skipfuncs=options['skipfuncs'] + if 'onlyfuncs' in options: + crackfortran.onlyfuncs=options['onlyfuncs'] + crackfortran.include_paths[:]=options['include_paths'] + crackfortran.dolowercase=options['do-lower'] + postlist=crackfortran.crackfortran(files) + if 'signsfile' in options: + outmess('Saving signatures to file "%s"\n'%(options['signsfile'])) + pyf=crackfortran.crack2fortran(postlist) + if options['signsfile'][-6:]=='stdout': + sys.stdout.write(pyf) + else: + f=open(options['signsfile'], 'w') + f.write(pyf) + f.close() + if options["coutput"] is None: + for mod in postlist: + mod["coutput"] = "%smodule.c" % mod["name"] + else: + for mod in postlist: + mod["coutput"] = options["coutput"] + if options["f2py_wrapper_output"] is None: + for mod in postlist: + mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] + else: + for mod in postlist: + mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] + return postlist + +def buildmodules(lst): + cfuncs.buildcfuncs() + outmess('Building modules...\n') + modules, mnames, isusedby=[], [], {} + for i in range(len(lst)): + if '__user__' in lst[i]['name']: + cb_rules.buildcallbacks(lst[i]) + else: + if 'use' in lst[i]: + for u in lst[i]['use'].keys(): + if u not in isusedby: + isusedby[u]=[] + isusedby[u].append(lst[i]['name']) + modules.append(lst[i]) + mnames.append(lst[i]['name']) + ret = {} + for i in range(len(mnames)): + if mnames[i] in isusedby: + outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i], ','.join(['"%s"'%s for s in isusedby[mnames[i]]]))) + else: + um=[] + if 'use' in modules[i]: + for u in modules[i]['use'].keys(): + if u in isusedby and u in mnames: + um.append(modules[mnames.index(u)]) + else: + outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i], u)) + ret[mnames[i]] = {} + dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um)) + return ret + +def dict_append(d_out, d_in): + for (k, v) in d_in.items(): + if k not in d_out: + d_out[k] = [] + if isinstance(v, list): + d_out[k] = d_out[k] + v + else: + d_out[k].append(v) + +def run_main(comline_list): + """Run f2py as if string.join(comline_list,' ') is used as a command line. + In case of using -h flag, return None. 
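+ + A minimal usage sketch (assuming a Fortran source file 'hello.f' exists + in the current directory; the module name is arbitrary): + + from numpy.f2py.f2py2e import run_main + r = run_main(['-m', 'hello', 'hello.f']) + + Otherwise, a dict is returned that maps each built module name to its + build information; the 'csrc' entry lists the C sources to compile.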
+ """ + crackfortran.reset_global_f2py_vars() + f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__)) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + files, options=scaninputline(comline_list) + auxfuncs.options=options + postlist=callcrackfortran(files, options) + isusedby={} + for i in range(len(postlist)): + if 'use' in postlist[i]: + for u in postlist[i]['use'].keys(): + if u not in isusedby: + isusedby[u]=[] + isusedby[u].append(postlist[i]['name']) + for i in range(len(postlist)): + if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']: + if postlist[i]['name'] in isusedby: + #if not quiet: + outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'], ','.join(['"%s"'%s for s in isusedby[postlist[i]['name']]]))) + if 'signsfile' in options: + if options['verbose']>1: + outmess('Stopping. Edit the signature file and then run f2py on the signature file: ') + outmess('%s %s\n'%(os.path.basename(sys.argv[0]), options['signsfile'])) + return + for i in range(len(postlist)): + if postlist[i]['block']!='python module': + if 'python module' not in options: + errmess('Tip: If your original code is Fortran source then you must use -m option.\n') + raise TypeError('All blocks must be python module blocks but got %s'%(repr(postlist[i]['block']))) + auxfuncs.debugoptions=options['debug'] + f90mod_rules.options=options + auxfuncs.wrapfuncs=options['wrapfuncs'] + + ret=buildmodules(postlist) + + for mn in ret.keys(): + dict_append(ret[mn], {'csrc':fobjcsrc,'h':fobjhsrc}) + return ret + +def filter_files(prefix,suffix,files,remove_prefix=None): + """ + Filter files by prefix and suffix. + """ + filtered, rest = [], [] + match = re.compile(prefix+r'.*'+suffix+r'\Z').match + if remove_prefix: + ind = len(prefix) + else: + ind = 0 + for file in [x.strip() for x in files]: + if match(file): filtered.append(file[ind:]) + else: rest.append(file) + return filtered, rest + +def get_prefix(module): + p = os.path.dirname(os.path.dirname(module.__file__)) + return p + +def run_compile(): + """ + Do it all in one call! 
+ """ + import tempfile + + i = sys.argv.index('-c') + del sys.argv[i] + + remove_build_dir = 0 + try: i = sys.argv.index('--build-dir') + except ValueError: i=None + if i is not None: + build_dir = sys.argv[i+1] + del sys.argv[i+1] + del sys.argv[i] + else: + remove_build_dir = 1 + build_dir = tempfile.mkdtemp() + + _reg1 = re.compile(r'[-][-]link[-]') + sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] + if sysinfo_flags: + sysinfo_flags = [f[7:] for f in sysinfo_flags] + + _reg2 = re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include') + f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] + f2py_flags2 = [] + fl = 0 + for a in sys.argv[1:]: + if a in ['only:', 'skip:']: + fl = 1 + elif a==':': + fl = 0 + if fl or a==':': + f2py_flags2.append(a) + if f2py_flags2 and f2py_flags2[-1]!=':': + f2py_flags2.append(':') + f2py_flags.extend(f2py_flags2) + + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] + _reg3 = re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)') + flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + _reg4 = re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in fc_flags] + + if 1: + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)]==v: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print('Unknown vendor: "%s"' % (s[len(v):])) + nv = ov + i = flib_flags.index(s) + flib_flags[i] = '--fcompiler=' + nv + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags)<=2, repr(flib_flags) + + _reg5 = re.compile(r'[-][-](verbose)') + setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in setup_flags] + + if '--quiet' in f2py_flags: + setup_flags.append('--quiet') + + modulename = 'untitled' + sources = sys.argv[1:] + + for optname in ['--include_paths', '--include-paths']: + if optname in sys.argv: + i = sys.argv.index (optname) + f2py_flags.extend (sys.argv[i:i+2]) + del sys.argv[i+1], sys.argv[i] + sources = sys.argv[1:] + + if '-m' in sys.argv: + i = sys.argv.index('-m') + modulename = sys.argv[i+1] + del sys.argv[i+1], sys.argv[i] + sources = sys.argv[1:] + else: + from numpy.distutils.command.build_src import get_f2py_modulename + pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources) + sources = pyf_files + sources + for f in pyf_files: + modulename = get_f2py_modulename(f) + if modulename: + break + + extra_objects, sources = filter_files('', '[.](o|a|so)', sources) + include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) + using_numarray = 0 + using_numeric = 0 + for i in 
range(len(define_macros)): + name_value = define_macros[i].split('=', 1) + if len(name_value)==1: + name_value.append(None) + if len(name_value)==2: + define_macros[i] = tuple(name_value) + else: + print('Invalid use of -D:', name_value) + + from numpy.distutils.system_info import get_info + + num_include_dir = None + num_info = {} + #import numpy + #n = 'numpy' + #p = get_prefix(numpy) + #from numpy.distutils.misc_util import get_numpy_include_dirs + #num_info = {'include_dirs': get_numpy_include_dirs()} + + if num_info: + include_dirs.extend(num_info.get('include_dirs', [])) + + from numpy.distutils.core import setup, Extension + ext_args = {'name': modulename, 'sources': sources, + 'include_dirs': include_dirs, + 'library_dirs': library_dirs, + 'libraries': libraries, + 'define_macros': define_macros, + 'undef_macros': undef_macros, + 'extra_objects': extra_objects, + 'f2py_options': f2py_flags, + } + + if sysinfo_flags: + from numpy.distutils.misc_util import dict_append + for n in sysinfo_flags: + i = get_info(n) + if not i: + outmess('No %s resources found in system'\ + ' (try `f2py --help-link`)\n' % (repr(n))) + dict_append(ext_args,**i) + + ext = Extension(**ext_args) + sys.argv = [sys.argv[0]] + setup_flags + sys.argv.extend(['build', + '--build-temp', build_dir, + '--build-base', build_dir, + '--build-platlib', '.']) + if fc_flags: + sys.argv.extend(['config_fc']+fc_flags) + if flib_flags: + sys.argv.extend(['build_ext']+flib_flags) + + setup(ext_modules = [ext]) + + if remove_build_dir and os.path.exists(build_dir): + import shutil + outmess('Removing build directory %s\n'%(build_dir)) + shutil.rmtree(build_dir) + +def main(): + if '--help-link' in sys.argv[1:]: + sys.argv.remove('--help-link') + from numpy.distutils.system_info import show_all + show_all() + return + if '-c' in sys.argv[1:]: + run_compile() + else: + run_main(sys.argv[1:]) + +#if __name__ == "__main__": +# main() + + +# EOF diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py_testing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py_testing.py new file mode 100644 index 0000000000000..4cec4baad77bc --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py_testing.py @@ -0,0 +1,46 @@ +from __future__ import division, absolute_import, print_function + +import sys +import re + +from numpy.testing.utils import jiffies, memusage + +def cmdline(): + m=re.compile(r'\A\d+\Z') + args = [] + repeat = 1 + for a in sys.argv[1:]: + if m.match(a): + repeat = eval(a) + else: + args.append(a) + f2py_opts = ' '.join(args) + return repeat, f2py_opts + +def run(runtest,test_functions,repeat=1): + l = [(t, repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] + #l = [(t,'') for t in test_functions] + start_memusage = memusage() + diff_memusage = None + start_jiffies = jiffies() + i = 0 + while i +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/02/03 19:30:23 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.27 $"[10:-1] + +f2py_version='See `f2py -v`' + +import pprint +import sys +errmess=sys.stderr.write +outmess=sys.stdout.write +show=pprint.pprint + +from .auxfuncs import * +import numpy as np +from . import capi_maps +from . 
import func2subr +from .crackfortran import undo_rmbadname, undo_rmbadname1 + +options={} + +def findf90modules(m): + if ismodule(m): return [m] + if not hasbody(m): return [] + ret = [] + for b in m['body']: + if ismodule(b): ret.append(b) + else: ret=ret+findf90modules(b) + return ret + +fgetdims1 = """\ + external f2pysetdata + logical ns + integer r,i,j + integer(%d) s(*) + ns = .FALSE. + if (allocated(d)) then + do i=1,r + if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then + ns = .TRUE. + end if + end do + if (ns) then + deallocate(d) + end if + end if + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + +fgetdims2="""\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + end if + flag = 1 + call f2pysetdata(d,allocated(d))""" + +fgetdims2_sa="""\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + !s(r) must be equal to len(d(1)) + end if + flag = 2 + call f2pysetdata(d,allocated(d))""" + + +def buildhooks(pymod): + global fgetdims1, fgetdims2 + from . import rules + ret = {'f90modhooks':[],'initf90modhooks':[],'body':[], + 'need':['F_FUNC', 'arrayobject.h'], + 'separatorsfor':{'includes0':'\n','includes':'\n'}, + 'docs':['"Fortran 90/95 modules:\\n"'], + 'latexdoc':[]} + fhooks=[''] + def fadd(line,s=fhooks): s[0] = '%s\n %s'%(s[0], line) + doc = [''] + def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0], line) + for m in findf90modules(pymod): + sargs, fargs, efargs, modobjs, notvars, onlyvars=[], [], [], [], [m['name']], [] + sargsp = [] + ifargs = [] + mfargs = [] + if hasbody(m): + for b in m['body']: notvars.append(b['name']) + for n in m['vars'].keys(): + var = m['vars'][n] + if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + onlyvars.append(n) + mfargs.append(n) + outmess('\t\tConstructing F90 module support for "%s"...\n'%(m['name'])) + if onlyvars: + outmess('\t\t Variables: %s\n'%(' '.join(onlyvars))) + chooks=[''] + def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0], line) + ihooks=[''] + def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0], line) + + vrd=capi_maps.modsign2map(m) + cadd('static FortranDataDef f2py_%s_def[] = {'%(m['name'])) + dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n'%(m['name'])) + if hasnote(m): + note = m['note'] + if isinstance(note, list): note='\n'.join(note) + dadd(note) + if onlyvars: + dadd('\\begin{description}') + for n in onlyvars: + var = m['vars'][n] + modobjs.append(n) + ct = capi_maps.getctype(var) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() + if not dms: dms='-1' + use_fgetdims2 = fgetdims2 + if isstringarray(var): + if 'charselector' in var and 'len' in var['charselector']: + cadd('\t{"%s",%s,{{%s,%s}},%s},'\ + %(undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at)) + use_fgetdims2 = fgetdims2_sa + else: + cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n), dm['rank'], dms, at)) + else: + cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n), dm['rank'], dms, at)) + dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n, var))) + if hasnote(var): + note = var['note'] + if isinstance(note, list): note='\n'.join(note) + dadd('--- %s'%(note)) + if isallocatable(var): + fargs.append('f2py_%s_getdims_%s'%(m['name'], n)) + efargs.append(fargs[-1]) + sargs.append('void (*%s)(int*,int*,void(*)(char*,int*),int*)'%(n)) + sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') + iadd('\tf2py_%s_def[i_f2py++].func = %s;'%(m['name'], n)) + 
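# For an allocatable module array, also generate a small Fortran helper + # subroutine (assembled from fgetdims1/fgetdims2 above) that reports the + # array's current shape, or (de)allocates it to the requested shape, and + # hands the data pointer back through f2pysetdata: + 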
fadd('subroutine %s(r,s,f2pysetdata,flag)'%(fargs[-1])) + fadd('use %s, only: d => %s\n'%(m['name'], undo_rmbadname1(n))) + fadd('integer flag\n') + fhooks[0]=fhooks[0]+fgetdims1 + dms = eval('range(1,%s+1)'%(dm['rank'])) + fadd(' allocate(d(%s))\n'%(','.join(['s(%s)'%i for i in dms]))) + fhooks[0]=fhooks[0]+use_fgetdims2 + fadd('end subroutine %s'%(fargs[-1])) + else: + fargs.append(n) + sargs.append('char *%s'%(n)) + sargsp.append('char*') + iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'], n)) + if onlyvars: + dadd('\\end{description}') + if hasbody(m): + for b in m['body']: + if not isroutine(b): + print('Skipping', b['block'], b['name']) + continue + modobjs.append('%s()'%(b['name'])) + b['modulename'] = m['name'] + api, wrap=rules.buildapi(b) + if isfunction(b): + fhooks[0]=fhooks[0]+wrap + fargs.append('f2pywrap_%s_%s'%(m['name'], b['name'])) + #efargs.append(fargs[-1]) + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + else: + if wrap: + fhooks[0]=fhooks[0]+wrap + fargs.append('f2pywrap_%s_%s'%(m['name'], b['name'])) + ifargs.append(func2subr.createsubrwrapper(b, signature=1)) + else: + fargs.append(b['name']) + mfargs.append(fargs[-1]) + #if '--external-modroutines' in options and options['--external-modroutines']: + # outmess('\t\t\tapplying --external-modroutines for %s\n'%(b['name'])) + # efargs.append(fargs[-1]) + api['externroutines']=[] + ar=applyrules(api, vrd) + ar['docs']=[] + ar['docshort']=[] + ret=dictappend(ret, ar) + cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},'%(b['name'], m['name'], b['name'], m['name'], b['name'])) + sargs.append('char *%s'%(b['name'])) + sargsp.append('char *') + iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'], b['name'])) + cadd('\t{NULL}\n};\n') + iadd('}') + ihooks[0]='static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s'%(m['name'], ','.join(sargs), ihooks[0]) + if '_' in m['name']: + F_FUNC='F_FUNC_US' + else: + F_FUNC='F_FUNC' + iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'\ + %(F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) + iadd('static void f2py_init_%s(void) {'%(m['name'])) + iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ + %(F_FUNC, m['name'], m['name'].upper(), m['name'])) + iadd('}\n') + ret['f90modhooks']=ret['f90modhooks']+chooks+ihooks + ret['initf90modhooks']=['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(m['name'], m['name'], m['name'])]+ret['initf90modhooks'] + fadd('') + fadd('subroutine f2pyinit%s(f2pysetupfunc)'%(m['name'])) + #fadd('use %s'%(m['name'])) + if mfargs: + for a in undo_rmbadname(mfargs): + fadd('use %s, only : %s'%(m['name'], a)) + if ifargs: + fadd(' '.join(['interface']+ifargs)) + fadd('end interface') + fadd('external f2pysetupfunc') + if efargs: + for a in undo_rmbadname(efargs): + fadd('external %s'%(a)) + fadd('call f2pysetupfunc(%s)'%(','.join(undo_rmbadname(fargs)))) + fadd('end subroutine f2pyinit%s\n'%(m['name'])) + + dadd('\n'.join(ret['latexdoc']).replace(r'\subsection{', r'\subsubsection{')) + + ret['latexdoc']=[] + ret['docs'].append('"\t%s --- %s"'%(m['name'], + ','.join(undo_rmbadname(modobjs)))) + + ret['routine_defs']='' + ret['doc']=[] + ret['docshort']=[] + ret['latexdoc']=doc[0] + if len(ret['docs'])<=1: ret['docs']='' + return ret, fhooks[0] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/func2subr.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/func2subr.py new file mode 100644 index 0000000000000..22f60851d202a --- /dev/null +++ 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/func2subr.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python +""" + +Rules for building C/API module with f2py2e. + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2004/11/26 11:13:06 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.16 $"[10:-1] + +f2py_version='See `f2py -v`' + +import pprint +import copy +import sys +errmess=sys.stderr.write +outmess=sys.stdout.write +show=pprint.pprint + +from .auxfuncs import * +def var2fixfortran(vars,a,fa=None,f90mode=None): + if fa is None: + fa = a + if a not in vars: + show(vars) + outmess('var2fixfortran: No definition for argument "%s".\n'%a) + return '' + if 'typespec' not in vars[a]: + show(vars[a]) + outmess('var2fixfortran: No typespec for argument "%s".\n'%a) + return '' + vardef=vars[a]['typespec'] + if vardef=='type' and 'typename' in vars[a]: + vardef='%s(%s)'%(vardef, vars[a]['typename']) + selector={} + lk = '' + if 'kindselector' in vars[a]: + selector=vars[a]['kindselector'] + lk = 'kind' + elif 'charselector' in vars[a]: + selector=vars[a]['charselector'] + lk = 'len' + if '*' in selector: + if f90mode: + if selector['*'] in ['*', ':', '(*)']: + vardef='%s(len=*)'%(vardef) + else: + vardef='%s(%s=%s)'%(vardef, lk, selector['*']) + else: + if selector['*'] in ['*', ':']: + vardef='%s*(%s)'%(vardef, selector['*']) + else: + vardef='%s*%s'%(vardef, selector['*']) + else: + if 'len' in selector: + vardef='%s(len=%s'%(vardef, selector['len']) + if 'kind' in selector: + vardef='%s,kind=%s)'%(vardef, selector['kind']) + else: + vardef='%s)'%(vardef) + elif 'kind' in selector: + vardef='%s(kind=%s)'%(vardef, selector['kind']) + + vardef='%s %s'%(vardef, fa) + if 'dimension' in vars[a]: + vardef='%s(%s)'%(vardef, ','.join(vars[a]['dimension'])) + return vardef + +def createfuncwrapper(rout,signature=0): + assert isfunction(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d==':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + def add(line,ret=ret): + ret[0] = '%s\n %s'%(ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + newname = '%sf2pywrap'%(name) + + if newname not in vars: + vars[newname] = vars[name] + args = [newname]+rout['args'][1:] + else: + args = [newname]+rout['args'] + + l = var2fixfortran(vars, name, newname, f90mode) + return_char_star = 0 + if l[:13]=='character*(*)': + return_char_star = 1 + if f90mode: l = 'character(len=10)'+l[13:] + else: l = 'character*10'+l[13:] + charselect = vars[name]['charselector'] + if charselect.get('*', '')=='(*)': + charselect['*'] = '10' + sargs = ', '.join(args) + if f90mode: + add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'], name, sargs)) + if not signature: + add('use %s, only : %s'%(rout['modulename'], fortranname)) + else: + add('subroutine f2pywrap%s (%s)'%(name, sargs)) + if not need_interface: + add('external %s'%(fortranname)) + l = l + ', '+fortranname + if need_interface: + for line in 
rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use '): + add(line) + + args = args[1:] + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s'%(a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: continue + if isintent_in(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + add(l) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + if islogicalfunction(rout): + add('%s = .not.(.not.%s(%s))'%(newname, fortranname, sargs)) + else: + add('%s = %s(%s)'%(newname, fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s'%(rout['modulename'], name)) + else: + add('end') + #print '**'*10 + #print ret[0] + #print '**'*10 + return ret[0] + +def createsubrwrapper(rout,signature=0): + assert issubroutine(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d==':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + def add(line,ret=ret): + ret[0] = '%s\n %s'%(ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + + args = rout['args'] + + sargs = ', '.join(args) + if f90mode: + add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'], name, sargs)) + if not signature: + add('use %s, only : %s'%(rout['modulename'], fortranname)) + else: + add('subroutine f2pywrap%s (%s)'%(name, sargs)) + if not need_interface: + add('external %s'%(fortranname)) + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use '): + add(line) + + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s'%(a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + add('call %s(%s)'%(fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s'%(rout['modulename'], name)) + else: + add('end') + #print '**'*10 + #print ret[0] + #print '**'*10 + return ret[0] + + +def assubr(rout): + if isfunction_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n'%(name, fortranname)) + rout = copy.copy(rout) + fname = name + rname = fname + if 'result' in rout: + rname = rout['result'] + rout['vars'][fname]=rout['vars'][rname] + fvar = rout['vars'][fname] + if not isintent_out(fvar): + 
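# The function's result is passed back through the first argument of + # the generated subroutine wrapper, so make sure it carries intent(out) + # (recording the original result name via an 'out=<name>' entry): + 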
if 'intent' not in fvar: + fvar['intent']=[] + fvar['intent'].append('out') + flag=1 + for i in fvar['intent']: + if i.startswith('out='): + flag = 0 + break + if flag: + fvar['intent'].append('out=%s' % (rname)) + rout['args'][:] = [fname] + rout['args'] + return rout, createfuncwrapper(rout) + if issubroutine_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n'%(name, fortranname)) + rout = copy.copy(rout) + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/info.py new file mode 100644 index 0000000000000..c895c5de28d0b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/info.py @@ -0,0 +1,6 @@ +"""Fortran to Python Interface Generator. + +""" +from __future__ import division, absolute_import, print_function + +postpone_import = True diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/rules.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/rules.py new file mode 100644 index 0000000000000..4c186712c9e35 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/rules.py @@ -0,0 +1,1448 @@ +#!/usr/bin/env python +""" + +Rules for building C/API module with f2py2e. + +Here is a skeleton of a new wrapper function (13Dec2001): + +wrapper_function(args) + declarations + get_python_arguments, say, `a' and `b' + + get_a_from_python + if (successful) { + + get_b_from_python + if (successful) { + + callfortran + if (succesful) { + + put_a_to_python + if (succesful) { + + put_b_to_python + if (succesful) { + + buildvalue = ... + + } + + } + + } + + } + cleanup_b + + } + cleanup_a + + return buildvalue + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/08/30 08:58:42 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.129 $"[10:-1] + +from . import __version__ +f2py_version = __version__.version + +import pprint +import sys +import time +import copy + +from .auxfuncs import * +from . import capi_maps +from .capi_maps import * +from . import cfuncs +from . import common_rules +from . import use_rules +from . import f90mod_rules +from . import func2subr + +errmess = sys.stderr.write +outmess = sys.stdout.write +show = pprint.pprint + +options={} +sepdict={} +#for k in ['need_cfuncs']: sepdict[k]=',' +for k in ['decl', + 'frompyobj', + 'cleanupfrompyobj', + 'topyarr', 'method', + 'pyobjfrom', 'closepyobjfrom', + 'freemem', + 'userincludes', + 'includes0', 'includes', 'typedefs', 'typedefs_generated', + 'cppmacros', 'cfuncs', 'callbacks', + 'latexdoc', + 'restdoc', + 'routine_defs', 'externroutines', + 'initf2pywraphooks', + 'commonhooks', 'initcommonhooks', + 'f90modhooks', 'initf90modhooks']: + sepdict[k]='\n' + +#################### Rules for C/API module ################# + +module_rules={ + 'modulebody':"""\ +/* File: #modulename#module.c + * This file is auto-generated with f2py (version:#f2py_version#). + * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, + * written by Pearu Peterson . 
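+ * (Tokens delimited by hash characters in this template are + * substitution slots that f2py fills in when the module source is generated.)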
+ * See http://cens.ioc.ee/projects/f2py2e/ + * Generation date: """+time.asctime(time.localtime(time.time()))+""" + * $R"""+"""evision:$ + * $D"""+"""ate:$ + * Do not edit this file directly unless you know what you are doing!!! + */ +#ifdef __cplusplus +extern \"C\" { +#endif + +"""+gentitle("See f2py2e/cfuncs.py: includes")+""" +#includes# +#includes0# + +"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+""" +static PyObject *#modulename#_error; +static PyObject *#modulename#_module; + +"""+gentitle("See f2py2e/cfuncs.py: typedefs")+""" +#typedefs# + +"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+""" +#typedefs_generated# + +"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+""" +#cppmacros# + +"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+""" +#cfuncs# + +"""+gentitle("See f2py2e/cfuncs.py: userincludes")+""" +#userincludes# + +"""+gentitle("See f2py2e/capi_rules.py: usercode")+""" +#usercode# + +/* See f2py2e/rules.py */ +#externroutines# + +"""+gentitle("See f2py2e/capi_rules.py: usercode1")+""" +#usercode1# + +"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+""" +#callbacks# + +"""+gentitle("See f2py2e/rules.py: buildapi")+""" +#body# + +"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+""" +#f90modhooks# + +"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+""" + +"""+gentitle("See f2py2e/common_rules.py: buildhooks")+""" +#commonhooks# + +"""+gentitle("See f2py2e/rules.py")+""" + +static FortranDataDef f2py_routine_defs[] = { +#routine_defs# +\t{NULL} +}; + +static PyMethodDef f2py_module_methods[] = { +#pymethoddef# +\t{NULL,NULL} +}; + +#if PY_VERSION_HEX >= 0x03000000 +static struct PyModuleDef moduledef = { +\tPyModuleDef_HEAD_INIT, +\t"#modulename#", +\tNULL, +\t-1, +\tf2py_module_methods, +\tNULL, +\tNULL, +\tNULL, +\tNULL +}; +#endif + +#if PY_VERSION_HEX >= 0x03000000 +#define RETVAL m +PyMODINIT_FUNC PyInit_#modulename#(void) { +#else +#define RETVAL +PyMODINIT_FUNC init#modulename#(void) { +#endif +\tint i; +\tPyObject *m,*d, *s; +#if PY_VERSION_HEX >= 0x03000000 +\tm = #modulename#_module = PyModule_Create(&moduledef); +#else +\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods); +#endif +\tPy_TYPE(&PyFortran_Type) = &PyType_Type; +\timport_array(); +\tif (PyErr_Occurred()) +\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;} +\td = PyModule_GetDict(m); +\ts = PyString_FromString(\"$R"""+"""evision: $\"); +\tPyDict_SetItemString(d, \"__version__\", s); +#if PY_VERSION_HEX >= 0x03000000 +\ts = PyUnicode_FromString( +#else +\ts = PyString_FromString( +#endif +\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); +\tPyDict_SetItemString(d, \"__doc__\", s); +\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); +\tPy_DECREF(s); +\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) +\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i])); +#initf2pywraphooks# +#initf90modhooks# +#initcommonhooks# +#interface_usercode# + +#ifdef F2PY_REPORT_ATEXIT +\tif (! 
PyErr_Occurred()) +\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); +#endif + +\treturn RETVAL; +} +#ifdef __cplusplus +} +#endif +""", + 'separatorsfor':{'latexdoc':'\n\n', + 'restdoc':'\n\n'}, + 'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n', + '#modnote#\n', + '#latexdoc#'], + 'restdoc':['Module #modulename#\n'+'='*80, + '\n#restdoc#'] + } + +defmod_rules=[ + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote:'#note#',l_not(hasnote):''}, + } + ] + +routine_rules={ + 'separatorsfor':sepdict, + 'body':""" +#begintitle# +static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; +/* #declfortranroutine# */ +static PyObject *#apiname#(const PyObject *capi_self, + PyObject *capi_args, + PyObject *capi_keywds, + #functype# (*f2py_func)(#callprotoargument#)) { +\tPyObject * volatile capi_buildvalue = NULL; +\tvolatile int f2py_success = 1; +#decl# +\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; +#usercode# +#routdebugenter# +#ifdef F2PY_REPORT_ATEXIT +f2py_start_clock(); +#endif +\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ +\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ +\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; +#frompyobj# +/*end of frompyobj*/ +#ifdef F2PY_REPORT_ATEXIT +f2py_start_call_clock(); +#endif +#callfortranroutine# +if (PyErr_Occurred()) + f2py_success = 0; +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_call_clock(); +#endif +/*end of callfortranroutine*/ +\t\tif (f2py_success) { +#pyobjfrom# +/*end of pyobjfrom*/ +\t\tCFUNCSMESS(\"Building return value.\\n\"); +\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); +/*closepyobjfrom*/ +#closepyobjfrom# +\t\t} /*if (f2py_success) after callfortranroutine*/ +/*cleanupfrompyobj*/ +#cleanupfrompyobj# +\tif (capi_buildvalue == NULL) { +#routdebugfailure# +\t} else { +#routdebugleave# +\t} +\tCFUNCSMESS(\"Freeing memory.\\n\"); +#freemem# +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_clock(); +#endif +\treturn capi_buildvalue; +} +#endtitle# +""", + 'routine_defs':'#routine_def#', + 'initf2pywraphooks':'#initf2pywraphook#', + 'externroutines':'#declfortranroutine#', + 'doc':'#docreturn##name#(#docsignature#)', + 'docshort':'#docreturn##name#(#docsignatureshort#)', + 'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n', + 'need':['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], + 'cppmacros':{debugcapi:'#define DEBUGCFUNCS'}, + 'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n', + """ +\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} +#routnote# + +#latexdocstrsigns# +"""], + 'restdoc':['Wrapped function ``#name#``\n'+'-'*80, + + ] + } + +################## Rules for C/API function ############## + +rout_rules=[ + { # Init + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'kwlist': '', 'kwlistopt': '', 
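+ # Rule dicts with a '_check' predicate apply only when it holds for + # the routine being wrapped; their string entries are merged into the + # same-named slots of the routine_rules template above. + 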
'callfortran': '', 'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], #this list will be reversed + 'pyobjfrom': '/*pyobjfrom*/', + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], #this list will be reversed + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote:'--- #note#',l_not(hasnote):''}, + }, { + 'apiname':'f2py_rout_#modulename#_#name#', + 'pyname':'#modulename#.#name#', + 'decl':'', + '_check':l_not(ismoduleroutine) + }, { + 'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#', + 'pyname':'#modulename#.#f90modulename#.#name#', + 'decl':'', + '_check':ismoduleroutine + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);', + ismoduleroutine:'', + isdummyroutine:'' + }, + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'F_FUNC'}, + 'callfortranroutine': [ + {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + {hasexternals:"""\ +\t\tif (#setjmpbuf#) { +\t\t\tf2py_success = 0; +\t\t} else {"""}, + {isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'}, + {hascallstatement:'''\t\t\t\t#callstatement#; +\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, + {l_not(l_or(hascallstatement, isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'}, + {isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'}, + {hasexternals:"""\t\t}"""} + ], + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): 
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)):''' + { + extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); +#if PY_VERSION_HEX >= 0x03000000 + PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); +#else + PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); +#endif + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)):['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals:"""\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, + {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, + {hasexternals:'\t}'} + ], + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)):''' + { + extern void #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); +#if PY_VERSION_HEX >= 0x03000000 + PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); +#else + PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); +#endif + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)):['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals:"""\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, + {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, + {hasexternals:'\t}'} + ], + '_check': issubroutine_wrap, + }, { # Function + 'functype':'#ctype#', + 'docreturn':{l_not(isintent_hide):'#rname#,'}, + 'docstrout':'#pydocsignout#', + 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasresultnote:'--- #resultnote#'}], + 'callfortranroutine':[{l_and(debugcapi, isstringfunction):"""\ +#ifdef USESCOMPAQFORTRAN +\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); +#else +\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +#endif +"""}, + {l_and(debugcapi, l_not(isstringfunction)):"""\ 
+\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +"""} + ], + '_check':l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);', + isdummyroutine:'' + }, + 'routine_def':{l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'}, + {iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'} + ], + 'callfortranroutine':[ + {hasexternals:"""\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, + {hascallstatement:'''\t#callstatement#; +/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ +'''}, + {l_not(l_or(hascallstatement, isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, + {hasexternals:'\t}'}, + {l_and(debugcapi, iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + 'need':[{l_not(isdummyroutine):'F_FUNC'}, + {iscomplexfunction:'pyobj_from_#ctype#1'}, + {islong_longfunction:'long_long'}, + {islong_doublefunction:'long_double'}], + 'returnformat':{l_not(isintent_hide):'#rformat#'}, + 'return':{iscomplexfunction:',#name#_return_value_capi', + l_not(l_or(iscomplexfunction, isintent_hide)):',#name#_return_value'}, + '_check':l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap + 'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 'routine_def':{l_not(l_or(ismoduleroutine, isintent_c)): +# '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},', + '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c): +# '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},' + '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + }, + 'decl':['\t#ctype# #name#_return_value = NULL;', + '\tint #name#_return_value_len = 0;'], + 'callfortran':'#name#_return_value,#name#_return_value_len,', + 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', + '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {', + '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', + '\t\tf2py_success = 0;', + '\t} else {', + "\t\t(#name#_return_value)[#name#_return_value_len] = 
'\\0';", + '\t}', + '\tif (f2py_success) {', + {hasexternals:"""\ +\t\tif (#setjmpbuf#) { +\t\t\tf2py_success = 0; +\t\t} else {"""}, + {isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'}, + """\ +#ifdef USESCOMPAQFORTRAN +\t\t(*f2py_func)(#callcompaqfortran#); +#else +\t\t(*f2py_func)(#callfortran#); +#endif +""", + {isthreadsafe:'\t\tPy_END_ALLOW_THREADS'}, + {hasexternals:'\t\t}'}, + {debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + '\t} /* if (f2py_success) after (string)malloc */', + ], + 'returnformat':'#rformat#', + 'return':',#name#_return_value', + 'freemem':'\tSTRINGFREE(#name#_return_value);', + 'need':['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + }, + { # Debugging + 'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + '_check':debugcapi + } + ] + +################ Rules for arguments ################## + +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', + } + +aux_rules=[ + { + 'separatorsfor':sepdict + }, + { # Common + 'frompyobj': ['\t/* Processing auxiliary variable #varname# */', + {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], + 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + 'need': typedef_need_dict, + }, +# Scalars (not complex) + { # Common + 'decl': '\t#ctype# #varname# = 0;', + 'need': {hasinitvalue:'math.h'}, + 'frompyobj': {hasinitvalue:'\t#varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, + { + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), + }, +# Complex scalars + { # Common + 'decl':'\t#ctype# #varname#;', + 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check':iscomplex + }, +# String + { # Common + 'decl':['\t#ctype# #varname# = NULL;', + '\tint slen(#varname#);', + ], + 'need':['len..'], + '_check':isstring + }, +# Array + { # Common + 'decl':['\t#ctype# *#varname# = NULL;', + '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + '\tconst int #varname#_Rank = #rank#;', + ], + 'need':['len..', {hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}], + '_check':isarray + }, +# Scalararray + { # Common + '_check':l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, +# Integer*1 array + {'need':'#ctype#', + '_check':isint1array, + '_depend':'' + }, +# Integer*-1 array + {'need':'#ctype#', + '_check':isunsigned_chararray, + '_depend':'' + }, +# Integer*-2 array + {'need':'#ctype#', + '_check':isunsigned_shortarray, + '_depend':'' + }, +# Integer*-8 array + {'need':'#ctype#', + '_check':isunsigned_long_longarray, + '_depend':'' + }, +# Complexarray + {'need':'#ctype#', + 
'_check':iscomplexarray, + '_depend':'' + }, +# Stringarray + { + 'callfortranappend':{isarrayofstrings:'flen(#varname#),'}, + 'need':'string', + '_check':isstringarray + } + ] + +arg_rules=[ + { + 'separatorsfor':sepdict + }, + { # Common + 'frompyobj': ['\t/* Processing variable #varname# */', + {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], + 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, + }, +# Doc signatures + { + 'docstropt':{l_and(isoptional, isintent_nothide):'#pydocsign#'}, + 'docstrreq':{l_and(isrequired, isintent_nothide):'#pydocsign#'}, + 'docstrout':{isintent_out:'#pydocsignout#'}, + 'latexdocstropt':{l_and(isoptional, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote:'--- #note#'}]}, + 'latexdocstrreq':{l_and(isrequired, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote:'--- #note#'}]}, + 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide):'--- #note#', + l_and(hasnote, isintent_nothide):'--- See above.'}]}, + 'depend':'' + }, +# Required/Optional arguments + { + 'kwlist':'"#varname#",', + 'docsign':'#varname#,', + '_check':l_and(isintent_nothide, l_not(isoptional)) + }, + { + 'kwlistopt':'"#varname#",', + 'docsignopt':'#varname#=#showinit#,', + 'docsignoptshort':'#varname#,', + '_check':l_and(isintent_nothide, isoptional) + }, +# Docstring/BuildValue + { + 'docreturn':'#outvarname#,', + 'returnformat':'#varrformat#', + '_check':isintent_out + }, +# Externals (call-back functions) + { # Common + 'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'}, + 'docsignxashort':{isintent_nothide:'#varname#_extra_args,'}, + 'docstropt':{isintent_nothide:'#varname#_extra_args : input tuple, optional\\n Default: ()'}, + 'docstrcbs':'#cbdocstr#', + 'latexdocstrcbs':'\\item[] #cblatexdocstr#', + 'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, + 'decl':['\tPyObject *#varname#_capi = Py_None;', + '\tPyTupleObject *#varname#_xa_capi = NULL;', + '\tPyTupleObject *#varname#_args_capi = NULL;', + '\tint #varname#_nofargs_capi = 0;', + {l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'} + ], + 'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'}, + 'argformat':{isrequired:'O'}, + 'keyformat':{isoptional:'O'}, + 'xaformat':{isintent_nothide:'O!'}, + 'args_capi':{isrequired:',&#varname#_capi'}, + 'keys_capi':{isoptional:',&#varname#_capi'}, + 'keys_xa':',&PyTuple_Type,&#varname#_xa_capi', + 'setjmpbuf':'(setjmp(#cbname#_jmpbuf))', + 'callfortran':{l_not(isintent_callback):'#varname#_cptr,'}, + 'need':['#cbname#', 'setjmp.h'], + '_check':isexternal + }, + { + 'frompyobj':[{l_not(isintent_callback):"""\ +if(F2PyCapsule_Check(#varname#_capi)) { + #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi); +} else { + #varname#_cptr = #cbname#; +} +"""}, {isintent_callback:"""\ +if (#varname#_capi==Py_None) { + #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); + if (#varname#_capi) { + if (#varname#_xa_capi==NULL) { + if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { + PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); + if (capi_tmp) + #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); + else + #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + if (#varname#_xa_capi==NULL) { + 
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); + return NULL; + } + } + } + } + if (#varname#_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); + return NULL; + } +} +"""}, +## {l_not(isintent_callback):"""\ +## if (#varname#_capi==Py_None) { +## printf(\"hoi\\n\"); +## } +## """}, +"""\ +\t#varname#_nofargs_capi = #cbname#_nofargs; +\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) { +\t\tjmp_buf #varname#_jmpbuf;""", +{debugcapi:["""\ +\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs); +\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", +{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, + """\ +\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\"); +\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject); +\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject); +\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""", + ], +'cleanupfrompyobj': +"""\ +\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\"); +\t\t#cbname#_capi = #varname#_capi; +\t\tPy_DECREF(#cbname#_args_capi); +\t\t#cbname#_args_capi = #varname#_args_capi; +\t\t#cbname#_nofargs = #varname#_nofargs_capi; +\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf)); +\t}""", + 'need':['SWAP', 'create_cb_arglist'], + '_check':isexternal, + '_depend':'' + }, +# Scalars (not complex) + { # Common + 'decl':'\t#ctype# #varname# = 0;', + 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, + 'return':{isintent_out:',#varname#'}, + '_check':l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue:'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), + #'_depend':'' + }, { # Not hidden + 'decl':'\tPyObject *#varname#_capi = Py_None;', + 'argformat':{isrequired:'O'}, + 'keyformat':{isoptional:'O'}, + 'args_capi':{isrequired:',&#varname#_capi'}, + 'keys_capi':{isoptional:',&#varname#_capi'}, + 'pyobjfrom':{isintent_inout:"""\ +\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); +\tif (f2py_success) {"""}, + 'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, + 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, + '_check':l_and(isscalar, l_not(iscomplex), isintent_nothide) + }, { + 'frompyobj':[ +# hasinitvalue... +# if pyobj is None: +# varname = init +# else +# from_pyobj(varname) +# +# isoptional and noinitvalue... +# if pyobj is not None: +# from_pyobj(varname) +# else: +# varname is uninitialized +# +# ... 
+# from_pyobj(varname) +# + {hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else', + '_depend':''}, + {l_and(isoptional, l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)', + '_depend':''}, + {l_not(islogical):'''\ +\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); +\tif (f2py_success) {'''}, + {islogical:'''\ +\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); +\t\tf2py_success = 1; +\tif (f2py_success) {'''}, + ], + 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/', + 'need':{l_not(islogical):'#ctype#_from_pyobj'}, + '_check':l_and(isscalar, l_not(iscomplex), isintent_nothide), + '_depend':'' +# },{ # Hidden +# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide) + }, { # Hidden + 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'}, + 'need':typedef_need_dict, + '_check':l_and(isscalar, l_not(iscomplex), isintent_hide), + '_depend':'' + }, { # Common + 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + '_check':l_and(isscalar, l_not(iscomplex)), + '_depend':'' + }, +# Complex scalars + { # Common + 'decl':'\t#ctype# #varname#;', + 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, + 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'return':{isintent_out:',#varname#_capi'}, + '_check':iscomplex + }, { # Not hidden + 'decl':'\tPyObject *#varname#_capi = Py_None;', + 'argformat':{isrequired:'O'}, + 'keyformat':{isoptional:'O'}, + 'args_capi':{isrequired:',&#varname#_capi'}, + 'keys_capi':{isoptional:',&#varname#_capi'}, + 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, + 'pyobjfrom':{isintent_inout:"""\ +\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); +\t\tif (f2py_success) {"""}, + 'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, + '_check':l_and(iscomplex, isintent_nothide) + }, { + 'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + {l_and(isoptional, l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'}, +# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");' + '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n\tif (f2py_success) {'], + 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/', + 'need':['#ctype#_from_pyobj'], + '_check':l_and(iscomplex, isintent_nothide), + '_depend':'' + }, { # Hidden + 'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'}, + '_check':l_and(iscomplex, isintent_hide) + }, { + 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check':l_and(iscomplex, isintent_hide), + '_depend':'' + }, { # Common + 'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'need':['pyobj_from_#ctype#1'], + '_check':iscomplex + }, { + 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + '_check':iscomplex, + '_depend':'' + }, +# String + { # Common + 'decl':['\t#ctype# #varname# = NULL;', + '\tint slen(#varname#);', + '\tPyObject *#varname#_capi = Py_None;'], + 'callfortran':'#varname#,', + 'callfortranappend':'slen(#varname#),', + 
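# Illustrative sketch, not part of the patch: the l_and/l_or/l_not helpers
# that appear in the '_check' entries are predicate combinators, roughly
# equivalent to:
#
#     def l_and(*preds):
#         return lambda var: all(p(var) for p in preds)
#
#     def l_not(pred):
#         return lambda var: not pred(var)
#
# so l_and(isstring, isintent_nothide) below matches exactly the visible
# string arguments.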
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, +# 'freemem':'\tSTRINGFREE(#varname#);', + 'return':{isintent_out:',#varname#'}, + 'need':['len..'],#'STRINGFREE'], + '_check':isstring + }, { # Common + 'frompyobj':"""\ +\tslen(#varname#) = #length#; +\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\"); +\tif (f2py_success) {""", + 'cleanupfrompyobj':"""\ +\t\tSTRINGFREE(#varname#); +\t} /*if (f2py_success) of #varname#*/""", + 'need':['#ctype#_from_pyobj', 'len..', 'STRINGFREE'], + '_check':isstring, + '_depend':'' + }, { # Not hidden + 'argformat':{isrequired:'O'}, + 'keyformat':{isoptional:'O'}, + 'args_capi':{isrequired:',&#varname#_capi'}, + 'keys_capi':{isoptional:',&#varname#_capi'}, + 'pyobjfrom':{isintent_inout:'''\ +\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#); +\tif (f2py_success) {'''}, + 'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, + 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, + '_check':l_and(isstring, isintent_nothide) + }, { # Hidden + '_check':l_and(isstring, isintent_hide) + }, { + 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + '_check':isstring, + '_depend':'' + }, +# Array + { # Common + 'decl':['\t#ctype# *#varname# = NULL;', + '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + '\tconst int #varname#_Rank = #rank#;', + '\tPyArrayObject *capi_#varname#_tmp = NULL;', + '\tint capi_#varname#_intent = 0;', + ], + 'callfortran':'#varname#,', + 'return':{isintent_out:',capi_#varname#_tmp'}, + 'need':'len..', + '_check':isarray + }, { # intent(overwrite) array + 'decl': '\tint capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', + }, + { # intent(copy) array + 'decl': '\tint capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need':[{hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}], + '_check':isarray, + '_depend':'' + }, { # Not hidden + 'decl':'\tPyObject *#varname#_capi = Py_None;', + 'argformat':{isrequired:'O'}, + 'keyformat':{isoptional:'O'}, + 'args_capi':{isrequired:',&#varname#_capi'}, + 'keys_capi':{isoptional:',&#varname#_capi'}, +# 'pyobjfrom':{isintent_inout:"""\ +# /* Partly because of the following hack, intent(inout) is deprecated, +# Use intent(in,out) instead. 
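# Illustrative usage sketch, not part of the patch: for an intent(overwrite)
# array argument x, the extra keyword generated by the rules above controls
# the F2PY_INTENT_COPY flag (module and routine names are hypothetical):
#
#     y = mymod.foo(x)                  # default overwrite_x=1: x may be clobbered
#     y = mymod.foo(x, overwrite_x=0)   # forces a copy, the caller's x is preserved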
+ +# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\ +# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) { +# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) { +# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base) +# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi); +# \t\t} else +# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi); +# \t} +# */ +# """}, +# 'need':{isintent_inout:'copy_ND_array'}, + '_check':l_and(isarray, isintent_nothide) + }, { + 'frompyobj':['\t#setdims#;', + '\tcapi_#varname#_intent |= #intent#;', + {isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, + {isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, + """\ +\tif (capi_#varname#_tmp == NULL) { +\t\tif (!PyErr_Occurred()) +\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" ); +\t} else { +\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data); +""", +{hasinitvalue:[ + {isintent_nothide:'\tif (#varname#_capi == Py_None) {'}, + {isintent_hide:'\t{'}, + {iscomplexarray:'\t\t#ctype# capi_c;'}, + """\ +\t\tint *_i,capi_i=0; +\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); +\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) { +\t\t\twhile ((_i = nextforcomb())) +\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */ +\t\t} else { +\t\t\tif (!PyErr_Occurred()) +\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); +\t\t\tf2py_success = 0; +\t\t} +\t} +\tif (f2py_success) {"""]}, + ], + 'cleanupfrompyobj':[ # note that this list will be reversed + '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', + {l_not(l_or(isintent_out, isintent_hide)):"""\ +\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { +\t\tPy_XDECREF(capi_#varname#_tmp); }"""}, + {l_and(isintent_hide, l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""}, + {hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'}, + ], + '_check':isarray, + '_depend':'' + }, +# { # Hidden +# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'}, +# '_check':l_and(isarray,isintent_hide) +# }, +# Scalararray + { # Common + '_check':l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, +# Integer*1 array + {'need':'#ctype#', + '_check':isint1array, + '_depend':'' + }, +# Integer*-1 array + {'need':'#ctype#', + '_check':isunsigned_chararray, + '_depend':'' + }, +# Integer*-2 array + {'need':'#ctype#', + '_check':isunsigned_shortarray, + '_depend':'' + }, +# Integer*-8 array + {'need':'#ctype#', + '_check':isunsigned_long_longarray, + '_depend':'' + }, +# Complexarray + {'need':'#ctype#', + '_check':iscomplexarray, + '_depend':'' + }, +# Stringarray + { + 'callfortranappend':{isarrayofstrings:'flen(#varname#),'}, + 'need':'string', + '_check':isstringarray + } + ] + +################# Rules for checking ############### + +check_rules=[ + { + 'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'need':'len..' 
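# Illustrative sketch, not part of the patch: the CHECK* rules below expand
# user-written check() constraints from a .pyf signature, for example
#
#     integer intent(in) :: n
#     check(n>=1) n
#
# is routed through CHECKSCALAR into C code that raises a Python exception
# before the Fortran routine is entered whenever the condition fails.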
+ }, { + 'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/', + 'need':'CHECKSCALAR', + '_check':l_and(isscalar, l_not(iscomplex)), + '_break':'' + }, { + 'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/', + 'need':'CHECKSTRING', + '_check':isstring, + '_break':'' + }, { + 'need':'CHECKARRAY', + 'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/', + '_check':isarray, + '_break':'' + }, { + 'need': 'CHECKGENERIC', + 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/', + } +] + +########## Applying the rules. No need to modify what follows ############# + +#################### Build C/API module ####################### + +def buildmodule(m, um): + """ + Return a dict of generated output files: 'csrc' (C/API module source) and, + when wrappers or LaTeX documentation are produced, 'fsrc' and 'ltx'. + """ + global f2py_version, options + outmess('\tBuilding module "%s"...\n'%(m['name'])) + ret = {} + mod_rules=defmod_rules[:] + vrd=modsign2map(m) + rd=dictappend({'f2py_version':f2py_version}, vrd) + funcwrappers = [] + funcwrappers2 = [] # F90 codes + for n in m['interfaced']: + nb=None + for bi in m['body']: + if not bi['block']=='interface': + errmess('buildmodule: Expected interface block. Skipping.\n') + continue + for b in bi['body']: + if b['name']==n: nb=b;break + + if not nb: + errmess('buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n'%(n)) + continue + nb_list = [nb] + if 'entry' in nb: + for k, a in nb['entry'].items(): + nb1 = copy.deepcopy(nb) + del nb1['entry'] + nb1['name'] = k + nb1['args'] = a + nb_list.append(nb1) + for nb in nb_list: + api, wrap=buildapi(nb) + if wrap: + if ismoduleroutine(nb): + funcwrappers2.append(wrap) + else: + funcwrappers.append(wrap) + ar=applyrules(api, vrd) + rd=dictappend(rd, ar) + + # Construct COMMON block support + cr, wrap = common_rules.buildhooks(m) + if wrap: + funcwrappers.append(wrap) + ar=applyrules(cr, vrd) + rd=dictappend(rd, ar) + + # Construct F90 module support + mr, wrap = f90mod_rules.buildhooks(m) + if wrap: + funcwrappers2.append(wrap) + ar=applyrules(mr, vrd) + rd=dictappend(rd, ar) + + for u in um: + ar=use_rules.buildusevars(u, m['use'][u['name']]) + rd=dictappend(rd, ar) + + needs=cfuncs.get_needs() + code={} + for n in needs.keys(): + code[n]=[] + for k in needs[n]: + c='' + if k in cfuncs.includes0: + c=cfuncs.includes0[k] + elif k in cfuncs.includes: + c=cfuncs.includes[k] + elif k in cfuncs.userincludes: + c=cfuncs.userincludes[k] + elif k in cfuncs.typedefs: + c=cfuncs.typedefs[k] + elif k in cfuncs.typedefs_generated: + c=cfuncs.typedefs_generated[k] + elif k in cfuncs.cppmacros: + c=cfuncs.cppmacros[k] + elif k in cfuncs.cfuncs: + c=cfuncs.cfuncs[k] + elif k in cfuncs.callbacks: + c=cfuncs.callbacks[k] + elif k in cfuncs.f90modhooks: + c=cfuncs.f90modhooks[k] + elif k in cfuncs.commonhooks: + c=cfuncs.commonhooks[k] + else: + errmess('buildmodule: unknown need %s.\n'%(repr(k)));continue + code[n].append(c) + mod_rules.append(code) + for r in mod_rules: + if ('_check' in r and r['_check'](m)) or ('_check' not in r): + ar=applyrules(r, vrd, m) + rd=dictappend(rd, ar) + ar=applyrules(module_rules, rd) + + fn = os.path.join(options['buildpath'], vrd['coutput']) + ret['csrc'] = fn + f=open(fn, 'w') + f.write(ar['modulebody'].replace('\t', 2*' ')) + f.close() + outmess('\tWrote C/API module 
"%s" to file "%s"\n'%(m['name'], fn)) + + if options['dorestdoc']: + fn = os.path.join(options['buildpath'], vrd['modulename']+'module.rest') + f=open(fn, 'w') + f.write('.. -*- rest -*-\n') + f.write('\n'.join(ar['restdoc'])) + f.close() + outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'], vrd['modulename'])) + if options['dolatexdoc']: + fn = os.path.join(options['buildpath'], vrd['modulename']+'module.tex') + ret['ltx'] = fn + f=open(fn, 'w') + f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version)) + if 'shortlatex' not in options: + f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') + f.write('\n'.join(ar['latexdoc'])) + if 'shortlatex' not in options: + f.write('\\end{document}') + f.close() + outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'], vrd['modulename'])) + if funcwrappers: + wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) + ret['fsrc'] = wn + f=open(wn, 'w') + f.write('C -*- fortran -*-\n') + f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) + f.write('C It contains Fortran 77 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'): + if l and l[0]==' ': + while len(l)>=66: + lines.append(l[:66]+'\n &') + l = l[66:] + lines.append(l+'\n') + else: lines.append(l+'\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + f.close() + outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn)) + if funcwrappers2: + wn = os.path.join(options['buildpath'], '%s-f2pywrappers2.f90'%(vrd['modulename'])) + ret['fsrc'] = wn + f=open(wn, 'w') + f.write('! -*- f90 -*-\n') + f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) + f.write('! 
It contains Fortran 90 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'): + if len(l)>72 and l[0]==' ': + lines.append(l[:72]+'&\n &') + l = l[72:] + while len(l)>66: + lines.append(l[:66]+'&\n &') + l = l[66:] + lines.append(l+'\n') + else: lines.append(l+'\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + f.close() + outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn)) + return ret + +################## Build C/API function ############# + +stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'} + +def buildapi(rout): + rout, wrap = func2subr.assubr(rout) + args, depargs=getargs2(rout) + capi_maps.depargs=depargs + var=rout['vars'] + auxvars = [a for a in var.keys() if isintent_aux(var[a])] + + if ismoduleroutine(rout): + outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'], rout['name'])) + else: + outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name'])) + # Routine + vrd=routsign2map(rout) + rd=dictappend({}, vrd) + for r in rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar=applyrules(r, vrd, rout) + rd=dictappend(rd, ar) + + # Args + nth, nthk=0, 0 + savevrd={} + for a in args: + vrd=sign2map(a, var[a]) + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + if not isintent_hide(var[a]): + if not isoptional(var[a]): + nth=nth+1 + vrd['nth']=repr(nth)+stnd[nth%10]+' argument' + else: + nthk=nthk+1 + vrd['nth']=repr(nthk)+stnd[nthk%10]+' keyword' + else: vrd['nth']='hidden' + savevrd[a]=vrd + for r in _rules: + if '_depend' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + vrd=savevrd[a] + for r in _rules: + if '_depend' not in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) + if '_break' in r: + break + if 'check' in var[a]: + for c in var[a]['check']: + vrd['check']=c + ar=applyrules(check_rules, vrd, var[a]) + rd=dictappend(rd, ar) + if isinstance(rd['cleanupfrompyobj'], list): + rd['cleanupfrompyobj'].reverse() + if isinstance(rd['closepyobjfrom'], list): + rd['closepyobjfrom'].reverse() + rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#', + {'docsign':rd['docsign'], + 'docsignopt':rd['docsignopt'], + 'docsignxa':rd['docsignxa']})) + optargs=stripcomma(replace('#docsignopt##docsignxa#', + {'docsignxa':rd['docsignxashort'], + 'docsignopt':rd['docsignoptshort']} + )) + if optargs=='': + rd['docsignatureshort']=stripcomma(replace('#docsign#', {'docsign':rd['docsign']})) + else: + rd['docsignatureshort']=replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_', '\\_') + rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',', ', ') + cfs=stripcomma(replace('#callfortran##callfortranappend#', {'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) + if len(rd['callfortranappend'])>1: + rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#', {'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) + else: + rd['callcompaqfortran']=cfs + rd['callfortran']=cfs + if 
isinstance(rd['docreturn'], list): + rd['docreturn']=stripcomma(replace('#docreturn#', {'docreturn':rd['docreturn']}))+' = ' + rd['docstrsigns']=[] + rd['latexdocstrsigns']=[] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns']=rd['docstrsigns']+rd[k] + k='latex'+k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ + ['\\begin{description}']+rd[k][1:]+\ + ['\\end{description}'] + + # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720 + if rd['keyformat'] or rd['xaformat']: + argformat = rd['argformat'] + if isinstance(argformat, list): + argformat.append('|') + else: + assert isinstance(argformat, str), repr((argformat, type(argformat))) + rd['argformat'] += '|' + + ar=applyrules(routine_rules, rd) + if ismoduleroutine(rout): + outmess('\t\t\t %s\n'%(ar['docshort'])) + else: + outmess('\t\t %s\n'%(ar['docshort'])) + return ar, wrap + + +#################### EOF rules.py ####################### diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/setup.py new file mode 100644 index 0000000000000..2f1fd6a015076 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/setup.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +""" +setup.py for installing F2PY + +Usage: + python setup.py install + +Copyright 2001-2005 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Revision: 1.32 $ +$Date: 2005/01/30 17:22:14 $ +Pearu Peterson + +""" +from __future__ import division, print_function + +__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $" + +import os +import sys +from distutils.dep_util import newer +from numpy.distutils import log +from numpy.distutils.core import setup +from numpy.distutils.misc_util import Configuration + +from __version__ import version + +def configuration(parent_package='',top_path=None): + config = Configuration('f2py', parent_package, top_path) + + config.add_data_dir('docs') + config.add_data_dir('tests') + + config.add_data_files('src/fortranobject.c', + 'src/fortranobject.h', + 'f2py.1' + ) + + config.make_svn_version_py() + + def generate_f2py_py(build_dir): + f2py_exe = 'f2py'+os.path.basename(sys.executable)[6:] + if f2py_exe[-4:]=='.exe': + f2py_exe = f2py_exe[:-4] + '.py' + if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py': + f2py_exe = f2py_exe + '.py' + target = os.path.join(build_dir, f2py_exe) + if newer(__file__, target): + log.info('Creating %s', target) + f = open(target, 'w') + f.write('''\ +#!%s +# See http://cens.ioc.ee/projects/f2py2e/ +import os, sys +for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]: + try: + i=sys.argv.index("--"+mode) + del sys.argv[i] + break + except ValueError: pass +os.environ["NO_SCIPY_IMPORT"]="f2py" +if mode=="g3-numpy": + sys.stderr.write("G3 f2py support is not implemented, yet.\\n") + sys.exit(1) +elif mode=="2e-numeric": + from f2py2e import main +elif mode=="2e-numarray": + sys.argv.append("-DNUMARRAY") + from f2py2e import main +elif mode=="2e-numpy": + from numpy.f2py import main +else: + sys.stderr.write("Unknown mode: " + repr(mode) + "\\n") + sys.exit(1) +main() +'''%(sys.executable)) + f.close() + return target + + config.add_scripts(generate_f2py_py) + + log.info('F2PY Version %s', 
config.get_version()) + + return config + +if __name__ == "__main__": + + config = configuration(top_path='') + version = config.get_version() + print('F2PY Version', version) + config = config.todict() + + if sys.version[:3]>='2.3': + config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ + "/F2PY-2-latest.tar.gz" + config['classifiers'] = [ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: NumPy License', + 'Natural Language :: English', + 'Operating System :: OS Independent', + 'Programming Language :: C', + 'Programming Language :: Fortran', + 'Programming Language :: Python', + 'Topic :: Scientific/Engineering', + 'Topic :: Software Development :: Code Generators', + ] + setup(version=version, + description = "F2PY - Fortran to Python Interface Generation", + author = "Pearu Peterson", + author_email = "pearu@cens.ioc.ee", + maintainer = "Pearu Peterson", + maintainer_email = "pearu@cens.ioc.ee", + license = "BSD", + platforms = "Unix, Windows (mingw|cygwin), Mac OSX", + long_description = """\ +The Fortran to Python Interface Generator, or F2PY for short, is a +command line tool (f2py) for generating Python C/API modules for +wrapping Fortran 77/90/95 subroutines, accessing common blocks from +Python, and calling Python functions from Fortran (call-backs). +Interfacing subroutines/data from Fortran 90/95 modules is supported.""", + url = "http://cens.ioc.ee/projects/f2py2e/", + keywords = ['Fortran', 'f2py'], + **config) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/src/fortranobject.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/src/fortranobject.h new file mode 100644 index 0000000000000..689f78c923b06 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/src/fortranobject.h @@ -0,0 +1,162 @@ +#ifndef Py_FORTRANOBJECT_H +#define Py_FORTRANOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "Python.h" + +#ifdef FORTRANOBJECT_C +#define NO_IMPORT_ARRAY +#endif +#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API +#include "numpy/arrayobject.h" + +/* + * Python 3 support macros + */ +#if PY_VERSION_HEX >= 0x03000000 +#define PyString_Check PyBytes_Check +#define PyString_GET_SIZE PyBytes_GET_SIZE +#define PyString_AS_STRING PyBytes_AS_STRING +#define PyString_FromString PyBytes_FromString +#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize +#define PyString_ConcatAndDel PyBytes_ConcatAndDel +#define PyString_AsString PyBytes_AsString + +#define PyInt_Check PyLong_Check +#define PyInt_FromLong PyLong_FromLong +#define PyInt_AS_LONG PyLong_AsLong +#define PyInt_AsLong PyLong_AsLong + +#define PyNumber_Int PyNumber_Long + +#else + +#define PyUString_FromStringAndSize PyString_FromStringAndSize +#endif + + +#ifdef F2PY_REPORT_ATEXIT +#include <sys/time.h> + extern void f2py_start_clock(void); + extern void f2py_stop_clock(void); + extern void f2py_start_call_clock(void); + extern void f2py_stop_call_clock(void); + extern void f2py_cb_start_clock(void); + extern void f2py_cb_stop_clock(void); + extern void f2py_cb_start_call_clock(void); + extern void f2py_cb_stop_call_clock(void); + extern void f2py_report_on_exit(int,void*); +#endif + +#ifdef DMALLOC +#include "dmalloc.h" +#endif + +/* Fortran object interface */ + +/* +123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 + +PyFortranObject represents various Fortran objects: +Fortran (module) routines, COMMON blocks, module data. 
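Illustrative sketch, not part of the patch: each wrapped routine or data
object becomes one FortranDataDef entry (struct defined below); a generated
module typically carries a NULL-terminated table such as (names hypothetical):

    static FortranDataDef f2py_routine_defs[] = {
        {"foo", -1, {{-1}}, 0, (char *)F_FUNC(foo,FOO),
         (f2py_init_func)f2py_rout_mymod_foo, doc_f2py_rout_mymod_foo},
        {NULL}
    };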
+ +Author: Pearu Peterson +*/ + +#define F2PY_MAX_DIMS 40 + +typedef void (*f2py_set_data_func)(char*,npy_intp*); +typedef void (*f2py_void_func)(void); +typedef void (*f2py_init_func)(int*,npy_intp*,f2py_set_data_func,int*); + + /*typedef void* (*f2py_c_func)(void*,...);*/ + +typedef void *(*f2pycfunc)(void); + +typedef struct { + char *name; /* attribute (array||routine) name */ + int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, + || rank=-1 for Fortran routine */ + struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ + int type; /* PyArray_ || not used */ + char *data; /* pointer to array || Fortran routine */ + f2py_init_func func; /* initialization function for + allocatable arrays: + func(&rank,dims,set_ptr_func,name,len(name)) + || C/API wrapper for Fortran routine */ + char *doc; /* documentation string; only recommended + for routines. */ +} FortranDataDef; + +typedef struct { + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ +} PyFortranObject; + +#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) +#define PyFortran_Check1(op) (0==strcmp(Py_TYPE(op)->tp_name,"fortran")) + + extern PyTypeObject PyFortran_Type; + extern int F2PyDict_SetItemString(PyObject* dict, char *name, PyObject *obj); + extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); + extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); + +#if PY_VERSION_HEX >= 0x03000000 + +PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * F2PyCapsule_AsVoidPtr(PyObject *obj); +int F2PyCapsule_Check(PyObject *ptr); + +#else + +PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)); +void * F2PyCapsule_AsVoidPtr(PyObject *ptr); +int F2PyCapsule_Check(PyObject *ptr); + +#endif + +#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS) +#define F2PY_INTENT_IN 1 +#define F2PY_INTENT_INOUT 2 +#define F2PY_INTENT_OUT 4 +#define F2PY_INTENT_HIDE 8 +#define F2PY_INTENT_CACHE 16 +#define F2PY_INTENT_COPY 32 +#define F2PY_INTENT_C 64 +#define F2PY_OPTIONAL 128 +#define F2PY_INTENT_INPLACE 256 +#define F2PY_INTENT_ALIGNED4 512 +#define F2PY_INTENT_ALIGNED8 1024 +#define F2PY_INTENT_ALIGNED16 2048 + +#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) +#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) +#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) +#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) + +#define F2PY_GET_ALIGNMENT(intent) \ + (F2PY_ALIGN4(intent) ? 4 : \ + (F2PY_ALIGN8(intent) ? 8 : \ + (F2PY_ALIGN16(intent) ? 
16 : 1) )) +#define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) + + extern PyArrayObject* array_from_pyobj(const int type_num, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj); + extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + +#ifdef DEBUG_COPY_ND_ARRAY + extern void dump_attrs(const PyArrayObject* arr); +#endif + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FORTRANOBJECT_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap new file mode 100644 index 0000000000000..2665f89b52d2f --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(rk="double")) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_free.f90 new file mode 100644 index 0000000000000..b301710f5dda0 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_free.f90 @@ -0,0 +1,34 @@ + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 new file mode 100644 index 0000000000000..cbe6317ed8f39 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 @@ -0,0 +1,41 @@ + +module mod + +contains + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum + + +end module mod diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_use.f90 new file mode 100644 index 0000000000000..337465ac54044 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_use.f90 @@ -0,0 +1,19 @@ +subroutine sum_with_use(x, res) + use precision + + implicit none + + real(kind=rk), intent(in) :: x(:) + real(kind=rk), intent(out) :: res + + integer :: i + + !print *, "size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + + end subroutine diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/precision.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/precision.f90 new file mode 100644 index 0000000000000..ed6c70cbbe7da --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/precision.f90 @@ -0,0 +1,4 @@ +module 
precision + integer, parameter :: rk = selected_real_kind(8) + integer, parameter :: ik = selected_real_kind(4) +end module diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/kind/foo.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/kind/foo.f90 new file mode 100644 index 0000000000000..d3d15cfb20a15 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/kind/foo.f90 @@ -0,0 +1,20 @@ + + +subroutine selectedrealkind(p, r, res) + implicit none + + integer, intent(in) :: p, r + !f2py integer :: r=0 + integer, intent(out) :: res + res = selected_real_kind(p, r) + +end subroutine + +subroutine selectedintkind(p, res) + implicit none + + integer, intent(in) :: p + integer, intent(out) :: res + res = selected_int_kind(p) + +end subroutine diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo.f b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo.f new file mode 100644 index 0000000000000..c34742578f855 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo.f @@ -0,0 +1,5 @@ + subroutine bar11(a) +cf2py intent(out) a + integer a + a = 11 + end diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_fixed.f90 new file mode 100644 index 0000000000000..7543a6acb7375 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_fixed.f90 @@ -0,0 +1,8 @@ + module foo_fixed + contains + subroutine bar12(a) +!f2py intent(out) a + integer a + a = 12 + end subroutine bar12 + end module foo_fixed diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_free.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_free.f90 new file mode 100644 index 0000000000000..c1b641f13ec29 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_free.f90 @@ -0,0 +1,8 @@ +module foo_free +contains + subroutine bar13(a) + !f2py intent(out) a + integer a + a = 13 + end subroutine bar13 +end module foo_free diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/size/foo.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/size/foo.f90 new file mode 100644 index 0000000000000..5b66f8c430d79 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/size/foo.f90 @@ -0,0 +1,44 @@ + +subroutine foo(a, n, m, b) + implicit none + + real, intent(in) :: a(n, m) + integer, intent(in) :: n, m + real, intent(out) :: b(size(a, 1)) + + integer :: i + + do i = 1, size(b) + b(i) = sum(a(i,:)) + enddo +end subroutine + +subroutine trans(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x,2), size(x,1) ) :: y + integer :: N, M, i, j + N = size(x,1) + M = size(x,2) + DO i=1,N + do j=1,M + y(j,i) = x(i,j) + END DO + END DO +end subroutine trans + +subroutine flatten(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x) ) :: y + integer :: N, M, i, j, k + N = size(x,1) + M = size(x,2) + k = 1 + DO i=1,N + do j=1,M + y(k) = x(i,j) + k = k + 1 + END DO + END DO +end subroutine flatten diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_array_from_pyobj.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_array_from_pyobj.py new file mode 100644 index 0000000000000..c51fa39363e4f --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_array_from_pyobj.py @@ 
-0,0 +1,559 @@ +from __future__ import division, absolute_import, print_function + +import unittest +import os +import sys +import copy +import platform + +import nose + +from numpy.testing import * +from numpy import (array, alltrue, ndarray, asarray, can_cast, zeros, dtype, + intp, clongdouble) +from numpy.core.multiarray import typeinfo + +import util + +wrap = None +def setup(): + """ + Build the required testing extension module + + """ + global wrap + + # Check compiler availability first + if not util.has_c_compiler(): + raise nose.SkipTest("No C compiler available") + + if wrap is None: + config_code = """ + config.add_extension('test_array_from_pyobj_ext', + sources=['wrapmodule.c', 'fortranobject.c'], + define_macros=[]) + """ + d = os.path.dirname(__file__) + src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), + os.path.join(d, '..', 'src', 'fortranobject.c'), + os.path.join(d, '..', 'src', 'fortranobject.h')] + wrap = util.build_module_distutils(src, config_code, + 'test_array_from_pyobj_ext') + +def flags_info(arr): + flags = wrap.array_attrs(arr)[6] + return flags2names(flags) + +def flags2names(flags): + info = [] + for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', + 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', + 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', + 'CARRAY', 'FARRAY' + ]: + if abs(flags) & getattr(wrap, flagname, 0): + info.append(flagname) + return info + +class Intent(object): + def __init__(self,intent_list=[]): + self.intent_list = intent_list[:] + flags = 0 + for i in intent_list: + if i=='optional': + flags |= wrap.F2PY_OPTIONAL + else: + flags |= getattr(wrap, 'F2PY_INTENT_'+i.upper()) + self.flags = flags + def __getattr__(self, name): + name = name.lower() + if name=='in_': name='in' + return self.__class__(self.intent_list+[name]) + def __str__(self): + return 'intent(%s)' % (','.join(self.intent_list)) + def __repr__(self): + return 'Intent(%r)' % (self.intent_list) + def is_intent(self,*names): + for name in names: + if name not in self.intent_list: + return False + return True + def is_intent_exact(self,*names): + return len(self.intent_list)==len(names) and self.is_intent(*names) + +intent = Intent() + +_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', + 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', + 'FLOAT', 'DOUBLE', 'CFLOAT'] + +_cast_dict = {'BOOL':['BOOL']} +_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] +_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] +_cast_dict['BYTE'] = ['BYTE'] +_cast_dict['UBYTE'] = ['UBYTE'] +_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] +_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] +_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] +_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] + +_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] +_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] + +_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] +_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] + +_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] +_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] + +_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types this means the inout intent cannot be satisfied and +# several tests fail as the alignment flag can be randomly true or false +# when numpy gains an aligned allocator the tests could be enabled 
again +if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and + sys.platform != 'win32'): + _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) + _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ + ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] + _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ + ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] + _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] + +class Type(object): + _type_cache = {} + + def __new__(cls, name): + if isinstance(name, dtype): + dtype0 = name + name = None + for n, i in typeinfo.items(): + if isinstance(i, tuple) and dtype0.type is i[-1]: + name = n + break + obj = cls._type_cache.get(name.upper(), None) + if obj is not None: + return obj + obj = object.__new__(cls) + obj._init(name) + cls._type_cache[name.upper()] = obj + return obj + + def _init(self, name): + self.NAME = name.upper() + self.type_num = getattr(wrap, 'NPY_'+self.NAME) + assert_equal(self.type_num, typeinfo[self.NAME][1]) + self.dtype = typeinfo[self.NAME][-1] + self.elsize = typeinfo[self.NAME][2] / 8 + self.dtypechar = typeinfo[self.NAME][0] + + def cast_types(self): + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] + + def all_types(self): + return [self.__class__(_m) for _m in _type_names] + + def smaller_types(self): + bits = typeinfo[self.NAME][3] + types = [] + for name in _type_names: + if typeinfo[name][3] < bits: + types.append(Type(name)) + return types + + def equal_types(self): + bits = typeinfo[self.NAME][3] + types = [] + for name in _type_names: + if name==self.NAME: continue + if typeinfo[name][3]==bits: + types.append(Type(name)) + return types + + def larger_types(self): + bits = typeinfo[self.NAME][3] + types = [] + for name in _type_names: + if typeinfo[name][3] > bits: + types.append(Type(name)) + return types + +class Array(object): + def __init__(self, typ, dims, intent, obj): + self.type = typ + self.dims = dims + self.intent = intent + self.obj_copy = copy.deepcopy(obj) + self.obj = obj + + # arr.dtypechar may be different from typ.dtypechar + self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) + + assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) + + self.arr_attr = wrap.array_attrs(self.arr) + + if len(dims)>1: + if self.intent.is_intent('c'): + assert_(intent.flags & wrap.F2PY_INTENT_C) + assert_(not self.arr.flags['FORTRAN'], repr((self.arr.flags, getattr(obj, 'flags', None)))) + assert_(self.arr.flags['CONTIGUOUS']) + assert_(not self.arr_attr[6] & wrap.FORTRAN) + else: + assert_(not intent.flags & wrap.F2PY_INTENT_C) + assert_(self.arr.flags['FORTRAN']) + assert_(not self.arr.flags['CONTIGUOUS']) + assert_(self.arr_attr[6] & wrap.FORTRAN) + + if obj is None: + self.pyarr = None + self.pyarr_attr = None + return + + if intent.is_intent('cache'): + assert_(isinstance(obj, ndarray), repr(type(obj))) + self.pyarr = array(obj).reshape(*dims).copy() + else: + self.pyarr = array(array(obj, + dtype = typ.dtypechar).reshape(*dims), + order=self.intent.is_intent('c') and 'C' or 'F') + assert_(self.pyarr.dtype == typ, \ + repr((self.pyarr.dtype, typ))) + assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) + self.pyarr_attr = wrap.array_attrs(self.pyarr) + + if len(dims)>1: + if self.intent.is_intent('c'): + assert_(not self.pyarr.flags['FORTRAN']) + assert_(self.pyarr.flags['CONTIGUOUS']) + assert_(not self.pyarr_attr[6] & wrap.FORTRAN) + else: + assert_(self.pyarr.flags['FORTRAN']) + assert_(not self.pyarr.flags['CONTIGUOUS']) + assert_(self.pyarr_attr[6] & wrap.FORTRAN) + + + assert_(self.arr_attr[1]==self.pyarr_attr[1]) # nd + assert_(self.arr_attr[2]==self.pyarr_attr[2]) # dimensions + if self.arr_attr[1]<=1: + assert_(self.arr_attr[3]==self.pyarr_attr[3],\ + repr((self.arr_attr[3], self.pyarr_attr[3], + self.arr.tobytes(), self.pyarr.tobytes()))) # strides + assert_(self.arr_attr[5][-2:]==self.pyarr_attr[5][-2:],\ + 
repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr + assert_(self.arr_attr[6]==self.pyarr_attr[6],\ + repr((self.arr_attr[6], self.pyarr_attr[6], flags2names(0*self.arr_attr[6]-self.pyarr_attr[6]), flags2names(self.arr_attr[6]), intent))) # flags + + if intent.is_intent('cache'): + assert_(self.arr_attr[5][3]>=self.type.elsize,\ + repr((self.arr_attr[5][3], self.type.elsize))) + else: + assert_(self.arr_attr[5][3]==self.type.elsize,\ + repr((self.arr_attr[5][3], self.type.elsize))) + assert_(self.arr_equal(self.pyarr, self.arr)) + + if isinstance(self.obj, ndarray): + if typ.elsize==Type(obj.dtype).elsize: + if not intent.is_intent('copy') and self.arr_attr[1]<=1: + assert_(self.has_shared_memory()) + + def arr_equal(self, arr1, arr2): + if arr1.shape != arr2.shape: + return False + s = arr1==arr2 + return alltrue(s.flatten()) + + def __str__(self): + return str(self.arr) + + def has_shared_memory(self): + """Check that created array shares data with input array. + """ + if self.obj is self.arr: + return True + if not isinstance(self.obj, ndarray): + return False + obj_attr = wrap.array_attrs(self.obj) + return obj_attr[0]==self.arr_attr[0] + +################################################## + +class test_intent(unittest.TestCase): + def test_in_out(self): + assert_equal(str(intent.in_.out), 'intent(in,out)') + assert_(intent.in_.c.is_intent('c')) + assert_(not intent.in_.c.is_intent_exact('c')) + assert_(intent.in_.c.is_intent_exact('c', 'in')) + assert_(intent.in_.c.is_intent_exact('in', 'c')) + assert_(not intent.in_.is_intent('c')) + +class _test_shared_memory: + num2seq = [1, 2] + num23seq = [[1, 2, 3], [4, 5, 6]] + def test_in_from_2seq(self): + a = self.array([2], intent.in_, self.num2seq) + assert_(not a.has_shared_memory()) + + def test_in_from_2casttype(self): + for t in self.type.cast_types(): + obj = array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) + if t.elsize==self.type.elsize: + assert_(a.has_shared_memory(), repr((self.type.dtype, t.dtype))) + else: + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_inout_2seq(self): + obj = array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) + assert_(a.has_shared_memory()) + + try: + a = self.array([2], intent.in_.inout, self.num2seq) + except TypeError as msg: + if not str(msg).startswith('failed to initialize intent(inout|inplace|cache) array'): + raise + else: + raise SystemError('intent(inout) should have failed on sequence') + + def test_f_inout_23seq(self): + obj = array(self.num23seq, dtype=self.type.dtype, order='F') + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) + assert_(a.has_shared_memory()) + + obj = array(self.num23seq, dtype=self.type.dtype, order='C') + shape = (len(self.num23seq), len(self.num23seq[0])) + try: + a = self.array(shape, intent.in_.inout, obj) + except ValueError as msg: + if not str(msg).startswith('failed to initialize intent(inout) array'): + raise + else: + raise SystemError('intent(inout) should have failed on improper array') + + def test_c_inout_23seq(self): + obj = array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) + assert_(a.has_shared_memory()) + + def test_in_copy_from_2casttype(self): + for t in self.type.cast_types(): + obj = array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert_(not 
a.has_shared_memory(), repr(t.dtype)) + + def test_c_in_from_23seq(self): + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, self.num23seq) + assert_(not a.has_shared_memory()) + + def test_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_f_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype, order='F') + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, obj) + if t.elsize==self.type.elsize: + assert_(a.has_shared_memory(), repr(t.dtype)) + else: + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_c_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.c, obj) + if t.elsize==self.type.elsize: + assert_(a.has_shared_memory(), repr(t.dtype)) + else: + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_f_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype, order='F') + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_c_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.c.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_in_cache_from_2casttype(self): + for t in self.type.all_types(): + if t.elsize != self.type.elsize: + continue + obj = array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq),) + a = self.array(shape, intent.in_.c.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + a = self.array(shape, intent.in_.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + obj = array(self.num2seq, dtype=t.dtype, order='F') + a = self.array(shape, intent.in_.c.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + a = self.array(shape, intent.in_.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + try: + a = self.array(shape, intent.in_.cache, obj[::-1]) + except ValueError as msg: + if not str(msg).startswith('failed to initialize intent(cache) array'): + raise + else: + raise SystemError('intent(cache) should have failed on multisegmented array') + def test_in_cache_from_2casttype_failure(self): + for t in self.type.all_types(): + if t.elsize >= self.type.elsize: + continue + obj = array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq),) + try: + a = self.array(shape, intent.in_.cache, obj) + except ValueError as msg: + if not str(msg).startswith('failed to initialize intent(cache) array'): + raise + else: + raise SystemError('intent(cache) should have failed on smaller array') + + def test_cache_hidden(self): + shape = (2,) + a = self.array(shape, intent.cache.hide, None) + assert_(a.arr.shape==shape) + + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) + assert_(a.arr.shape==shape) + + shape = (-1, 3) + try: + a = self.array(shape, intent.cache.hide, None) + except ValueError as msg: + if not str(msg).startswith('failed to create intent(cache|hide)|optional array'): + raise + else: + raise SystemError('intent(cache) should have failed on undefined dimensions') + + 
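    # A note on the intent.<attr> chaining used throughout these tests: each
    # attribute access on the module-level `intent` object returns a fresh
    # Intent whose flag mask ORs in the matching wrap.F2PY_INTENT_* constant
    # (see the Intent class near the top of this file). A minimal sketch of
    # the equivalence, using only names defined above:
    #
    #   i = intent.in_.c.copy        # Intent(['in', 'c', 'copy'])
    #   assert str(i) == 'intent(in,c,copy)'
    #   assert i.flags == (wrap.F2PY_INTENT_IN | wrap.F2PY_INTENT_C |
    #                      wrap.F2PY_INTENT_COPY)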
def test_hidden(self): + shape = (2,) + a = self.array(shape, intent.hide, None) + assert_(a.arr.shape==shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + + shape = (2, 3) + a = self.array(shape, intent.hide, None) + assert_(a.arr.shape==shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) + + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) + assert_(a.arr.shape==shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) + + shape = (-1, 3) + try: + a = self.array(shape, intent.hide, None) + except ValueError as msg: + if not str(msg).startswith('failed to create intent(cache|hide)|optional array'): + raise + else: + raise SystemError('intent(hide) should have failed on undefined dimensions') + + def test_optional_none(self): + shape = (2,) + a = self.array(shape, intent.optional, None) + assert_(a.arr.shape==shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + + shape = (2, 3) + a = self.array(shape, intent.optional, None) + assert_(a.arr.shape==shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) + + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) + assert_(a.arr.shape==shape) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) + assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) + + def test_optional_from_2seq(self): + obj = self.num2seq + shape = (len(obj),) + a = self.array(shape, intent.optional, obj) + assert_(a.arr.shape==shape) + assert_(not a.has_shared_memory()) + + def test_optional_from_23seq(self): + obj = self.num23seq + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) + assert_(a.arr.shape==shape) + assert_(not a.has_shared_memory()) + + a = self.array(shape, intent.optional.c, obj) + assert_(a.arr.shape==shape) + assert_(not a.has_shared_memory()) + + def test_inplace(self): + obj = array(self.num23seq, dtype=self.type.dtype) + assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert_(obj[1][2]==a.arr[1][2], repr((obj, a.arr))) + a.arr[1][2]=54 + assert_(obj[1][2]==a.arr[1][2]==array(54, dtype=self.type.dtype), repr((obj, a.arr))) + assert_(a.arr is obj) + assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! + assert_(not obj.flags['CONTIGUOUS']) + + def test_inplace_from_casttype(self): + for t in self.type.cast_types(): + if t is self.type: + continue + obj = array(self.num23seq, dtype=t.dtype) + assert_(obj.dtype.type==t.dtype) + assert_(obj.dtype.type is not self.type.dtype) + assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert_(obj[1][2]==a.arr[1][2], repr((obj, a.arr))) + a.arr[1][2]=54 + assert_(obj[1][2]==a.arr[1][2]==array(54, dtype=self.type.dtype), repr((obj, a.arr))) + assert_(a.arr is obj) + assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! + assert_(not obj.flags['CONTIGUOUS']) + assert_(obj.dtype.type is self.type.dtype) # obj type is changed inplace! 
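# The loop below stamps out one TestCase per entry in _type_names by exec'ing
# a small class template. A rough non-exec equivalent of what each template
# expands to (illustrative only; the exec form below is what actually runs,
# and it binds `array` as a lambda in the class body):
#
#   def _make_case(t):
#       def setUp(self):
#           self.type = Type(t)
#       return type('test_%s_gen' % t,
#                   (unittest.TestCase, _test_shared_memory),
#                   {'setUp': setUp,
#                    'array': lambda self, dims, intent, obj:
#                                 Array(Type(t), dims, intent, obj)})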
+ + +for t in _type_names: + exec('''\ +class test_%s_gen(unittest.TestCase, + _test_shared_memory + ): + def setUp(self): + self.type = Type(%r) + array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj) +''' % (t, t, t)) + +if __name__ == "__main__": + setup() + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_assumed_shape.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_assumed_shape.py new file mode 100644 index 0000000000000..d6beaee63dfd5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_assumed_shape.py @@ -0,0 +1,37 @@ +from __future__ import division, absolute_import, print_function + +import os +import math + +from numpy.testing import * +from numpy import array + +import util + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +class TestAssumedShapeSumExample(util.F2PyTest): + sources = [_path('src', 'assumed_shape', 'foo_free.f90'), + _path('src', 'assumed_shape', 'foo_use.f90'), + _path('src', 'assumed_shape', 'precision.f90'), + _path('src', 'assumed_shape', 'foo_mod.f90'), + ] + + @dec.slow + def test_all(self): + r = self.module.fsum([1, 2]) + assert_(r==3, repr(r)) + r = self.module.sum([1, 2]) + assert_(r==3, repr(r)) + r = self.module.sum_with_use([1, 2]) + assert_(r==3, repr(r)) + + r = self.module.mod.sum([1, 2]) + assert_(r==3, repr(r)) + r = self.module.mod.fsum([1, 2]) + assert_(r==3, repr(r)) + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_callback.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_callback.py new file mode 100644 index 0000000000000..16464140f14cd --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_callback.py @@ -0,0 +1,132 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy import array +import math +import util +import textwrap + +class TestF77Callback(util.F2PyTest): + code = """ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + """ + + @dec.slow + def test_all(self): + for name in "t,t2".split(","): + self.check_function(name) + + @dec.slow + def test_docstring(self): + expected = """ + a = t(fun,[fun_extra_args]) + + Wrapper for ``t``. 
+ + Parameters + ---------- + fun : call-back function + + Other Parameters + ---------------- + fun_extra_args : input tuple, optional + Default: () + + Returns + ------- + a : int + + Notes + ----- + Call-back functions:: + + def fun(): return a + Return objects: + a : int + """ + assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip()) + + def check_function(self, name): + t = getattr(self.module, name) + r = t(lambda : 4) + assert_( r==4, repr(r)) + r = t(lambda a:5, fun_extra_args=(6,)) + assert_( r==5, repr(r)) + r = t(lambda a:a, fun_extra_args=(6,)) + assert_( r==6, repr(r)) + r = t(lambda a:5+a, fun_extra_args=(7,)) + assert_( r==12, repr(r)) + r = t(lambda a:math.degrees(a), fun_extra_args=(math.pi,)) + assert_( r==180, repr(r)) + r = t(math.degrees, fun_extra_args=(math.pi,)) + assert_( r==180, repr(r)) + + r = t(self.module.func, fun_extra_args=(6,)) + assert_( r==17, repr(r)) + r = t(self.module.func0) + assert_( r==11, repr(r)) + r = t(self.module.func0._cpointer) + assert_( r==11, repr(r)) + class A(object): + def __call__(self): + return 7 + def mth(self): + return 9 + a = A() + r = t(a) + assert_( r==7, repr(r)) + r = t(a.mth) + assert_( r==9, repr(r)) + + def test_string_callback(self): + + def callback(code): + if code == 'r': + return 0 + else: + return 1 + + f = getattr(self.module, 'string_callback') + r = f(callback) + assert_(r == 0, repr(r)) + + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_kind.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_kind.py new file mode 100644 index 0000000000000..f96fbffdb51be --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_kind.py @@ -0,0 +1,36 @@ +from __future__ import division, absolute_import, print_function + +import os +import math + +from numpy.testing import * +from numpy import array + +import util + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +from numpy.f2py.crackfortran import _selected_int_kind_func as selected_int_kind +from numpy.f2py.crackfortran import _selected_real_kind_func as selected_real_kind + +class TestKind(util.F2PyTest): + sources = [_path('src', 'kind', 'foo.f90'), + ] + + @dec.slow + def test_all(self): + selectedrealkind = self.module.selectedrealkind + selectedintkind = self.module.selectedintkind + + for i in range(40): + assert_(selectedintkind(i) in [selected_int_kind(i), -1],\ + 'selectedintkind(%s): expected %r but got %r' % (i, selected_int_kind(i), selectedintkind(i))) + + for i in range(20): + assert_(selectedrealkind(i) in [selected_real_kind(i), -1],\ + 'selectedrealkind(%s): expected %r but got %r' % (i, selected_real_kind(i), selectedrealkind(i))) + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_mixed.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_mixed.py new file mode 100644 index 0000000000000..c4cb4889bcb3a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_mixed.py @@ -0,0 +1,41 @@ +from __future__ import division, absolute_import, print_function + +import os +import math + +from numpy.testing import * +from numpy import array + +import util +import textwrap + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +class TestMixed(util.F2PyTest): + sources = [_path('src', 'mixed', 'foo.f'), + _path('src', 'mixed', 'foo_fixed.f90'), + _path('src', 'mixed', 
'foo_free.f90')] + + @dec.slow + def test_all(self): + assert_( self.module.bar11() == 11) + assert_( self.module.foo_fixed.bar12() == 12) + assert_( self.module.foo_free.bar13() == 13) + + @dec.slow + def test_docstring(self): + expected = """ + a = bar11() + + Wrapper for ``bar11``. + + Returns + ------- + a : int + """ + assert_equal(self.module.bar11.__doc__, textwrap.dedent(expected).lstrip()) + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_character.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_character.py new file mode 100644 index 0000000000000..0865d54b3eb01 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_character.py @@ -0,0 +1,142 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy import array +from numpy.compat import asbytes +import util + +class TestReturnCharacter(util.F2PyTest): + def check_function(self, t): + tname = t.__doc__.split()[0] + if tname in ['t0', 't1', 's0', 's1']: + assert_( t(23)==asbytes('2')) + r = t('ab');assert_( r==asbytes('a'), repr(r)) + r = t(array('ab'));assert_( r==asbytes('a'), repr(r)) + r = t(array(77, 'u1'));assert_( r==asbytes('M'), repr(r)) + #assert_(_raises(ValueError, t, array([77,87]))) + #assert_(_raises(ValueError, t, array(77))) + elif tname in ['ts', 'ss']: + assert_( t(23)==asbytes('23 '), repr(t(23))) + assert_( t('123456789abcdef')==asbytes('123456789a')) + elif tname in ['t5', 's5']: + assert_( t(23)==asbytes('23 '), repr(t(23))) + assert_( t('ab')==asbytes('ab '), repr(t('ab'))) + assert_( t('123456789abcdef')==asbytes('12345')) + else: + raise NotImplementedError + +class TestF77ReturnCharacter(TestReturnCharacter): + code = """ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + ts = value + end + """ + + @dec.slow + def test_all(self): + for name in "t0,t1,t5,s0,s1,s5,ss".split(","): + self.check_function(getattr(self.module, name)) + +class TestF90ReturnCharacter(TestReturnCharacter): + suffix = ".f90" + code = """ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: 
t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char + """ + + @dec.slow + def test_all(self): + for name in "t0,t1,t5,ts,s0,s1,s5,ss".split(","): + self.check_function(getattr(self.module.f90_return_char, name)) + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_complex.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_complex.py new file mode 100644 index 0000000000000..d144cecf16575 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_complex.py @@ -0,0 +1,169 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy import array +from numpy.compat import long +import util + +class TestReturnComplex(util.F2PyTest): + def check_function(self, t): + tname = t.__doc__.split()[0] + if tname in ['t0', 't8', 's0', 's8']: + err = 1e-5 + else: + err = 0.0 + assert_( abs(t(234j)-234.0j)<=err) + assert_( abs(t(234.6)-234.6)<=err) + assert_( abs(t(long(234))-234.0)<=err) + assert_( abs(t(234.6+3j)-(234.6+3j))<=err) + #assert_( abs(t('234')-234.)<=err) + #assert_( abs(t('234.6')-234.6)<=err) + assert_( abs(t(-234)+234.)<=err) + assert_( abs(t([234])-234.)<=err) + assert_( abs(t((234,))-234.)<=err) + assert_( abs(t(array(234))-234.)<=err) + assert_( abs(t(array(23+4j, 'F'))-(23+4j))<=err) + assert_( abs(t(array([234]))-234.)<=err) + assert_( abs(t(array([[234]]))-234.)<=err) + assert_( abs(t(array([234], 'b'))+22.)<=err) + assert_( abs(t(array([234], 'h'))-234.)<=err) + assert_( abs(t(array([234], 'i'))-234.)<=err) + assert_( abs(t(array([234], 'l'))-234.)<=err) + assert_( abs(t(array([234], 'q'))-234.)<=err) + assert_( abs(t(array([234], 'f'))-234.)<=err) + assert_( abs(t(array([234], 'd'))-234.)<=err) + assert_( abs(t(array([234+3j], 'F'))-(234+3j))<=err) + assert_( abs(t(array([234], 'D'))-234.)<=err) + + #assert_raises(TypeError, t, array([234], 'a1')) + assert_raises(TypeError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(TypeError, t, t) + assert_raises(TypeError, t, {}) + + try: + r = t(10**400) + assert_( repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r)) + except OverflowError: + pass + + +class TestF77ReturnComplex(TestReturnComplex): + code = """ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value + double complex td +cf2py intent(out) td + td = value + end + """ + + @dec.slow + def test_all(self): + for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","): + self.check_function(getattr(self.module, name)) + + +class TestF90ReturnComplex(TestReturnComplex): + suffix = ".f90" + code = """ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value 
+ end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_complex + """ + + @dec.slow + def test_all(self): + for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","): + self.check_function(getattr(self.module.f90_return_complex, name)) + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_integer.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_integer.py new file mode 100644 index 0000000000000..056466208f6cb --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_integer.py @@ -0,0 +1,178 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy import array +from numpy.compat import long +import util + +class TestReturnInteger(util.F2PyTest): + def check_function(self, t): + assert_( t(123)==123, repr(t(123))) + assert_( t(123.6)==123) + assert_( t(long(123))==123) + assert_( t('123')==123) + assert_( t(-123)==-123) + assert_( t([123])==123) + assert_( t((123,))==123) + assert_( t(array(123))==123) + assert_( t(array([123]))==123) + assert_( t(array([[123]]))==123) + assert_( t(array([123], 'b'))==123) + assert_( t(array([123], 'h'))==123) + assert_( t(array([123], 'i'))==123) + assert_( t(array([123], 'l'))==123) + assert_( t(array([123], 'B'))==123) + assert_( t(array([123], 'f'))==123) + assert_( t(array([123], 'd'))==123) + + #assert_raises(ValueError, t, array([123],'S3')) + assert_raises(ValueError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(Exception, t, t) + assert_raises(Exception, t, {}) + + if t.__doc__.split()[0] in ['t8', 's8']: + assert_raises(OverflowError, t, 100000000000000000000000) + assert_raises(OverflowError, t, 10000000011111111111111.23) + +class TestF77ReturnInteger(TestReturnInteger): + code = """ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = 
value + end + """ + + @dec.slow + def test_all(self): + for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): + self.check_function(getattr(self.module, name)) + + +class TestF90ReturnInteger(TestReturnInteger): + suffix = ".f90" + code = """ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer + """ + + @dec.slow + def test_all(self): + for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): + self.check_function(getattr(self.module.f90_return_integer, name)) + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_logical.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_logical.py new file mode 100644 index 0000000000000..82f86b67f1ea4 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_logical.py @@ -0,0 +1,187 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy import array +from numpy.compat import long +import util + +class TestReturnLogical(util.F2PyTest): + def check_function(self, t): + assert_( t(True)==1, repr(t(True))) + assert_( t(False)==0, repr(t(False))) + assert_( t(0)==0) + assert_( t(None)==0) + assert_( t(0.0)==0) + assert_( t(0j)==0) + assert_( t(1j)==1) + assert_( t(234)==1) + assert_( t(234.6)==1) + assert_( t(long(234))==1) + assert_( t(234.6+3j)==1) + assert_( t('234')==1) + assert_( t('aaa')==1) + assert_( t('')==0) + assert_( t([])==0) + assert_( t(())==0) + assert_( t({})==0) + assert_( t(t)==1) + assert_( t(-234)==1) + assert_( t(10**100)==1) + assert_( t([234])==1) + assert_( t((234,))==1) + assert_( t(array(234))==1) + assert_( t(array([234]))==1) + assert_( t(array([[234]]))==1) + assert_( t(array([234], 'b'))==1) + assert_( t(array([234], 'h'))==1) + assert_( t(array([234], 'i'))==1) + assert_( t(array([234], 'l'))==1) + assert_( t(array([234], 'f'))==1) + assert_( t(array([234], 'd'))==1) + assert_( t(array([234+3j], 'F'))==1) + assert_( t(array([234], 'D'))==1) + assert_( t(array(0))==0) + assert_( t(array([0]))==0) + assert_( t(array([[0]]))==0) + assert_( t(array([0j]))==0) + assert_( t(array([1]))==1) + assert_raises(ValueError, t, array([0, 0])) + + +class TestF77ReturnLogical(TestReturnLogical): + code = """ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + 
logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end + """ + + @dec.slow + def test_all(self): + for name in "t0,t1,t2,t4,s0,s1,s2,s4".split(","): + self.check_function(getattr(self.module, name)) + +class TestF90ReturnLogical(TestReturnLogical): + suffix = ".f90" + code = """ +module f90_return_logical + contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_logical + """ + + @dec.slow + def test_all(self): + for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): + self.check_function(getattr(self.module.f90_return_logical, name)) + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_real.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_real.py new file mode 100644 index 0000000000000..f9a09f6207242 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_real.py @@ -0,0 +1,203 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy import array +from numpy.compat import long +import math +import util + +class TestReturnReal(util.F2PyTest): + def check_function(self, t): + if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: + err = 1e-5 + else: + err = 0.0 + assert_( abs(t(234)-234.0)<=err) + assert_( abs(t(234.6)-234.6)<=err) + assert_( abs(t(long(234))-234.0)<=err) + assert_( abs(t('234')-234)<=err) + assert_( abs(t('234.6')-234.6)<=err) + assert_( abs(t(-234)+234)<=err) + assert_( abs(t([234])-234)<=err) + assert_( abs(t((234,))-234.)<=err) + assert_( abs(t(array(234))-234.)<=err) + assert_( abs(t(array([234]))-234.)<=err) + assert_( abs(t(array([[234]]))-234.)<=err) + assert_( abs(t(array([234], 
'b'))+22)<=err) + assert_( abs(t(array([234], 'h'))-234.)<=err) + assert_( abs(t(array([234], 'i'))-234.)<=err) + assert_( abs(t(array([234], 'l'))-234.)<=err) + assert_( abs(t(array([234], 'B'))-234.)<=err) + assert_( abs(t(array([234], 'f'))-234.)<=err) + assert_( abs(t(array([234], 'd'))-234.)<=err) + if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: + assert_( t(1e200)==t(1e300)) # inf + + #assert_raises(ValueError, t, array([234], 'S1')) + assert_raises(ValueError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(Exception, t, t) + assert_raises(Exception, t, {}) + + try: + r = t(10**400) + assert_( repr(r) in ['inf', 'Infinity'], repr(r)) + except OverflowError: + pass + +class TestCReturnReal(TestReturnReal): + suffix = ".pyf" + module_name = "c_ext_return_real" + code = """ +python module c_ext_return_real +usercode \'\'\' +float t4(float value) { return value; } +void s4(float *t4, float value) { *t4 = value; } +double t8(double value) { return value; } +void s8(double *t8, double value) { *t8 = value; } +\'\'\' +interface + function t4(value) + real*4 intent(c) :: t4,value + end + function t8(value) + real*8 intent(c) :: t8,value + end + subroutine s4(t4,value) + intent(c) s4 + real*4 intent(out) :: t4 + real*4 intent(c) :: value + end + subroutine s8(t8,value) + intent(c) s8 + real*8 intent(out) :: t8 + real*8 intent(c) :: value + end +end interface +end python module c_ext_return_real + """ + + @dec.slow + def test_all(self): + for name in "t4,t8,s4,s8".split(","): + self.check_function(getattr(self.module, name)) + +class TestF77ReturnReal(TestReturnReal): + code = """ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end + """ + + @dec.slow + def test_all(self): + for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","): + self.check_function(getattr(self.module, name)) + +class TestF90ReturnReal(TestReturnReal): + suffix = ".f90" + code = """ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real + """ + + @dec.slow + def test_all(self): + for name 
in "t0,t4,t8,td,s0,s4,s8,sd".split(","): + self.check_function(getattr(self.module.f90_return_real, name)) + + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_size.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_size.py new file mode 100644 index 0000000000000..e4f21b519ca4e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_size.py @@ -0,0 +1,47 @@ +from __future__ import division, absolute_import, print_function + +import os +import math + +from numpy.testing import * +from numpy import array + +import util + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +class TestSizeSumExample(util.F2PyTest): + sources = [_path('src', 'size', 'foo.f90'), + ] + + @dec.slow + def test_all(self): + r = self.module.foo([[1, 2]]) + assert_equal(r, [3], repr(r)) + + r = self.module.foo([[1, 2], [3, 4]]) + assert_equal(r, [3, 7], repr(r)) + + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert_equal(r, [3, 7, 11], repr(r)) + + @dec.slow + def test_transpose(self): + r = self.module.trans([[1, 2]]) + assert_equal(r, [[1], [2]], repr(r)) + + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) + + @dec.slow + def test_flatten(self): + r = self.module.flatten([[1, 2]]) + assert_equal(r, [1, 2], repr(r)) + + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) + +if __name__ == "__main__": + import nose + nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/util.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/util.py new file mode 100644 index 0000000000000..56aff2b666fa3 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/util.py @@ -0,0 +1,353 @@ +""" +Utility functions for + +- building and importing modules on test time, using a temporary location +- detecting if compilers are present + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import subprocess +import tempfile +import shutil +import atexit +import textwrap +import re +import random + +import nose + +from numpy.compat import asbytes, asstr +import numpy.f2py + +try: + from hashlib import md5 +except ImportError: + from md5 import new as md5 + +# +# Maintaining a temporary module directory +# + +_module_dir = None + +def _cleanup(): + global _module_dir + if _module_dir is not None: + try: + sys.path.remove(_module_dir) + except ValueError: + pass + try: + shutil.rmtree(_module_dir) + except (IOError, OSError): + pass + _module_dir = None + +def get_module_dir(): + global _module_dir + if _module_dir is None: + _module_dir = tempfile.mkdtemp() + atexit.register(_cleanup) + if _module_dir not in sys.path: + sys.path.insert(0, _module_dir) + return _module_dir + +def get_temp_module_name(): + # Assume single-threaded, and the module dir usable only by this thread + d = get_module_dir() + for j in range(5403, 9999999): + name = "_test_ext_module_%d" % j + fn = os.path.join(d, name) + if name not in sys.modules and not os.path.isfile(fn+'.py'): + return name + raise RuntimeError("Failed to create a temporary module name") + +def _memoize(func): + memo = {} + def wrapper(*a, **kw): + key = repr((a, kw)) + if key not in memo: + try: + memo[key] = func(*a, **kw) + except Exception as e: + memo[key] = e + raise + ret = memo[key] + if isinstance(ret, Exception): + raise ret + return ret + wrapper.__name__ = 
func.__name__ + return wrapper + +# +# Building modules +# + +@_memoize +def build_module(source_files, options=[], skip=[], only=[], module_name=None): + """ + Compile and import a f2py module, built from the given files. + + """ + + code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; " + "f2py2e.main()" % repr(sys.path)) + + d = get_module_dir() + + # Copy files + dst_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError("%s is not a file" % fn) + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + fn = os.path.join(os.path.dirname(fn), '.f2py_f2cmap') + if os.path.isfile(fn): + dst = os.path.join(d, os.path.basename(fn)) + if not os.path.isfile(dst): + shutil.copyfile(fn, dst) + + # Prepare options + if module_name is None: + module_name = get_temp_module_name() + f2py_opts = ['-c', '-m', module_name] + options + dst_sources + if skip: + f2py_opts += ['skip:'] + skip + if only: + f2py_opts += ['only:'] + only + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, '-c', code] + f2py_opts + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError("Running f2py failed: %s\n%s" + % (cmd[4:], asstr(out))) + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Import + __import__(module_name) + return sys.modules[module_name] + +@_memoize +def build_code(source_code, options=[], skip=[], only=[], suffix=None, + module_name=None): + """ + Compile and import Fortran code using f2py. + + """ + if suffix is None: + suffix = '.f' + + fd, tmp_fn = tempfile.mkstemp(suffix=suffix) + os.write(fd, asbytes(source_code)) + os.close(fd) + + try: + return build_module([tmp_fn], options=options, skip=skip, only=only, + module_name=module_name) + finally: + os.unlink(tmp_fn) + +# +# Check if compilers are available at all... +# + +_compiler_status = None +def _get_compiler_status(): + global _compiler_status + if _compiler_status is not None: + return _compiler_status + + _compiler_status = (False, False, False) + + # XXX: this is really ugly. But I don't know how to invoke Distutils + # in a safer way... 
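    # (Sketch of the probe below: write a throwaway distutils setup script to
    # a temp file, run it in a subprocess with the 'config' command, let
    # config_cmd.try_compile() check for a working C compiler and the
    # have_f77c()/have_f90c() helpers check for Fortran compilers, then parse
    # the printed COMPILERS:<c>,<f77>,<f90> line back out of the output.)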
+ code = """ +import os +import sys +sys.path = %(syspath)s + +def configuration(parent_name='',top_path=None): + global config + from numpy.distutils.misc_util import Configuration + config = Configuration('', parent_name, top_path) + return config + +from numpy.distutils.core import setup +setup(configuration=configuration) + +config_cmd = config.get_config_cmd() +have_c = config_cmd.try_compile('void foo() {}') +print('COMPILERS:%%d,%%d,%%d' %% (have_c, + config.have_f77c(), + config.have_f90c())) +sys.exit(99) +""" + code = code % dict(syspath=repr(sys.path)) + + fd, script = tempfile.mkstemp(suffix='.py') + os.write(fd, asbytes(code)) + os.close(fd) + + try: + cmd = [sys.executable, script, 'config'] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + m = re.search(asbytes(r'COMPILERS:(\d+),(\d+),(\d+)'), out) + if m: + _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), + bool(int(m.group(3)))) + finally: + os.unlink(script) + + # Finished + return _compiler_status + +def has_c_compiler(): + return _get_compiler_status()[0] + +def has_f77_compiler(): + return _get_compiler_status()[1] + +def has_f90_compiler(): + return _get_compiler_status()[2] + +# +# Building with distutils +# + +@_memoize +def build_module_distutils(source_files, config_code, module_name, **kw): + """ + Build a module via distutils and import it. + + """ + from numpy.distutils.misc_util import Configuration + from numpy.distutils.core import setup + + d = get_module_dir() + + # Copy files + dst_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError("%s is not a file" % fn) + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + # Build script + config_code = textwrap.dedent(config_code).replace("\n", "\n ") + + code = """\ +import os +import sys +sys.path = %(syspath)s + +def configuration(parent_name='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('', parent_name, top_path) + %(config_code)s + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) +""" % dict(config_code=config_code, syspath = repr(sys.path)) + + script = os.path.join(d, get_temp_module_name() + '.py') + dst_sources.append(script) + f = open(script, 'wb') + f.write(asbytes(code)) + f.close() + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, script, 'build_ext', '-i'] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError("Running distutils build failed: %s\n%s" + % (cmd[4:], asstr(out))) + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Import + __import__(module_name) + return sys.modules[module_name] + +# +# Unittest convenience +# + +class F2PyTest(object): + code = None + sources = None + options = [] + skip = [] + only = [] + suffix = '.f' + module = None + module_name = None + + def setUp(self): + if self.module is not None: + return + + # Check compiler availability first + if not has_c_compiler(): + raise nose.SkipTest("No C compiler available") + + codes = [] + if self.sources: + codes.extend(self.sources) + if self.code is not None: + codes.append(self.suffix) + + needs_f77 = False + needs_f90 = False + for fn in codes: + if fn.endswith('.f'): + needs_f77 = True + elif fn.endswith('.f90'): + 
needs_f90 = True + if needs_f77 and not has_f77_compiler(): + raise nose.SkipTest("No Fortran 77 compiler available") + if needs_f90 and not has_f90_compiler(): + raise nose.SkipTest("No Fortran 90 compiler available") + + # Build the module + if self.code is not None: + self.module = build_code(self.code, options=self.options, + skip=self.skip, only=self.only, + suffix=self.suffix, + module_name=self.module_name) + + if self.sources is not None: + self.module = build_module(self.sources, options=self.options, + skip=self.skip, only=self.only, + module_name=self.module_name) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/use_rules.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/use_rules.py new file mode 100644 index 0000000000000..6fd72bd774fcf --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/use_rules.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +""" + +Build 'use others module data' mechanism for f2py2e. + +Unfinished. + +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2000/09/10 12:35:43 $ +Pearu Peterson + +""" +from __future__ import division, absolute_import, print_function + +__version__ = "$Revision: 1.3 $"[10:-1] + +f2py_version='See `f2py -v`' + +import pprint +import sys +errmess=sys.stderr.write +outmess=sys.stdout.write +show=pprint.pprint + +from .auxfuncs import * +############## + +usemodule_rules={ + 'body':""" +#begintitle# +static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ +\t #name# = get_#name#()\\n\\ +Arguments:\\n\\ +#docstr#\"; +extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); +static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { +/*#decl#*/ +\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; +printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); +\treturn Py_BuildValue(\"\"); +capi_fail: +\treturn NULL; +} +""", + 'method':'\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', + 'need':['F_MODFUNC'] + } + +################ + +def buildusevars(m, r): + ret={} + outmess('\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n'%(m['name'])) + varsmap={} + revmap={} + if 'map' in r: + for k in r['map'].keys(): + if r['map'][k] in revmap: + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n'%(r['map'][k], k, revmap[r['map'][k]])) + else: + revmap[r['map'][k]]=k + if 'only' in r and r['only']: + for v in r['map'].keys(): + if r['map'][v] in m['vars']: + + if revmap[r['map'][v]]==v: + varsmap[v]=r['map'][v] + else: + outmess('\t\t\tIgnoring map "%s=>%s". See above.\n'%(v, r['map'][v])) + else: + outmess('\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n'%(v, r['map'][v])) + else: + for v in m['vars'].keys(): + if v in revmap: + varsmap[v]=revmap[v] + else: + varsmap[v]=v + for v in varsmap.keys(): + ret=dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) + return ret +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n'%(name, realname)) + ret={} + vrd={'name':name, + 'realname':realname, + 'REALNAME':realname.upper(), + 'usemodulename':usemodulename, + 'USEMODULENAME':usemodulename.upper(), + 'texname':name.replace('_', '\\_'), + 'begintitle':gentitle('%s=>%s'%(name, realname)), + 'endtitle':gentitle('end of %s=>%s'%(name, realname)), + 'apiname':'#modulename#_use_%s_from_%s'%(realname, usemodulename) + } + nummap={0:'Ro',1:'Ri',2:'Rii',3:'Riii',4:'Riv',5:'Rv',6:'Rvi',7:'Rvii',8:'Rviii',9:'Rix'} + vrd['texnamename']=name + for i in nummap.keys(): + vrd['texnamename']=vrd['texnamename'].replace(repr(i), nummap[i]) + if hasnote(vars[realname]): vrd['note']=vars[realname]['note'] + rd=dictappend({}, vrd) + var=vars[realname] + + print(name, realname, vars[realname]) + ret=applyrules(usemodule_rules, rd) + return ret diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/__init__.py new file mode 100644 index 0000000000000..96809a94f847f --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/__init__.py @@ -0,0 +1,11 @@ +from __future__ import division, absolute_import, print_function + +# To get sub-modules +from .info import __doc__ + +from .fftpack import * +from .helper import * + +from numpy.testing import Tester +test = Tester().test +bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack.py new file mode 100644 index 0000000000000..706fcdd2f0749 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack.py @@ -0,0 +1,1169 @@ +""" +Discrete Fourier Transforms + +Routines in this module: + +fft(a, n=None, axis=-1) +ifft(a, n=None, axis=-1) +rfft(a, n=None, axis=-1) +irfft(a, n=None, axis=-1) +hfft(a, n=None, axis=-1) +ihfft(a, n=None, axis=-1) +fftn(a, s=None, axes=None) +ifftn(a, s=None, axes=None) +rfftn(a, s=None, axes=None) +irfftn(a, s=None, axes=None) +fft2(a, s=None, axes=(-2,-1)) +ifft2(a, s=None, axes=(-2, -1)) +rfft2(a, s=None, axes=(-2,-1)) +irfft2(a, s=None, axes=(-2, -1)) + +i = inverse transform +r = transform of purely real data +h = Hermite transform +n = n-dimensional transform +2 = 2-dimensional transform +(Note: 2D routines are just nD routines with different default +behavior.) + +The underlying code for these functions is an f2c-translated and modified +version of the FFTPACK routines. + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', + 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] + +from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \ + take +from . import fftpack_lite as fftpack + +_fft_cache = {} +_real_fft_cache = {} + +def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, + work_function=fftpack.cfftf, fft_cache = _fft_cache ): + a = asarray(a) + + if n is None: + n = a.shape[axis] + + if n < 1: + raise ValueError("Invalid number of FFT data points (%d) specified." % n) + + try: + # Thread-safety note: We rely on list.pop() here to atomically + # retrieve-and-remove a wsave from the cache. 
This ensures that no
+        # other thread can get the same wsave while we're using it.
+        wsave = fft_cache.setdefault(n, []).pop()
+    except (IndexError):
+        wsave = init_function(n)
+
+    if a.shape[axis] != n:
+        s = list(a.shape)
+        if s[axis] > n:
+            index = [slice(None)]*len(s)
+            index[axis] = slice(0, n)
+            a = a[index]
+        else:
+            index = [slice(None)]*len(s)
+            index[axis] = slice(0, s[axis])
+            s[axis] = n
+            z = zeros(s, a.dtype.char)
+            z[index] = a
+            a = z
+
+    if axis != -1:
+        a = swapaxes(a, axis, -1)
+    r = work_function(a, wsave)
+    if axis != -1:
+        r = swapaxes(r, axis, -1)
+
+    # As soon as we put wsave back into the cache, another thread could pick it
+    # up and start using it, so we must not do this until after we're
+    # completely done using it ourselves.
+    fft_cache[n].append(wsave)
+
+    return r
+
+
+def fft(a, n=None, axis=-1):
+    """
+    Compute the one-dimensional discrete Fourier Transform.
+
+    This function computes the one-dimensional *n*-point discrete Fourier
+    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
+    algorithm [CT].
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    n : int, optional
+        Length of the transformed axis of the output.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is larger than the last axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : for definition of the DFT and conventions used.
+    ifft : The inverse of `fft`.
+    fft2 : The two-dimensional FFT.
+    fftn : The *n*-dimensional FFT.
+    rfftn : The *n*-dimensional FFT of real input.
+    fftfreq : Frequency bins for given FFT parameters.
+
+    Notes
+    -----
+    FFT (Fast Fourier Transform) refers to a way the discrete Fourier
+    Transform (DFT) can be calculated efficiently, by using symmetries in the
+    calculated terms. The symmetry is highest when `n` is a power of 2, and
+    the transform is therefore most efficient for these sizes.
+
+    The DFT is defined, with the conventions used in this implementation, in
+    the documentation for the `numpy.fft` module.
+
+    References
+    ----------
+    .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+            machine calculation of complex Fourier series," *Math. Comput.*
+            19: 297-301.
+
+    Examples
+    --------
+    >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
+    array([ -3.44505240e-16 +1.14383329e-17j,
+             8.00000000e+00 -5.71092652e-15j,
+             2.33482938e-16 +1.22460635e-16j,
+             1.64863782e-15 +1.77635684e-15j,
+             9.95839695e-17 +2.33482938e-16j,
+             0.00000000e+00 +1.66837030e-15j,
+             1.14383329e-17 +1.22460635e-16j,
+            -1.64863782e-15 +1.77635684e-15j])
+
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.arange(256)
+    >>> sp = np.fft.fft(np.sin(t))
+    >>> freq = np.fft.fftfreq(t.shape[-1])
+    >>> plt.plot(freq, sp.real, freq, sp.imag)
+    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.show()
+
+    In this example, real input has an FFT which is Hermitian, i.e., symmetric
+    in the real part and anti-symmetric in the imaginary part, as described in
+    the `numpy.fft` documentation.
+ + """ + + return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache) + + +def ifft(a, n=None, axis=-1): + """ + Compute the one-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier transform computed by `fft`. In other words, + ``ifft(fft(a)) == a`` to within numerical accuracy. + For a general description of the algorithm and definitions, + see `numpy.fft`. + + The input should be ordered in the same way as is returned by `fft`, + i.e., ``a[0]`` should contain the zero frequency term, + ``a[1:n/2+1]`` should contain the positive-frequency terms, and + ``a[n/2+1:]`` should contain the negative-frequency terms, in order of + decreasingly negative frequency. See `numpy.fft` for details. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + See notes about padding issues. + axis : int, optional + Axis over which to compute the inverse DFT. If not given, the last + axis is used. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + numpy.fft : An introduction, with definitions and general explanations. + fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse + ifft2 : The two-dimensional inverse FFT. + ifftn : The n-dimensional inverse FFT. + + Notes + ----- + If the input parameter `n` is larger than the size of the input, the input + is padded by appending zeros at the end. Even though this is the common + approach, it might lead to surprising results. If a different padding is + desired, it must be performed before calling `ifft`. + + Examples + -------- + >>> np.fft.ifft([0, 4, 0, 0]) + array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) + + Create and plot a band-limited signal with random phases: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(400) + >>> n = np.zeros((400,), dtype=complex) + >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) + >>> s = np.fft.ifft(n) + >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') + [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>] + >>> plt.legend(('real', 'imaginary')) + <matplotlib.legend.Legend object at 0x...> + >>> plt.show() + + """ + + a = asarray(a).astype(complex) + if n is None: + n = shape(a)[axis] + return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n + + +def rfft(a, n=None, axis=-1): + """ + Compute the one-dimensional discrete Fourier Transform for real input. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) of a real-valued array by means of an efficient algorithm + called the Fast Fourier Transform (FFT). + + Parameters + ---------- + a : array_like + Input array + n : int, optional + Number of points along transformation axis in the input to use. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used.
+ + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is odd, the length is ``(n+1)/2``. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + irfft : The inverse of `rfft`. + fft : The one-dimensional FFT of general (complex) input. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + + Notes + ----- + When the DFT is computed for purely real input, the output is + Hermitian-symmetric, i.e. the negative frequency terms are just the complex + conjugates of the corresponding positive-frequency terms, and the + negative-frequency terms are therefore redundant. This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. + + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> np.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) + >>> np.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + + a = asarray(a).astype(float) + return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache) + + +def irfft(a, n=None, axis=-1): + """ + Compute the inverse of the n-point DFT for real input. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e. the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is determined from + the length of the input along the axis specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. 
+ The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. + fft : The one-dimensional FFT. + irfft2 : The inverse of the two-dimensional FFT of real input. + irfftn : The inverse of the *n*-dimensional FFT of real input. + + Notes + ----- + Returns the real valued `n`-point inverse discrete Fourier transform + of `a`, where `a` contains the non-negative frequency terms of a + Hermitian-symmetric sequence. `n` is the length of the result, not the + input. + + If you specify an `n` such that `a` must be zero-padded or truncated, the + extra/removed values will be added/removed at high frequencies. One can + thus resample a series to `m` points via Fourier interpolation by: + ``a_resamp = irfft(rfft(a), m)``. + + Examples + -------- + >>> np.fft.ifft([1, -1j, -1, 1j]) + array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) + >>> np.fft.irfft([1, -1j, -1]) + array([ 0., 1., 0., 0.]) + + Notice how the last term in the input to the ordinary `ifft` is the + complex conjugate of the second term, and the output has zero imaginary + part everywhere. When calling `irfft`, the negative frequencies are not + specified, and the output array is purely real. + + """ + + a = asarray(a).astype(complex) + if n is None: + n = (shape(a)[axis] - 1) * 2 + return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb, + _real_fft_cache) / n + + +def hfft(a, n=None, axis=-1): + """ + Compute the FFT of a signal which has Hermitian symmetry (real spectrum). + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is determined from + the length of the input along the axis specified by `axis`. + axis : int, optional + Axis over which to compute the FFT. If not given, the last + axis is used. + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is larger than the last axis of `a`. + + See also + -------- + rfft : Compute the one-dimensional FFT for real input. + ihfft : The inverse of `hfft`. + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time domain + and is real in the frequency domain. So here it's `hfft` for which + you must supply the length of the result if it is to be odd: + ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. 
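+ + With the default (even) output length, for example, the pair simply + round-trips (a minimal sketch; the value of ``a`` is arbitrary): + + >>> a = np.array([1.0, 2.0, 3.0]) + >>> np.allclose(np.fft.ihfft(np.fft.hfft(a)), a) + True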
+ + Examples + -------- + >>> signal = np.array([1, 2, 3, 4, 3, 2]) + >>> np.fft.fft(signal) + array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) + >>> np.fft.hfft(signal[:4]) # Input first half of signal + array([ 15., -4., 0., -1., 0., -4.]) + >>> np.fft.hfft(signal, 6) # Input entire signal and truncate + array([ 15., -4., 0., -1., 0., -4.]) + + + >>> signal = np.array([[1, 1.j], [-1.j, 2]]) + >>> np.conj(signal.T) - signal # check Hermitian symmetry + array([[ 0.-0.j, 0.+0.j], + [ 0.+0.j, 0.-0.j]]) + >>> freq_spectrum = np.fft.hfft(signal) + >>> freq_spectrum + array([[ 1., 1.], + [ 2., -2.]]) + + """ + + a = asarray(a).astype(complex) + if n is None: + n = (shape(a)[axis] - 1) * 2 + return irfft(conjugate(a), n, axis) * n + + +def ihfft(a, n=None, axis=-1): + """ + Compute the inverse FFT of a signal which has Hermitian symmetry. + + Parameters + ---------- + a : array_like + Input array. + n : int, optional + Length of the inverse FFT. + Number of points along transformation axis in the input to use. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is odd, the length is ``(n+1)/2``. + + See also + -------- + hfft, irfft + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time domain + and is real in the frequency domain. So here it's `hfft` for which + you must supply the length of the result if it is to be odd: + ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. + + Examples + -------- + >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) + >>> np.fft.ifft(spectrum) + array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j]) + >>> np.fft.ihfft(spectrum) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) + + """ + + a = asarray(a).astype(float) + if n is None: + n = shape(a)[axis] + return conjugate(rfft(a, n, axis))/n + + +def _cook_nd_args(a, s=None, axes=None, invreal=0): + if s is None: + shapeless = 1 + if axes is None: + s = list(a.shape) + else: + s = take(a.shape, axes) + else: + shapeless = 0 + s = list(s) + if axes is None: + axes = list(range(-len(s), 0)) + if len(s) != len(axes): + raise ValueError("Shape and axes have different lengths.") + if invreal and shapeless: + s[-1] = (a.shape[axes[-1]] - 1) * 2 + return s, axes + + +def _raw_fftnd(a, s=None, axes=None, function=fft): + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + itl = list(range(len(axes))) + itl.reverse() + for ii in itl: + a = function(a, n=s[ii], axis=axes[ii]) + return a + + +def fftn(a, s=None, axes=None): + """ + Compute the N-dimensional discrete Fourier Transform. + + This function computes the *N*-dimensional discrete Fourier Transform over + any number of axes in an *M*-dimensional array by means of the Fast Fourier + Transform (FFT). + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). 
This corresponds to `n` for `fft(x, n)`. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the transform over that axis is + performed multiple times. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `a`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT. + fft : The one-dimensional FFT, with definitions and conventions used. + rfftn : The *n*-dimensional FFT of real input. + fft2 : The two-dimensional FFT. + fftshift : Shifts zero-frequency terms to center of array + + Notes + ----- + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of all axes, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + See `numpy.fft` for details, definitions and conventions used. + + Examples + -------- + >>> a = np.mgrid[:3, :3, :3][0] + >>> np.fft.fftn(a, axes=(1, 2)) + array([[[ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[ 9.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[ 18.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) + array([[[ 2.+0.j, 2.+0.j, 2.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[-2.+0.j, -2.+0.j, -2.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + + >>> import matplotlib.pyplot as plt + >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, + ... 2 * np.pi * np.arange(200) / 34) + >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape) + >>> FS = np.fft.fftn(S) + >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2)) + <matplotlib.image.AxesImage object at 0x...> + >>> plt.show() + + """ + + return _raw_fftnd(a, s, axes, fft) + +def ifftn(a, s=None, axes=None): + """ + Compute the N-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the N-dimensional discrete + Fourier Transform over any number of axes in an M-dimensional array by + means of the Fast Fourier Transform (FFT). In other words, + ``ifftn(fftn(a)) == a`` to within numerical accuracy. + For a description of the definitions and conventions used, see `numpy.fft`. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fftn`, i.e. it should have the term for zero frequency + in all axes in the low-order corner, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency.
+ + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``ifft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + axes : sequence of ints, optional + Axes over which to compute the IFFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the inverse transform over that + axis is performed multiple times. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `a`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. + ifft : The one-dimensional inverse FFT. + ifft2 : The two-dimensional inverse FFT. + ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning + of array. + + Notes + ----- + See `numpy.fft` for definitions and conventions used. + + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifftn` is called. + + Examples + -------- + >>> a = np.eye(4) + >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) + array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) + + + Create and plot an image with band-limited frequency content: + + >>> import matplotlib.pyplot as plt + >>> n = np.zeros((200,200), dtype=complex) + >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) + >>> im = np.fft.ifftn(n).real + >>> plt.imshow(im) + <matplotlib.image.AxesImage object at 0x...> + >>> plt.show() + + """ + + return _raw_fftnd(a, s, axes, ifft) + + +def fft2(a, s=None, axes=(-2, -1)): + """ + Compute the 2-dimensional discrete Fourier Transform. + + This function computes the *n*-dimensional discrete Fourier Transform + over any axes in an *M*-dimensional array by means of the + Fast Fourier Transform (FFT). By default, the transform is computed over + the last two axes of the input array, i.e., a 2-dimensional FFT. + + Parameters + ---------- + a : array_like + Input array, can be complex + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). + This corresponds to `n` for `fft(x, n)`. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two + axes are used.
A repeated index in `axes` means the transform over + that axis is performed multiple times. A one-element sequence means + that a one-dimensional FFT is performed. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + ifft2 : The inverse two-dimensional FFT. + fft : The one-dimensional FFT. + fftn : The *n*-dimensional FFT. + fftshift : Shifts zero-frequency terms to the center of the array. + For two-dimensional input, swaps first and third quadrants, and second + and fourth quadrants. + + Notes + ----- + `fft2` is just `fftn` with a different default for `axes`. + + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of the transformed axes, the positive frequency terms + in the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + the axes, in order of decreasingly negative frequency. + + See `fftn` for details and a plotting example, and `numpy.fft` for + definitions and conventions used. + + + Examples + -------- + >>> a = np.mgrid[:5, :5][0] + >>> np.fft.fft2(a) + array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j , + 0.0 +0.j , 0.0 +0.j ], + [-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j , + 0.0 +0.j , 0.0 +0.j ], + [-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j , + 0.0 +0.j , 0.0 +0.j ], + [-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j , + 0.0 +0.j , 0.0 +0.j ], + [-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j , + 0.0 +0.j , 0.0 +0.j ]]) + + """ + + return _raw_fftnd(a, s, axes, fft) + + +def ifft2(a, s=None, axes=(-2, -1)): + """ + Compute the 2-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the 2-dimensional discrete Fourier + Transform over any number of axes in an M-dimensional array by means of + the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` + to within numerical accuracy. By default, the inverse transform is + computed over the last two axes of the input array. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fft2`, i.e. it should have the term for zero frequency + in the low-order corner of the two axes, the positive frequency terms in + the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + both axes, in order of decreasingly negative frequency. + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each axis) of the output (``s[0]`` refers to axis 0, + ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two + axes are used.
A repeated index in `axes` means the transform over + that axis is performed multiple times. A one-element sequence means + that a one-dimensional FFT is performed. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse. + ifftn : The inverse of the *n*-dimensional FFT. + fft : The one-dimensional FFT. + ifft : The one-dimensional inverse FFT. + + Notes + ----- + `ifft2` is just `ifftn` with a different default for `axes`. + + See `ifftn` for details and a plotting example, and `numpy.fft` for + definition and conventions used. + + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifft2` is called. + + Examples + -------- + >>> a = 4 * np.eye(4) + >>> np.fft.ifft2(a) + array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], + [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) + + """ + + return _raw_fftnd(a, s, axes, ifft) + + +def rfftn(a, s=None, axes=None): + """ + Compute the N-dimensional discrete Fourier Transform for real input. + + This function computes the N-dimensional discrete Fourier Transform over + any number of axes in an M-dimensional real array by means of the Fast + Fourier Transform (FFT). By default, all axes are transformed, with the + real transform performed over the last axis, while the remaining + transforms are complex. + + Parameters + ---------- + a : array_like + Input array, taken to be real. + s : sequence of ints, optional + Shape (length along each transformed axis) to use from the input. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + The final element of `s` corresponds to `n` for ``rfft(x, n)``, while + for the remaining axes, it corresponds to `n` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `a`, + as explained in the parameters section above. + The length of the last axis transformed will be ``s[-1]//2+1``, + while the remaining transformed axes will have lengths according to + `s`, or unchanged from the input. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `a`. + + See Also + -------- + irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT + of real input.
+ fft : The one-dimensional FFT, with definitions and conventions used. + rfft : The one-dimensional FFT of real input. + fftn : The n-dimensional FFT. + rfft2 : The two-dimensional FFT of real input. + + Notes + ----- + The transform for real input is performed over the last transformation + axis, as by `rfft`, then the transform over the remaining axes is + performed as by `fftn`. The order of the output is as for `rfft` for the + final transformation axis, and as for `fftn` for the remaining + transformation axes. + + See `fft` for details, definitions and conventions used. + + Examples + -------- + >>> a = np.ones((2, 2, 2)) + >>> np.fft.rfftn(a) + array([[[ 8.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j]], + [[ 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j]]]) + + >>> np.fft.rfftn(a, axes=(2, 0)) + array([[[ 4.+0.j, 0.+0.j], + [ 4.+0.j, 0.+0.j]], + [[ 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j]]]) + + """ + + a = asarray(a).astype(float) + s, axes = _cook_nd_args(a, s, axes) + a = rfft(a, s[-1], axes[-1]) + for ii in range(len(axes)-1): + a = fft(a, s[ii], axes[ii]) + return a + +def rfft2(a, s=None, axes=(-2, -1)): + """ + Compute the 2-dimensional FFT of a real array. + + Parameters + ---------- + a : array + Input array, taken to be real. + s : sequence of ints, optional + Shape of the FFT. + axes : sequence of ints, optional + Axes over which to compute the FFT. + + Returns + ------- + out : ndarray + The result of the real 2-D FFT. + + See Also + -------- + rfftn : Compute the N-dimensional discrete Fourier Transform for real + input. + + Notes + ----- + This is really just `rfftn` with different default behavior. + For more details see `rfftn`. + + """ + + return rfftn(a, s, axes) + +def irfftn(a, s=None, axes=None): + """ + Compute the inverse of the N-dimensional FFT of real input. + + This function computes the inverse of the N-dimensional discrete + Fourier Transform for real input over any number of axes in an + M-dimensional array by means of the Fast Fourier Transform (FFT). In + other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical + accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, + and for the same reason.) + + The input should be ordered in the same way as is returned by `rfftn`, + i.e. as for `irfft` for the final transformation axis, and as for `ifftn` + along all the other axes. + + Parameters + ---------- + a : array_like + Input array. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the + number of input points used along this axis, except for the last axis, + where ``s[-1]//2+1`` points of the input are used. + Along any axis, if the shape indicated by `s` is smaller than that of + the input, the input is cropped. If it is larger, the input is padded + with zeros. If `s` is not given, the shape of the input along the + axes specified by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the inverse FFT. If not given, the last + `len(s)` axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the inverse transform over that + axis is performed multiple times. + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `a`, + as explained in the parameters section above. 
+ The length of each transformed axis is as given by the corresponding + element of `s`, or the length of the input in every axis except for the + last one if `s` is not given. In the final transformed axis the length + of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the + length of the final transformed axis of the input. To get an odd + number of output points in the final axis, `s` must be specified. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than the number of axes of `a`. + + See Also + -------- + rfftn : The forward n-dimensional FFT of real input, + of which `irfftn` is the inverse. + fft : The one-dimensional FFT, with definitions and conventions used. + irfft : The inverse of the one-dimensional FFT of real input. + irfft2 : The inverse of the two-dimensional FFT of real input. + + Notes + ----- + See `fft` for definitions and conventions used. + + See `rfft` for definitions and conventions used for real input. + + Examples + -------- + >>> a = np.zeros((3, 2, 2)) + >>> a[0, 0, 0] = 3 * 2 * 2 + >>> np.fft.irfftn(a) + array([[[ 1., 1.], + [ 1., 1.]], + [[ 1., 1.], + [ 1., 1.]], + [[ 1., 1.], + [ 1., 1.]]]) + + """ + + a = asarray(a).astype(complex) + s, axes = _cook_nd_args(a, s, axes, invreal=1) + for ii in range(len(axes)-1): + a = ifft(a, s[ii], axes[ii]) + a = irfft(a, s[-1], axes[-1]) + return a + +def irfft2(a, s=None, axes=(-2, -1)): + """ + Compute the 2-dimensional inverse FFT of a real array. + + Parameters + ---------- + a : array_like + The input array + s : sequence of ints, optional + Shape of the inverse FFT. + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default is the last two axes. + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + irfftn : Compute the inverse of the N-dimensional FFT of real input. + + Notes + ----- + This is really `irfftn` with different defaults. + For more details see `irfftn`. + + """ + + return irfftn(a, s, axes) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack_lite.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack_lite.py new file mode 100644 index 0000000000000..267b7dba4ed1d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack_lite.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, 'fftpack_lite.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/helper.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/helper.py new file mode 100644 index 0000000000000..160120e585bd2 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/helper.py @@ -0,0 +1,224 @@ +""" +Discrete Fourier Transforms - helper.py + +""" +from __future__ import division, absolute_import, print_function + +from numpy.compat import integer_types +from numpy.core import ( + asarray, concatenate, arange, take, integer, empty + ) + +# Created by Pearu Peterson, September 2002 + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] + +integer_types = integer_types + (integer,) + + +def fftshift(x, axes=None): + """ + Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all).
+ Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + ifftshift : The inverse of `fftshift`. + + Examples + -------- + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + tmp = asarray(x) + ndim = len(tmp.shape) + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, integer_types): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = (n+1)//2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + +def ifftshift(x, axes=None): + """ + The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. + + Examples + -------- + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + tmp = asarray(x) + ndim = len(tmp.shape) + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, integer_types): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = n-(n+1)//2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + +def fftfreq(n, d=1.0): + """ + Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = np.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = np.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 2.5 , 3.75, -5. 
, -3.75, -2.5 , -1.25]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + results = empty(n, int) + N = (n-1)//2 + 1 + p1 = arange(0, N, dtype=int) + results[:N] = p1 + p2 = arange(-(n//2), 0, dtype=int) + results[N:] = p2 + return results * val + #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d) + + +def rfftfreq(n, d=1.0): + """ + Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = np.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = np.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., -50., -40., -30., -20., -10.]) + >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0/(n*d) + N = n//2 + 1 + results = arange(0, N, dtype=int) + return results * val diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/info.py new file mode 100644 index 0000000000000..916d452f20914 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/info.py @@ -0,0 +1,179 @@ +""" +Discrete Fourier Transform (:mod:`numpy.fft`) +============================================= + +.. currentmodule:: numpy.fft + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N-dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. 
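+ +For instance, the helper routines compose naturally; a short sketch +(sample values arbitrary) that puts the frequency axis in monotonic order: + + >>> freqs = np.fft.fftfreq(8, d=0.5) + >>> np.fft.fftshift(freqs) + array([-1. , -0.75, -0.5 , -0.25, 0. , 0.25, 0.5 , 0.75])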
+ + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components. When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. + +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the mean of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the normalization by :math:`1/n`. 
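+ +These conventions can be spot-checked numerically (an illustrative sketch, +not a proof): the zero-frequency term is the sum of the input, and a +forward/inverse round trip recovers the input: + + >>> a = np.array([2.0, 1.0, -1.0, 3.0]) + >>> np.fft.fft(a)[0] + (5+0j) + >>> np.allclose(np.fft.ifft(np.fft.fft(a)), a) + True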
+ +Real and Hermitian transforms +----------------------------- + +When the input is purely real, its transform is Hermitian, i.e., the +component at frequency :math:`f_k` is the complex conjugate of the +component at frequency :math:`-f_k`, which means that for real +inputs there is no information in the negative frequency components that +is not already available from the positive frequency components. +The family of `rfft` functions is +designed to operate on real inputs, and exploits this symmetry by +computing only the positive frequency components, up to and including the +Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex +output points. The inverses of this family assume the same symmetry of +their input, and for an output of ``n`` points use ``n/2+1`` input points. + +Correspondingly, when the spectrum is purely real, the signal is +Hermitian. The `hfft` family of functions exploits this symmetry by +using ``n/2+1`` complex points in the input (time) domain for ``n`` real +points in the frequency domain. + +In higher dimensions, FFTs are used, e.g., for image analysis and +filtering. The computational efficiency of the FFT means that it can +also be a faster way to compute large convolutions, using the property +that a convolution in the time domain is equivalent to a point-by-point +multiplication in the frequency domain. + +Higher dimensions +----------------- + +In two dimensions, the DFT is defined as + +.. math:: + A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} + a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} + \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, + +which extends in the obvious way to higher dimensions, and the inverses +in higher dimensions also extend in the same way. + +References +---------- + +.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + +.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P., + 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. + 12-13. Cambridge Univ. Press, Cambridge, UK. + +Examples +-------- + +For examples, see the various functions.
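+ +A single end-to-end illustration of the n-dimensional round trip (a +sketch; the individual function docstrings have fuller examples): + + >>> a = np.eye(3) + >>> np.allclose(np.fft.ifftn(np.fft.fftn(a)), a) + True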
+ +""" +from __future__ import division, absolute_import, print_function + +depends = ['core'] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/setup.py new file mode 100644 index 0000000000000..79f681e5549f7 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/setup.py @@ -0,0 +1,20 @@ +from __future__ import division, print_function + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('fft', parent_package, top_path) + + config.add_data_dir('tests') + + # Configure fftpack_lite + config.add_extension('fftpack_lite', + sources=['fftpack_litemodule.c', 'fftpack.c'] + ) + + + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_fftpack.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_fftpack.py new file mode 100644 index 0000000000000..45b5ac784ee9a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_fftpack.py @@ -0,0 +1,75 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal +from numpy.testing import assert_array_equal +import threading +import sys +if sys.version_info[0] >= 3: + import queue +else: + import Queue as queue + + +def fft1(x): + L = len(x) + phase = -2j*np.pi*(np.arange(L)/float(L)) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x*np.exp(phase), axis=1) + + +class TestFFTShift(TestCase): + + def test_fft_n(self): + self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0) + + +class TestFFT1D(TestCase): + + def test_basic(self): + rand = np.random.random + x = rand(30) + 1j*rand(30) + assert_array_almost_equal(fft1(x), np.fft.fft(x)) + + +class TestFFTThreadSafe(TestCase): + threads = 16 + input_shape = (800, 200) + + def _test_mtsame(self, func, *args): + def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + # Make sure all threads returned the correct value + for i in range(self.threads): + assert_array_equal(q.get(timeout=5), expected, + 'Function returned wrong value in multithreaded context') + + def test_fft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.fft, a) + + def test_ifft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.ifft, a) + + def test_rfft(self): + a = np.ones(self.input_shape) + self._test_mtsame(np.fft.rfft, a) + + def test_irfft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.irfft, a) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_helper.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_helper.py new file mode 100644 index 0000000000000..7eaa99fdb9881 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_helper.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +"""Test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +from 
numpy.testing import TestCase, run_module_suite, assert_array_almost_equal +from numpy import fft +from numpy import pi + + +class TestFFTShift(TestCase): + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + + def test_inverse(self): + for n in [1, 4, 9, 100, 211]: + x = np.random.random((n,)) + assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self): + freqs = [[ 0, 1, 2], [ 3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [ 2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fft.fftshift(freqs, axes=0), + fft.fftshift(freqs, axes=(0,))) + assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + + +class TestFFTFreq(TestCase): + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + assert_array_almost_equal(9*fft.fftfreq(9), x) + assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + assert_array_almost_equal(10*fft.fftfreq(10), x) + assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) + + +class TestRFFTFreq(TestCase): + + def test_definition(self): + x = [0, 1, 2, 3, 4] + assert_array_almost_equal(9*fft.rfftfreq(9), x) + assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, 5] + assert_array_almost_equal(10*fft.rfftfreq(10), x) + assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) + + +class TestIRFFTN(TestCase): + + def test_not_last_axis_success(self): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j*ai + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/__init__.py new file mode 100644 index 0000000000000..8c420b0c33012 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/__init__.py @@ -0,0 +1,46 @@ +from __future__ import division, absolute_import, print_function + +import math + +from .info import __doc__ +from numpy.version import version as __version__ + +from .type_check import * +from .index_tricks import * +from .function_base import * +from .nanfunctions import * +from .shape_base import * +from .stride_tricks import * +from .twodim_base import * +from .ufunclike import * + +from . 
import scimath as emath +from .polynomial import * +#import convertcode +from .utils import * +from .arraysetops import * +from .npyio import * +from .financial import * +from .arrayterator import * +from .arraypad import * +from ._version import * + +__all__ = ['emath', 'math'] +__all__ += type_check.__all__ +__all__ += index_tricks.__all__ +__all__ += function_base.__all__ +__all__ += shape_base.__all__ +__all__ += stride_tricks.__all__ +__all__ += twodim_base.__all__ +__all__ += ufunclike.__all__ +__all__ += arraypad.__all__ +__all__ += polynomial.__all__ +__all__ += utils.__all__ +__all__ += arraysetops.__all__ +__all__ += npyio.__all__ +__all__ += financial.__all__ +__all__ += nanfunctions.__all__ + +from numpy.testing import Tester +test = Tester().test +bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_compiled_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_compiled_base.py new file mode 100644 index 0000000000000..c560a751011e8 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_compiled_base.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, '_compiled_base.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_datasource.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_datasource.py new file mode 100644 index 0000000000000..338c8b3311b0c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_datasource.py @@ -0,0 +1,666 @@ +"""A file interface for handling local and remote data files. + +The goal of datasource is to abstract some of the file system operations +when dealing with data files so the researcher doesn't have to know all the +low-level details. Through datasource, a researcher can obtain and use a +file with one function call, regardless of location of the file. + +DataSource is meant to augment standard python libraries, not replace them. +It should work seamlessly with standard file IO operations and the os +module. + +DataSource files can originate locally or remotely: + +- local files : '/home/guido/src/local/data.txt' +- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' + +DataSource files can also be compressed or uncompressed. Currently only +gzip and bz2 are supported. + +Example:: + + >>> # Create a DataSource, use os.curdir (default) for local storage. + >>> ds = datasource.DataSource() + >>> + >>> # Open a remote file. + >>> # DataSource downloads the file, stores it locally in: + >>> # './www.google.com/index.html' + >>> # opens the file and returns a file object. + >>> fp = ds.open('http://www.google.com/index.html') + >>> + >>> # Use the file as you normally would + >>> fp.read() + >>> fp.close() + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import shutil + +_open = open + + +# Using a class instead of a module-level dictionary +# to reduce the initial 'import numpy' overhead by +# deferring the import of bz2 and gzip until needed + +# TODO: .zip support, .tar support? +class _FileOpeners(object): + """ + Container for different methods to open (un-)compressed files. + + `_FileOpeners` contains a dictionary that holds one method for each + supported file format.
Attribute lookup is implemented in such a way + that an instance of `_FileOpeners` itself can be indexed with the keys + of that dictionary. Currently uncompressed files as well as files + compressed with ``gzip`` or ``bz2`` compression are supported. + + Notes + ----- + `_file_openers`, an instance of `_FileOpeners`, is made available for + use in the `_datasource` module. + + Examples + -------- + >>> np.lib._datasource._file_openers.keys() + [None, '.bz2', '.gz'] + >>> np.lib._datasource._file_openers['.gz'] is gzip.open + True + + """ + + def __init__(self): + self._loaded = False + self._file_openers = {None: open} + + def _load(self): + if self._loaded: + return + try: + import bz2 + self._file_openers[".bz2"] = bz2.BZ2File + except ImportError: + pass + try: + import gzip + self._file_openers[".gz"] = gzip.open + except ImportError: + pass + self._loaded = True + + def keys(self): + """ + Return the keys of currently supported file openers. + + Parameters + ---------- + None + + Returns + ------- + keys : list + The keys are None for uncompressed files and the file extension + strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression + methods. + + """ + self._load() + return list(self._file_openers.keys()) + + def __getitem__(self, key): + self._load() + return self._file_openers[key] + +_file_openers = _FileOpeners() + +def open(path, mode='r', destpath=os.curdir): + """ + Open `path` with `mode` and return the file object. + + If ``path`` is an URL, it will be downloaded, stored in the + `DataSource` `destpath` directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. + mode : str, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to + append. Available modes depend on the type of object specified by + path. Default is 'r'. + destpath : str, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Returns + ------- + out : file object + The opened file. + + Notes + ----- + This is a convenience function that instantiates a `DataSource` and + returns the file object from ``DataSource.open(path)``. + + """ + + ds = DataSource(destpath) + return ds.open(path, mode) + + +class DataSource (object): + """ + DataSource(destpath='.') + + A generic data source file (file, http, ftp, ...). + + DataSources can be local files or remote files/URLs. The files may + also be compressed or uncompressed. DataSource hides some of the + low-level details of downloading the file, allowing you to simply pass + in a valid file path (or URL) and obtain a file object. + + Parameters + ---------- + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Notes + ----- + URLs require a scheme string (``http://``) to be used, without it they + will fail:: + + >>> repos = DataSource() + >>> repos.exists('www.google.com/index.html') + False + >>> repos.exists('http://www.google.com/index.html') + True + + Temporary directories are deleted when the DataSource is deleted. 
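+ + With ``destpath=None``, all cached copies live beneath a fresh + temporary directory, so they cannot collide with local files (an + illustrative sketch; the temporary path will vary): + + >>> ds = DataSource(None) + >>> ds.abspath('data.txt') + '/tmp/tmpy4pgsP/data.txt'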
+ + Examples + -------- + :: + + >>> ds = DataSource('/home/guido') + >>> urlname = 'http://www.google.com/index.html' + >>> gfile = ds.open('http://www.google.com/index.html') # remote file + >>> ds.abspath(urlname) + '/home/guido/www.google.com/site/index.html' + + >>> ds = DataSource(None) # use with temporary file + >>> ds.open('/home/guido/foobar.txt') + + >>> ds.abspath('/home/guido/foobar.txt') + '/tmp/tmpy4pgsP/home/guido/foobar.txt' + + """ + + def __init__(self, destpath=os.curdir): + """Create a DataSource with a local path at destpath.""" + if destpath: + self._destpath = os.path.abspath(destpath) + self._istmpdest = False + else: + import tempfile # deferring import to improve startup time + self._destpath = tempfile.mkdtemp() + self._istmpdest = True + + def __del__(self): + # Remove temp directories + if self._istmpdest: + shutil.rmtree(self._destpath) + + def _iszip(self, filename): + """Test if the filename is a zip file by looking at the file extension. + + """ + fname, ext = os.path.splitext(filename) + return ext in _file_openers.keys() + + def _iswritemode(self, mode): + """Test if the given mode will open a file for writing.""" + + # Currently only used to test the bz2 files. + _writemodes = ("w", "+") + for c in mode: + if c in _writemodes: + return True + return False + + def _splitzipext(self, filename): + """Split zip extension from filename and return filename. + + *Returns*: + base, zip_ext : {tuple} + + """ + + if self._iszip(filename): + return os.path.splitext(filename) + else: + return filename, None + + def _possible_names(self, filename): + """Return a tuple containing compressed filename variations.""" + names = [filename] + if not self._iszip(filename): + for zipext in _file_openers.keys(): + if zipext: + names.append(filename+zipext) + return names + + def _isurl(self, path): + """Test if path is a net location. Tests the scheme and netloc.""" + + # We do this here to reduce the 'import numpy' initial import time. + if sys.version_info[0] >= 3: + from urllib.parse import urlparse + else: + from urlparse import urlparse + + # BUG : URLs require a scheme string ('http://') to be used. + # www.google.com will fail. + # Should we prepend the scheme for those that don't have it and + # test that also? Similar to the way we append .gz and test for + # for compressed versions of files. + + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + return bool(scheme and netloc) + + def _cache(self, path): + """Cache the file specified by path. + + Creates a copy of the file in the datasource cache. + + """ + # We import these here because importing urllib2 is slow and + # a significant fraction of numpy's total import time. + if sys.version_info[0] >= 3: + from urllib.request import urlopen + from urllib.error import URLError + else: + from urllib2 import urlopen + from urllib2 import URLError + + upath = self.abspath(path) + + # ensure directory exists + if not os.path.exists(os.path.dirname(upath)): + os.makedirs(os.path.dirname(upath)) + + # TODO: Doesn't handle compressed files! + if self._isurl(path): + try: + openedurl = urlopen(path) + f = _open(upath, 'wb') + try: + shutil.copyfileobj(openedurl, f) + finally: + f.close() + openedurl.close() + except URLError: + raise URLError("URL not found: %s" % path) + else: + shutil.copyfile(path, upath) + return upath + + def _findfile(self, path): + """Searches for ``path`` and returns full path if found. + + If path is an URL, _findfile will cache a local copy and return the + path to the cached file. 
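For illustration, the expansion `_possible_names` performs for a plain filename looks roughly like this standalone mirror (the extensions track `_file_openers.keys()`):

    def possible_names(filename, zipexts=('.gz', '.bz2')):
        # Try the name as given, then with each supported compression
        # extension appended, mirroring DataSource._possible_names.
        names = [filename]
        if not filename.endswith(zipexts):
            names += [filename + ext for ext in zipexts]
        return names

    possible_names('data.txt')
    # -> ['data.txt', 'data.txt.gz', 'data.txt.bz2']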
If path is a local file, _findfile will + return a path to that local file. + + The search will include possible compressed versions of the file + and return the first occurrence found. + + """ + + # Build list of possible local file paths + if not self._isurl(path): + # Valid local paths + filelist = self._possible_names(path) + # Paths in self._destpath + filelist += self._possible_names(self.abspath(path)) + else: + # Cached URLs in self._destpath + filelist = self._possible_names(self.abspath(path)) + # Remote URLs + filelist = filelist + self._possible_names(path) + + for name in filelist: + if self.exists(name): + if self._isurl(name): + name = self._cache(name) + return name + return None + + def abspath(self, path): + """ + Return absolute path of file in the DataSource directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + Notes + ----- + The functionality is based on `os.path.abspath`. + + """ + # We do this here to reduce the 'import numpy' initial import time. + if sys.version_info[0] >= 3: + from urllib.parse import urlparse + else: + from urlparse import urlparse + + # TODO: This should be more robust. Handles case where path includes + # the destpath, but not other sub-paths. Failing case: + # path = /home/guido/datafile.txt + # destpath = /home/alex/ + # upath = self.abspath(path) + # upath == '/home/alex/home/guido/datafile.txt' + + # handle case where path includes self._destpath + splitpath = path.split(self._destpath, 2) + if len(splitpath) > 1: + path = splitpath[1] + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + netloc = self._sanitize_relative_path(netloc) + upath = self._sanitize_relative_path(upath) + return os.path.join(self._destpath, netloc, upath) + + def _sanitize_relative_path(self, path): + """Return a sanitised relative path for which + os.path.abspath(os.path.join(base, path)).startswith(base) + """ + last = None + path = os.path.normpath(path) + while path != last: + last = path + # Note: os.path.join treats '/' as os.sep on Windows + path = path.lstrip(os.sep).lstrip('/') + path = path.lstrip(os.pardir).lstrip('..') + drive, path = os.path.splitdrive(path) # for Windows + return path + + def exists(self, path): + """ + Test if path exists. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + # We import this here because importing urllib2 is slow and + # a significant fraction of numpy's total import time.
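The sanitising loop in `_sanitize_relative_path` above is worth a standalone illustration; this mirror of the method shows how a hostile path is reduced to one that stays below the destination directory:

    import os

    def sanitize_relative(path):
        # Mirror of DataSource._sanitize_relative_path: iteratively strip
        # leading separators and '..' components until the path is stable,
        # so os.path.join(base, result) cannot escape base.
        last = None
        path = os.path.normpath(path)
        while path != last:
            last = path
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path

    sanitize_relative('../../etc/passwd')  # -> 'etc/passwd'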
+ if sys.version_info[0] >= 3: + from urllib.request import urlopen + from urllib.error import URLError + else: + from urllib2 import urlopen + from urllib2 import URLError + + # Test local path + if os.path.exists(path): + return True + + # Test cached url + upath = self.abspath(path) + if os.path.exists(upath): + return True + + # Test remote url + if self._isurl(path): + try: + netfile = urlopen(path) + netfile.close() + del(netfile) + return True + except URLError: + return False + return False + + def open(self, path, mode='r'): + """ + Open and return file-like object. + + If `path` is an URL, it will be downloaded, stored in the + `DataSource` directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + + Returns + ------- + out : file object + File object. + + """ + + # TODO: There is no support for opening a file for writing which + # doesn't exist yet (creating a file). Should there be? + + # TODO: Add a ``subdir`` parameter for specifying the subdirectory + # used to store URLs in self._destpath. + + if self._isurl(path) and self._iswritemode(mode): + raise ValueError("URLs are not writeable") + + # NOTE: _findfile will fail on a new file opened for writing. + found = self._findfile(path) + if found: + _fname, ext = self._splitzipext(found) + if ext == 'bz2': + mode.replace("+", "") + return _file_openers[ext](found, mode=mode) + else: + raise IOError("%s not found." % path) + + +class Repository (DataSource): + """ + Repository(baseurl, destpath='.') + + A data repository where multiple DataSource's share a base + URL/directory. + + `Repository` extends `DataSource` by prepending a base URL (or + directory) to all the files it handles. Use `Repository` when you will + be working with multiple files from one base URL. Initialize + `Repository` with the base URL, then refer to each file by its filename + only. + + Parameters + ---------- + baseurl : str + Path to the local directory or remote location that contains the + data files. + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Examples + -------- + To analyze all files in the repository, do something like this + (note: this is not self-contained code):: + + >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') + >>> for filename in filelist: + ... fp = repos.open(filename) + ... fp.analyze() + ... fp.close() + + Similarly you could use a URL for a repository:: + + >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') + + """ + + def __init__(self, baseurl, destpath=os.curdir): + """Create a Repository with a shared url or directory of baseurl.""" + DataSource.__init__(self, destpath=destpath) + self._baseurl = baseurl + + def __del__(self): + DataSource.__del__(self) + + def _fullpath(self, path): + """Return complete path for path. 
Prepends baseurl if necessary.""" + splitpath = path.split(self._baseurl, 2) + if len(splitpath) == 1: + result = os.path.join(self._baseurl, path) + else: + result = path # path contains baseurl already + return result + + def _findfile(self, path): + """Extend DataSource method to prepend baseurl to ``path``.""" + return DataSource._findfile(self, self._fullpath(path)) + + def abspath(self, path): + """ + Return absolute path of file in the Repository directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + """ + return DataSource.abspath(self, self._fullpath(path)) + + def exists(self, path): + """ + Test if path exists prepending Repository base URL to path. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + return DataSource.exists(self, self._fullpath(path)) + + def open(self, path, mode='r'): + """ + Open and return file-like object prepending Repository base URL. + + If `path` is an URL, it will be downloaded, stored in the + DataSource directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. This may, but does not have to, + include the `baseurl` with which the `Repository` was + initialized. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + + Returns + ------- + out : file object + File object. + + """ + return DataSource.open(self, self._fullpath(path), mode) + + def listdir(self): + """ + List files in the source Repository. + + Returns + ------- + files : list of str + List of file names (not containing a directory part). + + Notes + ----- + Does not currently work for remote repositories. + + """ + if self._isurl(self._baseurl): + raise NotImplementedError( + "Directory listing of URLs, not supported yet.") + else: + return os.listdir(self._baseurl) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_iotools.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_iotools.py new file mode 100644 index 0000000000000..9108b2e4ce169 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_iotools.py @@ -0,0 +1,891 @@ +"""A collection of functions designed to help I/O with ascii files. 
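To make the Repository contract concrete, a hedged usage sketch (the base URL and file names here are hypothetical):

    # Files are addressed by short name; _fullpath() prepends the base URL.
    repos = Repository('http://www.example.com/data', destpath=None)
    for fname in ['a.csv', 'b.csv']:
        fp = repos.open(fname)   # downloaded and cached on first access
        print(fp.read()[:80])
        fp.close()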
+ +""" +from __future__ import division, absolute_import, print_function + +__docformat__ = "restructuredtext en" + +import sys +import numpy as np +import numpy.core.numeric as nx +from numpy.compat import asbytes, bytes, asbytes_nested, basestring + +if sys.version_info[0] >= 3: + from builtins import bool, int, float, complex, object, str + unicode = str +else: + from __builtin__ import bool, int, float, complex, object, unicode, str + + +if sys.version_info[0] >= 3: + def _bytes_to_complex(s): + return complex(s.decode('ascii')) + + def _bytes_to_name(s): + return s.decode('ascii') +else: + _bytes_to_complex = complex + _bytes_to_name = str + +def _is_string_like(obj): + """ + Check whether obj behaves like a string. + """ + try: + obj + '' + except (TypeError, ValueError): + return False + return True + +def _is_bytes_like(obj): + """ + Check whether obj behaves like a bytes object. + """ + try: + obj + asbytes('') + except (TypeError, ValueError): + return False + return True + + +def _to_filehandle(fname, flag='r', return_opened=False): + """ + Returns the filehandle corresponding to a string or a file. + If the string ends in '.gz', the file is automatically unzipped. + + Parameters + ---------- + fname : string, filehandle + Name of the file whose filehandle must be returned. + flag : string, optional + Flag indicating the status of the file ('r' for read, 'w' for write). + return_opened : boolean, optional + Whether to return the opening status of the file. + """ + if _is_string_like(fname): + if fname.endswith('.gz'): + import gzip + fhd = gzip.open(fname, flag) + elif fname.endswith('.bz2'): + import bz2 + fhd = bz2.BZ2File(fname) + else: + fhd = file(fname, flag) + opened = True + elif hasattr(fname, 'seek'): + fhd = fname + opened = False + else: + raise ValueError('fname must be a string or file handle') + if return_opened: + return fhd, opened + return fhd + + +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a dtype are nested. + + Parameters + ---------- + ndtype : dtype + Data-type of a structured array. + + Raises + ------ + AttributeError + If `ndtype` does not have a `names` attribute. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) + >>> np.lib._iotools.has_nested_fields(dt) + False + + """ + for name in ndtype.names or (): + if ndtype[name].names: + return True + return False + + +def flatten_dtype(ndtype, flatten_base=False): + """ + Unpack a structured data-type by collapsing nested fields and/or fields + with a shape. + + Note that the field names are lost. + + Parameters + ---------- + ndtype : dtype + The datatype to collapse + flatten_base : {False, True}, optional + Whether to transform a field with a shape into several fields or not. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ... 
('block', int, (2, 3))]) + >>> np.lib._iotools.flatten_dtype(dt) + [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')] + >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) + [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'), + dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'), + dtype('int32')] + + """ + names = ndtype.names + if names is None: + if flatten_base: + return [ndtype.base] * int(np.prod(ndtype.shape)) + return [ndtype.base] + else: + types = [] + for field in names: + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) + types.extend(flat_dt) + return types + + +class LineSplitter(object): + """ + Object to split a string at a given delimiter or at given places. + + Parameters + ---------- + delimiter : str, int, or sequence of ints, optional + If a string, character used to delimit consecutive fields. + If an integer or a sequence of integers, width(s) of each field. + comment : str, optional + Character used to mark the beginning of a comment. Default is '#'. + autostrip : bool, optional + Whether to strip each individual field. Default is True. + + """ + + def autostrip(self, method): + """ + Wrapper to strip each member of the output of `method`. + + Parameters + ---------- + method : function + Function that takes a single argument and returns a sequence of + strings. + + Returns + ------- + wrapped : function + The result of wrapping `method`. `wrapped` takes a single input + argument and returns a list of strings that are stripped of + white-space. + + """ + return lambda input: [_.strip() for _ in method(input)] + # + + def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True): + self.comments = comments + # Delimiter is a character + if isinstance(delimiter, unicode): + delimiter = delimiter.encode('ascii') + if (delimiter is None) or _is_bytes_like(delimiter): + delimiter = delimiter or None + _handyman = self._delimited_splitter + # Delimiter is a list of field widths + elif hasattr(delimiter, '__iter__'): + _handyman = self._variablewidth_splitter + idx = np.cumsum([0] + list(delimiter)) + delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] + # Delimiter is a single integer + elif int(delimiter): + (_handyman, delimiter) = ( + self._fixedwidth_splitter, int(delimiter)) + else: + (_handyman, delimiter) = (self._delimited_splitter, None) + self.delimiter = delimiter + if autostrip: + self._handyman = self.autostrip(_handyman) + else: + self._handyman = _handyman + # + + def _delimited_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(asbytes(" \r\n")) + if not line: + return [] + return line.split(self.delimiter) + # + + def _fixedwidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(asbytes("\r\n")) + if not line: + return [] + fixed = self.delimiter + slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] + return [line[s] for s in slices] + # + + def _variablewidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + if not line: + return [] + slices = self.delimiter + return [line[s] for s in slices] + # + + def __call__(self, line): + return self._handyman(line) + + +class NameValidator(object): + """ + Object to validate a list of strings to use as field names. + + The strings are stripped of any non alphanumeric character, and spaces + are replaced by '_'. 
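The three delimiter modes `LineSplitter` accepts, exercised on small inputs (illustrative; expected results shown as comments, byte strings because the splitter operates on bytes):

    from numpy.lib._iotools import LineSplitter
    from numpy.compat import asbytes

    # A character delimiter: comment stripped, fields split and stripped.
    LineSplitter(asbytes(','))(asbytes(' 1, 2 , 3 # comment'))
    # -> [b'1', b'2', b'3']

    # An integer delimiter: fixed-width fields of that many characters.
    LineSplitter(3)(asbytes('123456789'))
    # -> [b'123', b'456', b'789']

    # A sequence of integers: explicit field widths.
    LineSplitter((2, 4))(asbytes('AB1234'))
    # -> [b'AB', b'1234']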
During instantiation, the user can define a list + of names to exclude, as well as a list of invalid characters. Names in + the exclusion list are appended a '_' character. + + Once an instance has been created, it can be called with a list of + names, and a list of valid names will be created. The `__call__` + method accepts an optional keyword "default" that sets the default name + in case of ambiguity. By default this is 'f', so that names will + default to `f0`, `f1`, etc. + + Parameters + ---------- + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default + list ['return', 'file', 'print']. Excluded names are appended an + underscore: for example, `file` becomes `file_` if supplied. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + casesensitive : {True, False, 'upper', 'lower'}, optional + * If True, field names are case-sensitive. + * If False or 'upper', field names are converted to upper case. + * If 'lower', field names are converted to lower case. + + The default value is True. + replace_space : '_', optional + Character(s) used in replacement of white spaces. + + Notes + ----- + Calling an instance of `NameValidator` is the same as calling its + method `validate`. + + Examples + -------- + >>> validator = np.lib._iotools.NameValidator() + >>> validator(['file', 'field2', 'with space', 'CaSe']) + ['file_', 'field2', 'with_space', 'CaSe'] + + >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], + deletechars='q', + case_sensitive='False') + >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) + ['excl_', 'field2', 'no_', 'with_space', 'case'] + + """ + # + defaultexcludelist = ['return', 'file', 'print'] + defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + # + + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): + # Process the exclusion list .. + if excludelist is None: + excludelist = [] + excludelist.extend(self.defaultexcludelist) + self.excludelist = excludelist + # Process the list of characters to delete + if deletechars is None: + delete = self.defaultdeletechars + else: + delete = set(deletechars) + delete.add('"') + self.deletechars = delete + # Process the case option ..... + if (case_sensitive is None) or (case_sensitive is True): + self.case_converter = lambda x: x + elif (case_sensitive is False) or ('u' in case_sensitive): + self.case_converter = lambda x: x.upper() + elif 'l' in case_sensitive: + self.case_converter = lambda x: x.lower() + else: + self.case_converter = lambda x: x + # + self.replace_space = replace_space + + def validate(self, names, defaultfmt="f%i", nbfields=None): + """ + Validate a list of strings as field names for a structured array. + + Parameters + ---------- + names : sequence of str + Strings to be validated. + defaultfmt : str, optional + Default format string, used if validating a given string + reduces its length to zero. + nboutput : integer, optional + Final number of validated names, used to expand or shrink the + initial list of names. + + Returns + ------- + validatednames : list of str + The list of validated field names. + + Notes + ----- + A `NameValidator` instance can be called directly, which is the + same as calling `validate`. For examples, see `NameValidator`. + + """ + # Initial checks .............. 
+ if (names is None): + if (nbfields is None): + return None + names = [] + if isinstance(names, basestring): + names = [names, ] + if nbfields is not None: + nbnames = len(names) + if (nbnames < nbfields): + names = list(names) + [''] * (nbfields - nbnames) + elif (nbnames > nbfields): + names = names[:nbfields] + # Set some shortcuts ........... + deletechars = self.deletechars + excludelist = self.excludelist + case_converter = self.case_converter + replace_space = self.replace_space + # Initializes some variables ... + validatednames = [] + seen = dict() + nbempty = 0 + # + for item in names: + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) + item = ''.join([c for c in item if c not in deletechars]) + if item == '': + item = defaultfmt % nbempty + while item in names: + nbempty += 1 + item = defaultfmt % nbempty + nbempty += 1 + elif item in excludelist: + item += '_' + cnt = seen.get(item, 0) + if cnt > 0: + validatednames.append(item + '_%d' % cnt) + else: + validatednames.append(item) + seen[item] = cnt + 1 + return tuple(validatednames) + # + + def __call__(self, names, defaultfmt="f%i", nbfields=None): + return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) + + +def str2bool(value): + """ + Tries to transform a string supposed to represent a boolean to a boolean. + + Parameters + ---------- + value : str + The string that is transformed to a boolean. + + Returns + ------- + boolval : bool + The boolean representation of `value`. + + Raises + ------ + ValueError + If the string is not 'True' or 'False' (case independent) + + Examples + -------- + >>> np.lib._iotools.str2bool('TRUE') + True + >>> np.lib._iotools.str2bool('false') + False + + """ + value = value.upper() + if value == asbytes('TRUE'): + return True + elif value == asbytes('FALSE'): + return False + else: + raise ValueError("Invalid boolean") + + +class ConverterError(Exception): + """ + Exception raised when an error occurs in a converter for string values. + + """ + pass + +class ConverterLockError(ConverterError): + """ + Exception raised when an attempt is made to upgrade a locked converter. + + """ + pass + +class ConversionWarning(UserWarning): + """ + Warning issued when a string converter has a problem. + + Notes + ----- + In `genfromtxt` a `ConversionWarning` is issued if raising exceptions + is explicitly suppressed with the "invalid_raise" keyword. + + """ + pass + + +class StringConverter(object): + """ + Factory class for function transforming a string into another object + (int, float). + + After initialization, an instance can be called to transform a string + into another object. If the string is recognized as representing a + missing value, a default value is returned. + + Attributes + ---------- + func : function + Function used for the conversion. + default : any + Default value to return when the input corresponds to a missing + value. + type : type + Type of the output. + _status : int + Integer representing the order of the conversion. + _mapper : sequence of tuples + Sequence of tuples (dtype, function, default value) to evaluate in + order. + _locked : bool + Holds `locked` parameter. + + Parameters + ---------- + dtype_or_func : {None, dtype, function}, optional + If a `dtype`, specifies the input data type, used to define a basic + function and a default value for missing data. For example, when + `dtype` is float, the `func` attribute is set to `float` and the + default value to `np.nan`. 
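The conversion strategy this class describes reduces to a few lines: try increasingly general converters until one accepts the string. A simplified sketch (the real class also tracks defaults, missing values and locking):

    def best_convert(value, funcs=(int, float, complex)):
        # Try each converter in order of increasing generality.
        for func in funcs:
            try:
                return func(value)
            except ValueError:
                continue
        return value  # fall back to the raw string

    best_convert('42')    # -> 42
    best_convert('3.5')   # -> 3.5
    best_convert('1+2j')  # -> (1+2j)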
If a function, this function is used to + convert a string to another object. In this case, it is recommended + to give an associated default value as input. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, `StringConverter` + tries to supply a reasonable default value. + missing_values : sequence of str, optional + Sequence of strings indicating a missing value. + locked : bool, optional + Whether the StringConverter should be locked to prevent automatic + upgrade or not. Default is False. + + """ + # + _mapper = [(nx.bool_, str2bool, False), + (nx.integer, int, -1), + (nx.floating, float, nx.nan), + (complex, _bytes_to_complex, nx.nan + 0j), + (nx.string_, bytes, asbytes('???'))] + (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper) + # + + @classmethod + def _getdtype(cls, val): + """Returns the dtype of the input variable.""" + return np.array(val).dtype + # + + @classmethod + def _getsubdtype(cls, val): + """Returns the type of the dtype of the input variable.""" + return np.array(val).dtype.type + # + # This is a bit annoying. We want to return the "general" type in most + # cases (ie. "string" rather than "S10"), but we want to return the + # specific type for datetime64 (ie. "datetime64[us]" rather than + # "datetime64"). + + @classmethod + def _dtypeortype(cls, dtype): + """Returns dtype for datetime64 and type of dtype otherwise.""" + if dtype.type == np.datetime64: + return dtype + return dtype.type + # + + @classmethod + def upgrade_mapper(cls, func, default=None): + """ + Upgrade the mapper of a StringConverter by adding a new function and + its corresponding default. + + The input function (or sequence of functions) and its associated + default value (if any) is inserted in penultimate position of the + mapper. The corresponding type is estimated from the dtype of the + default value. + + Parameters + ---------- + func : var + Function, or sequence of functions + + Examples + -------- + >>> import dateutil.parser + >>> import datetime + >>> dateparser = dateutil.parser.parse + >>> defaultdate = datetime.date(2000, 1, 1) + >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) + """ + # Func is a single function + if hasattr(func, '__call__'): + cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) + return + elif hasattr(func, '__iter__'): + if isinstance(func[0], (tuple, list)): + for _ in func: + cls._mapper.insert(-1, _) + return + if default is None: + default = [None] * len(func) + else: + default = list(default) + default.append([None] * (len(func) - len(default))) + for (fct, dft) in zip(func, default): + cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) + # + + def __init__(self, dtype_or_func=None, default=None, missing_values=None, + locked=False): + # Convert unicode (for Py3) + if isinstance(missing_values, unicode): + missing_values = asbytes(missing_values) + elif isinstance(missing_values, (list, tuple)): + missing_values = asbytes_nested(missing_values) + # Defines a lock for upgrade + self._locked = bool(locked) + # No input dtype: minimal initialization + if dtype_or_func is None: + self.func = str2bool + self._status = 0 + self.default = default or False + dtype = np.dtype('bool') + else: + # Is the input a np.dtype ?
+ try: + self.func = None + dtype = np.dtype(dtype_or_func) + except TypeError: + # dtype_or_func must be a function, then + if not hasattr(dtype_or_func, '__call__'): + errmsg = ("The input argument `dtype` is neither a" + " function nor a dtype (got '%s' instead)") + raise TypeError(errmsg % type(dtype_or_func)) + # Set the function + self.func = dtype_or_func + # If we don't have a default, try to guess it or set it to + # None + if default is None: + try: + default = self.func(asbytes('0')) + except ValueError: + default = None + dtype = self._getdtype(default) + # Set the status according to the dtype + _status = -1 + for (i, (deftype, func, default_def)) in enumerate(self._mapper): + if np.issubdtype(dtype.type, deftype): + _status = i + if default is None: + self.default = default_def + else: + self.default = default + break + if _status == -1: + # We never found a match in the _mapper... + _status = 0 + self.default = default + self._status = _status + # If the input was a dtype, set the function to the last we saw + if self.func is None: + self.func = func + # If the status is 1 (int), change the function to + # something more robust. + if self.func == self._mapper[1][1]: + if issubclass(dtype.type, np.uint64): + self.func = np.uint64 + elif issubclass(dtype.type, np.int64): + self.func = np.int64 + else: + self.func = lambda x: int(float(x)) + # Store the list of strings corresponding to missing values. + if missing_values is None: + self.missing_values = set([asbytes('')]) + else: + if isinstance(missing_values, bytes): + missing_values = missing_values.split(asbytes(",")) + self.missing_values = set(list(missing_values) + [asbytes('')]) + # + self._callingfunction = self._strict_call + self.type = self._dtypeortype(dtype) + self._checked = False + self._initial_default = default + # + + def _loose_call(self, value): + try: + return self.func(value) + except ValueError: + return self.default + # + + def _strict_call(self, value): + try: + return self.func(value) + except ValueError: + if value.strip() in self.missing_values: + if not self._status: + self._checked = False + return self.default + raise ValueError("Cannot convert string '%s'" % value) + # + + def __call__(self, value): + return self._callingfunction(value) + # + + def upgrade(self, value): + """ + Find the best converter for a given string, and return the result. + + The supplied string `value` is converted by testing different + converters in order. First the `func` method of the + `StringConverter` instance is tried, if this fails other available + converters are tried. The order in which these other converters + are tried is determined by the `_status` attribute of the instance. + + Parameters + ---------- + value : str + The string to convert. + + Returns + ------- + out : any + The result of converting `value` with the appropriate converter. + + """ + self._checked = True + try: + self._strict_call(value) + except ValueError: + # Raise an exception if we locked the converter... 
+ if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + errmsg = "Could not find a valid conversion function" + raise ConverterError(errmsg) + elif _status < _statusmax - 1: + _status += 1 + (self.type, self.func, default) = self._mapper[_status] + self._status = _status + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + self.upgrade(value) + + def iterupgrade(self, value): + self._checked = True + if not hasattr(value, '__iter__'): + value = (value,) + _strict_call = self._strict_call + try: + for _m in value: + _strict_call(_m) + except ValueError: + # Raise an exception if we locked the converter... + if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + raise ConverterError( + "Could not find a valid conversion function" + ) + elif _status < _statusmax - 1: + _status += 1 + (self.type, self.func, default) = self._mapper[_status] + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + self._status = _status + self.iterupgrade(value) + + def update(self, func, default=None, testing_value=None, + missing_values=asbytes(''), locked=False): + """ + Set StringConverter attributes directly. + + Parameters + ---------- + func : function + Conversion function. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, + `StringConverter` tries to supply a reasonable default value. + testing_value : str, optional + A string representing a standard input value of the converter. + This string is used to help defining a reasonable default + value. + missing_values : sequence of str, optional + Sequence of strings indicating a missing value. + locked : bool, optional + Whether the StringConverter should be locked to prevent + automatic upgrade or not. Default is False. + + Notes + ----- + `update` takes the same parameters as the constructor of + `StringConverter`, except that `func` does not accept a `dtype` + whereas `dtype_or_func` in the constructor does. + + """ + self.func = func + self._locked = locked + # Don't reset the default to None if we can avoid it + if default is not None: + self.default = default + self.type = self._dtypeortype(self._getdtype(default)) + else: + try: + tester = func(testing_value or asbytes('1')) + except (TypeError, ValueError): + tester = None + self.type = self._dtypeortype(self._getdtype(tester)) + # Add the missing values to the existing set + if missing_values is not None: + if _is_bytes_like(missing_values): + self.missing_values.add(missing_values) + elif hasattr(missing_values, '__iter__'): + for val in missing_values: + self.missing_values.add(val) + else: + self.missing_values = [] + + +def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): + """ + Convenience function to create a `np.dtype` object. + + The function processes the input `dtype` and matches it with the given + names. + + Parameters + ---------- + ndtype : var + Definition of the dtype. Can be any string or dictionary recognized + by the `np.dtype` function, or a sequence of types. 
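A short illustration of the upgrade path on a real instance: `b'nan'` parses as a float but not as an int, so the converter steps up one slot in `_mapper` (sketch; behaviour per the code above):

    from numpy.lib._iotools import StringConverter

    conv = StringConverter(int, default=-1)
    conv(b'7')            # -> 7
    conv.upgrade(b'nan')  # int(float('nan')) raises, so upgrade to float
    conv(b'nan')          # -> nan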
names : str or sequence, optional + Sequence of strings to use as field names for a structured dtype. + For convenience, `names` can be a comma-separated string of names. + defaultfmt : str, optional + Format string used to define missing names, such as ``"f%i"`` + (default) or ``"fields_%02i"``. + validationargs : optional + A series of optional arguments used to initialize a + `NameValidator`. + + Examples + -------- + >>> np.lib._iotools.easy_dtype(float) + dtype('float64') + >>> np.lib._iotools.easy_dtype("i4, f8") + dtype([('f0', '<i4'), ('f1', '<f8')]) + >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i") + dtype([('field_000', '<i4'), ('field_001', '<f8')]) + + >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c") + dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')]) + >>> np.lib._iotools.easy_dtype(float, names="a,b,c") + dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')]) + + """ + try: + ndtype = np.dtype(ndtype) + except TypeError: + validate = NameValidator(**validationargs) + nbfields = len(ndtype) + if names is None: + names = [''] * len(ndtype) + elif isinstance(names, basestring): + names = names.split(",") + names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt) + ndtype = np.dtype(dict(formats=ndtype, names=names)) + else: + nbtypes = len(ndtype) + # Explicit names + if names is not None: + validate = NameValidator(**validationargs) + if isinstance(names, basestring): + names = names.split(",") + # Simple dtype: repeat to match the nb of names + if nbtypes == 0: + formats = tuple([ndtype.type] * len(names)) + names = validate(names, defaultfmt=defaultfmt) + ndtype = np.dtype(list(zip(names, formats))) + # Structured dtype: just validate the names + else: + ndtype.names = validate(names, nbfields=nbtypes, + defaultfmt=defaultfmt) + # No implicit names + elif (nbtypes > 0): + validate = NameValidator(**validationargs) + # Default initial names : should we change the format ? + if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and + (defaultfmt != "f%i")): + ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt) + # Explicit initial names : just validate + else: + ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt) + return ndtype diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_version.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_version.py new file mode 100644 index 0000000000000..54b9c1dc78125 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_version.py @@ -0,0 +1,156 @@ +"""Utility to compare (Numpy) version strings. + +The NumpyVersion class allows properly comparing numpy version strings. +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. + +""" +from __future__ import division, absolute_import, print_function + +import re + +from numpy.compat import basestring + + +__all__ = ['NumpyVersion'] + + +class NumpyVersion(): + """Parse and compare numpy version strings. + + Numpy has the following versioning scheme (numbers given are examples; they + can be > 9 in principle): + + - Released version: '1.8.0', '1.8.1', etc. + - Alpha: '1.8.0a1', '1.8.0a2', etc. + - Beta: '1.8.0b1', '1.8.0b2', etc. + - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. + - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) + - Development versions after a1: '1.8.0a1.dev-f1234afa', + '1.8.0b2.dev-f1234afa', + '1.8.1rc1.dev-f1234afa', etc. + - Development versions (no git hash available): '1.8.0.dev-Unknown' + + Comparing needs to be done against a valid version string or other + `NumpyVersion` instance. Note that all development versions of the same + (pre-)release compare equal. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + vstring : str + Numpy version string (``np.__version__``). + + Examples + -------- + >>> from numpy.lib import NumpyVersion + >>> if NumpyVersion(np.__version__) < '1.7.0': + ...
print('skip') + skip + + >>> NumpyVersion('1.7') # raises ValueError, add ".0" + + """ + + def __init__(self, vstring): + self.vstring = vstring + ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) + if not ver_main: + raise ValueError("Not a valid numpy version string") + + self.version = ver_main.group() + self.major, self.minor, self.bugfix = [int(x) for x in + self.version.split('.')] + if len(vstring) == ver_main.end(): + self.pre_release = 'final' + else: + alpha = re.match(r'a\d', vstring[ver_main.end():]) + beta = re.match(r'b\d', vstring[ver_main.end():]) + rc = re.match(r'rc\d', vstring[ver_main.end():]) + pre_rel = [m for m in [alpha, beta, rc] if m is not None] + if pre_rel: + self.pre_release = pre_rel[0].group() + else: + self.pre_release = '' + + self.is_devversion = bool(re.search(r'.dev', vstring)) + + def _compare_version(self, other): + """Compare major.minor.bugfix""" + if self.major == other.major: + if self.minor == other.minor: + if self.bugfix == other.bugfix: + vercmp = 0 + elif self.bugfix > other.bugfix: + vercmp = 1 + else: + vercmp = -1 + elif self.minor > other.minor: + vercmp = 1 + else: + vercmp = -1 + elif self.major > other.major: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare_pre_release(self, other): + """Compare alpha/beta/rc/final.""" + if self.pre_release == other.pre_release: + vercmp = 0 + elif self.pre_release == 'final': + vercmp = 1 + elif other.pre_release == 'final': + vercmp = -1 + elif self.pre_release > other.pre_release: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare(self, other): + if not isinstance(other, (basestring, NumpyVersion)): + raise ValueError("Invalid object to compare with NumpyVersion.") + + if isinstance(other, basestring): + other = NumpyVersion(other) + + vercmp = self._compare_version(other) + if vercmp == 0: + # Same x.y.z version, check for alpha/beta/rc + vercmp = self._compare_pre_release(other) + if vercmp == 0: + # Same version and same pre-release, check if dev version + if self.is_devversion is other.is_devversion: + vercmp = 0 + elif self.is_devversion: + vercmp = -1 + else: + vercmp = 1 + + return vercmp + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __eq__(self, other): + return self._compare(other) == 0 + + def __ne__(self, other): + return self._compare(other) != 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def __repr__(self): + return "NumpyVersion(%s)" % self.vstring diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraypad.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraypad.py new file mode 100644 index 0000000000000..bbfdce794e770 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraypad.py @@ -0,0 +1,1475 @@ +""" +The arraypad module contains a group of functions to pad values onto the edges +of an n-dimensional array. + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.compat import long + + +__all__ = ['pad'] + + +############################################################################### +# Private utility functions. + + +def _arange_ndarray(arr, shape, axis, reverse=False): + """ + Create an ndarray of `shape` with increments along specified `axis` + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + shape : tuple of ints + Shape of desired array.
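The ordering `NumpyVersion` implements, in doctest-free form (sketch; results follow from the comparison rules above):

    from numpy.lib import NumpyVersion

    NumpyVersion('1.9.0a1') < '1.9.0rc1'           # True: alpha < rc
    NumpyVersion('1.9.0rc1') < '1.9.0'             # True: rc < final
    NumpyVersion('1.9.0.dev-f1234af') < '1.9.0a1'  # True: dev < pre-release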
Should be equivalent to `arr.shape` except + `shape[axis]` which may have any positive value. + axis : int + Axis to increment along. + reverse : bool + If False, increment in a positive fashion from 1 to `shape[axis]`, + inclusive. If True, the bounds are the same but the order reversed. + + Returns + ------- + padarr : ndarray + Output array sized to pad `arr` along `axis`, with linear range from + 1 to `shape[axis]` along specified `axis`. + + Notes + ----- + The range is deliberately 1-indexed for this specific use case. Think of + this algorithm as broadcasting `np.arange` to a single `axis` of an + arbitrarily shaped ndarray. + + """ + initshape = tuple(1 if i != axis else shape[axis] + for (i, x) in enumerate(arr.shape)) + if not reverse: + padarr = np.arange(1, shape[axis] + 1) + else: + padarr = np.arange(shape[axis], 0, -1) + padarr = padarr.reshape(initshape) + for i, dim in enumerate(shape): + if padarr.shape[i] != dim: + padarr = padarr.repeat(dim, axis=i) + return padarr + + +def _round_ifneeded(arr, dtype): + """ + Rounds arr inplace if destination dtype is integer. + + Parameters + ---------- + arr : ndarray + Input array. + dtype : dtype + The dtype of the destination array. + + """ + if np.issubdtype(dtype, np.integer): + arr.round(out=arr) + + +def _prepend_const(arr, pad_amt, val, axis=-1): + """ + Prepend constant `val` along `axis` of `arr`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + val : scalar + Constant value to use. For best results should be of type `arr.dtype`; + if not `arr.dtype` will be cast to `arr.dtype`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` constant `val` prepended along `axis`. + + """ + if pad_amt == 0: + return arr + padshape = tuple(x if i != axis else pad_amt + for (i, x) in enumerate(arr.shape)) + if val == 0: + return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr), + axis=axis) + else: + return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype), + arr), axis=axis) + + +def _append_const(arr, pad_amt, val, axis=-1): + """ + Append constant `val` along `axis` of `arr`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + val : scalar + Constant value to use. For best results should be of type `arr.dtype`; + if not `arr.dtype` will be cast to `arr.dtype`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` constant `val` appended along `axis`. + + """ + if pad_amt == 0: + return arr + padshape = tuple(x if i != axis else pad_amt + for (i, x) in enumerate(arr.shape)) + if val == 0: + return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)), + axis=axis) + else: + return np.concatenate( + (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis) + + +def _prepend_edge(arr, pad_amt, axis=-1): + """ + Prepend `pad_amt` to `arr` along `axis` by extending edge values. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, extended by `pad_amt` edge values appended along `axis`. 
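These constant and edge helpers back the public `np.pad` modes of the same names; a quick sketch of the observable behaviour (expected output in comments):

    import numpy as np

    a = np.array([1, 2, 3])
    # _prepend_const/_append_const fill with a given scalar.
    np.pad(a, (2, 1), mode='constant', constant_values=9)
    # -> array([9, 9, 1, 2, 3, 9])
    # _prepend_edge/_append_edge repeat the boundary sample.
    np.pad(a, (2, 1), mode='edge')
    # -> array([1, 1, 1, 2, 3, 3])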
+ + """ + if pad_amt == 0: + return arr + + edge_slice = tuple(slice(None) if i != axis else 0 + for (i, x) in enumerate(arr.shape)) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + edge_arr = arr[edge_slice].reshape(pad_singleton) + return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr), + axis=axis) + + +def _append_edge(arr, pad_amt, axis=-1): + """ + Append `pad_amt` to `arr` along `axis` by extending edge values. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, extended by `pad_amt` edge values prepended along + `axis`. + + """ + if pad_amt == 0: + return arr + + edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1 + for (i, x) in enumerate(arr.shape)) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + edge_arr = arr[edge_slice].reshape(pad_singleton) + return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)), + axis=axis) + + +def _prepend_ramp(arr, pad_amt, end, axis=-1): + """ + Prepend linear ramp along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + end : scalar + Constal value to use. For best results should be of type `arr.dtype`; + if not `arr.dtype` will be cast to `arr.dtype`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values prepended along `axis`. The + prepended region ramps linearly from the edge value to `end`. + + """ + if pad_amt == 0: + return arr + + # Generate shape for final concatenated array + padshape = tuple(x if i != axis else pad_amt + for (i, x) in enumerate(arr.shape)) + + # Generate an n-dimensional array incrementing along `axis` + ramp_arr = _arange_ndarray(arr, padshape, axis, + reverse=True).astype(np.float64) + + # Appropriate slicing to extract n-dimensional edge along `axis` + edge_slice = tuple(slice(None) if i != axis else 0 + for (i, x) in enumerate(arr.shape)) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + + # Extract edge, reshape to original rank, and extend along `axis` + edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) + + # Linear ramp + slope = (end - edge_pad) / float(pad_amt) + ramp_arr = ramp_arr * slope + ramp_arr += edge_pad + _round_ifneeded(ramp_arr, arr.dtype) + + # Ramp values will most likely be float, cast them to the same type as arr + return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis) + + +def _append_ramp(arr, pad_amt, end, axis=-1): + """ + Append linear ramp along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + end : scalar + Constal value to use. For best results should be of type `arr.dtype`; + if not `arr.dtype` will be cast to `arr.dtype`. + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region ramps linearly from the edge value to `end`. 
+ + """ + if pad_amt == 0: + return arr + + # Generate shape for final concatenated array + padshape = tuple(x if i != axis else pad_amt + for (i, x) in enumerate(arr.shape)) + + # Generate an n-dimensional array incrementing along `axis` + ramp_arr = _arange_ndarray(arr, padshape, axis, + reverse=False).astype(np.float64) + + # Slice a chunk from the edge to calculate stats on + edge_slice = tuple(slice(None) if i != axis else -1 + for (i, x) in enumerate(arr.shape)) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + + # Extract edge, reshape to original rank, and extend along `axis` + edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) + + # Linear ramp + slope = (end - edge_pad) / float(pad_amt) + ramp_arr = ramp_arr * slope + ramp_arr += edge_pad + _round_ifneeded(ramp_arr, arr.dtype) + + # Ramp values will most likely be float, cast them to the same type as arr + return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis) + + +def _prepend_max(arr, pad_amt, num, axis=-1): + """ + Prepend `pad_amt` maximum values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + num : int + Depth into `arr` along `axis` to calculate maximum. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + prepended region is the maximum of the first `num` values along + `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _prepend_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + max_slice = tuple(slice(None) if i != axis else slice(num) + for (i, x) in enumerate(arr.shape)) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + + # Extract slice, calculate max, reshape to add singleton dimension back + max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) + + # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` + return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr), + axis=axis) + + +def _append_max(arr, pad_amt, num, axis=-1): + """ + Pad one `axis` of `arr` with the maximum of the last `num` elements. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + num : int + Depth into `arr` along `axis` to calculate maximum. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region is the maximum of the final `num` values along `axis`. 
+ + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _append_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + end = arr.shape[axis] - 1 + if num is not None: + max_slice = tuple( + slice(None) if i != axis else slice(end, end - num, -1) + for (i, x) in enumerate(arr.shape)) + else: + max_slice = tuple(slice(None) for x in arr.shape) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + + # Extract slice, calculate max, reshape to add singleton dimension back + max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) + + # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` + return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)), + axis=axis) + + +def _prepend_mean(arr, pad_amt, num, axis=-1): + """ + Prepend `pad_amt` mean values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + num : int + Depth into `arr` along `axis` to calculate mean. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values prepended along `axis`. The + prepended region is the mean of the first `num` values along `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _prepend_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + mean_slice = tuple(slice(None) if i != axis else slice(num) + for (i, x) in enumerate(arr.shape)) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + + # Extract slice, calculate mean, reshape to add singleton dimension back + mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton) + _round_ifneeded(mean_chunk, arr.dtype) + + # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` + return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype), + arr), axis=axis) + + +def _append_mean(arr, pad_amt, num, axis=-1): + """ + Append `pad_amt` mean values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + num : int + Depth into `arr` along `axis` to calculate mean. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region is the maximum of the final `num` values along `axis`. 
+ + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _append_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + end = arr.shape[axis] - 1 + if num is not None: + mean_slice = tuple( + slice(None) if i != axis else slice(end, end - num, -1) + for (i, x) in enumerate(arr.shape)) + else: + mean_slice = tuple(slice(None) for x in arr.shape) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + + # Extract slice, calculate mean, reshape to add singleton dimension back + mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton) + _round_ifneeded(mean_chunk, arr.dtype) + + # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` + return np.concatenate( + (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) + + +def _prepend_med(arr, pad_amt, num, axis=-1): + """ + Prepend `pad_amt` median values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to prepend. + num : int + Depth into `arr` along `axis` to calculate median. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values prepended along `axis`. The + prepended region is the median of the first `num` values along `axis`. + + """ + if pad_amt == 0: + return arr + + # Equivalent to edge padding for single value, so do that instead + if num == 1: + return _prepend_edge(arr, pad_amt, axis) + + # Use entire array if `num` is too large + if num is not None: + if num >= arr.shape[axis]: + num = None + + # Slice a chunk from the edge to calculate stats on + med_slice = tuple(slice(None) if i != axis else slice(num) + for (i, x) in enumerate(arr.shape)) + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + + # Extract slice, calculate median, reshape to add singleton dimension back + med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) + _round_ifneeded(med_chunk, arr.dtype) + + # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` + return np.concatenate( + (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) + + +def _append_med(arr, pad_amt, num, axis=-1): + """ + Append `pad_amt` median values along `axis`. + + Parameters + ---------- + arr : ndarray + Input array of arbitrary shape. + pad_amt : int + Amount of padding to append. + num : int + Depth into `arr` along `axis` to calculate median. + Range: [1, `arr.shape[axis]`] or None (entire axis) + axis : int + Axis along which to pad `arr`. + + Returns + ------- + padarr : ndarray + Output array, with `pad_amt` values appended along `axis`. The + appended region is the median of the final `num` values along `axis`. 
+
+    """
+    if pad_amt == 0:
+        return arr
+
+    # Equivalent to edge padding for single value, so do that instead
+    if num == 1:
+        return _append_edge(arr, pad_amt, axis)
+
+    # Use entire array if `num` is too large
+    if num is not None:
+        if num >= arr.shape[axis]:
+            num = None
+
+    # Slice a chunk from the edge to calculate stats on
+    end = arr.shape[axis] - 1
+    if num is not None:
+        med_slice = tuple(
+            slice(None) if i != axis else slice(end, end - num, -1)
+            for (i, x) in enumerate(arr.shape))
+    else:
+        med_slice = tuple(slice(None) for x in arr.shape)
+
+    # Shape to restore singleton dimension after slicing
+    pad_singleton = tuple(x if i != axis else 1
+                          for (i, x) in enumerate(arr.shape))
+
+    # Extract slice, calculate median, reshape to add singleton dimension back
+    med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+    _round_ifneeded(med_chunk, arr.dtype)
+
+    # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
+    return np.concatenate(
+        (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+
+
+def _prepend_min(arr, pad_amt, num, axis=-1):
+    """
+    Prepend `pad_amt` minimum values along `axis`.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array of arbitrary shape.
+    pad_amt : int
+        Amount of padding to prepend.
+    num : int
+        Depth into `arr` along `axis` to calculate minimum.
+        Range: [1, `arr.shape[axis]`] or None (entire axis)
+    axis : int
+        Axis along which to pad `arr`.
+
+    Returns
+    -------
+    padarr : ndarray
+        Output array, with `pad_amt` values prepended along `axis`. The
+        prepended region is the minimum of the first `num` values along
+        `axis`.
+
+    """
+    if pad_amt == 0:
+        return arr
+
+    # Equivalent to edge padding for single value, so do that instead
+    if num == 1:
+        return _prepend_edge(arr, pad_amt, axis)
+
+    # Use entire array if `num` is too large
+    if num is not None:
+        if num >= arr.shape[axis]:
+            num = None
+
+    # Slice a chunk from the edge to calculate stats on
+    min_slice = tuple(slice(None) if i != axis else slice(num)
+                      for (i, x) in enumerate(arr.shape))
+
+    # Shape to restore singleton dimension after slicing
+    pad_singleton = tuple(x if i != axis else 1
+                          for (i, x) in enumerate(arr.shape))
+
+    # Extract slice, calculate min, reshape to add singleton dimension back
+    min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+
+    # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
+    return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
+                          axis=axis)
+
+
+def _append_min(arr, pad_amt, num, axis=-1):
+    """
+    Append `pad_amt` minimum values along `axis`.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array of arbitrary shape.
+    pad_amt : int
+        Amount of padding to append.
+    num : int
+        Depth into `arr` along `axis` to calculate minimum.
+        Range: [1, `arr.shape[axis]`] or None (entire axis)
+    axis : int
+        Axis along which to pad `arr`.
+
+    Returns
+    -------
+    padarr : ndarray
+        Output array, with `pad_amt` values appended along `axis`. The
+        appended region is the minimum of the final `num` values along `axis`.
+
+    """
+    if pad_amt == 0:
+        return arr
+
+    # Equivalent to edge padding for single value, so do that instead
+    if num == 1:
+        return _append_edge(arr, pad_amt, axis)
+
+    # Use entire array if `num` is too large
+    if num is not None:
+        if num >= arr.shape[axis]:
+            num = None
+
+    # Slice a chunk from the edge to calculate stats on
+    end = arr.shape[axis] - 1
+    if num is not None:
+        min_slice = tuple(
+            slice(None) if i != axis else slice(end, end - num, -1)
+            for (i, x) in enumerate(arr.shape))
+    else:
+        min_slice = tuple(slice(None) for x in arr.shape)
+
+    # Shape to restore singleton dimension after slicing
+    pad_singleton = tuple(x if i != axis else 1
+                          for (i, x) in enumerate(arr.shape))
+
+    # Extract slice, calculate min, reshape to add singleton dimension back
+    min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+
+    # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
+    return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
+                          axis=axis)
+
+
+def _pad_ref(arr, pad_amt, method, axis=-1):
+    """
+    Pad `axis` of `arr` by reflection.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array of arbitrary shape.
+    pad_amt : tuple of ints, length 2
+        Padding to (prepend, append) along `axis`.
+    method : str
+        Controls method of reflection; options are 'even' or 'odd'.
+    axis : int
+        Axis along which to pad `arr`.
+
+    Returns
+    -------
+    padarr : ndarray
+        Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
+        values appended along `axis`. Both regions are padded with reflected
+        values from the original array.
+
+    Notes
+    -----
+    This algorithm does not pad with repetition, i.e. the edges are not
+    repeated in the reflection. For that behavior, use `method='symmetric'`.
+
+    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
+    single function, otherwise the indexing tricks for non-integer multiples
+    of the original shape would break the repetition in the final iteration.
+
+    """
+    # Implicit booleanness to test for zero (or None) in any scalar type
+    if pad_amt[0] == 0 and pad_amt[1] == 0:
+        return arr
+
+    ##########################################################################
+    # Prepended region
+
+    # Slice off a reverse indexed chunk from near edge to pad `arr` before
+    ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
+                      for (i, x) in enumerate(arr.shape))
+
+    ref_chunk1 = arr[ref_slice]
+
+    # Shape to restore singleton dimension after slicing
+    pad_singleton = tuple(x if i != axis else 1
+                          for (i, x) in enumerate(arr.shape))
+    if pad_amt[0] == 1:
+        ref_chunk1 = ref_chunk1.reshape(pad_singleton)
+
+    # Memory/computationally more expensive, only do this if `method='odd'`
+    if 'odd' in method and pad_amt[0] > 0:
+        edge_slice1 = tuple(slice(None) if i != axis else 0
+                            for (i, x) in enumerate(arr.shape))
+        edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+        ref_chunk1 = 2 * edge_chunk - ref_chunk1
+        del edge_chunk
+
+    ##########################################################################
+    # Appended region
+
+    # Slice off a reverse indexed chunk from far edge to pad `arr` after
+    start = arr.shape[axis] - pad_amt[1] - 1
+    end = arr.shape[axis] - 1
+    ref_slice = tuple(slice(None) if i != axis else slice(start, end)
+                      for (i, x) in enumerate(arr.shape))
+    rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
+                    for (i, x) in enumerate(arr.shape))
+    ref_chunk2 = arr[ref_slice][rev_idx]
+
+    if pad_amt[1] == 1:
+        ref_chunk2 = ref_chunk2.reshape(pad_singleton)
+
+    if 'odd' in method:
+        edge_slice2 = tuple(slice(None) if i != axis else -1
+                            for (i, x) in enumerate(arr.shape))
+        edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+        ref_chunk2 = 2 * edge_chunk - ref_chunk2
+        del edge_chunk
+
+    # Concatenate `arr` with both chunks, extending along `axis`
+    return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)
+
+
+def _pad_sym(arr, pad_amt, method, axis=-1):
+    """
+    Pad `axis` of `arr` by symmetry.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array of arbitrary shape.
+    pad_amt : tuple of ints, length 2
+        Padding to (prepend, append) along `axis`.
+    method : str
+        Controls method of symmetry; options are 'even' or 'odd'.
+    axis : int
+        Axis along which to pad `arr`.
+
+    Returns
+    -------
+    padarr : ndarray
+        Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
+        values appended along `axis`. Both regions are padded with symmetric
+        values from the original array.
+
+    Notes
+    -----
+    This algorithm DOES pad with repetition, i.e. the edges are repeated.
+    For a method that does not repeat edges, use `method='reflect'`.
+
+    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
+    single function, otherwise the indexing tricks for non-integer multiples
+    of the original shape would break the repetition in the final iteration.
+
+    """
+    # Implicit booleanness to test for zero (or None) in any scalar type
+    if pad_amt[0] == 0 and pad_amt[1] == 0:
+        return arr
+
+    ##########################################################################
+    # Prepended region
+
+    # Slice off a reverse indexed chunk from near edge to pad `arr` before
+    sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
+                      for (i, x) in enumerate(arr.shape))
+    rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
+                    for (i, x) in enumerate(arr.shape))
+    sym_chunk1 = arr[sym_slice][rev_idx]
+
+    # Shape to restore singleton dimension after slicing
+    pad_singleton = tuple(x if i != axis else 1
+                          for (i, x) in enumerate(arr.shape))
+    if pad_amt[0] == 1:
+        sym_chunk1 = sym_chunk1.reshape(pad_singleton)
+
+    # Memory/computationally more expensive, only do this if `method='odd'`
+    if 'odd' in method and pad_amt[0] > 0:
+        edge_slice1 = tuple(slice(None) if i != axis else 0
+                            for (i, x) in enumerate(arr.shape))
+        edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+        sym_chunk1 = 2 * edge_chunk - sym_chunk1
+        del edge_chunk
+
+    ##########################################################################
+    # Appended region
+
+    # Slice off a reverse indexed chunk from far edge to pad `arr` after
+    start = arr.shape[axis] - pad_amt[1]
+    end = arr.shape[axis]
+    sym_slice = tuple(slice(None) if i != axis else slice(start, end)
+                      for (i, x) in enumerate(arr.shape))
+    sym_chunk2 = arr[sym_slice][rev_idx]
+
+    if pad_amt[1] == 1:
+        sym_chunk2 = sym_chunk2.reshape(pad_singleton)
+
+    if 'odd' in method:
+        edge_slice2 = tuple(slice(None) if i != axis else -1
+                            for (i, x) in enumerate(arr.shape))
+        edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+        sym_chunk2 = 2 * edge_chunk - sym_chunk2
+        del edge_chunk
+
+    # Concatenate `arr` with both chunks, extending along `axis`
+    return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)
+
+
+def _pad_wrap(arr, pad_amt, axis=-1):
+    """
+    Pad `axis` of `arr` via wrapping.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array of arbitrary shape.
+    pad_amt : tuple of ints, length 2
+        Padding to (prepend, append) along `axis`.
+    axis : int
+        Axis along which to pad `arr`.
+
+    Returns
+    -------
+    padarr : ndarray
+        Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
+        values appended along `axis`. Both regions are padded with wrapped
+        values from the opposite end of `axis`.
+
+    Notes
+    -----
+    This method of padding is also known as 'tile' or 'tiling'.
+
+    The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
+    single function, otherwise the indexing tricks for non-integer multiples
+    of the original shape would break the repetition in the final iteration.
+ + """ + # Implicit booleanness to test for zero (or None) in any scalar type + if pad_amt[0] == 0 and pad_amt[1] == 0: + return arr + + ########################################################################## + # Prepended region + + # Slice off a reverse indexed chunk from near edge to pad `arr` before + start = arr.shape[axis] - pad_amt[0] + end = arr.shape[axis] + wrap_slice = tuple(slice(None) if i != axis else slice(start, end) + for (i, x) in enumerate(arr.shape)) + wrap_chunk1 = arr[wrap_slice] + + # Shape to restore singleton dimension after slicing + pad_singleton = tuple(x if i != axis else 1 + for (i, x) in enumerate(arr.shape)) + if pad_amt[0] == 1: + wrap_chunk1 = wrap_chunk1.reshape(pad_singleton) + + ########################################################################## + # Appended region + + # Slice off a reverse indexed chunk from far edge to pad `arr` after + wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1]) + for (i, x) in enumerate(arr.shape)) + wrap_chunk2 = arr[wrap_slice] + + if pad_amt[1] == 1: + wrap_chunk2 = wrap_chunk2.reshape(pad_singleton) + + # Concatenate `arr` with both chunks, extending along `axis` + return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis) + + +def _normalize_shape(narray, shape): + """ + Private function which does some checks and normalizes the possibly + much simpler representations of 'pad_width', 'stat_length', + 'constant_values', 'end_values'. + + Parameters + ---------- + narray : ndarray + Input ndarray + shape : {sequence, int}, optional + The width of padding (pad_width) or the number of elements on the + edge of the narray used for statistics (stat_length). + ((before_1, after_1), ... (before_N, after_N)) unique number of + elements for each axis where `N` is rank of `narray`. + ((before, after),) yields same before and after constants for each + axis. + (constant,) or int is a shortcut for before = after = constant for + all axes. + + Returns + ------- + _normalize_shape : tuple of tuples + int => ((int, int), (int, int), ...) + [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...) + ((int1, int2), (int3, int4), ...) => no change + [[int1, int2], ] => ((int1, int2), (int1, int2), ...) + ((int1, int2), ) => ((int1, int2), (int1, int2), ...) + [[int , ], ] => ((int, int), (int, int), ...) + ((int , ), ) => ((int, int), (int, int), ...) + + """ + normshp = None + shapelen = len(np.shape(narray)) + if (isinstance(shape, int)) or shape is None: + normshp = ((shape, shape), ) * shapelen + elif (isinstance(shape, (tuple, list)) + and isinstance(shape[0], (tuple, list)) + and len(shape) == shapelen): + normshp = shape + for i in normshp: + if len(i) != 2: + fmt = "Unable to create correctly shaped tuple from %s" + raise ValueError(fmt % (normshp,)) + elif (isinstance(shape, (tuple, list)) + and isinstance(shape[0], (int, float, long)) + and len(shape) == 1): + normshp = ((shape[0], shape[0]), ) * shapelen + elif (isinstance(shape, (tuple, list)) + and isinstance(shape[0], (int, float, long)) + and len(shape) == 2): + normshp = (shape, ) * shapelen + if normshp is None: + fmt = "Unable to create correctly shaped tuple from %s" + raise ValueError(fmt % (shape,)) + return normshp + + +def _validate_lengths(narray, number_elements): + """ + Private function which does some checks and reformats pad_width and + stat_length using _normalize_shape. 
+ + Parameters + ---------- + narray : ndarray + Input ndarray + number_elements : {sequence, int}, optional + The width of padding (pad_width) or the number of elements on the edge + of the narray used for statistics (stat_length). + ((before_1, after_1), ... (before_N, after_N)) unique number of + elements for each axis. + ((before, after),) yields same before and after constants for each + axis. + (constant,) or int is a shortcut for before = after = constant for all + axes. + + Returns + ------- + _validate_lengths : tuple of tuples + int => ((int, int), (int, int), ...) + [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...) + ((int1, int2), (int3, int4), ...) => no change + [[int1, int2], ] => ((int1, int2), (int1, int2), ...) + ((int1, int2), ) => ((int1, int2), (int1, int2), ...) + [[int , ], ] => ((int, int), (int, int), ...) + ((int , ), ) => ((int, int), (int, int), ...) + + """ + normshp = _normalize_shape(narray, number_elements) + for i in normshp: + chk = [1 if x is None else x for x in i] + chk = [1 if x >= 0 else -1 for x in chk] + if (chk[0] < 0) or (chk[1] < 0): + fmt = "%s cannot contain negative values." + raise ValueError(fmt % (number_elements,)) + return normshp + + +############################################################################### +# Public functions + + +def pad(array, pad_width, mode=None, **kwargs): + """ + Pads an array. + + Parameters + ---------- + array : array_like of rank N + Input array + pad_width : {sequence, int} + Number of values padded to the edges of each axis. + ((before_1, after_1), ... (before_N, after_N)) unique pad widths + for each axis. + ((before, after),) yields same before and after pad for each axis. + (pad,) or int is a shortcut for before = after = pad width for all + axes. + mode : {str, function} + One of the following string values or a user supplied function. + + 'constant' + Pads with a constant value. + 'edge' + Pads with the edge values of array. + 'linear_ramp' + Pads with the linear ramp between end_value and the + array edge value. + 'maximum' + Pads with the maximum value of all or part of the + vector along each axis. + 'mean' + Pads with the mean value of all or part of the + vector along each axis. + 'median' + Pads with the median value of all or part of the + vector along each axis. + 'minimum' + Pads with the minimum value of all or part of the + vector along each axis. + 'reflect' + Pads with the reflection of the vector mirrored on + the first and last values of the vector along each + axis. + 'symmetric' + Pads with the reflection of the vector mirrored + along the edge of the array. + 'wrap' + Pads with the wrap of the vector along the axis. + The first values are used to pad the end and the + end values are used to pad the beginning. + + Padding function, see Notes. + stat_length : {sequence, int}, optional + Used in 'maximum', 'mean', 'median', and 'minimum'. Number of + values at edge of each axis used to calculate the statistic value. + + ((before_1, after_1), ... (before_N, after_N)) unique statistic + lengths for each axis. + + ((before, after),) yields same before and after statistic lengths + for each axis. + + (stat_length,) or int is a shortcut for before = after = statistic + length for all axes. + + Default is ``None``, to use the entire axis. + constant_values : {sequence, int}, optional + Used in 'constant'. The values to set the padded values for each + axis. + + ((before_1, after_1), ... (before_N, after_N)) unique pad constants + for each axis. 
+
+        ((before, after),) yields same before and after constants for each
+        axis.
+
+        (constant,) or int is a shortcut for before = after = constant for
+        all axes.
+
+        Default is 0.
+    end_values : {sequence, int}, optional
+        Used in 'linear_ramp'. The values used for the ending value of the
+        linear_ramp and that will form the edge of the padded array.
+
+        ((before_1, after_1), ... (before_N, after_N)) unique end values
+        for each axis.
+
+        ((before, after),) yields same before and after end values for each
+        axis.
+
+        (constant,) or int is a shortcut for before = after = end value for
+        all axes.
+
+        Default is 0.
+    reflect_type : str {'even', 'odd'}, optional
+        Used in 'reflect', and 'symmetric'. The 'even' style is the
+        default with an unaltered reflection around the edge value. For
+        the 'odd' style, the extended part of the array is created by
+        subtracting the reflected values from two times the edge value.
+
+    Returns
+    -------
+    pad : ndarray
+        Padded array of rank equal to `array` with shape increased
+        according to `pad_width`.
+
+    Notes
+    -----
+    .. versionadded:: 1.7.0
+
+    For an array with rank greater than 1, some of the padding of later
+    axes is calculated from padding of previous axes. This is easiest to
+    think about with a rank 2 array where the corners of the padded array
+    are calculated by using padded values from the first axis.
+
+    The padding function, if used, should return a rank 1 array equal in
+    length to the vector argument with padded values replaced. It has the
+    following signature::
+
+        padding_func(vector, iaxis_pad_width, iaxis, **kwargs)
+
+    where
+
+        vector : ndarray
+            A rank 1 array already padded with zeros. Padded values are
+            vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
+        iaxis_pad_width : tuple
+            A 2-tuple of ints, iaxis_pad_width[0] represents the number of
+            values padded at the beginning of vector where
+            iaxis_pad_width[1] represents the number of values padded at
+            the end of vector.
+        iaxis : int
+            The axis currently being calculated.
+        kwargs : misc
+            Any keyword arguments the function requires.
+
+    Examples
+    --------
+    >>> a = [1, 2, 3, 4, 5]
+    >>> np.lib.pad(a, (2,3), 'constant', constant_values=(4,6))
+    array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])
+
+    >>> np.lib.pad(a, (2,3), 'edge')
+    array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])
+
+    >>> np.lib.pad(a, (2,3), 'linear_ramp', end_values=(5,-4))
+    array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])
+
+    >>> np.lib.pad(a, (2,), 'maximum')
+    array([5, 5, 1, 2, 3, 4, 5, 5, 5])
+
+    >>> np.lib.pad(a, (2,), 'mean')
+    array([3, 3, 1, 2, 3, 4, 5, 3, 3])
+
+    >>> np.lib.pad(a, (2,), 'median')
+    array([3, 3, 1, 2, 3, 4, 5, 3, 3])
+
+    >>> a = [[1,2], [3,4]]
+    >>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum')
+    array([[1, 1, 1, 2, 1, 1, 1],
+           [1, 1, 1, 2, 1, 1, 1],
+           [1, 1, 1, 2, 1, 1, 1],
+           [1, 1, 1, 2, 1, 1, 1],
+           [3, 3, 3, 4, 3, 3, 3],
+           [1, 1, 1, 2, 1, 1, 1],
+           [1, 1, 1, 2, 1, 1, 1]])
+
+    >>> a = [1, 2, 3, 4, 5]
+    >>> np.lib.pad(a, (2,3), 'reflect')
+    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
+
+    >>> np.lib.pad(a, (2,3), 'reflect', reflect_type='odd')
+    array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])
+
+    >>> np.lib.pad(a, (2,3), 'symmetric')
+    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
+
+    >>> np.lib.pad(a, (2,3), 'symmetric', reflect_type='odd')
+    array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
+
+    >>> np.lib.pad(a, (2,3), 'wrap')
+    array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
+
+    >>> def padwithtens(vector, pad_width, iaxis, kwargs):
+    ...     vector[:pad_width[0]] = 10
+    ...     vector[-pad_width[1]:] = 10
+    ...     return vector
+
+    >>> a = np.arange(6)
+    >>> a = a.reshape((2,3))
+
+    >>> np.lib.pad(a, 2, padwithtens)
+    array([[10, 10, 10, 10, 10, 10, 10],
+           [10, 10, 10, 10, 10, 10, 10],
+           [10, 10,  0,  1,  2, 10, 10],
+           [10, 10,  3,  4,  5, 10, 10],
+           [10, 10, 10, 10, 10, 10, 10],
+           [10, 10, 10, 10, 10, 10, 10]])
+    """
+
+    narray = np.array(array)
+    pad_width = _validate_lengths(narray, pad_width)
+
+    allowedkwargs = {
+        'constant': ['constant_values'],
+        'edge': [],
+        'linear_ramp': ['end_values'],
+        'maximum': ['stat_length'],
+        'mean': ['stat_length'],
+        'median': ['stat_length'],
+        'minimum': ['stat_length'],
+        'reflect': ['reflect_type'],
+        'symmetric': ['reflect_type'],
+        'wrap': [],
+        }
+
+    kwdefaults = {
+        'stat_length': None,
+        'constant_values': 0,
+        'end_values': 0,
+        'reflect_type': 'even',
+        }
+
+    if isinstance(mode, str):
+        # Make sure the supplied kwargs are appropriate for the mode
+        for key in kwargs:
+            if key not in allowedkwargs[mode]:
+                raise ValueError('%s keyword not in allowed keywords %s' %
+                                 (key, allowedkwargs[mode]))
+
+        # Set kwarg defaults
+        for kw in allowedkwargs[mode]:
+            kwargs.setdefault(kw, kwdefaults[kw])
+
+        # Need to only normalize particular keywords.
+        for i in kwargs:
+            if i == 'stat_length':
+                kwargs[i] = _validate_lengths(narray, kwargs[i])
+            if i in ['end_values', 'constant_values']:
+                kwargs[i] = _normalize_shape(narray, kwargs[i])
+    elif mode is None:
+        raise ValueError('Keyword "mode" must be a function or one of %s.' %
+                         (list(allowedkwargs.keys()),))
+    else:
+        # Drop back to old, slower np.apply_along_axis mode for user-supplied
+        # vector function
+        function = mode
+
+        # Create a new padded array
+        rank = list(range(len(narray.shape)))
+        total_dim_increase = [np.sum(pad_width[i]) for i in rank]
+        offset_slices = [slice(pad_width[i][0],
+                               pad_width[i][0] + narray.shape[i])
+                         for i in rank]
+        new_shape = np.array(narray.shape) + total_dim_increase
+        newmat = np.zeros(new_shape, narray.dtype)
+
+        # Insert the original array into the padded array
+        newmat[offset_slices] = narray
+
+        # This is the core of pad ...
+        for iaxis in rank:
+            np.apply_along_axis(function,
+                                iaxis,
+                                newmat,
+                                pad_width[iaxis],
+                                iaxis,
+                                kwargs)
+        return newmat
+
+    # If we get here, use new padding method
+    newmat = narray.copy()
+
+    # API preserved, but completely new algorithm which pads by building the
+    # entire block to pad before/after `arr` in one step, for each axis.
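+    # Illustrative sketch (an editorial example, not part of the original
+    # module): on this path a call such as
+    #     np.lib.pad(np.arange(4), (1, 2), 'constant', constant_values=(9, 7))
+    # builds the before/after blocks for each axis with a single concatenate
+    # each, returning array([9, 0, 1, 2, 3, 7, 7]).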
+ if mode == 'constant': + for axis, ((pad_before, pad_after), (before_val, after_val)) \ + in enumerate(zip(pad_width, kwargs['constant_values'])): + newmat = _prepend_const(newmat, pad_before, before_val, axis) + newmat = _append_const(newmat, pad_after, after_val, axis) + + elif mode == 'edge': + for axis, (pad_before, pad_after) in enumerate(pad_width): + newmat = _prepend_edge(newmat, pad_before, axis) + newmat = _append_edge(newmat, pad_after, axis) + + elif mode == 'linear_ramp': + for axis, ((pad_before, pad_after), (before_val, after_val)) \ + in enumerate(zip(pad_width, kwargs['end_values'])): + newmat = _prepend_ramp(newmat, pad_before, before_val, axis) + newmat = _append_ramp(newmat, pad_after, after_val, axis) + + elif mode == 'maximum': + for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ + in enumerate(zip(pad_width, kwargs['stat_length'])): + newmat = _prepend_max(newmat, pad_before, chunk_before, axis) + newmat = _append_max(newmat, pad_after, chunk_after, axis) + + elif mode == 'mean': + for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ + in enumerate(zip(pad_width, kwargs['stat_length'])): + newmat = _prepend_mean(newmat, pad_before, chunk_before, axis) + newmat = _append_mean(newmat, pad_after, chunk_after, axis) + + elif mode == 'median': + for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ + in enumerate(zip(pad_width, kwargs['stat_length'])): + newmat = _prepend_med(newmat, pad_before, chunk_before, axis) + newmat = _append_med(newmat, pad_after, chunk_after, axis) + + elif mode == 'minimum': + for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ + in enumerate(zip(pad_width, kwargs['stat_length'])): + newmat = _prepend_min(newmat, pad_before, chunk_before, axis) + newmat = _append_min(newmat, pad_after, chunk_after, axis) + + elif mode == 'reflect': + for axis, (pad_before, pad_after) in enumerate(pad_width): + # Recursive padding along any axis where `pad_amt` is too large + # for indexing tricks. We can only safely pad the original axis + # length, to keep the period of the reflections consistent. + if ((pad_before > 0) or + (pad_after > 0)) and newmat.shape[axis] == 1: + # Extending singleton dimension for 'reflect' is legacy + # behavior; it really should raise an error. + newmat = _prepend_edge(newmat, pad_before, axis) + newmat = _append_edge(newmat, pad_after, axis) + continue + + method = kwargs['reflect_type'] + safe_pad = newmat.shape[axis] - 1 + while ((pad_before > safe_pad) or (pad_after > safe_pad)): + offset = 0 + pad_iter_b = min(safe_pad, + safe_pad * (pad_before // safe_pad)) + pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) + newmat = _pad_ref(newmat, (pad_iter_b, + pad_iter_a), method, axis) + pad_before -= pad_iter_b + pad_after -= pad_iter_a + if pad_iter_b > 0: + offset += 1 + if pad_iter_a > 0: + offset += 1 + safe_pad += pad_iter_b + pad_iter_a + newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis) + + elif mode == 'symmetric': + for axis, (pad_before, pad_after) in enumerate(pad_width): + # Recursive padding along any axis where `pad_amt` is too large + # for indexing tricks. We can only safely pad the original axis + # length, to keep the period of the reflections consistent. 
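+            # Illustrative sketch (an editorial example, not part of the
+            # original module): padding past the axis length repeats whole
+            # periods, e.g.
+            #     np.lib.pad([1, 2, 3], (0, 7), 'symmetric')
+            #     -> array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3])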
+
+            method = kwargs['reflect_type']
+            safe_pad = newmat.shape[axis]
+            while ((pad_before > safe_pad) or
+                    (pad_after > safe_pad)):
+                pad_iter_b = min(safe_pad,
+                                 safe_pad * (pad_before // safe_pad))
+                pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
+                newmat = _pad_sym(newmat, (pad_iter_b,
+                                           pad_iter_a), method, axis)
+                pad_before -= pad_iter_b
+                pad_after -= pad_iter_a
+                safe_pad += pad_iter_b + pad_iter_a
+            newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)
+
+    elif mode == 'wrap':
+        for axis, (pad_before, pad_after) in enumerate(pad_width):
+            # Recursive padding along any axis where `pad_amt` is too large
+            # for indexing tricks. We can only safely pad the original axis
+            # length, to keep the period of the wrapping consistent.
+            safe_pad = newmat.shape[axis]
+            while ((pad_before > safe_pad) or
+                    (pad_after > safe_pad)):
+                pad_iter_b = min(safe_pad,
+                                 safe_pad * (pad_before // safe_pad))
+                pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
+                newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)
+
+                pad_before -= pad_iter_b
+                pad_after -= pad_iter_a
+                safe_pad += pad_iter_b + pad_iter_a
+            newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)
+
+    return newmat
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraysetops.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraysetops.py
new file mode 100644
index 0000000000000..2d98c35d2c9d8
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraysetops.py
@@ -0,0 +1,463 @@
+"""
+Set operations for 1D numeric arrays based on sorting.
+
+:Contains:
+  ediff1d,
+  unique,
+  intersect1d,
+  setxor1d,
+  in1d,
+  union1d,
+  setdiff1d
+
+:Notes:
+
+For floating point arrays, inaccurate results may appear due to the usual
+round-off and floating point comparison issues.
+
+Speed could be gained in some operations by an implementation of sort()
+that directly provides the permutation vectors, thus avoiding calls to
+argsort().
+
+To do: Optionally return indices analogously to unique for all functions.
+
+:Author: Robert Cimrman
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+
+
+__all__ = [
+    'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
+    'in1d'
+    ]
+
+
+def ediff1d(ary, to_end=None, to_begin=None):
+    """
+    The differences between consecutive elements of an array.
+
+    Parameters
+    ----------
+    ary : array_like
+        If necessary, will be flattened before the differences are taken.
+    to_end : array_like, optional
+        Number(s) to append at the end of the returned differences.
+    to_begin : array_like, optional
+        Number(s) to prepend at the beginning of the returned differences.
+
+    Returns
+    -------
+    ediff1d : ndarray
+        The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
+
+    See Also
+    --------
+    diff, gradient
+
+    Notes
+    -----
+    When applied to masked arrays, this function drops the mask information
+    if the `to_begin` and/or `to_end` parameters are used.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 4, 7, 0])
+    >>> np.ediff1d(x)
+    array([ 1,  2,  3, -7])
+
+    >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+    array([-99,   1,   2,   3,  -7,  88,  99])
+
+    The returned array is always 1D.
+
+    >>> y = [[1, 2, 4], [1, 6, 24]]
+    >>> np.ediff1d(y)
+    array([ 1,  2, -3,  5, 18])
+
+    """
+    ary = np.asanyarray(ary).flat
+    ed = ary[1:] - ary[:-1]
+    arrays = [ed]
+    if to_begin is not None:
+        arrays.insert(0, to_begin)
+    if to_end is not None:
+        arrays.append(to_end)
+
+    if len(arrays) != 1:
+        # We'll save ourselves a copy of a potentially large array in
+        # the common case where neither to_begin nor to_end was given.
+        ed = np.hstack(arrays)
+
+    return ed
+
+def unique(ar, return_index=False, return_inverse=False, return_counts=False):
+    """
+    Find the unique elements of an array.
+
+    Returns the sorted unique elements of an array. There are three optional
+    outputs in addition to the unique elements: the indices of the input
+    array that give the unique values, the indices of the unique array that
+    reconstruct the input array, and the counts for each unique value.
+
+    Parameters
+    ----------
+    ar : array_like
+        Input array. This will be flattened if it is not already 1-D.
+    return_index : bool, optional
+        If True, also return the indices of `ar` that result in the unique
+        array.
+    return_inverse : bool, optional
+        If True, also return the indices of the unique array that can be used
+        to reconstruct `ar`.
+    return_counts : bool, optional
+        .. versionadded:: 1.9.0
+        If True, also return the number of times each unique value comes up
+        in `ar`.
+
+    Returns
+    -------
+    unique : ndarray
+        The sorted unique values.
+    unique_indices : ndarray, optional
+        The indices of the first occurrences of the unique values in the
+        (flattened) original array. Only provided if `return_index` is True.
+    unique_inverse : ndarray, optional
+        The indices to reconstruct the (flattened) original array from the
+        unique array. Only provided if `return_inverse` is True.
+    unique_counts : ndarray, optional
+        .. versionadded:: 1.9.0
+        The number of times each of the unique values comes up in the
+        original array. Only provided if `return_counts` is True.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> np.unique([1, 1, 2, 2, 3, 3])
+    array([1, 2, 3])
+    >>> a = np.array([[1, 1], [2, 3]])
+    >>> np.unique(a)
+    array([1, 2, 3])
+
+    Return the indices of the original array that give the unique values:
+
+    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+    >>> u, indices = np.unique(a, return_index=True)
+    >>> u
+    array(['a', 'b', 'c'],
+          dtype='|S1')
+    >>> indices
+    array([0, 1, 3])
+    >>> a[indices]
+    array(['a', 'b', 'c'],
+          dtype='|S1')
+
+    Reconstruct the input array from the unique values:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> u, indices = np.unique(a, return_inverse=True)
+    >>> u
+    array([1, 2, 3, 4, 6])
+    >>> indices
+    array([0, 1, 4, 3, 1, 2, 1])
+    >>> u[indices]
+    array([1, 2, 6, 4, 2, 3, 2])
+
+    """
+    ar = np.asanyarray(ar).flatten()
+
+    optional_indices = return_index or return_inverse
+    optional_returns = optional_indices or return_counts
+
+    if ar.size == 0:
+        if not optional_returns:
+            ret = ar
+        else:
+            ret = (ar,)
+            if return_index:
+                ret += (np.empty(0, np.intp),)
+            if return_inverse:
+                ret += (np.empty(0, np.intp),)
+            if return_counts:
+                ret += (np.empty(0, np.intp),)
+        return ret
+
+    if optional_indices:
+        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
+        aux = ar[perm]
+    else:
+        ar.sort()
+        aux = ar
+    flag = np.concatenate(([True], aux[1:] != aux[:-1]))
+
+    if not optional_returns:
+        ret = aux[flag]
+    else:
+        ret = (aux[flag],)
+        if return_index:
+            ret += (perm[flag],)
+        if return_inverse:
+            iflag = np.cumsum(flag) - 1
+            iperm = perm.argsort()
+            ret += (np.take(iflag, iperm),)
+        if return_counts:
+            idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
+            ret += (np.diff(idx),)
+    return ret
+
+def intersect1d(ar1, ar2, assume_unique=False):
+    """
+    Find the intersection of two arrays.
+
+    Return the sorted, unique values that are in both of the input arrays.
+
+    Parameters
+    ----------
+    ar1, ar2 : array_like
+        Input arrays.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation. Default is False.
+
+    Returns
+    -------
+    intersect1d : ndarray
+        Sorted 1D array of common and unique elements.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
+    array([1, 3])
+
+    """
+    if not assume_unique:
+        # Might be faster than unique( intersect1d( ar1, ar2 ) )?
+        ar1 = unique(ar1)
+        ar2 = unique(ar2)
+    aux = np.concatenate((ar1, ar2))
+    aux.sort()
+    return aux[:-1][aux[1:] == aux[:-1]]
+
+def setxor1d(ar1, ar2, assume_unique=False):
+    """
+    Find the set exclusive-or of two arrays.
+
+    Return the sorted, unique values that are in only one (not both) of the
+    input arrays.
+
+    Parameters
+    ----------
+    ar1, ar2 : array_like
+        Input arrays.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation. Default is False.
+
+    Returns
+    -------
+    setxor1d : ndarray
+        Sorted 1D array of unique values that are in only one of the input
+        arrays.
+ + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4]) + >>> b = np.array([2, 3, 5, 7, 5]) + >>> np.setxor1d(a,b) + array([1, 4, 5, 7]) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + + aux.sort() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = flag[1:] == flag[:-1] + return aux[flag2] + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of a 1-D array is also present in a second array. + + Returns a boolean array the same length as `ar1` that is True + where an element of `ar1` is in `ar2` and False otherwise. + + Parameters + ---------- + ar1 : (M,) array_like + Input array. + ar2 : array_like + The values against which to test each value of `ar1`. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted (that is, + False where an element of `ar1` is in `ar2` and True otherwise). + Default is False. ``np.in1d(a, b, invert=True)`` is equivalent + to (but is faster than) ``np.invert(in1d(a, b))``. + + .. versionadded:: 1.8.0 + + Returns + ------- + in1d : (M,) ndarray, bool + The values `ar1[in1d]` are in `ar2`. + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Notes + ----- + `in1d` can be considered as an element-wise function version of the + python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly + equivalent to ``np.array([item in b for item in a])``. + + .. versionadded:: 1.4.0 + + Examples + -------- + >>> test = np.array([0, 1, 2, 5, 0]) + >>> states = [0, 2] + >>> mask = np.in1d(test, states) + >>> mask + array([ True, False, True, False, True], dtype=bool) + >>> test[mask] + array([0, 2, 0]) + >>> mask = np.in1d(test, states, invert=True) + >>> mask + array([False, True, False, True, False], dtype=bool) + >>> test[mask] + array([1, 5]) + """ + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # This code is significantly faster when the condition is satisfied. + if len(ar2) < 10 * len(ar1) ** 0.145: + if invert: + mask = np.ones(len(ar1), dtype=np.bool) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=np.bool) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = np.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + +def union1d(ar1, ar2): + """ + Find the union of two arrays. + + Return the unique, sorted array of values that are in either of the two + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. 
They are flattened if they are not already 1D.
+
+    Returns
+    -------
+    union1d : ndarray
+        Unique, sorted union of the input arrays.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
+    array([-2, -1,  0,  1,  2])
+
+    """
+    return unique(np.concatenate((ar1, ar2)))
+
+def setdiff1d(ar1, ar2, assume_unique=False):
+    """
+    Find the set difference of two arrays.
+
+    Return the sorted, unique values in `ar1` that are not in `ar2`.
+
+    Parameters
+    ----------
+    ar1 : array_like
+        Input array.
+    ar2 : array_like
+        Input comparison array.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation. Default is False.
+
+    Returns
+    -------
+    setdiff1d : ndarray
+        Sorted 1D array of values in `ar1` that are not in `ar2`.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> a = np.array([1, 2, 3, 2, 4, 1])
+    >>> b = np.array([3, 4, 5, 6])
+    >>> np.setdiff1d(a, b)
+    array([1, 2])
+
+    """
+    if not assume_unique:
+        ar1 = unique(ar1)
+        ar2 = unique(ar2)
+    aux = in1d(ar1, ar2, assume_unique=True)
+    if aux.size == 0:
+        return aux
+    else:
+        return np.asarray(ar1)[aux == 0]
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arrayterator.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arrayterator.py
new file mode 100644
index 0000000000000..d9839feeb89bd
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arrayterator.py
@@ -0,0 +1,226 @@
+"""
+A buffered iterator for big arrays.
+
+This module solves the problem of iterating over a big file-based array
+without having to read it into memory. The `Arrayterator` class wraps
+an array object, and when iterated it will return sub-arrays with at most
+a user-specified number of elements.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from operator import mul
+from functools import reduce
+
+from numpy.compat import long
+
+__all__ = ['Arrayterator']
+
+
+class Arrayterator(object):
+    """
+    Buffered iterator for big arrays.
+
+    `Arrayterator` creates a buffered iterator for reading big arrays in small
+    contiguous blocks. The class is useful for objects stored in the
+    file system. It allows iteration over the object *without* reading
+    everything in memory; instead, small blocks are read and iterated over.
+
+    `Arrayterator` can be used with any object that supports multidimensional
+    slices. This includes NumPy arrays, but also variables from
+    Scientific.IO.NetCDF or pynetcdf for example.
+
+    Parameters
+    ----------
+    var : array_like
+        The object to iterate over.
+    buf_size : int, optional
+        The buffer size. If `buf_size` is supplied, the maximum amount of
+        data that will be read into memory is `buf_size` elements.
+        Default is None, which will read as many elements as possible
+        into memory.
+
+    Attributes
+    ----------
+    var
+    buf_size
+    start
+    stop
+    step
+    shape
+    flat
+
+    See Also
+    --------
+    ndenumerate : Multidimensional array iterator.
+    flatiter : Flat array iterator.
+    memmap : Create a memory-map to an array stored in a binary file on disk.
+
+    Notes
+    -----
+    The algorithm works by first finding a "running dimension", along which
+    the blocks will be extracted. Given an array of dimensions
+    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
+    first dimension will be used. If, on the other hand,
+    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
+    Blocks are extracted along this dimension, and when the last block is
+    returned the process continues from the next dimension, until all
+    elements have been read.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+    >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
+    >>> a_itor.shape
+    (3, 4, 5, 6)
+
+    Now we can iterate over ``a_itor``, and it will return arrays of size
+    two. Since `buf_size` was smaller than any dimension, the first
+    dimension will be iterated over first:
+
+    >>> for subarr in a_itor:
+    ...     if not subarr.all():
+    ...         print(subarr, subarr.shape)
+    ...
+    [[[[0 1]]]] (1, 1, 1, 2)
+
+    """
+
+    def __init__(self, var, buf_size=None):
+        self.var = var
+        self.buf_size = buf_size
+
+        self.start = [0 for dim in var.shape]
+        self.stop = [dim for dim in var.shape]
+        self.step = [1 for dim in var.shape]
+
+    def __getattr__(self, attr):
+        return getattr(self.var, attr)
+
+    def __getitem__(self, index):
+        """
+        Return a new arrayterator.
+
+        """
+        # Fix index, handling ellipsis and incomplete slices.
+        if not isinstance(index, tuple):
+            index = (index,)
+        fixed = []
+        length, dims = len(index), len(self.shape)
+        for slice_ in index:
+            if slice_ is Ellipsis:
+                fixed.extend([slice(None)] * (dims-length+1))
+                length = len(fixed)
+            elif isinstance(slice_, (int, long)):
+                fixed.append(slice(slice_, slice_+1, 1))
+            else:
+                fixed.append(slice_)
+        index = tuple(fixed)
+        if len(index) < dims:
+            index += (slice(None),) * (dims-len(index))
+
+        # Return a new arrayterator object.
+        out = self.__class__(self.var, self.buf_size)
+        for i, (start, stop, step, slice_) in enumerate(
+                zip(self.start, self.stop, self.step, index)):
+            out.start[i] = start + (slice_.start or 0)
+            out.step[i] = step * (slice_.step or 1)
+            out.stop[i] = start + (slice_.stop or stop-start)
+            out.stop[i] = min(stop, out.stop[i])
+        return out
+
+    def __array__(self):
+        """
+        Return corresponding data.
+
+        """
+        slice_ = tuple(slice(*t) for t in zip(
+            self.start, self.stop, self.step))
+        return self.var[slice_]
+
+    @property
+    def flat(self):
+        """
+        A 1-D flat iterator for Arrayterator objects.
+
+        This iterator returns elements of the array to be iterated over in
+        `Arrayterator` one by one. It is similar to `flatiter`.
+
+        See Also
+        --------
+        `Arrayterator`
+        flatiter
+
+        Examples
+        --------
+        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
+
+        >>> for subarr in a_itor.flat:
+        ...     if not subarr:
+        ...         print(subarr, type(subarr))
+        ...
+        0 <class 'numpy.int64'>
+
+        """
+        for block in self:
+            for value in block.flat:
+                yield value
+
+    @property
+    def shape(self):
+        """
+        The shape of the array to be iterated over.
+
+        For an example, see `Arrayterator`.
+
+        """
+        return tuple(((stop-start-1)//step+1) for start, stop, step in
+                     zip(self.start, self.stop, self.step))
+
+    def __iter__(self):
+        # Skip arrays with degenerate dimensions
+        if [dim for dim in self.shape if dim <= 0]:
+            raise StopIteration
+
+        start = self.start[:]
+        stop = self.stop[:]
+        step = self.step[:]
+        ndims = len(self.var.shape)
+
+        while True:
+            count = self.buf_size or reduce(mul, self.shape)
+
+            # iterate over each dimension, looking for the
+            # running dimension (i.e., the dimension along which
+            # the blocks will be built)
+            rundim = 0
+            for i in range(ndims-1, -1, -1):
+                # if count is zero we ran out of elements to read
+                # along higher dimensions, so we read only a single position
+                if count == 0:
+                    stop[i] = start[i]+1
+                elif count <= self.shape[i]:
+                    # limit along this dimension
+                    stop[i] = start[i] + count*step[i]
+                    rundim = i
+                else:
+                    # read everything along this dimension
+                    stop[i] = self.stop[i]
+                stop[i] = min(self.stop[i], stop[i])
+                count = count//self.shape[i]
+
+            # yield a block
+            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+            yield self.var[slice_]
+
+            # Update start position, taking care of overflow to
+            # other dimensions
+            start[rundim] = stop[rundim]  # start where we stopped
+            for i in range(ndims-1, 0, -1):
+                if start[i] >= self.stop[i]:
+                    start[i] = self.start[i]
+                    start[i-1] += self.step[i-1]
+            if start[0] >= self.stop[0]:
+                raise StopIteration
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/financial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/financial.py
new file mode 100644
index 0000000000000..5b96e5b8e979d
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/financial.py
@@ -0,0 +1,737 @@
+"""Some simple financial calculations
+
+patterned after spreadsheet computations.
+
+There is some complexity in each function
+so that the functions behave like ufuncs with
+broadcasting and can be called with scalars
+or arrays (or other sequences).
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+
+__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
+           'irr', 'npv', 'mirr']
+
+_when_to_num = {'end':0, 'begin':1,
+                'e':0, 'b':1,
+                0:0, 1:1,
+                'beginning':1,
+                'start':1,
+                'finish':0}
+
+def _convert_when(when):
+    #Test to see if when has already been converted to ndarray
+    #This will happen if one function calls another, for example ppmt
+    if isinstance(when, np.ndarray):
+        return when
+    try:
+        return _when_to_num[when]
+    except (KeyError, TypeError):
+        return [_when_to_num[x] for x in when]
+
+
+def fv(rate, nper, pmt, pv, when='end'):
+    """
+    Compute the future value.
+
+    Given:
+     * a present value, `pv`
+     * an interest `rate` compounded once per period, of which
+       there are
+     * `nper` total
+     * a (fixed) payment, `pmt`, paid either
+     * at the beginning (`when` = {'begin', 1}) or the end
+       (`when` = {'end', 0}) of each period
+
+    Return:
+       the value at the end of the `nper` periods
+
+    Parameters
+    ----------
+    rate : scalar or array_like of shape(M, )
+        Rate of interest as decimal (not per cent) per period
+    nper : scalar or array_like of shape(M, )
+        Number of compounding periods
+    pmt : scalar or array_like of shape(M, )
+        Payment
+    pv : scalar or array_like of shape(M, )
+        Present value
+    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
+        When payments are due ('begin' (1) or 'end' (0)).
+        Defaults to {'end', 0}.
+
+    Returns
+    -------
+    out : ndarray
+        Future values. If all input is scalar, returns a scalar float.
If + any input is array_like, returns future values for each input element. + If multiple inputs are array_like, they all must have the same shape. + + Notes + ----- + The future value is computed by solving the equation:: + + fv + + pv*(1+rate)**nper + + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 + + or, when ``rate == 0``:: + + fv + pv + pmt * nper == 0 + + References + ---------- + .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). Billerica, MA, USA. [ODT Document]. + Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + + Examples + -------- + What is the future value after 10 years of saving $100 now, with + an additional monthly savings of $100. Assume the interest rate is + 5% (annually) compounded monthly? + + >>> np.fv(0.05/12, 10*12, -100, -100) + 15692.928894335748 + + By convention, the negative sign represents cash flow out (i.e. money not + available today). Thus, saving $100 a month at 5% annual interest leads + to $15,692.93 available to spend in 10 years. + + If any input is array_like, returns an array of equal shape. Let's + compare different interest rates from the example above. + + >>> a = np.array((0.05, 0.06, 0.07))/12 + >>> np.fv(a, 10*12, -100, -100) + array([ 15692.92889434, 16569.87435405, 17509.44688102]) + + """ + when = _convert_when(when) + (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when]) + temp = (1+rate)**nper + miter = np.broadcast(rate, nper, pmt, pv, when) + zer = np.zeros(miter.shape) + fact = np.where(rate == zer, nper + zer, + (1 + rate*when)*(temp - 1)/rate + zer) + return -(pv*temp + pmt*fact) + +def pmt(rate, nper, pv, fv=0, when='end'): + """ + Compute the payment against loan principal plus interest. + + Given: + * a present value, `pv` (e.g., an amount borrowed) + * a future value, `fv` (e.g., 0) + * an interest `rate` compounded once per period, of which + there are + * `nper` total + * and (optional) specification of whether payment is made + at the beginning (`when` = {'begin', 1}) or the end + (`when` = {'end', 0}) of each period + + Return: + the (fixed) periodic payment. + + Parameters + ---------- + rate : array_like + Rate of interest (per period) + nper : array_like + Number of compounding periods + pv : array_like + Present value + fv : array_like (optional) + Future value (default = 0) + when : {{'begin', 1}, {'end', 0}}, {string, int} + When payments are due ('begin' (1) or 'end' (0)) + + Returns + ------- + out : ndarray + Payment against loan plus interest. If all input is scalar, returns a + scalar float. If any input is array_like, returns payment for each + input element. If multiple inputs are array_like, they all must have + the same shape. + + Notes + ----- + The payment is computed by solving the equation:: + + fv + + pv*(1 + rate)**nper + + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 + + or, when ``rate == 0``:: + + fv + pv + pmt * nper == 0 + + for ``pmt``. + + Note that computing a monthly mortgage payment is only + one use for this function. For example, pmt returns the + periodic deposit one must make to achieve a specified + future balance given an initial deposit, a fixed, + periodically compounded interest rate, and the total + number of periods. 
+
+    References
+    ----------
+    .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+       Open Document Format for Office Applications (OpenDocument)v1.2,
+       Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
+       Pre-Draft 12. Organization for the Advancement of Structured Information
+       Standards (OASIS). Billerica, MA, USA. [ODT Document].
+       Available:
+       http://www.oasis-open.org/committees/documents.php
+       ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt
+
+    Examples
+    --------
+    What is the monthly payment needed to pay off a $200,000 loan in 15
+    years at an annual interest rate of 7.5%?
+
+    >>> np.pmt(0.075/12, 12*15, 200000)
+    -1854.0247200054619
+
+    In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained
+    today, a monthly payment of $1,854.02 would be required. Note that this
+    example illustrates usage of `fv` having a default value of 0.
+
+    """
+    when = _convert_when(when)
+    (rate, nper, pv, fv, when) = map(np.asarray, [rate, nper, pv, fv, when])
+    temp = (1+rate)**nper
+    miter = np.broadcast(rate, nper, pv, fv, when)
+    zer = np.zeros(miter.shape)
+    fact = np.where(rate == zer, nper + zer,
+                    (1 + rate*when)*(temp - 1)/rate + zer)
+    return -(fv + pv*temp) / fact
+
+def nper(rate, pmt, pv, fv=0, when='end'):
+    """
+    Compute the number of periodic payments.
+
+    Parameters
+    ----------
+    rate : array_like
+        Rate of interest (per period)
+    pmt : array_like
+        Payment
+    pv : array_like
+        Present value
+    fv : array_like, optional
+        Future value
+    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
+        When payments are due ('begin' (1) or 'end' (0))
+
+    Notes
+    -----
+    The number of periods ``nper`` is computed by solving the equation::
+
+     fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0
+
+    but if ``rate = 0`` then::
+
+     fv + pv + pmt*nper = 0
+
+    Examples
+    --------
+    If you only had $150/month to pay towards the loan, how long would it take
+    to pay-off a loan of $8,000 at 7% annual interest?
+
+    >>> print(round(np.nper(0.07/12, -150, 8000), 5))
+    64.07335
+
+    So, over 64 months would be required to pay off the loan.
+
+    The same analysis could be done with several different interest rates
+    and/or payments and/or total amounts to produce an entire table.
+
+    >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12,
+    ...                    -150   : -99   : 50,
+    ...                    8000   : 9001  : 1000]))
+    array([[[  64.07334877,   74.06368256],
+            [ 108.07548412,  127.99022654]],
+           [[  66.12443902,   76.87897353],
+            [ 114.70165583,  137.90124779]]])
+
+    """
+    when = _convert_when(when)
+    (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when])
+
+    use_zero_rate = False
+    with np.errstate(divide="raise"):
+        try:
+            z = pmt*(1.0+rate*when)/rate
+        except FloatingPointError:
+            use_zero_rate = True
+
+    if use_zero_rate:
+        return (-fv + pv) / (pmt + 0.0)
+    else:
+        A = -(fv + pv)/(pmt+0.0)
+        B = np.log((-fv+z) / (pv+z))/np.log(1.0+rate)
+        miter = np.broadcast(rate, pmt, pv, fv, when)
+        zer = np.zeros(miter.shape)
+        return np.where(rate == zer, A + zer, B + zer) + 0.0
+
+def ipmt(rate, per, nper, pv, fv=0.0, when='end'):
+    """
+    Compute the interest portion of a payment.
+
+    Parameters
+    ----------
+    rate : scalar or array_like of shape(M, )
+        Rate of interest as decimal (not per cent) per period
+    per : scalar or array_like of shape(M, )
+        Interest paid against the loan changes during the life of the loan.
+        The `per` is the payment period to calculate the interest amount.
+
+    nper : scalar or array_like of shape(M, )
+        Number of compounding periods
+    pv : scalar or array_like of shape(M, )
+        Present value
+    fv : scalar or array_like of shape(M, ), optional
+        Future value
+    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
+        When payments are due ('begin' (1) or 'end' (0)).
+        Defaults to {'end', 0}.
+
+    Returns
+    -------
+    out : ndarray
+        Interest portion of payment. If all input is scalar, returns a scalar
+        float. If any input is array_like, returns interest payment for each
+        input element. If multiple inputs are array_like, they all must have
+        the same shape.
+
+    See Also
+    --------
+    ppmt, pmt, pv
+
+    Notes
+    -----
+    The total payment is made up of payment against principal plus interest.
+
+    ``pmt = ppmt + ipmt``
+
+    Examples
+    --------
+    What is the amortization schedule for a 1 year loan of $2500 at
+    8.24% interest per year compounded monthly?
+
+    >>> principal = 2500.00
+
+    The 'per' variable represents the periods of the loan. Remember that
+    financial equations start the period count at 1!
+
+    >>> per = np.arange(1*12) + 1
+    >>> ipmt = np.ipmt(0.0824/12, per, 1*12, principal)
+    >>> ppmt = np.ppmt(0.0824/12, per, 1*12, principal)
+
+    Each element of the sum of the 'ipmt' and 'ppmt' arrays should equal
+    'pmt'.
+
+    >>> pmt = np.pmt(0.0824/12, 1*12, principal)
+    >>> np.allclose(ipmt + ppmt, pmt)
+    True
+
+    >>> fmt = '{0:2d} {1:8.2f} {2:8.2f} {3:8.2f}'
+    >>> for payment in per:
+    ...     index = payment - 1
+    ...     principal = principal + ppmt[index]
+    ...     print(fmt.format(payment, ppmt[index], ipmt[index], principal))
+     1  -200.58   -17.17  2299.42
+     2  -201.96   -15.79  2097.46
+     3  -203.35   -14.40  1894.11
+     4  -204.74   -13.01  1689.37
+     5  -206.15   -11.60  1483.22
+     6  -207.56   -10.18  1275.66
+     7  -208.99    -8.76  1066.67
+     8  -210.42    -7.32   856.25
+     9  -211.87    -5.88   644.38
+    10  -213.32    -4.42   431.05
+    11  -214.79    -2.96   216.26
+    12  -216.26    -1.49    -0.00
+
+    >>> interestpd = np.sum(ipmt)
+    >>> np.round(interestpd, 2)
+    -112.98
+
+    """
+    when = _convert_when(when)
+    rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper,
+                                                        pv, fv, when)
+    total_pmt = pmt(rate, nper, pv, fv, when)
+    ipmt = _rbl(rate, per, total_pmt, pv, when)*rate
+    try:
+        ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt)
+        ipmt = np.where(np.logical_and(when == 1, per == 1), 0.0, ipmt)
+    except IndexError:
+        pass
+    return ipmt
+
+def _rbl(rate, per, pmt, pv, when):
+    """
+    This function is here to simply have a different name for the 'fv'
+    function to not interfere with the 'fv' keyword argument within the 'ipmt'
+    function. It is the 'remaining balance on loan' which might be useful as
+    its own function, but is easily calculated with the 'fv' function.
+    """
+    return fv(rate, (per - 1), pmt, pv, when)
+
+def ppmt(rate, per, nper, pv, fv=0.0, when='end'):
+    """
+    Compute the payment against loan principal.
+
+    Parameters
+    ----------
+    rate : array_like
+        Rate of interest (per period)
+    per : array_like, int
+        Amount paid against the loan changes. The `per` is the period of
+        interest.
+    nper : array_like
+        Number of compounding periods
+    pv : array_like
+        Present value
+    fv : array_like, optional
+        Future value
+    when : {{'begin', 1}, {'end', 0}}, {string, int}
+        When payments are due ('begin' (1) or 'end' (0))
+
+    See Also
+    --------
+    pmt, pv, ipmt
+
+    """
+    total = pmt(rate, nper, pv, fv, when)
+    return total - ipmt(rate, per, nper, pv, fv, when)
+
+def pv(rate, nper, pmt, fv=0.0, when='end'):
+    """
+    Compute the present value.
+ + Given: + * a future value, `fv` + * an interest `rate` compounded once per period, of which + there are + * `nper` total + * a (fixed) payment, `pmt`, paid either + * at the beginning (`when` = {'begin', 1}) or the end + (`when` = {'end', 0}) of each period + + Return: + the value now + + Parameters + ---------- + rate : array_like + Rate of interest (per period) + nper : array_like + Number of compounding periods + pmt : array_like + Payment + fv : array_like, optional + Future value + when : {{'begin', 1}, {'end', 0}}, {string, int}, optional + When payments are due ('begin' (1) or 'end' (0)) + + Returns + ------- + out : ndarray, float + Present value of a series of payments or investments. + + Notes + ----- + The present value is computed by solving the equation:: + + fv + + pv*(1 + rate)**nper + + pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0 + + or, when ``rate = 0``:: + + fv + pv + pmt * nper = 0 + + for `pv`, which is then returned. + + References + ---------- + .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). + Open Document Format for Office Applications (OpenDocument)v1.2, + Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, + Pre-Draft 12. Organization for the Advancement of Structured Information + Standards (OASIS). Billerica, MA, USA. [ODT Document]. + Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + + Examples + -------- + What is the present value (e.g., the initial investment) + of an investment that needs to total $15692.93 + after 10 years of saving $100 every month? Assume the + interest rate is 5% (annually) compounded monthly. + + >>> np.pv(0.05/12, 10*12, -100, 15692.93) + -100.00067131625819 + + By convention, the negative sign represents cash flow out + (i.e., money not available today). Thus, to end up with + $15,692.93 in 10 years saving $100 a month at 5% annual + interest, one's initial deposit should also be $100. + + If any input is array_like, ``pv`` returns an array of equal shape. + Let's compare different interest rates in the example above: + + >>> a = np.array((0.05, 0.04, 0.03))/12 + >>> np.pv(a, 10*12, -100, 15692.93) + array([ -100.00067132, -649.26771385, -1273.78633713]) + + So, to end up with the same $15692.93 under the same $100 per month + "savings plan," for annual interest rates of 4% and 3%, one would + need initial investments of $649.27 and $1273.79, respectively. + + """ + when = _convert_when(when) + (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when]) + temp = (1+rate)**nper + miter = np.broadcast(rate, nper, pmt, fv, when) + zer = np.zeros(miter.shape) + fact = np.where(rate == zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer) + return -(fv + pmt*fact)/temp + +# Computed with Sage +# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - +# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + +# p*((r + 1)^n - 1)*w/r) + +def _g_div_gp(r, n, p, x, y, w): + t1 = (r+1)**n + t2 = (r+1)**(n-1) + return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) / + (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + + p*(t1 - 1)*w/r)) + +# Use Newton's iteration until the change is less than 1e-6 +# for all values or a maximum of 100 iterations is reached. +# Newton's rule is +# r_{n+1} = r_{n} - g(r_n)/g'(r_n) +# where +# g(r) is the formula +# g'(r) is the derivative with respect to r. 
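+#
+# As a minimal sketch (not part of upstream NumPy), the helper below shows
+# one concrete run of that iteration for a single hypothetical loan: 60
+# monthly payments of -200 against a present value of 10000, with fv=0 and
+# payments due at the end of each period (when=0).  The figures are
+# illustrative assumptions only.
+def _rate_newton_demo(guess=0.10, tol=1e-6, maxiter=100):
+    rn = guess
+    for _ in range(maxiter):
+        rnp1 = rn - _g_div_gp(rn, 60.0, -200.0, 10000.0, 0.0, 0.0)
+        if abs(rnp1 - rn) < tol:
+            return rnp1  # converged per-period interest rate
+        rn = rnp1
+    return float('nan')  # no convergence within maxiter iterations
+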
+def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100): + """ + Compute the rate of interest per period. + + Parameters + ---------- + nper : array_like + Number of compounding periods + pmt : array_like + Payment + pv : array_like + Present value + fv : array_like + Future value + when : {{'begin', 1}, {'end', 0}}, {string, int}, optional + When payments are due ('begin' (1) or 'end' (0)) + guess : float, optional + Starting guess for solving the rate of interest + tol : float, optional + Required tolerance for the solution + maxiter : int, optional + Maximum iterations in finding the solution + + Notes + ----- + The rate of interest is computed by iteratively solving the + (non-linear) equation:: + + fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 + + for ``rate``. + + References + ---------- + Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document + Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated + Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. + Organization for the Advancement of Structured Information Standards + (OASIS). Billerica, MA, USA. [ODT Document]. Available: + http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula + OpenDocument-formula-20090508.odt + + """ + when = _convert_when(when) + (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when]) + rn = guess + iter = 0 + close = False + while (iter < maxiter) and not close: + rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when) + diff = abs(rnp1-rn) + close = np.all(diff < tol) + iter += 1 + rn = rnp1 + if not close: + # Return nan's in array of the same shape as rn + return np.nan + rn + else: + return rn + +def irr(values): + """ + Return the Internal Rate of Return (IRR). + + This is the "average" periodically compounded rate of return + that gives a net present value of 0.0; for a more complete explanation, + see Notes below. + + Parameters + ---------- + values : array_like, shape(N,) + Input cash flows per time period. By convention, net "deposits" + are negative and net "withdrawals" are positive. Thus, for + example, at least the first element of `values`, which represents + the initial investment, will typically be negative. + + Returns + ------- + out : float + Internal Rate of Return for periodic input values. + + Notes + ----- + The IRR is perhaps best understood through an example (illustrated + using np.irr in the Examples section below). Suppose one invests 100 + units and then makes the following withdrawals at regular (fixed) + intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100 + unit investment yields 173 units; however, due to the combination of + compounding and the periodic withdrawals, the "average" rate of return + is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution + (for :math:`r`) of the equation: + + .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2} + + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0 + + In general, for `values` :math:`= [v_0, v_1, ... v_M]`, + irr is the solution of the equation: [G]_ + + .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0 + + References + ---------- + .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., + Addison-Wesley, 2003, pg. 348. 
+ + Examples + -------- + >>> round(irr([-100, 39, 59, 55, 20]), 5) + 0.28095 + >>> round(irr([-100, 0, 0, 74]), 5) + -0.0955 + >>> round(irr([-100, 100, 0, -7]), 5) + -0.0833 + >>> round(irr([-100, 100, 0, 7]), 5) + 0.06206 + >>> round(irr([-5, 10.5, 1, -8, 1]), 5) + 0.0886 + + (Compare with the Example given for numpy.lib.financial.npv) + + """ + res = np.roots(values[::-1]) + mask = (res.imag == 0) & (res.real > 0) + if res.size == 0: + return np.nan + res = res[mask].real + # NPV(rate) = 0 can have more than one solution so we return + # only the solution closest to zero. + rate = 1.0/res - 1 + rate = rate.item(np.argmin(np.abs(rate))) + return rate + +def npv(rate, values): + """ + Returns the NPV (Net Present Value) of a cash flow series. + + Parameters + ---------- + rate : scalar + The discount rate. + values : array_like, shape(M, ) + The values of the time series of cash flows. The (fixed) time + interval between cash flow "events" must be the same as that for + which `rate` is given (i.e., if `rate` is per year, then precisely + a year is understood to elapse between each cash flow event). By + convention, investments or "deposits" are negative, income or + "withdrawals" are positive; `values` must begin with the initial + investment, thus `values[0]` will typically be negative. + + Returns + ------- + out : float + The NPV of the input cash flow series `values` at the discount + `rate`. + + Notes + ----- + Returns the result of: [G]_ + + .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}} + + References + ---------- + .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., + Addison-Wesley, 2003, pg. 346. + + Examples + -------- + >>> np.npv(0.281,[-100, 39, 59, 55, 20]) + -0.0084785916384548798 + + (Compare with the Example given for numpy.lib.financial.irr) + + """ + values = np.asarray(values) + return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0) + +def mirr(values, finance_rate, reinvest_rate): + """ + Modified internal rate of return. + + Parameters + ---------- + values : array_like + Cash flows (must contain at least one positive and one negative + value) or nan is returned. The first value is considered a sunk + cost at time zero. + finance_rate : scalar + Interest rate paid on the cash flows + reinvest_rate : scalar + Interest rate received on the cash flows upon reinvestment + + Returns + ------- + out : float + Modified internal rate of return + + """ + values = np.asarray(values, dtype=np.double) + n = values.size + pos = values > 0 + neg = values < 0 + if not (pos.any() and neg.any()): + return np.nan + numer = np.abs(npv(reinvest_rate, values*pos)) + denom = np.abs(npv(finance_rate, values*neg)) + return (numer/denom)**(1.0/(n - 1))*(1 + reinvest_rate) - 1 diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/format.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/format.py new file mode 100644 index 0000000000000..98743b6ad48f4 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/format.py @@ -0,0 +1,730 @@ +""" +Define a simple format for saving numpy arrays to disk with the full +information about them. + +The ``.npy`` format is the standard binary file format in NumPy for +persisting a *single* arbitrary NumPy array on disk. The format stores all +of the shape and dtype information necessary to reconstruct the array +correctly even on another machine with a different architecture. +The format is designed to be as simple as possible while achieving +its limited goals. 
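+
+As an illustration (a sketch, not a normative part of the format), an array
+can be round-tripped through this module's own `write_array` and
+`read_array` helpers::
+
+    >>> import io
+    >>> import numpy as np
+    >>> from numpy.lib import format
+    >>> buf = io.BytesIO()
+    >>> format.write_array(buf, np.arange(3))
+    >>> _ = buf.seek(0)
+    >>> format.read_array(buf)
+    array([0, 1, 2])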
+ +The ``.npz`` format is the standard format for persisting *multiple* NumPy +arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` +files, one for each array. + +Capabilities +------------ + +- Can represent all NumPy arrays including nested record arrays and + object arrays. + +- Represents the data in its native binary form. + +- Supports Fortran-contiguous arrays directly. + +- Stores all of the necessary information to reconstruct the array + including shape and dtype on a machine of a different + architecture. Both little-endian and big-endian arrays are + supported, and a file with little-endian numbers will yield + a little-endian array on any machine reading the file. The + types are described in terms of their actual sizes. For example, + if a machine with a 64-bit C "long int" writes out an array with + "long ints", a reading machine with 32-bit C "long ints" will yield + an array with 64-bit integers. + +- Is straightforward to reverse engineer. Datasets often live longer than + the programs that created them. A competent developer should be + able to create a solution in his preferred programming language to + read most ``.npy`` files that he has been given without much + documentation. + +- Allows memory-mapping of the data. See `open_memmep`. + +- Can be read from a filelike stream object instead of an actual file. + +- Stores object arrays, i.e. arrays containing elements that are arbitrary + Python objects. Files with object arrays are not to be mmapable, but + can be read and written to disk. + +Limitations +----------- + +- Arbitrary subclasses of numpy.ndarray are not completely preserved. + Subclasses will be accepted for writing, but only the array data will + be written out. A regular numpy.ndarray object will be created + upon reading the file. + +.. warning:: + + Due to limitations in the interpretation of structured dtypes, dtypes + with fields with empty names will have the names replaced by 'f0', 'f1', + etc. Such arrays will not round-trip through the format entirely + accurately. The data is intact; only the field names will differ. We are + working on a fix for this. This fix will not require a change in the + file format. The arrays with such structures can still be saved and + restored, and the correct dtype may be restored by using the + ``loadedarray.view(correct_dtype)`` method. + +File extensions +--------------- + +We recommend using the ``.npy`` and ``.npz`` extensions for files saved +in this format. This is by no means a requirement; applications may wish +to use these file formats but use an extension specific to the +application. In the absence of an obvious alternative, however, +we suggest using ``.npy`` and ``.npz``. + +Version numbering +----------------- + +The version numbering of these formats is independent of NumPy version +numbering. If the format is upgraded, the code in `numpy.io` will still +be able to read and write Version 1.0 files. + +Format Version 1.0 +------------------ + +The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. + +The next 1 byte is an unsigned byte: the major version number of the file +format, e.g. ``\\x01``. + +The next 1 byte is an unsigned byte: the minor version number of the file +format, e.g. ``\\x00``. Note: the version of the file format is not tied +to the version of the numpy package. + +The next 2 bytes form a little-endian unsigned short int: the length of +the header data HEADER_LEN. + +The next HEADER_LEN bytes form the header data describing the array's +format. 
It is an ASCII string which contains a Python literal expression +of a dictionary. It is terminated by a newline (``\\n``) and padded with +spaces (``\\x20``) to make the total length of +``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment +purposes. + +The dictionary contains three keys: + + "descr" : dtype.descr + An object that can be passed as an argument to the `numpy.dtype` + constructor to create the array's dtype. + "fortran_order" : bool + Whether the array data is Fortran-contiguous or not. Since + Fortran-contiguous arrays are a common form of non-C-contiguity, + we allow them to be written directly to disk for efficiency. + "shape" : tuple of int + The shape of the array. + +For repeatability and readability, the dictionary keys are sorted in +alphabetic order. This is for convenience only. A writer SHOULD implement +this if possible. A reader MUST NOT depend on this. + +Following the header comes the array data. If the dtype contains Python +objects (i.e. ``dtype.hasobject is True``), then the data is a Python +pickle of the array. Otherwise the data is the contiguous (either C- +or Fortran-, depending on ``fortran_order``) bytes of the array. +Consumers can figure out the number of bytes by multiplying the number +of elements given by the shape (noting that ``shape=()`` means there is +1 element) by ``dtype.itemsize``. + +Notes +----- +The ``.npy`` format, including reasons for creating it and a comparison of +alternatives, is described fully in the "npy-format" NEP. + +""" +from __future__ import division, absolute_import, print_function + +import numpy +import sys +import io +import warnings +from numpy.lib.utils import safe_eval +from numpy.compat import asbytes, isfileobj, long, basestring + +if sys.version_info[0] >= 3: + import pickle +else: + import cPickle as pickle + +MAGIC_PREFIX = asbytes('\x93NUMPY') +MAGIC_LEN = len(MAGIC_PREFIX) + 2 +BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes + +# difference between version 1.0 and 2.0 is a 4 byte (I) header length +# instead of 2 bytes (H) allowing storage of large structured arrays + +def _check_version(version): + if version not in [(1, 0), (2, 0), None]: + msg = "we only support format version (1,0) and (2, 0), not %s" + raise ValueError(msg % (version,)) + +def magic(major, minor): + """ Return the magic string for the given file format version. + + Parameters + ---------- + major : int in [0, 255] + minor : int in [0, 255] + + Returns + ------- + magic : str + + Raises + ------ + ValueError if the version cannot be formatted. + """ + if major < 0 or major > 255: + raise ValueError("major version must be 0 <= major < 256") + if minor < 0 or minor > 255: + raise ValueError("minor version must be 0 <= minor < 256") + if sys.version_info[0] < 3: + return MAGIC_PREFIX + chr(major) + chr(minor) + else: + return MAGIC_PREFIX + bytes([major, minor]) + +def read_magic(fp): + """ Read the magic string to get the version of the file format. + + Parameters + ---------- + fp : filelike object + + Returns + ------- + major : int + minor : int + """ + magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") + if magic_str[:-2] != MAGIC_PREFIX: + msg = "the magic string is not correct; expected %r, got %r" + raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) + if sys.version_info[0] < 3: + major, minor = map(ord, magic_str[-2:]) + else: + major, minor = magic_str[-2:] + return major, minor + +def dtype_to_descr(dtype): + """ + Get a serializable descriptor from the dtype. 
+
+    The .descr attribute of a dtype object cannot be round-tripped through
+    the dtype() constructor. Simple types, like dtype('float32'), have
+    a descr which looks like a record array with one field with '' as
+    a name. The dtype() constructor interprets this as a request to give
+    a default name.  Instead, we construct a descriptor that can be passed to
+    dtype().
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype of the array that will be written to disk.
+
+    Returns
+    -------
+    descr : object
+        An object that can be passed to `numpy.dtype()` in order to
+        replicate the input dtype.
+
+    """
+    if dtype.names is not None:
+        # This is a record array. The .descr is fine.  XXX: parts of the
+        # record array with an empty name, like padding bytes, still get
+        # fiddled with. This needs to be fixed in the C implementation of
+        # dtype().
+        return dtype.descr
+    else:
+        return dtype.str
+
+def header_data_from_array_1_0(array):
+    """ Get the dictionary of header metadata from a numpy.ndarray.
+
+    Parameters
+    ----------
+    array : numpy.ndarray
+
+    Returns
+    -------
+    d : dict
+        This has the appropriate entries for writing its string representation
+        to the header of the file.
+    """
+    d = {}
+    d['shape'] = array.shape
+    if array.flags.c_contiguous:
+        d['fortran_order'] = False
+    elif array.flags.f_contiguous:
+        d['fortran_order'] = True
+    else:
+        # Totally non-contiguous data. We will have to make it C-contiguous
+        # before writing. Note that we need to test for C_CONTIGUOUS first
+        # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
+        d['fortran_order'] = False
+
+    d['descr'] = dtype_to_descr(array.dtype)
+    return d
+
+def _write_array_header(fp, d, version=None):
+    """ Write the header for an array and return the version used
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string representation
+        to the header of the file.
+    version: tuple or None
+        None means use the oldest version that works; an explicit version
+        will raise a ValueError if the format does not allow saving this
+        data.  Default: None
+    Returns
+    -------
+    version : tuple of int
+        the file version which needs to be used to store the data
+    """
+    import struct
+    header = ["{"]
+    for key, value in sorted(d.items()):
+        # Need to use repr here, since we eval these when reading
+        header.append("'%s': %s, " % (key, repr(value)))
+    header.append("}")
+    header = "".join(header)
+    # Pad the header with spaces and a final newline such that the magic
+    # string, the header-length short and the header are aligned on a
+    # 16-byte boundary.  Hopefully, some system, possibly memory-mapping,
+    # can take advantage of our premature optimization.
+    current_header_len = MAGIC_LEN + 2 + len(header) + 1  # 1 for the newline
+    topad = 16 - (current_header_len % 16)
+    header = asbytes(header + ' '*topad + '\n')
+
+    if len(header) >= (256*256) and version == (1, 0):
+        raise ValueError("header does not fit inside %s bytes required by the"
+                         " 1.0 format" % (256*256))
+    if len(header) < (256*256):
+        header_len_str = struct.pack('<H', len(header))
+        version = (1, 0)
+    elif len(header) < (2**32):
+        header_len_str = struct.pack('<I', len(header))
+        version = (2, 0)
+    else:
+        raise ValueError("header too large for known versions")
+
+    fp.write(magic(*version))
+    fp.write(header_len_str)
+    fp.write(header)
+    return version
+
+def _read_array_header(fp, version):
+    """
+    Read the header of the file and return the shape, order and dtype it
+    describes.  This leaves the file object located just after the header.
+    """
+    # Read an unsigned, little-endian short (1.0) or int (2.0) which gives
+    # the length of the header.
+    import struct
+    if version == (1, 0):
+        hlength_str = _read_bytes(fp, 2, "array header length")
+        header_length = struct.unpack('<H', hlength_str)[0]
+    elif version == (2, 0):
+        hlength_str = _read_bytes(fp, 4, "array header length")
+        header_length = struct.unpack('<I', hlength_str)[0]
+    else:
+        raise ValueError("Invalid version %r" % (version,))
+    header = _read_bytes(fp, header_length, "array header")
+
+    # The header is a pretty-printed string representation of a literal
+    # Python dictionary with trailing newlines padded to a 16-byte
+    # boundary.  The keys are strings.
+    #   "shape" : tuple of int
+    #   "fortran_order" : bool
+    #   "descr" : dtype.descr
+    try:
+        d = safe_eval(header)
+    except SyntaxError as e:
+        msg = "Cannot parse header: %r\nException: %r"
+        raise ValueError(msg % (header, e))
+    if not isinstance(d, dict):
+        msg = "Header is not a dictionary: %r"
+        raise ValueError(msg % d)
+    keys = sorted(d.keys())
+    if keys != ['descr', 'fortran_order', 'shape']:
+        msg = "Header does not contain the correct keys: %r"
+        raise ValueError(msg % (keys,))
+
+    # Sanity-check the values.
+    if (not isinstance(d['shape'], tuple) or
+            not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
+        msg = "shape is not valid: %r"
+        raise ValueError(msg % (d['shape'],))
+    if not isinstance(d['fortran_order'], bool):
+        msg = "fortran_order is not a valid bool: %r"
+        raise ValueError(msg % (d['fortran_order'],))
+    try:
+        dtype = numpy.dtype(d['descr'])
+    except TypeError:
+        msg = "descr is not a valid dtype descriptor: %r"
+        raise ValueError(msg % (d['descr'],))
+
+    return d['shape'], d['fortran_order'], dtype
+
+def write_array(fp, array, version=None):
+    """
+    Write an array to an NPY file, including a header.
+
+    If the array is neither C-contiguous nor Fortran-contiguous AND the
+    file_like object is not a real file object, this function will have to
+    copy data in memory.
+
+    Parameters
+    ----------
+    fp : file_like object
+        An open, writable file object, or similar object with a
+        ``.write()`` method.
+    array : ndarray
+        The array to write to disk.
+    version : (int, int) or None, optional
+        The version number of the format. None means use the oldest
+        supported version that is able to store the data.  Default: None
+
+    Raises
+    ------
+    ValueError
+        If the array cannot be persisted.
+    Various other errors
+        If the array contains Python objects as part of its dtype, the
+        process of pickling them may raise various errors if the objects
+        are not picklable.
+
+    """
+    _check_version(version)
+    used_ver = _write_array_header(fp, header_data_from_array_1_0(array),
+                                   version)
+    # this warning can be removed when 1.9 has aged enough
+    if version != (2, 0) and used_ver == (2, 0):
+        warnings.warn("Stored array in format 2.0. It can only be"
+                      "read by NumPy >= 1.9", UserWarning)
+
+    # Set buffer size to 16 MiB to hide the Python loop overhead.
+    buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+    if array.dtype.hasobject:
+        # We contain Python objects so we cannot write out the data
+        # directly.  Instead, we will pickle it out with version 2 of the
+        # pickle protocol.
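+        # (For the same reason, files containing object arrays cannot be
+        # memory-mapped; open_memmap below rejects any dtype with Python
+        # objects in it.)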
+ pickle.dump(array, fp, protocol=2) + elif array.flags.f_contiguous and not array.flags.c_contiguous: + if isfileobj(fp): + array.T.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='F'): + fp.write(chunk.tobytes('C')) + else: + if isfileobj(fp): + array.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='C'): + fp.write(chunk.tobytes('C')) + + +def read_array(fp): + """ + Read an array from an NPY file. + + Parameters + ---------- + fp : file_like object + If this is not a real file object, then this may take extra memory + and time. + + Returns + ------- + array : ndarray + The array from the data on disk. + + Raises + ------ + ValueError + If the data is invalid. + + """ + version = read_magic(fp) + _check_version(version) + shape, fortran_order, dtype = _read_array_header(fp, version) + if len(shape) == 0: + count = 1 + else: + count = numpy.multiply.reduce(shape) + + # Now read the actual data. + if dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. + array = pickle.load(fp) + else: + if isfileobj(fp): + # We can use the fast fromfile() function. + array = numpy.fromfile(fp, dtype=dtype, count=count) + else: + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + array = numpy.empty(count, dtype=dtype) + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if fortran_order: + array.shape = shape[::-1] + array = array.transpose() + else: + array.shape = shape + + return array + + +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. + dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. 
None means use the oldest + supported version that is able to store the data. Default: None + + Returns + ------- + marray : memmap + The memory-mapped array. + + Raises + ------ + ValueError + If the data or the mode is invalid. + IOError + If the file is not found or cannot be opened correctly. + + See Also + -------- + memmap + + """ + if not isinstance(filename, basestring): + raise ValueError("Filename must be a string. Memmap cannot use" + " existing file handles.") + + if 'w' in mode: + # We are creating the file, not reading it. + # Check if we ought to create the file. + _check_version(version) + # Ensure that the given dtype is an authentic dtype object rather + # than just something that can be interpreted as a dtype object. + dtype = numpy.dtype(dtype) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + d = dict( + descr=dtype_to_descr(dtype), + fortran_order=fortran_order, + shape=shape, + ) + # If we got here, then it should be safe to create the file. + fp = open(filename, mode+'b') + try: + used_ver = _write_array_header(fp, d, version) + # this warning can be removed when 1.9 has aged enough + if version != (2, 0) and used_ver == (2, 0): + warnings.warn("Stored array in format 2.0. It can only be" + "read by NumPy >= 1.9", UserWarning) + offset = fp.tell() + finally: + fp.close() + else: + # Read the header of the file first. + fp = open(filename, 'rb') + try: + version = read_magic(fp) + _check_version(version) + + shape, fortran_order, dtype = _read_array_header(fp, version) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + offset = fp.tell() + finally: + fp.close() + + if fortran_order: + order = 'F' + else: + order = 'C' + + # We need to change a write-only mode to a read-write mode since we've + # already written data to the file. + if mode == 'w+': + mode = 'r+' + + marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, + mode=mode, offset=offset) + + return marray + + +def _read_bytes(fp, size, error_template="ran out of data"): + """ + Read from file-like object until size bytes are read. + Raises ValueError if not EOF is encountered before size bytes are read. + Non-blocking objects only supported if they derive from io objects. + + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. + """ + data = bytes() + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. 
note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except io.BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/function_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/function_base.py new file mode 100644 index 0000000000000..47be2f12fc973 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/function_base.py @@ -0,0 +1,3872 @@ +from __future__ import division, absolute_import, print_function + +import warnings +import sys +import collections +import operator + +import numpy as np +import numpy.core.numeric as _nx +from numpy.core import linspace, atleast_1d, atleast_2d +from numpy.core.numeric import ( + ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, + empty_like, ndarray, around, floor, ceil, take, dot, where, intp, + integer, isscalar + ) +from numpy.core.umath import ( + pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, + mod, exp, log10 + ) +from numpy.core.fromnumeric import ( + ravel, nonzero, sort, partition, mean + ) +from numpy.core.numerictypes import typecodes, number +from numpy.lib.twodim_base import diag +from .utils import deprecate +from ._compiled_base import _insert, add_docstring +from ._compiled_base import digitize, bincount, interp as compiled_interp +from ._compiled_base import add_newdoc_ufunc +from numpy.compat import long + +# Force range to be a generator, for np.delete's usage. +if sys.version_info[0] < 3: + range = xrange + + +__all__ = [ + 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', + 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', + 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', + 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', + 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', + 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', + 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' + ] + + +def iterable(y): + """ + Check whether or not an object can be iterated over. + + Parameters + ---------- + y : object + Input object. + + Returns + ------- + b : {0, 1} + Return 1 if the object has an iterator method or is a sequence, + and 0 otherwise. + + + Examples + -------- + >>> np.iterable([1, 2, 3]) + 1 + >>> np.iterable(2) + 0 + + """ + try: + iter(y) + except: + return 0 + return 1 + + +def histogram(a, bins=10, range=None, normed=False, weights=None, + density=None): + """ + Compute the histogram of a set of data. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a sequence, + it defines the bin edges, including the rightmost edge, allowing + for non-uniform bin widths. + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. + normed : bool, optional + This keyword is deprecated in Numpy 1.6 due to confusing/buggy + behavior. It will be removed in Numpy 2.0. Use the density keyword + instead. 
+ If False, the result will contain the number of samples + in each bin. If True, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that this latter behavior is + known to be buggy with unequal bin widths; use `density` instead. + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in `a` + only contributes its associated weight towards the bin count + (instead of 1). If `normed` is True, the weights are normalized, + so that the integral of the density over the range remains 1 + density : bool, optional + If False, the result will contain the number of samples + in each bin. If True, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that the sum of the + histogram values will not be equal to 1 unless bins of unity + width are chosen; it is not a probability *mass* function. + Overrides the `normed` keyword if given. + + Returns + ------- + hist : array + The values of the histogram. See `normed` and `weights` for a + description of the possible semantics. + bin_edges : array of dtype float + Return the bin edges ``(length(hist)+1)``. + + + See Also + -------- + histogramdd, bincount, searchsorted, digitize + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is:: + + [1, 2, 3, 4] + + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the + second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* + 4. + + Examples + -------- + >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) + (array([0, 2, 1]), array([0, 1, 2, 3])) + >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) + (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) + (array([1, 4, 1]), array([0, 1, 2, 3])) + + >>> a = np.arange(5) + >>> hist, bin_edges = np.histogram(a, density=True) + >>> hist + array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) + >>> hist.sum() + 2.4999999999999996 + >>> np.sum(hist*np.diff(bin_edges)) + 1.0 + + """ + + a = asarray(a) + if weights is not None: + weights = asarray(weights) + if np.any(weights.shape != a.shape): + raise ValueError( + 'weights should have the same shape as a.') + weights = weights.ravel() + a = a.ravel() + + if (range is not None): + mn, mx = range + if (mn > mx): + raise AttributeError( + 'max must be larger than min in range parameter.') + + if not iterable(bins): + if np.isscalar(bins) and bins < 1: + raise ValueError( + '`bins` should be a positive integer.') + if range is None: + if a.size == 0: + # handle empty arrays. Can't determine range, so use 0-1. + range = (0, 1) + else: + range = (a.min(), a.max()) + mn, mx = [mi + 0.0 for mi in range] + if mn == mx: + mn -= 0.5 + mx += 0.5 + bins = linspace(mn, mx, bins + 1, endpoint=True) + else: + bins = asarray(bins) + if (np.diff(bins) < 0).any(): + raise AttributeError( + 'bins must increase monotonically.') + + # Histogram is an integer or a float array depending on the weights. 
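+    # (With weights, counts accumulate in the weights' dtype so fractional
+    # weights are not truncated; without weights the counts stay integral.)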
+ if weights is None: + ntype = int + else: + ntype = weights.dtype + n = np.zeros(bins.shape, ntype) + + block = 65536 + if weights is None: + for i in arange(0, len(a), block): + sa = sort(a[i:i+block]) + n += np.r_[sa.searchsorted(bins[:-1], 'left'), + sa.searchsorted(bins[-1], 'right')] + else: + zero = array(0, dtype=ntype) + for i in arange(0, len(a), block): + tmp_a = a[i:i+block] + tmp_w = weights[i:i+block] + sorting_index = np.argsort(tmp_a) + sa = tmp_a[sorting_index] + sw = tmp_w[sorting_index] + cw = np.concatenate(([zero, ], sw.cumsum())) + bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), + sa.searchsorted(bins[-1], 'right')] + n += cw[bin_index] + + n = np.diff(n) + + if density is not None: + if density: + db = array(np.diff(bins), float) + return n/db/n.sum(), bins + else: + return n, bins + else: + # deprecated, buggy behavior. Remove for Numpy 2.0 + if normed: + db = array(np.diff(bins), float) + return n/(n*db).sum(), bins + else: + return n, bins + + +def histogramdd(sample, bins=10, range=None, normed=False, weights=None): + """ + Compute the multidimensional histogram of some data. + + Parameters + ---------- + sample : array_like + The data to be histogrammed. It must be an (N,D) array or data + that can be converted to such. The rows of the resulting array + are the coordinates of points in a D dimensional polytope. + bins : sequence or int, optional + The bin specification: + + * A sequence of arrays describing the bin edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + + range : sequence, optional + A sequence of lower and upper bin edges to be used if the edges are + not given explicitly in `bins`. Defaults to the minimum and maximum + values along each dimension. + normed : bool, optional + If False, returns the number of samples in each bin. If True, + returns the bin density ``bin_count / sample_count / bin_volume``. + weights : array_like (N,), optional + An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. + Weights are normalized to 1 if normed is True. If normed is False, + the values of the returned histogram are equal to the sum of the + weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray + The multidimensional histogram of sample x. See normed and weights + for the different possible semantics. + edges : list + A list of D arrays describing the bin edges for each dimension. + + See Also + -------- + histogram: 1-D histogram + histogram2d: 2-D histogram + + Examples + -------- + >>> r = np.random.randn(100,3) + >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) + >>> H.shape, edges[0].size, edges[1].size, edges[2].size + ((5, 8, 4), 6, 9, 5) + + """ + + try: + # Sample is an ND-array. + N, D = sample.shape + except (AttributeError, ValueError): + # Sample is a sequence of 1D arrays. + sample = atleast_2d(sample).T + N, D = sample.shape + + nbin = empty(D, int) + edges = D*[None] + dedges = D*[None] + if weights is not None: + weights = asarray(weights) + + try: + M = len(bins) + if M != D: + raise AttributeError( + 'The dimension of bins must be equal to the dimension of the ' + ' sample x.') + except TypeError: + # bins is an integer + bins = D*[bins] + + # Select range for each dimension + # Used only if number of bins is given. + if range is None: + # Handle empty input. Range can't be determined in that case, use 0-1. 
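+        # (Any non-degenerate placeholder range works: with no samples every
+        # bin count is zero, so only the edge arrays must be well formed.)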
+ if N == 0: + smin = zeros(D) + smax = ones(D) + else: + smin = atleast_1d(array(sample.min(0), float)) + smax = atleast_1d(array(sample.max(0), float)) + else: + smin = zeros(D) + smax = zeros(D) + for i in arange(D): + smin[i], smax[i] = range[i] + + # Make sure the bins have a finite width. + for i in arange(len(smin)): + if smin[i] == smax[i]: + smin[i] = smin[i] - .5 + smax[i] = smax[i] + .5 + + # avoid rounding issues for comparisons when dealing with inexact types + if np.issubdtype(sample.dtype, np.inexact): + edge_dt = sample.dtype + else: + edge_dt = float + # Create edge arrays + for i in arange(D): + if isscalar(bins[i]): + if bins[i] < 1: + raise ValueError( + "Element at index %s in `bins` should be a positive " + "integer." % i) + nbin[i] = bins[i] + 2 # +2 for outlier bins + edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) + else: + edges[i] = asarray(bins[i], edge_dt) + nbin[i] = len(edges[i]) + 1 # +1 for outlier bins + dedges[i] = diff(edges[i]) + if np.any(np.asarray(dedges[i]) <= 0): + raise ValueError( + "Found bin edge of size <= 0. Did you specify `bins` with" + "non-monotonic sequence?") + + nbin = asarray(nbin) + + # Handle empty input. + if N == 0: + return np.zeros(nbin-2), edges + + # Compute the bin number each sample falls into. + Ncount = {} + for i in arange(D): + Ncount[i] = digitize(sample[:, i], edges[i]) + + # Using digitize, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right edge to be + # counted in the last bin, and not as an outlier. + for i in arange(D): + # Rounding precision + mindiff = dedges[i].min() + if not np.isinf(mindiff): + decimal = int(-log10(mindiff)) + 6 + # Find which points are on the rightmost edge. + not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) + on_edge = (around(sample[:, i], decimal) == + around(edges[i][-1], decimal)) + # Shift these points one bin to the left. + Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 + + # Flattened histogram matrix (1D) + # Reshape is used so that overlarge arrays + # will raise an error. + hist = zeros(nbin, float).reshape(-1) + + # Compute the sample indices in the flattened histogram matrix. + ni = nbin.argsort() + xy = zeros(N, int) + for i in arange(0, D-1): + xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() + xy += Ncount[ni[-1]] + + # Compute the number of repetitions in xy and assign it to the + # flattened histmat. + if len(xy) == 0: + return zeros(nbin-2, int), edges + + flatcount = bincount(xy, weights) + a = arange(len(flatcount)) + hist[a] = flatcount + + # Shape into a proper matrix + hist = hist.reshape(sort(nbin)) + for i in arange(nbin.size): + j = ni.argsort()[i] + hist = hist.swapaxes(i, j) + ni[i], ni[j] = ni[j], ni[i] + + # Remove outliers (indices 0 and -1 for each dimension). + core = D*[slice(1, -1)] + hist = hist[core] + + # Normalize if normed is True + if normed: + s = hist.sum() + for i in arange(D): + shape = ones(D, int) + shape[i] = nbin[i] - 2 + hist = hist / dedges[i].reshape(shape) + hist /= s + + if (hist.shape != nbin - 2).any(): + raise RuntimeError( + "Internal Shape Error") + return hist, edges + + +def average(a, axis=None, weights=None, returned=False): + """ + Compute the weighted average along the specified axis. + + Parameters + ---------- + a : array_like + Array containing data to be averaged. If `a` is not an array, a + conversion is attempted. + axis : int, optional + Axis along which to average `a`. If `None`, averaging is done over + the flattened array. 
+ weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + returned : bool, optional + Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) + is returned, otherwise only the average is returned. + If `weights=None`, `sum_of_weights` is equivalent to the number of + elements over which the average is taken. + + + Returns + ------- + average, [sum_of_weights] : {array_type, double} + Return the average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `Float` + if `a` is of integer type, otherwise it is of the same type as `a`. + `sum_of_weights` is of the same type as `average`. + + Raises + ------ + ZeroDivisionError + When all weights along axis are zero. See `numpy.ma.average` for a + version robust to this type of error. + TypeError + When the length of 1D `weights` is not the same as the shape of `a` + along axis. + + See Also + -------- + mean + + ma.average : average for masked arrays -- useful if your data contains + "missing" values + + Examples + -------- + >>> data = range(1,5) + >>> data + [1, 2, 3, 4] + >>> np.average(data) + 2.5 + >>> np.average(range(1,11), weights=range(10,0,-1)) + 4.0 + + >>> data = np.arange(6).reshape((3,2)) + >>> data + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.average(data, axis=1, weights=[1./4, 3./4]) + array([ 0.75, 2.75, 4.75]) + >>> np.average(data, weights=[1./4, 3./4]) + Traceback (most recent call last): + ... + TypeError: Axis must be specified when shapes of a and weights differ. + + """ + if not isinstance(a, np.matrix): + a = np.asarray(a) + + if weights is None: + avg = a.mean(axis) + scl = avg.dtype.type(a.size/avg.size) + else: + a = a + 0.0 + wgt = np.array(weights, dtype=a.dtype, copy=0) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.ndim != 1: + raise TypeError( + "1D weights expected when shapes of a and weights differ.") + if wgt.shape[0] != a.shape[axis]: + raise ValueError( + "Length of weights not compatible with specified axis.") + + # setup wgt to broadcast along axis + wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) + + scl = wgt.sum(axis=axis) + if (scl == 0.0).any(): + raise ZeroDivisionError( + "Weights sum to zero, can't be normalized") + + avg = np.multiply(a, wgt).sum(axis)/scl + + if returned: + scl = np.multiply(avg, 0) + scl + return avg, scl + else: + return avg + + +def asarray_chkfinite(a, dtype=None, order=None): + """ + Convert the input to an array, checking for NaNs or Infs. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. Success requires no NaNs or Infs. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Defaults to 'C'. + + Returns + ------- + out : ndarray + Array interpretation of `a`. 
No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + Raises + ------ + ValueError + Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). + + See Also + -------- + asarray : Create and array. + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array. If all elements are finite + ``asarray_chkfinite`` is identical to ``asarray``. + + >>> a = [1, 2] + >>> np.asarray_chkfinite(a, dtype=float) + array([1., 2.]) + + Raises ValueError if array_like contains Nans or Infs. + + >>> a = [1, 2, np.inf] + >>> try: + ... np.asarray_chkfinite(a) + ... except ValueError: + ... print 'ValueError' + ... + ValueError + + """ + a = asarray(a, dtype=dtype, order=order) + if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): + raise ValueError( + "array must not contain infs or NaNs") + return a + + +def piecewise(x, condlist, funclist, *args, **kw): + """ + Evaluate a piecewise-defined function. + + Given a set of conditions and corresponding functions, evaluate each + function on the input data wherever its condition is true. + + Parameters + ---------- + x : ndarray + The input domain. + condlist : list of bool arrays + Each boolean array corresponds to a function in `funclist`. Wherever + `condlist[i]` is True, `funclist[i](x)` is used as the output value. + + Each boolean array in `condlist` selects a piece of `x`, + and should therefore be of the same shape as `x`. + + The length of `condlist` must correspond to that of `funclist`. + If one extra function is given, i.e. if + ``len(funclist) - len(condlist) == 1``, then that extra function + is the default value, used wherever all conditions are false. + funclist : list of callables, f(x,*args,**kw), or scalars + Each function is evaluated over `x` wherever its corresponding + condition is True. It should take an array as input and give an array + or a scalar value as output. If, instead of a callable, + a scalar is provided then a constant function (``lambda x: scalar``) is + assumed. + args : tuple, optional + Any further arguments given to `piecewise` are passed to the functions + upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then + each function is called as ``f(x, 1, 'a')``. + kw : dict, optional + Keyword arguments used in calling `piecewise` are passed to the + functions upon execution, i.e., if called + ``piecewise(..., ..., lambda=1)``, then each function is called as + ``f(x, lambda=1)``. + + Returns + ------- + out : ndarray + The output is the same shape and type as x and is found by + calling the functions in `funclist` on the appropriate portions of `x`, + as defined by the boolean arrays in `condlist`. Portions not covered + by any condition have a default value of 0. + + + See Also + -------- + choose, select, where + + Notes + ----- + This is similar to choose or select, except that functions are + evaluated on elements of `x` that satisfy the corresponding condition from + `condlist`. + + The result is:: + + |-- + |funclist[0](x[condlist[0]]) + out = |funclist[1](x[condlist[1]]) + |... 
+ |funclist[n2](x[condlist[n2]]) + |-- + + Examples + -------- + Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. + + >>> x = np.linspace(-2.5, 2.5, 6) + >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) + array([-1., -1., -1., 1., 1., 1.]) + + Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for + ``x >= 0``. + + >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) + + """ + x = asanyarray(x) + n2 = len(funclist) + if (isscalar(condlist) or not (isinstance(condlist[0], list) or + isinstance(condlist[0], ndarray))): + condlist = [condlist] + condlist = array(condlist, dtype=bool) + n = len(condlist) + # This is a hack to work around problems with NumPy's + # handling of 0-d arrays and boolean indexing with + # numpy.bool_ scalars + zerod = False + if x.ndim == 0: + x = x[None] + zerod = True + if condlist.shape[-1] != 1: + condlist = condlist.T + if n == n2 - 1: # compute the "otherwise" condition. + totlist = np.logical_or.reduce(condlist, axis=0) + condlist = np.vstack([condlist, ~totlist]) + n += 1 + if (n != n2): + raise ValueError( + "function list and condition list must be the same") + + y = zeros(x.shape, x.dtype) + for k in range(n): + item = funclist[k] + if not isinstance(item, collections.Callable): + y[condlist[k]] = item + else: + vals = x[condlist[k]] + if vals.size > 0: + y[condlist[k]] = item(vals, *args, **kw) + if zerod: + y = y.squeeze() + return y + + +def select(condlist, choicelist, default=0): + """ + Return an array drawn from elements in choicelist, depending on conditions. + + Parameters + ---------- + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : scalar, optional + The element inserted in `output` when all conditions evaluate to False. + + Returns + ------- + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. + + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal + + Examples + -------- + >>> x = np.arange(10) + >>> condlist = [x<3, x>5] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist) + array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) + + """ + # Check the size of condlist and choicelist are the same, or abort. + if len(condlist) != len(choicelist): + raise ValueError( + 'list of cases must be same length as list of conditions') + + # Now that the dtype is known, handle the deprecated select([], []) case + if len(condlist) == 0: + warnings.warn("select with an empty condition list is not possible" + "and will be deprecated", + DeprecationWarning) + return np.asarray(default)[()] + + choicelist = [np.asarray(choice) for choice in choicelist] + choicelist.append(np.asarray(default)) + + # need to get the result type before broadcasting for correct scalar + # behaviour + dtype = np.result_type(*choicelist) + + # Convert conditions to arrays and broadcast conditions and choices + # as the shape is needed for the result. Doing it seperatly optimizes + # for example when all choices are scalars. 
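+    # (For example, np.select([x > 3], [0], default=-1) can settle on an
+    # integer result dtype without first broadcasting its scalar choices
+    # to x's shape.)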
+ condlist = np.broadcast_arrays(*condlist) + choicelist = np.broadcast_arrays(*choicelist) + + # If cond array is not an ndarray in boolean format or scalar bool, abort. + deprecated_ints = False + for i in range(len(condlist)): + cond = condlist[i] + if cond.dtype.type is not np.bool_: + if np.issubdtype(cond.dtype, np.integer): + # A previous implementation accepted int ndarrays accidentally. + # Supported here deliberately, but deprecated. + condlist[i] = condlist[i].astype(bool) + deprecated_ints = True + else: + raise ValueError( + 'invalid entry in choicelist: should be boolean ndarray') + + if deprecated_ints: + msg = "select condlists containing integer ndarrays is deprecated " \ + "and will be removed in the future. Use `.astype(bool)` to " \ + "convert to bools." + warnings.warn(msg, DeprecationWarning) + + if choicelist[0].ndim == 0: + # This may be common, so avoid the call. + result_shape = condlist[0].shape + else: + result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape + + result = np.full(result_shape, choicelist[-1], dtype) + + # Use np.copyto to burn each choicelist array onto result, using the + # corresponding condlist as a boolean mask. This is done in reverse + # order since the first choice should take precedence. + choicelist = choicelist[-2::-1] + condlist = condlist[::-1] + for choice, cond in zip(choicelist, condlist): + np.copyto(result, choice, where=cond) + + return result + + +def copy(a, order='K'): + """ + Return an array copy of the given object. + + Parameters + ---------- + a : array_like + Input data. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :meth:ndarray.copy are very + similar, but have different default values for their order= + arguments.) + + Returns + ------- + arr : ndarray + Array interpretation of `a`. + + Notes + ----- + This is equivalent to + + >>> np.array(a, copy=True) #doctest: +SKIP + + Examples + -------- + Create an array x, with a reference y and a copy z: + + >>> x = np.array([1, 2, 3]) + >>> y = x + >>> z = np.copy(x) + + Note that, when we modify x, y changes, but not z: + + >>> x[0] = 10 + >>> x[0] == y[0] + True + >>> x[0] == z[0] + False + + """ + return array(a, order=order, copy=True) + +# Basic operations + + +def gradient(f, *varargs): + """ + Return the gradient of an N-dimensional array. + + The gradient is computed using second order accurate central differences + in the interior and second order accurate one-sides (forward or backwards) + differences at the boundaries. The returned gradient hence has the same + shape as the input array. + + Parameters + ---------- + f : array_like + An N-dimensional array containing samples of a scalar function. + `*varargs` : scalars + 0, 1, or N scalars specifying the sample distances in each direction, + that is: `dx`, `dy`, `dz`, ... The default distance is 1. + + Returns + ------- + gradient : ndarray + N arrays of the same shape as `f` giving the derivative of `f` with + respect to each dimension. + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) + >>> np.gradient(x) + array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + >>> np.gradient(x, 2) + array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) + [array([[ 2., 2., -1.], + [ 2., 2., -1.]]), + array([[ 1. 
, 2.5, 4. ], + [ 1. , 1. , 1. ]])] + + >>> x = np.array([0,1,2,3,4]) + >>> dx = gradient(x) + >>> y = x**2 + >>> gradient(y,dx) + array([0., 2., 4., 6., 8.]) + """ + f = np.asanyarray(f) + N = len(f.shape) # number of dimensions + n = len(varargs) + if n == 0: + dx = [1.0]*N + elif n == 1: + dx = [varargs[0]]*N + elif n == N: + dx = list(varargs) + else: + raise SyntaxError( + "invalid number of arguments") + + # use central differences on interior and one-sided differences on the + # endpoints. This preserves second order-accuracy over the full domain. + + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)]*N + slice2 = [slice(None)]*N + slice3 = [slice(None)]*N + slice4 = [slice(None)]*N + + otype = f.dtype.char + if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: + otype = 'd' + + # Difference of datetime64 elements results in timedelta64 + if otype == 'M': + # Need to use the full dtype name because it contains unit information + otype = f.dtype.name.replace('datetime', 'timedelta') + elif otype == 'm': + # Needs to keep the specific units, can't be a general unit + otype = f.dtype + + # Convert datetime64 data into ints. Make dummy variable `y` + # that is a view of ints if the data is datetime64, otherwise + # just set y equal to the the array `f`. + if f.dtype.char in ["M", "m"]: + y = f.view('int64') + else: + y = f + + for axis in range(N): + + if y.shape[axis] < 2: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least two elements are required.") + + # Numerical differentiation: 1st order edges, 2nd order interior + if y.shape[axis] == 2: + # Use first order differences for time data + out = np.empty_like(y, dtype=otype) + + slice1[axis] = slice(1, -1) + slice2[axis] = slice(2, None) + slice3[axis] = slice(None, -2) + # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 + out[slice1] = (y[slice2] - y[slice3])/2.0 + + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + # 1D equivalent -- out[0] = (y[1] - y[0]) + out[slice1] = (y[slice2] - y[slice3]) + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + # 1D equivalent -- out[-1] = (y[-1] - y[-2]) + out[slice1] = (y[slice2] - y[slice3]) + + # Numerical differentiation: 2st order edges, 2nd order interior + else: + # Use second order differences where possible + out = np.empty_like(y, dtype=otype) + + slice1[axis] = slice(1, -1) + slice2[axis] = slice(2, None) + slice3[axis] = slice(None, -2) + # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 + out[slice1] = (y[slice2] - y[slice3])/2.0 + + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0 + out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + slice4[axis] = -3 + # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) + out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 + + # divide by step size + outvals.append(out / dx[axis]) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if N == 1: + return outvals[0] + else: + return outvals + + +def diff(a, n=1, axis=-1): + """ + Calculate the n-th order discrete difference along given axis. 
+ + The first order difference is given by ``out[n] = a[n+1] - a[n]`` along + the given axis, higher order differences are calculated by using `diff` + recursively. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. + axis : int, optional + The axis along which the difference is taken, default is the last axis. + + Returns + ------- + diff : ndarray + The `n` order differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. + + See Also + -------- + gradient, ediff1d, cumsum + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + """ + if n == 0: + return a + if n < 0: + raise ValueError( + "order must be non-negative but got " + repr(n)) + a = asanyarray(a) + nd = len(a.shape) + slice1 = [slice(None)]*nd + slice2 = [slice(None)]*nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + if n > 1: + return diff(a[slice1]-a[slice2], n-1, axis=axis) + else: + return a[slice1]-a[slice2] + + +def interp(x, xp, fp, left=None, right=None): + """ + One-dimensional linear interpolation. + + Returns the one-dimensional piecewise linear interpolant to a function + with given values at discrete data-points. + + Parameters + ---------- + x : array_like + The x-coordinates of the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing. + + fp : 1-D sequence of floats + The y-coordinates of the data points, same length as `xp`. + + left : float, optional + Value to return for `x < xp[0]`, default is `fp[0]`. + + right : float, optional + Value to return for `x > xp[-1]`, default is `fp[-1]`. + + Returns + ------- + y : {float, ndarray} + The interpolated values, same shape as `x`. + + Raises + ------ + ValueError + If `xp` and `fp` have different length + + Notes + ----- + Does not check that the x-coordinate sequence `xp` is increasing. + If `xp` is not increasing, the results are nonsense. + A simple check for increasing is:: + + np.all(np.diff(xp) > 0) + + + Examples + -------- + >>> xp = [1, 2, 3] + >>> fp = [3, 2, 0] + >>> np.interp(2.5, xp, fp) + 1.0 + >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) + array([ 3. , 3. , 2.5 , 0.56, 0. ]) + >>> UNDEF = -99.0 + >>> np.interp(3.14, xp, fp, right=UNDEF) + -99.0 + + Plot an interpolant to the sine function: + + >>> x = np.linspace(0, 2*np.pi, 10) + >>> y = np.sin(x) + >>> xvals = np.linspace(0, 2*np.pi, 50) + >>> yinterp = np.interp(xvals, x, y) + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'o') + [] + >>> plt.plot(xvals, yinterp, '-x') + [] + >>> plt.show() + + """ + if isinstance(x, (float, int, number)): + return compiled_interp([x], xp, fp, left, right).item() + elif isinstance(x, np.ndarray) and x.ndim == 0: + return compiled_interp([x], xp, fp, left, right).item() + else: + return compiled_interp(x, xp, fp, left, right) + + +def angle(z, deg=0): + """ + Return the angle of the complex argument. + + Parameters + ---------- + z : array_like + A complex number or sequence of complex numbers. + deg : bool, optional + Return angle in degrees if True, radians if False (default). 
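+ (If True, the radian result is simply multiplied by ``180/pi``, as in
+ the implementation below.)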
+ + Returns + ------- + angle : {ndarray, scalar} + The counterclockwise angle from the positive real axis on + the complex plane, with dtype as numpy.float64. + + See Also + -------- + arctan2 + absolute + + + + Examples + -------- + >>> np.angle([1.0, 1.0j, 1+1j]) # in radians + array([ 0. , 1.57079633, 0.78539816]) + >>> np.angle(1+1j, deg=True) # in degrees + 45.0 + + """ + if deg: + fact = 180/pi + else: + fact = 1.0 + z = asarray(z) + if (issubclass(z.dtype.type, _nx.complexfloating)): + zimag = z.imag + zreal = z.real + else: + zimag = 0 + zreal = z + return arctan2(zimag, zreal) * fact + + +def unwrap(p, discont=pi, axis=-1): + """ + Unwrap by changing deltas between values to 2*pi complement. + + Unwrap radian phase `p` by changing absolute jumps greater than + `discont` to their 2*pi complement along the given axis. + + Parameters + ---------- + p : array_like + Input array. + discont : float, optional + Maximum discontinuity between values, default is ``pi``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. + + Returns + ------- + out : ndarray + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``pi``, but larger than + `discont`, no unwrapping is done because taking the 2*pi complement + would only make the discontinuity larger. + + Examples + -------- + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) + + """ + p = asarray(p) + nd = len(p.shape) + dd = diff(p, axis=axis) + slice1 = [slice(None, None)]*nd # full slices + slice1[axis] = slice(1, None) + ddmod = mod(dd + pi, 2*pi) - pi + _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) + ph_correct = ddmod - dd + _nx.copyto(ph_correct, 0, where=abs(dd) < discont) + up = array(p, copy=True, dtype='d') + up[slice1] = p[slice1] + ph_correct.cumsum(axis) + return up + + +def sort_complex(a): + """ + Sort a complex array using the real part first, then the imaginary part. + + Parameters + ---------- + a : array_like + Input array + + Returns + ------- + out : complex ndarray + Always returns a sorted complex array. + + Examples + -------- + >>> np.sort_complex([5, 3, 6, 2, 1]) + array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + + >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) + array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + + """ + b = array(a, copy=True) + b.sort() + if not issubclass(b.dtype.type, _nx.complexfloating): + if b.dtype.char in 'bhBH': + return b.astype('F') + elif b.dtype.char == 'g': + return b.astype('G') + else: + return b.astype('D') + else: + return b + + +def trim_zeros(filt, trim='fb'): + """ + Trim the leading and/or trailing zeros from a 1-D array or sequence. + + Parameters + ---------- + filt : 1-D array or sequence + Input array. + trim : str, optional + A string with 'f' representing trim from front and 'b' to trim from + back. Default is 'fb', trim zeros from both front and back of the + array. + + Returns + ------- + trimmed : 1-D array or sequence + The result of trimming the input. The input data type is preserved. + + Examples + -------- + >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) + >>> np.trim_zeros(a) + array([1, 2, 3, 0, 2, 1]) + + >>> np.trim_zeros(a, 'b') + array([0, 0, 0, 1, 2, 3, 0, 2, 1]) + + The input data type is preserved, list/tuple in means list/tuple out. 
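+
+ A tuple in likewise means a tuple out:
+
+ >>> np.trim_zeros((0, 0, 1, 2))
+ (1, 2)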
+ + >>> np.trim_zeros([0, 1, 2, 0]) + [1, 2] + + """ + first = 0 + trim = trim.upper() + if 'F' in trim: + for i in filt: + if i != 0.: + break + else: + first = first + 1 + last = len(filt) + if 'B' in trim: + for i in filt[::-1]: + if i != 0.: + break + else: + last = last - 1 + return filt[first:last] + + +@deprecate +def unique(x): + """ + This function is deprecated. Use numpy.lib.arraysetops.unique() + instead. + """ + try: + tmp = x.flatten() + if tmp.size == 0: + return tmp + tmp.sort() + idx = concatenate(([True], tmp[1:] != tmp[:-1])) + return tmp[idx] + except AttributeError: + items = sorted(set(x)) + return asarray(items) + + +def extract(condition, arr): + """ + Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If + `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + + Parameters + ---------- + condition : array_like + An array whose nonzero or True entries indicate the elements of `arr` + to extract. + arr : array_like + Input array of the same size as `condition`. + + Returns + ------- + extract : ndarray + Rank 1 array of values from `arr` where `condition` is True. + + See Also + -------- + take, put, copyto, compress + + Examples + -------- + >>> arr = np.arange(12).reshape((3, 4)) + >>> arr + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> condition = np.mod(arr, 3)==0 + >>> condition + array([[ True, False, False, True], + [False, False, True, False], + [False, True, False, False]], dtype=bool) + >>> np.extract(condition, arr) + array([0, 3, 6, 9]) + + + If `condition` is boolean: + + >>> arr[condition] + array([0, 3, 6, 9]) + + """ + return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) + + +def place(arr, mask, vals): + """ + Change elements of an array based on conditional and input values. + + Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that + `place` uses the first N elements of `vals`, where N is the number of + True values in `mask`, while `copyto` uses the elements where `mask` + is True. + + Note that `extract` does the exact opposite of `place`. + + Parameters + ---------- + arr : array_like + Array to put data into. + mask : array_like + Boolean mask array. Must have the same size as `a`. + vals : 1-D sequence + Values to put into `a`. Only the first N elements are used, where + N is the number of True values in `mask`. If `vals` is smaller + than N it will be repeated. + + See Also + -------- + copyto, put, take, extract + + Examples + -------- + >>> arr = np.arange(6).reshape(2, 3) + >>> np.place(arr, arr>2, [44, 55]) + >>> arr + array([[ 0, 1, 2], + [44, 55, 44]]) + + """ + return _insert(arr, mask, vals) + + +def disp(mesg, device=None, linefeed=True): + """ + Display a message on a device. + + Parameters + ---------- + mesg : str + Message to display. + device : object + Device to write message. If None, defaults to ``sys.stdout`` which is + very similar to ``print``. `device` needs to have ``write()`` and + ``flush()`` methods. + linefeed : bool, optional + Option whether to print a line feed or not. Defaults to True. + + Raises + ------ + AttributeError + If `device` does not have a ``write()`` or ``flush()`` method. 
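+
+ Notes
+ -----
+ Roughly, ``np.disp(mesg)`` amounts to ``sys.stdout.write(mesg + '\n')``
+ followed by ``sys.stdout.flush()``.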
+ + Examples + -------- + Besides ``sys.stdout``, a file-like object can also be used as it has + both required methods: + + >>> from StringIO import StringIO + >>> buf = StringIO() + >>> np.disp('"Display" in a file', device=buf) + >>> buf.getvalue() + '"Display" in a file\\n' + + """ + if device is None: + device = sys.stdout + if linefeed: + device.write('%s\n' % mesg) + else: + device.write('%s' % mesg) + device.flush() + return + + +class vectorize(object): + """ + vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False) + + Generalized function class. + + Define a vectorized function which takes a nested sequence + of objects or numpy arrays as inputs and returns a + numpy array as output. The vectorized function evaluates `pyfunc` over + successive tuples of the input arrays like the python map function, + except it uses the broadcasting rules of numpy. + + The data type of the output of `vectorized` is determined by calling + the function with the first element of the input. This can be avoided + by specifying the `otypes` argument. + + Parameters + ---------- + pyfunc : callable + A python function or method. + otypes : str or list of dtypes, optional + The output data type. It must be specified as either a string of + typecode characters or a list of data type specifiers. There should + be one data type specifier for each output. + doc : str, optional + The docstring for the function. If `None`, the docstring will be the + ``pyfunc.__doc__``. + excluded : set, optional + Set of strings or integers representing the positional or keyword + arguments for which the function will not be vectorized. These will be + passed directly to `pyfunc` unmodified. + + .. versionadded:: 1.7.0 + + cache : bool, optional + If `True`, then cache the first function call that determines the number + of outputs if `otypes` is not provided. + + .. versionadded:: 1.7.0 + + Returns + ------- + vectorized : callable + Vectorized function. + + Examples + -------- + >>> def myfunc(a, b): + ... "Return a-b if a>b, otherwise return a+b" + ... if a > b: + ... return a - b + ... else: + ... return a + b + + >>> vfunc = np.vectorize(myfunc) + >>> vfunc([1, 2, 3, 4], 2) + array([3, 4, 1, 2]) + + The docstring is taken from the input function to `vectorize` unless it + is specified + + >>> vfunc.__doc__ + 'Return a-b if a>b, otherwise return a+b' + >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') + >>> vfunc.__doc__ + 'Vectorized `myfunc`' + + The output type is determined by evaluating the first element of the input, + unless it is specified + + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + + The `excluded` argument can be used to prevent vectorizing over certain + arguments. This can be useful for array-like arguments of a fixed length + such as the coefficients for a polynomial as in `polyval`: + + >>> def mypolyval(p, x): + ... _p = list(p) + ... res = _p.pop(0) + ... while _p: + ... res = res*x + _p.pop(0) + ... return res + >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) + + Positional arguments may also be excluded by specifying their position: + + >>> vpolyval.excluded.add(0) + >>> vpolyval([1, 2, 3], x=[0, 1]) + array([3, 6]) + + Notes + ----- + The `vectorize` function is provided primarily for convenience, not for + performance. The implementation is essentially a for loop. 
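+
+ For a simple two-way choice such as ``myfunc`` above, broadcasting
+ primitives are usually much faster; a rough equivalent (not a drop-in
+ replacement) is:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.where(a > 2, a - 2, a + 2)
+ array([3, 4, 1, 2])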
+ + If `otypes` is not specified, then a call to the function with the + first argument will be used to determine the number of outputs. The + results of this call will be cached if `cache` is `True` to prevent + calling the function twice. However, to implement the cache, the + original function must be wrapped which will slow down subsequent + calls, so only do this if your function is expensive. + + The new keyword argument interface and `excluded` argument support + further degrades performance. + + """ + + def __init__(self, pyfunc, otypes='', doc=None, excluded=None, + cache=False): + self.pyfunc = pyfunc + self.cache = cache + self._ufunc = None # Caching to improve default performance + + if doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = doc + + if isinstance(otypes, str): + self.otypes = otypes + for char in self.otypes: + if char not in typecodes['All']: + raise ValueError( + "Invalid otype specified: %s" % (char,)) + elif iterable(otypes): + self.otypes = ''.join([_nx.dtype(x).char for x in otypes]) + else: + raise ValueError( + "Invalid otype specification") + + # Excluded variable support + if excluded is None: + excluded = set() + self.excluded = set(excluded) + + def __call__(self, *args, **kwargs): + """ + Return arrays with the results of `pyfunc` broadcast (vectorized) over + `args` and `kwargs` not in `excluded`. + """ + excluded = self.excluded + if not kwargs and not excluded: + func = self.pyfunc + vargs = args + else: + # The wrapper accepts only positional arguments: we use `names` and + # `inds` to mutate `the_args` and `kwargs` to pass to the original + # function. + nargs = len(args) + + names = [_n for _n in kwargs if _n not in excluded] + inds = [_i for _i in range(nargs) if _i not in excluded] + the_args = list(args) + + def func(*vargs): + for _n, _i in enumerate(inds): + the_args[_i] = vargs[_n] + kwargs.update(zip(names, vargs[len(inds):])) + return self.pyfunc(*the_args, **kwargs) + + vargs = [args[_i] for _i in inds] + vargs.extend([kwargs[_n] for _n in names]) + + return self._vectorize_call(func=func, args=vargs) + + def _get_ufunc_and_otypes(self, func, args): + """Return (ufunc, otypes).""" + # frompyfunc will fail if args is empty + if not args: + raise ValueError('args can not be empty') + + if self.otypes: + otypes = self.otypes + nout = len(otypes) + + # Note logic here: We only *use* self._ufunc if func is self.pyfunc + # even though we set self._ufunc regardless. + if func is self.pyfunc and self._ufunc is not None: + ufunc = self._ufunc + else: + ufunc = self._ufunc = frompyfunc(func, len(args), nout) + else: + # Get number of outputs and output types by calling the function on + # the first entries of args. We also cache the result to prevent + # the subsequent call when the ufunc is evaluated. + # Assumes that ufunc first evaluates the 0th elements in the input + # arrays (the input values are not checked to ensure this) + inputs = [asarray(_a).flat[0] for _a in args] + outputs = func(*inputs) + + # Performance note: profiling indicates that -- for simple + # functions at least -- this wrapping can almost double the + # execution time. + # Hence we make it optional. 
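+ # In outline: the outputs computed above (to infer otypes) are handed
+ # back exactly once via a one-element list, so the ufunc's first
+ # evaluation does not call the python function a second time; after
+ # the pop, _func falls through to the wrapped function.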
+ if self.cache: + _cache = [outputs] + + def _func(*vargs): + if _cache: + return _cache.pop() + else: + return func(*vargs) + else: + _func = func + + if isinstance(outputs, tuple): + nout = len(outputs) + else: + nout = 1 + outputs = (outputs,) + + otypes = ''.join([asarray(outputs[_k]).dtype.char + for _k in range(nout)]) + + # Performance note: profiling indicates that creating the ufunc is + # not a significant cost compared with wrapping so it seems not + # worth trying to cache this. + ufunc = frompyfunc(_func, len(args), nout) + + return ufunc, otypes + + def _vectorize_call(self, func, args): + """Vectorized call to `func` over positional `args`.""" + if not args: + _res = func() + else: + ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + + # Convert args to object arrays first + inputs = [array(_a, copy=False, subok=True, dtype=object) + for _a in args] + + outputs = ufunc(*inputs) + + if ufunc.nout == 1: + _res = array(outputs, + copy=False, subok=True, dtype=otypes[0]) + else: + _res = tuple([array(_x, copy=False, subok=True, dtype=_t) + for _x, _t in zip(outputs, otypes)]) + return _res + + +def cov(m, y=None, rowvar=1, bias=0, ddof=None): + """ + Estimate a covariance matrix, given data. + + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. + + Parameters + ---------- + m : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + form as that of `m`. + rowvar : int, optional + If `rowvar` is non-zero (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : int, optional + Default normalization is by ``(N - 1)``, where ``N`` is the number of + observations given (unbiased estimate). If `bias` is 1, then + normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : int, optional + .. versionadded:: 1.5 + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Examples + -------- + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. 
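+
+ The diagonal entries are the variances; with the default ``ddof`` of 1,
+ the variance of ``[0, 1, 2]`` is ``2/2 = 1.0``:
+
+ >>> np.cov(x)[0, 0]
+ 1.0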
+ + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.vstack((x,y)) + >>> print np.cov(X) + [[ 11.71 -4.286 ] + [ -4.286 2.14413333]] + >>> print np.cov(x, y) + [[ 11.71 -4.286 ] + [ -4.286 2.14413333]] + >>> print np.cov(x) + 11.71 + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if y is None: + dtype = np.result_type(m, np.float64) + else: + y = np.asarray(y) + dtype = np.result_type(m, y, np.float64) + X = array(m, ndmin=2, dtype=dtype) + + if X.shape[0] == 1: + rowvar = 1 + if rowvar: + N = X.shape[1] + axis = 0 + else: + N = X.shape[0] + axis = 1 + + # check ddof + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + fact = float(N - ddof) + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) + fact = 0.0 + + if y is not None: + y = array(y, copy=False, ndmin=2, dtype=dtype) + X = concatenate((X, y), axis) + + X -= X.mean(axis=1-axis, keepdims=True) + if not rowvar: + return (dot(X.T, X.conj()) / fact).squeeze() + else: + return (dot(X, X.T.conj()) / fact).squeeze() + + +def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None): + """ + Return correlation coefficients. + + Please refer to the documentation for `cov` for more detail. The + relationship between the correlation coefficient matrix, `P`, and the + covariance matrix, `C`, is + + .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } + + The values of `P` are between -1 and 1, inclusive. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `m`. + rowvar : int, optional + If `rowvar` is non-zero (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : int, optional + Default normalization is by ``(N - 1)``, where ``N`` is the number of + observations (unbiased estimate). If `bias` is 1, then + normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : {None, int}, optional + .. versionadded:: 1.5 + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + Returns + ------- + out : ndarray + The correlation coefficient matrix of the variables. + + See Also + -------- + cov : Covariance matrix + + """ + c = cov(x, y, rowvar, bias, ddof) + try: + d = diag(c) + except ValueError: # scalar covariance + # nan if incorrect value (nan, inf, 0), 1 otherwise + return c / c + return c / sqrt(multiply.outer(d, d)) + + +def blackman(M): + """ + Return the Blackman window. + + The Blackman window is a taper formed by using the first three + terms of a summation of cosines. It was designed to have close to the + minimal leakage possible. It is close to optimal, only slightly worse + than a Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. 
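+ (Its endpoints are mathematically zero; in floating point they come out
+ on the order of 1e-17, as the example below shows.)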
+ + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, hamming, hanning, kaiser + + Notes + ----- + The Blackman window is defined as + + .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the kaiser window. + + References + ---------- + Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, + Dover Publications, New York. + + Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + + Examples + -------- + >>> np.blackman(12) + array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, + 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, + 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, + 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) + + + Plot the window and the frequency response: + + >>> from numpy.fft import fft, fftshift + >>> window = np.blackman(51) + >>> plt.plot(window) + [] + >>> plt.title("Blackman window") + + >>> plt.ylabel("Amplitude") + + >>> plt.xlabel("Sample") + + >>> plt.show() + + >>> plt.figure() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(mag) + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Blackman window") + + >>> plt.ylabel("Magnitude [dB]") + + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + >>> plt.axis('tight') + (-0.5, 0.5, -100.0, ...) + >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(0, M) + return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) + + +def bartlett(M): + """ + Return the Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The triangular window, with the maximum value normalized to one + (the value one appears only if the number of samples is odd), with + the first and last samples equal to zero. + + See Also + -------- + blackman, hamming, hanning, kaiser + + Notes + ----- + The Bartlett window is defined as + + .. math:: w(n) = \\frac{2}{M-1} \\left( + \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| + \\right) + + Most references to the Bartlett window come from the signal + processing literature, where it is used as one of many windowing + functions for smoothing values. Note that convolution with this + window produces linear interpolation. It is also known as an + apodization (which means"removing the foot", i.e. smoothing + discontinuities at the beginning and end of the sampled signal) or + tapering function. 
The fourier transform of the Bartlett is the product + of two sinc functions. + Note the excellent discussion in Kanasewich. + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [4] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. + + + Examples + -------- + >>> np.bartlett(12) + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, + 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, + 0.18181818, 0. ]) + + Plot the window and its frequency response (requires SciPy and matplotlib): + + >>> from numpy.fft import fft, fftshift + >>> window = np.bartlett(51) + >>> plt.plot(window) + [] + >>> plt.title("Bartlett window") + + >>> plt.ylabel("Amplitude") + + >>> plt.xlabel("Sample") + + >>> plt.show() + + >>> plt.figure() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(mag) + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Bartlett window") + + >>> plt.ylabel("Magnitude [dB]") + + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + >>> plt.axis('tight') + (-0.5, 0.5, -100.0, ...) + >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(0, M) + return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) + + +def hanning(M): + """ + Return the Hanning window. + + The Hanning window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray, shape(M,) + The window, with the maximum value normalized to one (the value + one appears only if `M` is odd). + + See Also + -------- + bartlett, blackman, hamming, kaiser + + Notes + ----- + The Hanning window is defined as + + .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hanning was named for Julius van Hann, an Austrian meteorologist. + It is also known as the Cosine Bell. Some authors prefer that it be + called a Hann window, to help avoid confusion with the very similar + Hamming window. + + Most references to the Hanning window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. 
+ + Examples + -------- + >>> np.hanning(12) + array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) + + Plot the window and its frequency response: + + >>> from numpy.fft import fft, fftshift + >>> window = np.hanning(51) + >>> plt.plot(window) + [] + >>> plt.title("Hann window") + + >>> plt.ylabel("Amplitude") + + >>> plt.xlabel("Sample") + + >>> plt.show() + + >>> plt.figure() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(mag) + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of the Hann window") + + >>> plt.ylabel("Magnitude [dB]") + + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + >>> plt.axis('tight') + (-0.5, 0.5, -100.0, ...) + >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(0, M) + return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) + + +def hamming(M): + """ + Return the Hamming window. + + The Hamming window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hanning, kaiser + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey + and is described in Blackman and Tukey. It was recommended for + smoothing the truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. 
+ + Examples + -------- + >>> np.hamming(12) + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, + 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, + 0.15302337, 0.08 ]) + + Plot the window and the frequency response: + + >>> from numpy.fft import fft, fftshift + >>> window = np.hamming(51) + >>> plt.plot(window) + [] + >>> plt.title("Hamming window") + + >>> plt.ylabel("Amplitude") + + >>> plt.xlabel("Sample") + + >>> plt.show() + + >>> plt.figure() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(mag) + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Hamming window") + + >>> plt.ylabel("Magnitude [dB]") + + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + >>> plt.axis('tight') + (-0.5, 0.5, -100.0, ...) + >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(0, M) + return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) + +## Code from cephes for i0 + +_i0A = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +_i0B = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + + +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x*b1 - b2 + vals[i] + + return 0.5*(b0 - b2) + + +def _i0_1(x): + return exp(x) * _chbevl(x/2.0-2, _i0A) + + +def _i0_2(x): + return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) + + +def i0(x): + """ + Modified Bessel function of the first kind, order 0. + + Usually denoted :math:`I_0`. This function does broadcast, but will *not* + "up-cast" int dtype arguments unless accompanied by at least one float or + complex dtype argument (see Raises below). + + Parameters + ---------- + x : array_like, dtype float or complex + Argument of the Bessel function. 
+ + Returns + ------- + out : ndarray, shape = x.shape, dtype = x.dtype + The modified Bessel function evaluated at each of the elements of `x`. + + Raises + ------ + TypeError: array cannot be safely cast to required type + If argument consists exclusively of int dtypes. + + See Also + -------- + scipy.special.iv, scipy.special.ive + + Notes + ----- + We use the algorithm published by Clenshaw [1]_ and referenced by + Abramowitz and Stegun [2]_, for which the function domain is + partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + polynomial expansions are employed in each interval. Relative error on + the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + + References + ---------- + .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + *National Physical Laboratory Mathematical Tables*, vol. 5, London: + Her Majesty's Stationery Office, 1962. + .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + Functions*, 10th printing, New York: Dover, 1964, pp. 379. + http://www.math.sfu.ca/~cbm/aands/page_379.htm + .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html + + Examples + -------- + >>> np.i0([0.]) + array(1.0) + >>> np.i0([0., 1. + 2j]) + array([ 1.00000000+0.j , 0.18785373+0.64616944j]) + + """ + x = atleast_1d(x).copy() + y = empty_like(x) + ind = (x < 0) + x[ind] = -x[ind] + ind = (x <= 8.0) + y[ind] = _i0_1(x[ind]) + ind2 = ~ind + y[ind2] = _i0_2(x[ind2]) + return y.squeeze() + +## End of cephes code for i0 + + +def kaiser(M, beta): + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple + approximation to the DPSS window based on Bessel functions. The Kaiser + window is a very good approximation to the Digital Prolate Spheroidal + Sequence, or Slepian window, which is the transform which maximizes the + energy in the main lobe of the window relative to total energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + get returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. 
smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. [3] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + + Examples + -------- + >>> np.kaiser(12, 14) + array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, + 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, + 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, + 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) + + + Plot the window and the frequency response: + + >>> from numpy.fft import fft, fftshift + >>> window = np.kaiser(51, 14) + >>> plt.plot(window) + [] + >>> plt.title("Kaiser window") + + >>> plt.ylabel("Amplitude") + + >>> plt.xlabel("Sample") + + >>> plt.show() + + >>> plt.figure() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(mag) + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Kaiser window") + + >>> plt.ylabel("Magnitude [dB]") + + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + >>> plt.axis('tight') + (-0.5, 0.5, -100.0, ...) + >>> plt.show() + + """ + from numpy.dual import i0 + if M == 1: + return np.array([1.]) + n = arange(0, M) + alpha = (M-1)/2.0 + return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) + + +def sinc(x): + """ + Return the sinc function. + + The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. + + Parameters + ---------- + x : ndarray + Array (possibly multi-dimensional) of values for which to to + calculate ``sinc(x)``. + + Returns + ------- + out : ndarray + ``sinc(x)``, which has the same shape as the input. + + Notes + ----- + ``sinc(0)`` is the limit value 1. + + The name sinc is short for "sine cardinal" or "sinus cardinalis". + + The sinc function is used in various signal processing applications, + including in anti-aliasing, in the construction of a Lanczos resampling + filter, and in interpolation. + + For bandlimited interpolation of discrete-time signals, the ideal + interpolation kernel is proportional to the sinc function. + + References + ---------- + .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web + Resource. http://mathworld.wolfram.com/SincFunction.html + .. 
[2] Wikipedia, "Sinc function", + http://en.wikipedia.org/wiki/Sinc_function + + Examples + -------- + >>> x = np.linspace(-4, 4, 41) + >>> np.sinc(x) + array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, + -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, + 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, + 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, + -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, + 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, + 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, + 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, + 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, + -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, + -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, + 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, + -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, + -4.92362781e-02, -3.89804309e-17]) + + >>> plt.plot(x, np.sinc(x)) + [] + >>> plt.title("Sinc Function") + + >>> plt.ylabel("Amplitude") + + >>> plt.xlabel("X") + + >>> plt.show() + + It works in 2-D as well: + + >>> x = np.linspace(-4, 4, 401) + >>> xx = np.outer(x, x) + >>> plt.imshow(np.sinc(xx)) + + + """ + x = np.asanyarray(x) + y = pi * where(x == 0, 1.0e-20, x) + return sin(y)/y + + +def msort(a): + """ + Return a copy of an array sorted along the first axis. + + Parameters + ---------- + a : array_like + Array to be sorted. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + sort + + Notes + ----- + ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. + + """ + b = array(a, subok=True, copy=True) + b.sort(0) + return b + + +def _ureduce(a, func, **kwargs): + """ + Internal Function. + Call `func` with `a` as first argument swapping the axes to use extended + axis on functions that don't support it natively. + + Returns result and a.shape with axis dims set to 1. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + func : callable + Reduction function Kapable of receiving an axis argument. + It is is called with `a` as first argument followed by `kwargs`. + kwargs : keyword arguments + additional keyword arguments to pass to `func`. + + Returns + ------- + result : tuple + Result of func(a, **kwargs) and a.shape with axis dims set to 1 + which can be used to reshape the result to the same shape a ufunc with + keepdims=True would produce. + + """ + a = np.asanyarray(a) + axis = kwargs.get('axis', None) + if axis is not None: + keepdim = list(a.shape) + nd = a.ndim + try: + axis = operator.index(axis) + if axis >= nd or axis < -nd: + raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim)) + keepdim[axis] = 1 + except TypeError: + sax = set() + for x in axis: + if x >= nd or x < -nd: + raise IndexError("axis %d out of bounds (%d)" % (x, nd)) + if x in sax: + raise ValueError("duplicate value in axis") + sax.add(x % nd) + keepdim[x] = 1 + keep = sax.symmetric_difference(frozenset(range(nd))) + nkeep = len(keep) + # swap axis that should not be reduced to front + for i, s in enumerate(sorted(keep)): + a = a.swapaxes(i, s) + # merge reduced axis + a = a.reshape(a.shape[:nkeep] + (-1,)) + kwargs['axis'] = -1 + else: + keepdim = [1] * a.ndim + + r = func(a, **kwargs) + return r, keepdim + + +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. 
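+
+ For example, ``np.median([1, 3, 2])`` gives ``2.0``, while
+ ``np.median([1, 3, 2, 4])`` averages the two middle values to give ``2.5``.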
+ + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int or sequence of int, optional + Axis along which the medians are computed. The default (axis=None) + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve the + contents of the input array. Treat the input as undefined, but it + will probably be fully or partially sorted. Default is False. Note + that, if `overwrite_input` is True and the input is not already an + ndarray, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + .. versionadded:: 1.9.0 + + + Returns + ------- + median : ndarray + A new array holding the result (unless `out` is specified, in which + case that array is returned instead). If the input contains + integers, or floats of smaller precision than 64, then the output + data-type is float64. Otherwise, the output data-type is the same + as that of the input. + + See Also + -------- + mean, percentile + + Notes + ----- + Given a vector V of length N, the median of V is the middle value of + a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is + odd. When N is even, it is the average of the two middle values of + ``V_sorted``. 
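+ With N = 5, for instance, ``V_sorted[(N-1)/2]`` is ``V_sorted[2]``, the
+ third-smallest value.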
+ + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.median(a) + 3.5 + >>> np.median(a, axis=0) + array([ 6.5, 4.5, 2.5]) + >>> np.median(a, axis=1) + array([ 7., 2.]) + >>> m = np.median(a, axis=0) + >>> out = np.zeros_like(m) + >>> np.median(a, axis=0, out=m) + array([ 6.5, 4.5, 2.5]) + >>> m + array([ 6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.median(b, axis=1, overwrite_input=True) + array([ 7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.median(b, axis=None, overwrite_input=True) + 3.5 + >>> assert not np.all(a==b) + + """ + r, k = _ureduce(a, func=_median, axis=axis, out=out, + overwrite_input=overwrite_input) + if keepdims: + return r.reshape(k) + else: + return r + +def _median(a, axis=None, out=None, overwrite_input=False): + # can't be reasonably be implemented in terms of percentile as we have to + # call mean to not break astropy + a = np.asanyarray(a) + if axis is not None and axis >= a.ndim: + raise IndexError( + "axis %d out of bounds (%d)" % (axis, a.ndim)) + + if overwrite_input: + if axis is None: + part = a.ravel() + sz = part.size + if sz % 2 == 0: + szh = sz // 2 + part.partition((szh - 1, szh)) + else: + part.partition((sz - 1) // 2) + else: + sz = a.shape[axis] + if sz % 2 == 0: + szh = sz // 2 + a.partition((szh - 1, szh), axis=axis) + else: + a.partition((sz - 1) // 2, axis=axis) + part = a + else: + if axis is None: + sz = a.size + else: + sz = a.shape[axis] + if sz % 2 == 0: + part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis) + else: + part = partition(a, (sz - 1) // 2, axis=axis) + if part.shape == (): + # make 0-D arrays work + return part.item() + if axis is None: + axis = 0 + indexer = [slice(None)] * part.ndim + index = part.shape[axis] // 2 + if part.shape[axis] % 2 == 1: + # index with slice to allow mean (below) to work + indexer[axis] = slice(index, index+1) + else: + indexer[axis] = slice(index-1, index+1) + # Use mean in odd and even case to coerce data type + # and check, use out array. + return mean(part[indexer], axis=axis, out=out) + + +def percentile(a, q, axis=None, out=None, + overwrite_input=False, interpolation='linear', keepdims=False): + """ + Compute the qth percentile of the data along the specified axis. + + Returns the qth percentile of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + q : float in range of [0,100] (or sequence of floats) + Percentile to compute which must be between 0 and 100 inclusive. + axis : int or sequence of int, optional + Axis along which the percentiles are computed. The default (None) + is to compute the percentiles along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + percentile. This will save memory when you do not need to preserve + the contents of the input array. In this case you should not make + any assumptions about the content of the passed in array `a` after + this function completes -- treat it as undefined. Default is False. 
+ Note that, if the `a` input is not already an array this parameter + will have no effect, `a` will be converted to an array internally + regardless of the value of this parameter. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. + + .. versionadded:: 1.9.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + .. versionadded:: 1.9.0 + + Returns + ------- + percentile : scalar or ndarray + If a single percentile `q` is given and axis=None a scalar is + returned. If multiple percentiles `q` are given an array holding + the result is returned. The results are listed in the first axis. + (If `out` is specified, in which case that array is returned + instead). If the input contains integers, or floats of smaller + precision than 64, then the output data-type is float64. Otherwise, + the output data-type is the same as that of the input. + + See Also + -------- + mean, median + + Notes + ----- + Given a vector V of length N, the q-th percentile of V is the q-th ranked + value in a sorted copy of V. The values and distances of the two + nearest neighbors as well as the `interpolation` parameter will + determine the percentile if the normalized ranking does not match q + exactly. This function is the same as the median if ``q=50``, the same + as the minimum if ``q=0`` and the same as the maximum if ``q=100``. + + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.percentile(a, 50) + array([ 3.5]) + >>> np.percentile(a, 50, axis=0) + array([[ 6.5, 4.5, 2.5]]) + >>> np.percentile(a, 50, axis=1) + array([[ 7.], + [ 2.]]) + + >>> m = np.percentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.percentile(a, 50, axis=0, out=m) + array([[ 6.5, 4.5, 2.5]]) + >>> m + array([[ 6.5, 4.5, 2.5]]) + + >>> b = a.copy() + >>> np.percentile(b, 50, axis=1, overwrite_input=True) + array([[ 7.], + [ 2.]]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.percentile(b, 50, axis=None, overwrite_input=True) + array([ 3.5]) + + """ + q = array(q, dtype=np.float64, copy=True) + r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, + overwrite_input=overwrite_input, + interpolation=interpolation) + if keepdims: + if q.ndim == 0: + return r.reshape(k) + else: + return r.reshape([len(q)] + k) + else: + return r + + +def _percentile(a, q, axis=None, out=None, + overwrite_input=False, interpolation='linear', keepdims=False): + a = asarray(a) + if q.ndim == 0: + # Do not allow 0-d arrays because following code fails for scalar + zerod = True + q = q[None] + else: + zerod = False + + # avoid expensive reductions, relevant for arrays with < O(1000) elements + if q.size < 10: + for i in range(q.size): + if q[i] < 0. or q[i] > 100.: + raise ValueError("Percentiles must be in the range [0,100]") + q[i] /= 100. + else: + # faster than any() + if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): + raise ValueError("Percentiles must be in the range [0,100]") + q /= 100. 
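+
+ # From here on q lies in [0, 1]. The fractional position of quantile
+ # q among Nx samples is q*(Nx - 1); e.g. the 50th percentile of 4
+ # samples falls at index 1.5, midway between the second and third
+ # order statistics, which is what the weighting below computes.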
+ + # prepare a for partioning + if overwrite_input: + if axis is None: + ap = a.ravel() + else: + ap = a + else: + if axis is None: + ap = a.flatten() + else: + ap = a.copy() + + if axis is None: + axis = 0 + + Nx = ap.shape[axis] + indices = q * (Nx - 1) + + # round fractional indices according to interpolation method + if interpolation == 'lower': + indices = floor(indices).astype(intp) + elif interpolation == 'higher': + indices = ceil(indices).astype(intp) + elif interpolation == 'midpoint': + indices = floor(indices) + 0.5 + elif interpolation == 'nearest': + indices = around(indices).astype(intp) + elif interpolation == 'linear': + pass # keep index as fraction and interpolate + else: + raise ValueError( + "interpolation can only be 'linear', 'lower' 'higher', " + "'midpoint', or 'nearest'") + + if indices.dtype == intp: # take the points along axis + ap.partition(indices, axis=axis) + # ensure axis with qth is first + ap = np.rollaxis(ap, axis, 0) + axis = 0 + + if zerod: + indices = indices[0] + r = take(ap, indices, axis=axis, out=out) + else: # weight the points above and below the indices + indices_below = floor(indices).astype(intp) + indices_above = indices_below + 1 + indices_above[indices_above > Nx - 1] = Nx - 1 + + weights_above = indices - indices_below + weights_below = 1.0 - weights_above + + weights_shape = [1, ] * ap.ndim + weights_shape[axis] = len(indices) + weights_below.shape = weights_shape + weights_above.shape = weights_shape + + ap.partition(concatenate((indices_below, indices_above)), axis=axis) + x1 = take(ap, indices_below, axis=axis) * weights_below + x2 = take(ap, indices_above, axis=axis) * weights_above + + # ensure axis with qth is first + x1 = np.rollaxis(x1, axis, 0) + x2 = np.rollaxis(x2, axis, 0) + + if zerod: + x1 = x1.squeeze(0) + x2 = x2.squeeze(0) + + if out is not None: + r = add(x1, x2, out=out) + else: + r = add(x1, x2) + + return r + + +def trapz(y, x=None, dx=1.0, axis=-1): + """ + Integrate along the given axis using the composite trapezoidal rule. + + Integrate `y` (`x`) along given axis. + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + If `x` is None, then spacing between all `y` elements is `dx`. + dx : scalar, optional + If `x` is None, spacing given by `dx` is assumed. Default is 1. + axis : int, optional + Specify the axis. + + Returns + ------- + trapz : float + Definite integral as approximated by trapezoidal rule. + + See Also + -------- + sum, cumsum + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + + References + ---------- + .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule + + .. 
[2] Illustration image: + http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + >>> np.trapz([1,2,3]) + 4.0 + >>> np.trapz([1,2,3], x=[4,6,8]) + 8.0 + >>> np.trapz([1,2,3], dx=2) + 8.0 + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.trapz(a, axis=0) + array([ 1.5, 2.5, 3.5]) + >>> np.trapz(a, axis=1) + array([ 2., 8.]) + + """ + y = asanyarray(y) + if x is None: + d = dx + else: + x = asanyarray(x) + if x.ndim == 1: + d = diff(x) + # reshape to correct shape + shape = [1]*y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = diff(x, axis=axis) + nd = len(y.shape) + slice1 = [slice(None)]*nd + slice2 = [slice(None)]*nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) + return ret + + +#always succeed +def add_newdoc(place, obj, doc): + """Adds documentation to obj which is in module place. + + If doc is a string add it to obj as a docstring + + If doc is a tuple, then the first element is interpreted as + an attribute of obj and the second as the docstring + (method, docstring) + + If doc is a list, then each element of the list should be a + sequence of length two --> [(method1, docstring1), + (method2, docstring2), ...] + + This routine never raises an error. + + This routine cannot modify read-only docstrings, as appear + in new-style classes or built-in functions. Because this + routine never raises an error the caller must check manually + that the docstrings were changed. + """ + try: + new = getattr(__import__(place, globals(), {}, [obj]), obj) + if isinstance(doc, str): + add_docstring(new, doc.strip()) + elif isinstance(doc, tuple): + add_docstring(getattr(new, doc[0]), doc[1].strip()) + elif isinstance(doc, list): + for val in doc: + add_docstring(getattr(new, val[0]), val[1].strip()) + except: + pass + + +# Based on scitools meshgrid +def meshgrid(*xi, **kwargs): + """ + Return coordinate matrices from coordinate vectors. + + Make N-D coordinate arrays for vectorized evaluations of + N-D scalar/vector fields over N-D grids, given + one-dimensional coordinate arrays x1, x2,..., xn. + + .. versionchanged:: 1.9 + 1-D and 0-D cases are allowed. + + Parameters + ---------- + x1, x2,..., xn : array_like + 1-D arrays representing the coordinates of a grid. + indexing : {'xy', 'ij'}, optional + Cartesian ('xy', default) or matrix ('ij') indexing of output. + See Notes for more details. + + .. versionadded:: 1.7.0 + sparse : bool, optional + If True a sparse grid is returned in order to conserve memory. + Default is False. + + .. versionadded:: 1.7.0 + copy : bool, optional + If False, a view into the original arrays are returned in order to + conserve memory. Default is True. Please note that + ``sparse=False, copy=False`` will likely return non-contiguous + arrays. Furthermore, more than one element of a broadcast array + may refer to a single memory location. If you need to write to the + arrays, make copies first. + + .. 
versionadded:: 1.7.0 + + Returns + ------- + X1, X2,..., XN : ndarray + For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , + return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' + or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' + with the elements of `xi` repeated to fill the matrix along + the first dimension for `x1`, the second for `x2` and so on. + + Notes + ----- + This function supports both indexing conventions through the indexing + keyword argument. Giving the string 'ij' returns a meshgrid with + matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. + In the 2-D case with inputs of length M and N, the outputs are of shape + (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case + with inputs of length M, N and P, outputs are of shape (N, M, P) for + 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is + illustrated by the following code snippet:: + + xv, yv = meshgrid(x, y, sparse=False, indexing='ij') + for i in range(nx): + for j in range(ny): + # treat xv[i,j], yv[i,j] + + xv, yv = meshgrid(x, y, sparse=False, indexing='xy') + for i in range(nx): + for j in range(ny): + # treat xv[j,i], yv[j,i] + + In the 1-D and 0-D case, the indexing and sparse keywords have no effect. + + See Also + -------- + index_tricks.mgrid : Construct a multi-dimensional "meshgrid" + using indexing notation. + index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" + using indexing notation. + + Examples + -------- + >>> nx, ny = (3, 2) + >>> x = np.linspace(0, 1, nx) + >>> y = np.linspace(0, 1, ny) + >>> xv, yv = meshgrid(x, y) + >>> xv + array([[ 0. , 0.5, 1. ], + [ 0. , 0.5, 1. ]]) + >>> yv + array([[ 0., 0., 0.], + [ 1., 1., 1.]]) + >>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays + >>> xv + array([[ 0. , 0.5, 1. ]]) + >>> yv + array([[ 0.], + [ 1.]]) + + `meshgrid` is very useful to evaluate functions on a grid. + + >>> x = np.arange(-5, 5, 0.1) + >>> y = np.arange(-5, 5, 0.1) + >>> xx, yy = meshgrid(x, y, sparse=True) + >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) + >>> h = plt.contourf(x,y,z) + + """ + ndim = len(xi) + + copy_ = kwargs.pop('copy', True) + sparse = kwargs.pop('sparse', False) + indexing = kwargs.pop('indexing', 'xy') + + if kwargs: + raise TypeError("meshgrid() got an unexpected keyword argument '%s'" + % (list(kwargs)[0],)) + + if indexing not in ['xy', 'ij']: + raise ValueError( + "Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::]) + for i, x in enumerate(xi)] + + shape = [x.size for x in output] + + if indexing == 'xy' and ndim > 1: + # switch first and second axis + output[0].shape = (1, -1) + (1,)*(ndim - 2) + output[1].shape = (-1, 1) + (1,)*(ndim - 2) + shape[0], shape[1] = shape[1], shape[0] + + if sparse: + if copy_: + return [x.copy() for x in output] + else: + return output + else: + # Return the full N-D matrix (not only the 1-D vector) + if copy_: + mult_fact = np.ones(shape, dtype=int) + return [x * mult_fact for x in output] + else: + return np.broadcast_arrays(*output) + + +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : array_like + Input array. + obj : slice, int or array of ints + Indicate which sub-arrays to remove. 
+ axis : int, optional + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + See Also + -------- + insert : Insert elements into an array. + append : Append elements at the end of an array. + + Notes + ----- + Often it is preferable to use a boolean mask. For example: + + >>> mask = np.ones(len(arr), dtype=bool) + >>> mask[[0,2,4]] = False + >>> result = arr[mask,...] + + Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further + use of `mask`. + + Examples + -------- + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) + >>> np.delete(arr, 1, 0) + array([[ 1, 2, 3, 4], + [ 9, 10, 11, 12]]) + + >>> np.delete(arr, np.s_[::2], 1) + array([[ 2, 4], + [ 6, 8], + [10, 12]]) + >>> np.delete(arr, [1,3,5], None) + array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) + + """ + wrap = None + if type(arr) is not ndarray: + try: + wrap = arr.__array_wrap__ + except AttributeError: + pass + + arr = asarray(arr) + ndim = arr.ndim + if axis is None: + if ndim != 1: + arr = arr.ravel() + ndim = arr.ndim + axis = ndim - 1 + if ndim == 0: + warnings.warn( + "in the future the special handling of scalars will be removed " + "from delete and raise an error", DeprecationWarning) + if wrap: + return wrap(arr) + else: + return arr.copy() + + slobj = [slice(None)]*ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + start, stop, step = obj.indices(N) + xr = range(start, stop, step) + numtodel = len(xr) + + if numtodel <= 0: + if wrap: + return wrap(arr.copy()) + else: + return arr.copy() + + # Invert if step is negative: + if step < 0: + step = -step + start = xr[-1] + stop = xr[0] + 1 + + newshape[axis] -= numtodel + new = empty(newshape, arr.dtype, arr.flags.fnc) + # copy initial chunk + if start == 0: + pass + else: + slobj[axis] = slice(None, start) + new[slobj] = arr[slobj] + # copy end chunck + if stop == N: + pass + else: + slobj[axis] = slice(stop-numtodel, None) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(stop, None) + new[slobj] = arr[slobj2] + # copy middle pieces + if step == 1: + pass + else: # use array indexing. + keep = ones(stop-start, dtype=bool) + keep[:stop-start:step] = False + slobj[axis] = slice(start, stop-numtodel) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(start, stop) + arr = arr[slobj2] + slobj2[axis] = keep + new[slobj] = arr[slobj2] + if wrap: + return wrap(new) + else: + return new + + _obj = obj + obj = np.asarray(obj) + # After removing the special handling of booleans and out of + # bounds values, the conversion to the array can be removed. 
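+    # Illustrative sketch (an assumed example, not executed here): with
+    # arr = np.arange(4) and obj = np.array([True, False, True, False]),
+    # the cast below yields integer indices [1, 0, 1, 0], so elements 0
+    # and 1 are removed and the result is [2, 3]; under the future
+    # boolean-mask semantics warned about below, the True positions
+    # (elements 0 and 2) would be removed instead, giving [1, 3].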
+ if obj.dtype == bool: + warnings.warn( + "in the future insert will treat boolean arrays and array-likes " + "as boolean index instead of casting it to integer", FutureWarning) + obj = obj.astype(intp) + if isinstance(_obj, (int, long, integer)): + # optimization for a single value + obj = obj.item() + if (obj < -N or obj >= N): + raise IndexError( + "index %i is out of bounds for axis %i with " + "size %i" % (obj, axis, N)) + if (obj < 0): + obj += N + newshape[axis] -= 1 + new = empty(newshape, arr.dtype, arr.flags.fnc) + slobj[axis] = slice(None, obj) + new[slobj] = arr[slobj] + slobj[axis] = slice(obj, None) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(obj+1, None) + new[slobj] = arr[slobj2] + else: + if obj.size == 0 and not isinstance(_obj, np.ndarray): + obj = obj.astype(intp) + if not np.can_cast(obj, intp, 'same_kind'): + # obj.size = 1 special case always failed and would just + # give superfluous warnings. + warnings.warn( + "using a non-integer array as obj in delete will result in an " + "error in the future", DeprecationWarning) + obj = obj.astype(intp) + keep = ones(N, dtype=bool) + + # Test if there are out of bound indices, this is deprecated + inside_bounds = (obj < N) & (obj >= -N) + if not inside_bounds.all(): + warnings.warn( + "in the future out of bounds indices will raise an error " + "instead of being ignored by `numpy.delete`.", + DeprecationWarning) + obj = obj[inside_bounds] + positive_indices = obj >= 0 + if not positive_indices.all(): + warnings.warn( + "in the future negative indices will not be ignored by " + "`numpy.delete`.", FutureWarning) + obj = obj[positive_indices] + + keep[obj, ] = False + slobj[axis] = keep + new = arr[slobj] + + if wrap: + return wrap(new) + else: + return new + + +def insert(arr, obj, values, axis=None): + """ + Insert values along the given axis before the given indices. + + Parameters + ---------- + arr : array_like + Input array. + obj : int, slice or sequence of ints + Object that defines the index or indices before which `values` is + inserted. + + .. versionadded:: 1.8.0 + + Support for multiple insertions when `obj` is a single scalar or a + sequence with one element (similar to calling insert multiple + times). + values : array_like + Values to insert into `arr`. If the type of `values` is different + from that of `arr`, `values` is converted to the type of `arr`. + `values` should be shaped so that ``arr[...,obj,...] = values`` + is legal. + axis : int, optional + Axis along which to insert `values`. If `axis` is None then `arr` + is flattened first. + + Returns + ------- + out : ndarray + A copy of `arr` with `values` inserted. Note that `insert` + does not occur in-place: a new array is returned. If + `axis` is None, `out` is a flattened array. + + See Also + -------- + append : Append elements at the end of an array. + concatenate : Join a sequence of arrays together. + delete : Delete elements from an array. + + Notes + ----- + Note that for higher dimensional inserts `obj=0` behaves very different + from `obj=[0]` just like `arr[:,0,:] = values` is different from + `arr[:,[0],:] = values`. 
+ + Examples + -------- + >>> a = np.array([[1, 1], [2, 2], [3, 3]]) + >>> a + array([[1, 1], + [2, 2], + [3, 3]]) + >>> np.insert(a, 1, 5) + array([1, 5, 1, 2, 2, 3, 3]) + >>> np.insert(a, 1, 5, axis=1) + array([[1, 5, 1], + [2, 5, 2], + [3, 5, 3]]) + + Difference between sequence and scalars: + >>> np.insert(a, [1], [[1],[2],[3]], axis=1) + array([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]]) + >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), + ... np.insert(a, [1], [[1],[2],[3]], axis=1)) + True + + >>> b = a.flatten() + >>> b + array([1, 1, 2, 2, 3, 3]) + >>> np.insert(b, [2, 2], [5, 6]) + array([1, 1, 5, 6, 2, 2, 3, 3]) + + >>> np.insert(b, slice(2, 4), [5, 6]) + array([1, 1, 5, 2, 6, 2, 3, 3]) + + >>> np.insert(b, [2, 2], [7.13, False]) # type casting + array([1, 1, 7, 0, 2, 2, 3, 3]) + + >>> x = np.arange(8).reshape(2, 4) + >>> idx = (1, 3) + >>> np.insert(x, idx, 999, axis=1) + array([[ 0, 999, 1, 2, 999, 3], + [ 4, 999, 5, 6, 999, 7]]) + + """ + wrap = None + if type(arr) is not ndarray: + try: + wrap = arr.__array_wrap__ + except AttributeError: + pass + + arr = asarray(arr) + ndim = arr.ndim + if axis is None: + if ndim != 1: + arr = arr.ravel() + ndim = arr.ndim + axis = ndim - 1 + else: + if ndim > 0 and (axis < -ndim or axis >= ndim): + raise IndexError( + "axis %i is out of bounds for an array of " + "dimension %i" % (axis, ndim)) + if (axis < 0): + axis += ndim + if (ndim == 0): + warnings.warn( + "in the future the special handling of scalars will be removed " + "from insert and raise an error", DeprecationWarning) + arr = arr.copy() + arr[...] = values + if wrap: + return wrap(arr) + else: + return arr + slobj = [slice(None)]*ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + # turn it into a range object + indices = arange(*obj.indices(N), **{'dtype': intp}) + else: + # need to copy obj, because indices will be changed in-place + indices = np.array(obj) + if indices.dtype == bool: + # See also delete + warnings.warn( + "in the future insert will treat boolean arrays and " + "array-likes as a boolean index instead of casting it to " + "integer", FutureWarning) + indices = indices.astype(intp) + # Code after warning period: + #if obj.ndim != 1: + # raise ValueError('boolean array argument obj to insert ' + # 'must be one dimensional') + #indices = np.flatnonzero(obj) + elif indices.ndim > 1: + raise ValueError( + "index array argument obj to insert must be one dimensional " + "or scalar") + if indices.size == 1: + index = indices.item() + if index < -N or index > N: + raise IndexError( + "index %i is out of bounds for axis %i with " + "size %i" % (obj, axis, N)) + if (index < 0): + index += N + + # There are some object array corner cases here, but we cannot avoid + # that: + values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) + if indices.ndim == 0: + # broadcasting is very different here, since a[:,0,:] = ... behaves + # very different from a[:,[0],:] = ...! This changes values so that + # it works likes the second case. 
(here a[:,0:1,:]) + values = np.rollaxis(values, 0, (axis % values.ndim) + 1) + numnew = values.shape[axis] + newshape[axis] += numnew + new = empty(newshape, arr.dtype, arr.flags.fnc) + slobj[axis] = slice(None, index) + new[slobj] = arr[slobj] + slobj[axis] = slice(index, index+numnew) + new[slobj] = values + slobj[axis] = slice(index+numnew, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(index, None) + new[slobj] = arr[slobj2] + if wrap: + return wrap(new) + return new + elif indices.size == 0 and not isinstance(obj, np.ndarray): + # Can safely cast the empty list to intp + indices = indices.astype(intp) + + if not np.can_cast(indices, intp, 'same_kind'): + warnings.warn( + "using a non-integer array as obj in insert will result in an " + "error in the future", DeprecationWarning) + indices = indices.astype(intp) + + indices[indices < 0] += N + + numnew = len(indices) + order = indices.argsort(kind='mergesort') # stable sort + indices[order] += np.arange(numnew) + + newshape[axis] += numnew + old_mask = ones(newshape[axis], dtype=bool) + old_mask[indices] = False + + new = empty(newshape, arr.dtype, arr.flags.fnc) + slobj2 = [slice(None)]*ndim + slobj[axis] = indices + slobj2[axis] = old_mask + new[slobj] = values + new[slobj2] = arr + + if wrap: + return wrap(new) + return new + + +def append(arr, values, axis=None): + """ + Append values to the end of an array. + + Parameters + ---------- + arr : array_like + Values are appended to a copy of this array. + values : array_like + These values are appended to a copy of `arr`. It must be of the + correct shape (the same shape as `arr`, excluding `axis`). If + `axis` is not specified, `values` can be any shape and will be + flattened before use. + axis : int, optional + The axis along which `values` are appended. If `axis` is not + given, both `arr` and `values` are flattened before use. + + Returns + ------- + append : ndarray + A copy of `arr` with `values` appended to `axis`. Note that + `append` does not occur in-place: a new array is allocated and + filled. If `axis` is None, `out` is a flattened array. + + See Also + -------- + insert : Insert elements into an array. + delete : Delete elements from an array. + + Examples + -------- + >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + + When `axis` is specified, `values` must have the correct shape. + + >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) + Traceback (most recent call last): + ... + ValueError: arrays must have same number of dimensions + + """ + arr = asanyarray(arr) + if axis is None: + if arr.ndim != 1: + arr = arr.ravel() + values = ravel(values) + axis = arr.ndim-1 + return concatenate((arr, values), axis=axis) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/index_tricks.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/index_tricks.py new file mode 100644 index 0000000000000..98c6b291b41c2 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/index_tricks.py @@ -0,0 +1,869 @@ +from __future__ import division, absolute_import, print_function + +import sys +import math + +import numpy.core.numeric as _nx +from numpy.core.numeric import ( + asarray, ScalarType, array, alltrue, cumprod, arange + ) +from numpy.core.numerictypes import find_common_type + +from . 
import function_base +import numpy.matrixlib as matrix +from .function_base import diff +from numpy.lib._compiled_base import ravel_multi_index, unravel_index +from numpy.lib.stride_tricks import as_strided + +makemat = matrix.matrix + + +__all__ = [ + 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', + 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', + 'diag_indices', 'diag_indices_from' + ] + + +def ix_(*args): + """ + Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. + + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array + ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. + + Parameters + ---------- + args : 1-D sequences + + Returns + ------- + out : tuple of ndarrays + N arrays with N dimensions each, with N the number of input + sequences. Together these arrays form an open mesh. + + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> a = np.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = np.ix_([0,1], [2,4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> ixgrid[0].shape, ixgrid[1].shape + ((2, 1), (1, 2)) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + """ + out = [] + nd = len(args) + baseshape = [1]*nd + for k in range(nd): + new = _nx.asarray(args[k]) + if (new.ndim != 1): + raise ValueError("Cross index must be 1 dimensional") + if issubclass(new.dtype.type, _nx.bool_): + new = new.nonzero()[0] + baseshape[k] = len(new) + new = new.reshape(tuple(baseshape)) + out.append(new) + baseshape[k] = 1 + return tuple(out) + +class nd_grid(object): + """ + Construct a multi-dimensional "meshgrid". + + ``grid = nd_grid()`` creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + If instantiated with an argument of ``sparse=True``, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1. + + Parameters + ---------- + sparse : bool, optional + Whether the grid is sparse or not. Default is False. + + Notes + ----- + Two instances of `nd_grid` are made available in the NumPy namespace, + `mgrid` and `ogrid`:: + + mgrid = nd_grid(sparse=False) + ogrid = nd_grid(sparse=True) + + Users should use these pre-defined instances instead of using `nd_grid` + directly. + + Examples + -------- + >>> mgrid = np.lib.index_tricks.nd_grid() + >>> mgrid[0:5,0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. 
]) + + >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True) + >>> ogrid[0:5,0:5] + [array([[0], + [1], + [2], + [3], + [4]]), array([[0, 1, 2, 3, 4]])] + + """ + + def __init__(self, sparse=False): + self.sparse = sparse + + def __getitem__(self, key): + try: + size = [] + typ = int + for k in range(len(key)): + step = key[k].step + start = key[k].start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, complex): + size.append(int(abs(step))) + typ = float + else: + size.append( + int(math.ceil((key[k].stop - start)/(step*1.0)))) + if (isinstance(step, float) or + isinstance(start, float) or + isinstance(key[k].stop, float)): + typ = float + if self.sparse: + nn = [_nx.arange(_x, dtype=_t) + for _x, _t in zip(size, (typ,)*len(size))] + else: + nn = _nx.indices(size, typ) + for k in range(len(size)): + step = key[k].step + start = key[k].start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, complex): + step = int(abs(step)) + if step != 1: + step = (key[k].stop - start)/float(step-1) + nn[k] = (nn[k]*step+start) + if self.sparse: + slobj = [_nx.newaxis]*len(size) + for k in range(len(size)): + slobj[k] = slice(None, None) + nn[k] = nn[k][slobj] + slobj[k] = _nx.newaxis + return nn + except (IndexError, TypeError): + step = key.step + stop = key.stop + start = key.start + if start is None: + start = 0 + if isinstance(step, complex): + step = abs(step) + length = int(step) + if step != 1: + step = (key.stop-start)/float(step-1) + stop = key.stop + step + return _nx.arange(0, length, 1, float)*step + start + else: + return _nx.arange(start, stop, step) + + def __getslice__(self, i, j): + return _nx.arange(i, j) + + def __len__(self): + return 0 + +mgrid = nd_grid(sparse=False) +ogrid = nd_grid(sparse=True) +mgrid.__doc__ = None # set in numpy.add_newdocs +ogrid.__doc__ = None # set in numpy.add_newdocs + +class AxisConcatenator(object): + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. 
+ + """ + + def _retval(self, res): + if self.matrix: + oldndim = res.ndim + res = makemat(res) + if oldndim == 1 and self.col: + res = res.T + self.axis = self._axis + self.matrix = self._matrix + self.col = 0 + return res + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self._axis = axis + self._matrix = matrix + self.axis = axis + self.matrix = matrix + self.col = 0 + self.trans1d = trans1d + self.ndmin = ndmin + + def __getitem__(self, key): + trans1d = self.trans1d + ndmin = self.ndmin + if isinstance(key, str): + frame = sys._getframe().f_back + mymat = matrix.bmat(key, frame.f_globals, frame.f_locals) + return mymat + if not isinstance(key, tuple): + key = (key,) + objs = [] + scalars = [] + arraytypes = [] + scalartypes = [] + for k in range(len(key)): + scalar = False + if isinstance(key[k], slice): + step = key[k].step + start = key[k].start + stop = key[k].stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, complex): + size = int(abs(step)) + newobj = function_base.linspace(start, stop, num=size) + else: + newobj = _nx.arange(start, stop, step) + if ndmin > 1: + newobj = array(newobj, copy=False, ndmin=ndmin) + if trans1d != -1: + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(key[k], str): + if k != 0: + raise ValueError("special directives must be the " + "first entry.") + key0 = key[0] + if key0 in 'rc': + self.matrix = True + self.col = (key0 == 'c') + continue + if ',' in key0: + vec = key0.split(',') + try: + self.axis, ndmin = \ + [int(x) for x in vec[:2]] + if len(vec) == 3: + trans1d = int(vec[2]) + continue + except: + raise ValueError("unknown special directive") + try: + self.axis = int(key[k]) + continue + except (ValueError, TypeError): + raise ValueError("unknown special directive") + elif type(key[k]) in ScalarType: + newobj = array(key[k], ndmin=ndmin) + scalars.append(k) + scalar = True + scalartypes.append(newobj.dtype) + else: + newobj = key[k] + if ndmin > 1: + tempobj = array(newobj, copy=False, subok=True) + newobj = array(newobj, copy=False, subok=True, + ndmin=ndmin) + if trans1d != -1 and tempobj.ndim < ndmin: + k2 = ndmin-tempobj.ndim + if (trans1d < 0): + trans1d += k2 + 1 + defaxes = list(range(ndmin)) + k1 = trans1d + axes = defaxes[:k1] + defaxes[k2:] + \ + defaxes[k1:k2] + newobj = newobj.transpose(axes) + del tempobj + objs.append(newobj) + if not scalar and isinstance(newobj, _nx.ndarray): + arraytypes.append(newobj.dtype) + + # Esure that scalars won't up-cast unless warranted + final_dtype = find_common_type(arraytypes, scalartypes) + if final_dtype is not None: + for k in scalars: + objs[k] = objs[k].astype(final_dtype) + + res = _nx.concatenate(tuple(objs), axis=self.axis) + return self._retval(res) + + def __getslice__(self, i, j): + res = _nx.arange(i, j) + return self._retval(res) + + def __len__(self): + return 0 + +# separate classes are used here instead of just making r_ = concatentor(0), +# etc. because otherwise we couldn't get the doc string to come out right +# in help(r_) + +class RClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. 
+ + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 + (column) matrix is produced. If the result is 2-D then both provide the + same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. + + Parameters + ---------- + Not a function, so takes no parameters + + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays together. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- + >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] + array([1, 2, 3, 0, 0, 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. + + >>> a = np.array([[0, 1, 2], [3, 4, 5]]) + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, 0) + +r_ = RClass() + +class CClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. 
In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). + + For detailed documentation, see `r_`. + + Examples + -------- + >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + array([[1, 2, 3, 0, 0, 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) + +c_ = CClass() + +class ndenumerate(object): + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + a : ndarray + Input array. + + See Also + -------- + ndindex, flatiter + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> for index, x in np.ndenumerate(a): + ... print index, x + (0, 0) 1 + (0, 1) 2 + (1, 0) 3 + (1, 1) 4 + + """ + + def __init__(self, arr): + self.iter = asarray(arr).flat + + def __next__(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ + return self.iter.coords, next(self.iter) + + def __iter__(self): + return self + + next = __next__ + + +class ndindex(object): + """ + An N-dimensional iterator object to index arrays. + + Given the shape of an array, an `ndindex` instance iterates over + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. + + Parameters + ---------- + `*args` : ints + The size of each dimension of the array. + + See Also + -------- + ndenumerate, flatiter + + Examples + -------- + >>> for index in np.ndindex(3, 2, 1): + ... print index + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + """ + + def __init__(self, *shape): + if len(shape) == 1 and isinstance(shape[0], tuple): + shape = shape[0] + x = as_strided(_nx.zeros(1), shape=shape, + strides=_nx.zeros_like(shape)) + self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], + order='C') + + def __iter__(self): + return self + + def ndincr(self): + """ + Increment the multi-dimensional index by one. + + This method is for backward compatibility only: do not use. + """ + next(self) + + def __next__(self): + """ + Standard iterator method, updates the index and returns the index + tuple. + + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current + iteration. + + """ + next(self._it) + return self._it.multi_index + + next = __next__ + + +# You can do all this with slice() plus a few special objects, +# but there's a lot to remember. This version is simpler because +# it uses the standard array indexing syntax. +# +# Written by Konrad Hinsen +# last revision: 1999-7-23 +# +# Cosmetic changes by T. Oliphant 2001 +# +# + +class IndexExpression(object): + """ + A nicer way to build up index tuples for arrays. + + .. note:: + Use one of the two predefined instances `index_exp` or `s_` + rather than directly using `IndexExpression`. + + For any index combination, including slicing and axis insertion, + ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any + array `a`. However, ``np.index_exp[indices]`` can be used anywhere + in Python code and returns a tuple of slice objects that can be + used in the construction of complex index expressions. + + Parameters + ---------- + maketuple : bool + If True, always returns a tuple. 
+
+    See Also
+    --------
+    index_exp : Predefined instance that always returns a tuple:
+       `index_exp = IndexExpression(maketuple=True)`.
+    s_ : Predefined instance without tuple conversion:
+       `s_ = IndexExpression(maketuple=False)`.
+
+    Notes
+    -----
+    You can do all this with `slice()` plus a few special objects,
+    but there's a lot to remember and this version is simpler because
+    it uses the standard array indexing syntax.
+
+    Examples
+    --------
+    >>> np.s_[2::2]
+    slice(2, None, 2)
+    >>> np.index_exp[2::2]
+    (slice(2, None, 2),)
+
+    >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
+    array([2, 4])
+
+    """
+
+    def __init__(self, maketuple):
+        self.maketuple = maketuple
+
+    def __getitem__(self, item):
+        if self.maketuple and not isinstance(item, tuple):
+            return (item,)
+        else:
+            return item
+
+index_exp = IndexExpression(maketuple=True)
+s_ = IndexExpression(maketuple=False)
+
+# End contribution from Konrad.
+
+
+# The following functions complement those in twodim_base, but are
+# applicable to N-dimensions.
+
+def fill_diagonal(a, val, wrap=False):
+    """Fill the main diagonal of the given array of any dimensionality.
+
+    For an array `a` with ``a.ndim > 2``, the diagonal is the list of
+    locations with indices ``a[i, i, ..., i]`` all identical. This function
+    modifies the input array in-place; it does not return a value.
+
+    Parameters
+    ----------
+    a : array, at least 2-D.
+      Array whose diagonal is to be filled; it gets modified in-place.
+
+    val : scalar
+      Value to be written on the diagonal; its type must be compatible with
+      that of the array `a`.
+
+    wrap : bool
+      For tall matrices in NumPy versions up to 1.6.2, the
+      diagonal "wrapped" after N columns. You can have this behavior
+      with this option. This affects only tall matrices.
+
+    See also
+    --------
+    diag_indices, diag_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    This functionality can be obtained via `diag_indices`, but internally
+    this version uses a much faster implementation that never constructs the
+    indices and uses simple slicing.
+
+    Examples
+    --------
+    >>> a = np.zeros((3, 3), int)
+    >>> np.fill_diagonal(a, 5)
+    >>> a
+    array([[5, 0, 0],
+           [0, 5, 0],
+           [0, 0, 5]])
+
+    The same function can operate on a 4-D array:
+
+    >>> a = np.zeros((3, 3, 3, 3), int)
+    >>> np.fill_diagonal(a, 4)
+
+    We only show a few blocks for clarity:
+
+    >>> a[0, 0]
+    array([[4, 0, 0],
+           [0, 0, 0],
+           [0, 0, 0]])
+    >>> a[1, 1]
+    array([[0, 0, 0],
+           [0, 4, 0],
+           [0, 0, 0]])
+    >>> a[2, 2]
+    array([[0, 0, 0],
+           [0, 0, 0],
+           [0, 0, 4]])
+
+    The `wrap` option affects only tall matrices:
+
+    >>> # tall matrices, no wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [0, 0, 0]])
+
+    >>> # tall matrices, wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [4, 0, 0]])
+
+    >>> # wide matrices
+    >>> a = np.zeros((3, 5), int)
+    >>> np.fill_diagonal(a, 4)
+    >>> a
+    array([[4, 0, 0, 0, 0],
+           [0, 4, 0, 0, 0],
+           [0, 0, 4, 0, 0]])
+
+    """
+    if a.ndim < 2:
+        raise ValueError("array must be at least 2-d")
+    end = None
+    if a.ndim == 2:
+        # Explicit, fast formula for the common case.  For 2-d arrays, we
+        # accept rectangular ones.
+        step = a.shape[1] + 1
+        # This is needed so that tall matrices don't have the diagonal wrap.
+        if not wrap:
+            end = a.shape[1] * a.shape[1]
+    else:
+        # For more than d=2, the strided formula is only valid for arrays with
+        # all dimensions equal, so we check first.
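+        # Worked example of the two step formulas (a sketch): the 2-D branch
+        # above uses step = ncols + 1, so a (3, 4) array gets step = 5 and
+        # flat positions 0, 5, 10 -- exactly a[0,0], a[1,1], a[2,2]; the N-d
+        # formula below gives, for a (3, 3, 3) array, step = 1 + (3 + 9) = 13
+        # and flat positions 0, 13, 26, i.e. a[0,0,0], a[1,1,1], a[2,2,2].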
+ if not alltrue(diff(a.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + step = 1 + (cumprod(a.shape[:-1])).sum() + + # Write the value out into the diagonal. + a.flat[:end:step] = val + + +def diag_indices(n, ndim=2): + """ + Return the indices to access the main diagonal of an array. + + This returns a tuple of indices that can be used to access the main + diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape + (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for + ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` + for ``i = [0..n-1]``. + + Parameters + ---------- + n : int + The size, along each dimension, of the arrays for which the returned + indices can be used. + + ndim : int, optional + The number of dimensions. + + See also + -------- + diag_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Create a set of indices to access the diagonal of a (4, 4) array: + + >>> di = np.diag_indices(4) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> a[di] = 100 + >>> a + array([[100, 1, 2, 3], + [ 4, 100, 6, 7], + [ 8, 9, 100, 11], + [ 12, 13, 14, 100]]) + + Now, we create indices to manipulate a 3-D array: + + >>> d3 = np.diag_indices(2, 3) + >>> d3 + (array([0, 1]), array([0, 1]), array([0, 1])) + + And use it to set the diagonal of an array of zeros to 1: + + >>> a = np.zeros((2, 2, 2), dtype=np.int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + + """ + idx = arange(n) + return (idx,) * ndim + + +def diag_indices_from(arr): + """ + Return the indices to access the main diagonal of an n-dimensional array. + + See `diag_indices` for full details. + + Parameters + ---------- + arr : array, at least 2-D + + See Also + -------- + diag_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not alltrue(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/info.py new file mode 100644 index 0000000000000..3fbbab7695630 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/info.py @@ -0,0 +1,151 @@ +""" +Basic functions used by several sub-packages and +useful to have in the main name-space. 
+ +Type Handling +------------- +================ =================== +iscomplexobj Test for complex object, scalar result +isrealobj Test for real object, scalar result +iscomplex Test for complex elements, array result +isreal Test for real elements, array result +imag Imaginary part +real Real part +real_if_close Turns complex number with tiny imaginary part to real +isneginf Tests for negative infinity, array result +isposinf Tests for positive infinity, array result +isnan Tests for nans, array result +isinf Tests for infinity, array result +isfinite Tests for finite numbers, array result +isscalar True if argument is a scalar +nan_to_num Replaces NaN's with 0 and infinities with large numbers +cast Dictionary of functions to force cast to each type +common_type Determine the minimum common type code for a group + of arrays +mintypecode Return minimal allowed common typecode. +================ =================== + +Index Tricks +------------ +================ =================== +mgrid Method which allows easy construction of N-d + 'mesh-grids' +``r_`` Append and construct arrays: turns slice objects into + ranges and concatenates them, for 2d arrays appends rows. +index_exp Konrad Hinsen's index_expression class instance which + can be useful for building complicated slicing syntax. +================ =================== + +Useful Functions +---------------- +================ =================== +select Extension of where to multiple conditions and choices +extract Extract 1d array from flattened array according to mask +insert Insert 1d array of values into Nd array according to mask +linspace Evenly spaced samples in linear space +logspace Evenly spaced samples in logarithmic space +fix Round x to nearest integer towards zero +mod Modulo mod(x,y) = x % y except keeps sign of y +amax Array maximum along axis +amin Array minimum along axis +ptp Array max-min along axis +cumsum Cumulative sum along axis +prod Product of elements along axis +cumprod Cumluative product along axis +diff Discrete differences along axis +angle Returns angle of complex argument +unwrap Unwrap phase along given axis (1-d algorithm) +sort_complex Sort a complex-array (based on real, then imaginary) +trim_zeros Trim the leading and trailing zeros from 1D array. +vectorize A class that wraps a Python function taking scalar + arguments into a generalized function which can handle + arrays of arguments using the broadcast rules of + numerix Python. +================ =================== + +Shape Manipulation +------------------ +================ =================== +squeeze Return a with length-one dimensions removed. +atleast_1d Force arrays to be > 1D +atleast_2d Force arrays to be > 2D +atleast_3d Force arrays to be > 3D +vstack Stack arrays vertically (row on row) +hstack Stack arrays horizontally (column on column) +column_stack Stack 1D arrays as columns into 2D array +dstack Stack arrays depthwise (along third dimension) +split Divide array into a list of sub-arrays +hsplit Split into columns +vsplit Split into rows +dsplit Split along third dimension +================ =================== + +Matrix (2D Array) Manipulations +------------------------------- +================ =================== +fliplr 2D array with columns flipped +flipud 2D array with rows flipped +rot90 Rotate a 2D array a multiple of 90 degrees +eye Return a 2D array with ones down a given diagonal +diag Construct a 2D array from a vector, or return a given + diagonal from a 2D array. 
+mat Construct a Matrix +bmat Build a Matrix from blocks +================ =================== + +Polynomials +----------- +================ =================== +poly1d A one-dimensional polynomial class +poly Return polynomial coefficients from roots +roots Find roots of polynomial given coefficients +polyint Integrate polynomial +polyder Differentiate polynomial +polyadd Add polynomials +polysub Substract polynomials +polymul Multiply polynomials +polydiv Divide polynomials +polyval Evaluate polynomial at given argument +================ =================== + +Import Tricks +------------- +================ =================== +ppimport Postpone module import until trying to use it +ppimport_attr Postpone module import until trying to use its attribute +ppresolve Import postponed module and return it. +================ =================== + +Machine Arithmetics +------------------- +================ =================== +machar_single Single precision floating point arithmetic parameters +machar_double Double precision floating point arithmetic parameters +================ =================== + +Threading Tricks +---------------- +================ =================== +ParallelExec Execute commands in parallel thread. +================ =================== + +1D Array Set Operations +----------------------- +Set operations for 1D numeric arrays based on sort() function. + +================ =================== +ediff1d Array difference (auxiliary function). +unique Unique elements of an array. +intersect1d Intersection of 1D arrays with unique elements. +setxor1d Set exclusive-or of 1D arrays with unique elements. +in1d Test whether elements in a 1D array are also present in + another array. +union1d Union of 1D arrays with unique elements. +setdiff1d Set difference of 1D arrays with unique elements. +================ =================== + +""" +from __future__ import division, absolute_import, print_function + +depends = ['core', 'testing'] +global_symbols = ['*'] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/nanfunctions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/nanfunctions.py new file mode 100644 index 0000000000000..f5ac35e54e70a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/nanfunctions.py @@ -0,0 +1,1158 @@ +""" +Functions that ignore NaN. + +Functions +--------- + +- `nanmin` -- minimum non-NaN value +- `nanmax` -- maximum non-NaN value +- `nanargmin` -- index of minimum non-NaN value +- `nanargmax` -- index of maximum non-NaN value +- `nansum` -- sum of non-NaN values +- `nanmean` -- mean of non-NaN values +- `nanvar` -- variance of non-NaN values +- `nanstd` -- standard deviation of non-NaN values + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +from numpy.lib.function_base import _ureduce as _ureduce + +__all__ = [ + 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', + 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd' + ] + + +def _replace_nan(a, val): + """ + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Parameters + ---------- + a : array-like + Input array. + val : float + NaN values are set to val before doing the operation. 
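+
+    For instance (a sketch of the behavior implemented below): with
+    ``a = np.array([1., np.nan])`` and ``val = 0``, the returned copy is
+    ``array([ 1.,  0.])`` and the mask is ``[False, True]``; an integer
+    array comes back unchanged with a mask of None.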
+
+    Returns
+    -------
+    y : ndarray
+        If `a` is of inexact type, return a copy of `a` with the NaNs
+        replaced by the fill value, otherwise return `a`.
+    mask : {bool, None}
+        If `a` is of inexact type, return a boolean mask marking locations of
+        NaNs, otherwise return None.
+
+    """
+    is_new = not isinstance(a, np.ndarray)
+    if is_new:
+        a = np.array(a)
+    if not issubclass(a.dtype.type, np.inexact):
+        return a, None
+    if not is_new:
+        # need copy
+        a = np.array(a, subok=True)
+
+    mask = np.isnan(a)
+    np.copyto(a, val, where=mask)
+    return a, mask
+
+
+def _copyto(a, val, mask):
+    """
+    Replace values in `a` with NaN where `mask` is True.  This differs from
+    copyto in that it will deal with the case where `a` is a numpy scalar.
+
+    Parameters
+    ----------
+    a : ndarray or numpy scalar
+        Array or numpy scalar some of whose values are to be replaced
+        by val.
+    val : numpy scalar
+        Value used as a replacement.
+    mask : ndarray, scalar
+        Boolean array. Where True the corresponding element of `a` is
+        replaced by `val`. Broadcasts.
+
+    Returns
+    -------
+    res : ndarray, scalar
+        Array with elements replaced or scalar `val`.
+
+    """
+    if isinstance(a, np.ndarray):
+        np.copyto(a, val, where=mask, casting='unsafe')
+    else:
+        a = a.dtype.type(val)
+    return a
+
+
+def _divide_by_count(a, b, out=None):
+    """
+    Compute a/b ignoring invalid results. If `a` is an array the division
+    is done in place. If `a` is a scalar, then its type is preserved in the
+    output. If `out` is None, then `a` is used instead so that the
+    division is in place. Note that this is only called with `a` an inexact
+    type.
+
+    Parameters
+    ----------
+    a : {ndarray, numpy scalar}
+        Numerator. Expected to be of inexact type but not checked.
+    b : {ndarray, numpy scalar}
+        Denominator.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
+
+    Returns
+    -------
+    ret : {ndarray, numpy scalar}
+        The return value is a/b. If `a` was an ndarray the division is done
+        in place. If `a` is a numpy scalar, the division preserves its type.
+
+    """
+    with np.errstate(invalid='ignore'):
+        if isinstance(a, np.ndarray):
+            if out is None:
+                return np.divide(a, b, out=a, casting='unsafe')
+            else:
+                return np.divide(a, b, out=out, casting='unsafe')
+        else:
+            if out is None:
+                return a.dtype.type(a / b)
+            else:
+                # This is questionable, but currently a numpy scalar can
+                # be output to a zero dimensional array.
+                return np.divide(a, b, out=out, casting='unsafe')
+
+
+def nanmin(a, axis=None, out=None, keepdims=False):
+    """
+    Return minimum of an array or minimum along an axis, ignoring any NaNs.
+    When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
+    NaN is returned for that slice.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose minimum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : int, optional
+        Axis along which the minimum is computed. The default is to compute
+        the minimum of the flattened array.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.  See
+        `doc.ufuncs` for details.
+
+        .. versionadded:: 1.8.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left in the
+        result as dimensions with size one.
With this option, the result + will broadcast correctly against the original `a`. + + .. versionadded:: 1.8.0 + + Returns + ------- + nanmin : ndarray + An array with the same shape as `a`, with the specified axis + removed. If `a` is a 0-d array, or if axis is None, an ndarray + scalar is returned. The same dtype as `a` is returned. + + See Also + -------- + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + amin : + The minimum value of an array along a given axis, propagating any NaNs. + fmin : + Element-wise minimum of two arrays, ignoring any NaNs. + minimum : + Element-wise minimum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amax, fmax, maximum + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.min. + + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmin(a) + 1.0 + >>> np.nanmin(a, axis=0) + array([ 1., 2.]) + >>> np.nanmin(a, axis=1) + array([ 1., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmin([1, 2, np.nan, np.inf]) + 1.0 + >>> np.nanmin([1, 2, np.nan, np.NINF]) + -inf + + """ + if not isinstance(a, np.ndarray) or type(a) is np.ndarray: + # Fast, but not safe for subclasses of ndarray + res = np.fmin.reduce(a, axis=axis, out=out, keepdims=keepdims) + if np.isnan(res).any(): + warnings.warn("All-NaN axis encountered", RuntimeWarning) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, +np.inf) + res = np.amin(a, axis=axis, out=out, keepdims=keepdims) + if mask is None: + return res + + # Check for all-NaN axis + mask = np.all(mask, axis=axis, keepdims=keepdims) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning) + return res + + +def nanmax(a, axis=None, out=None, keepdims=False): + """ + Return the maximum of an array or maximum along an axis, ignoring any + NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is + raised and NaN is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose maximum is desired. If `a` is not an + array, a conversion is attempted. + axis : int, optional + Axis along which the maximum is computed. The default is to compute + the maximum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + `doc.ufuncs` for details. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the original `a`. + + .. versionadded:: 1.8.0 + + Returns + ------- + nanmax : ndarray + An array with the same shape as `a`, with the specified axis removed. + If `a` is a 0-d array, or if axis is None, an ndarray scalar is + returned. The same dtype as `a` is returned. 
+ + See Also + -------- + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. + amax : + The maximum value of an array along a given axis, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amin, fmin, minimum + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.max. + + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmax(a) + 3.0 + >>> np.nanmax(a, axis=0) + array([ 3., 2.]) + >>> np.nanmax(a, axis=1) + array([ 2., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmax([1, 2, np.nan, np.NINF]) + 2.0 + >>> np.nanmax([1, 2, np.nan, np.inf]) + inf + + """ + if not isinstance(a, np.ndarray) or type(a) is np.ndarray: + # Fast, but not safe for subclasses of ndarray + res = np.fmax.reduce(a, axis=axis, out=out, keepdims=keepdims) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, -np.inf) + res = np.amax(a, axis=axis, out=out, keepdims=keepdims) + if mask is None: + return res + + # Check for all-NaN axis + mask = np.all(mask, axis=axis, keepdims=keepdims) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning) + return res + + +def nanargmin(a, axis=None): + """ + Return the indices of the minimum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results + cannot be trusted if a slice contains only NaNs and Infs. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmin, nanargmax + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + 0 + >>> np.nanargmin(a) + 2 + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + a, mask = _replace_nan(a, np.inf) + res = np.argmin(a, axis=axis) + if mask is not None: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + return res + + +def nanargmax(a, axis=None): + """ + Return the indices of the maximum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the + results cannot be trusted if a slice contains only NaNs and -Infs. + + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. 
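+
+        For example (a sketch of the error path described above), an
+        all-NaN input raises:
+
+        >>> np.nanargmax([np.nan, np.nan])
+        Traceback (most recent call last):
+            ...
+        ValueError: All-NaN slice encountered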
+
+    See Also
+    --------
+    argmax, nanargmin
+
+    Examples
+    --------
+    >>> a = np.array([[np.nan, 4], [2, 3]])
+    >>> np.argmax(a)
+    0
+    >>> np.nanargmax(a)
+    1
+    >>> np.nanargmax(a, axis=0)
+    array([1, 0])
+    >>> np.nanargmax(a, axis=1)
+    array([1, 1])
+
+    """
+    a, mask = _replace_nan(a, -np.inf)
+    res = np.argmax(a, axis=axis)
+    if mask is not None:
+        mask = np.all(mask, axis=axis)
+        if np.any(mask):
+            raise ValueError("All-NaN slice encountered")
+    return res
+
+
+def nansum(a, axis=None, dtype=None, out=None, keepdims=0):
+    """
+    Return the sum of array elements over a given axis treating Not a
+    Numbers (NaNs) as zero.
+
+    In Numpy versions <= 1.8 NaN is returned for slices that are all-NaN or
+    empty. In later versions zero is returned.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose sum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : int, optional
+        Axis along which the sum is computed. The default is to compute the
+        sum of the flattened array.
+    dtype : data-type, optional
+        The type of the returned array and of the accumulator in which the
+        elements are summed. By default, the dtype of `a` is used. An
+        exception is when `a` has an integer type with less precision than
+        the platform (u)intp. In that case, the default will be either
+        (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+        bits. For inexact inputs, dtype must be inexact.
+
+        .. versionadded:: 1.8.0
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``. If provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        `doc.ufuncs` for details. The casting of NaN to integer can yield
+        unexpected results.
+
+        .. versionadded:: 1.8.0
+    keepdims : bool, optional
+        If True, the axes which are reduced are left in the result as
+        dimensions with size one. With this option, the result will
+        broadcast correctly against the original `arr`.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    y : ndarray or numpy scalar
+
+    See Also
+    --------
+    numpy.sum : Sum across array propagating NaNs.
+    isnan : Show which elements are NaN.
+    isfinite : Show which elements are not NaN or +/-inf.
+
+    Notes
+    -----
+    If both positive and negative infinity are present, the sum will be Not
+    a Number (NaN).
+
+    Numpy integer arithmetic is modular. If the size of a sum exceeds the
+    size of an integer accumulator, its value will wrap around and the
+    result will be incorrect. Specifying ``dtype=double`` can alleviate
+    that problem.
+
+    Examples
+    --------
+    >>> np.nansum(1)
+    1
+    >>> np.nansum([1])
+    1
+    >>> np.nansum([1, np.nan])
+    1.0
+    >>> a = np.array([[1, 1], [1, np.nan]])
+    >>> np.nansum(a)
+    3.0
+    >>> np.nansum(a, axis=0)
+    array([ 2.,  1.])
+    >>> np.nansum([1, np.nan, np.inf])
+    inf
+    >>> np.nansum([1, np.nan, np.NINF])
+    -inf
+    >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+    nan
+
+    """
+    a, mask = _replace_nan(a, 0)
+    return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+
+
+def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
+    """
+    Compute the arithmetic mean along the specified axis, ignoring NaNs.
+
+    Returns the average of the array elements. The average is taken over
+    the flattened array by default, otherwise over the specified axis.
+    `float64` intermediate and return values are used for integer inputs.
+
+    For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
+
+    ..
versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : int, optional + Axis along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for inexact inputs, it is the same as the input + dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + `doc.ufuncs` for details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the original `arr`. + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. Nan is + returned for slices that contain only NaNs. + + See Also + -------- + average : Weighted average + mean : Arithmetic mean taken while not ignoring NaNs + var, nanvar + + Notes + ----- + The arithmetic mean is the sum of the non-NaN elements along the axis + divided by the number of non-NaN elements. + + Note that for floating-point input, the mean is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32`. Specifying a + higher-precision accumulator using the `dtype` keyword can alleviate + this issue. + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanmean(a) + 2.6666666666666665 + >>> np.nanmean(a, axis=0) + array([ 2., 4.]) + >>> np.nanmean(a, axis=1) + array([ 1., 3.5]) + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + # The warning context speeds things up. + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims) + tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + avg = _divide_by_count(tot, cnt, out=out) + + isbad = (cnt == 0) + if isbad.any(): + warnings.warn("Mean of empty slice", RuntimeWarning) + # NaN is the only possible bad value, so no further + # action is needed to handle bad results. + return avg + + +def _nanmedian1d(arr1d, overwrite_input=False): + """ + Private function for rank 1 arrays. Compute the median ignoring NaNs. 
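+    NaNs are compacted to the end of a working copy of the data and
+    sliced away before np.median is applied to the remaining values.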
+ See nanmedian for parameter usage + """ + c = np.isnan(arr1d) + s = np.where(c)[0] + if s.size == arr1d.size: + warnings.warn("All-NaN slice encountered", RuntimeWarning) + return np.nan + elif s.size == 0: + return np.median(arr1d, overwrite_input=overwrite_input) + else: + if overwrite_input: + x = arr1d + else: + x = arr1d.copy() + # select non-nans at end of array + enonan = arr1d[-s.size:][~c[-s.size:]] + # fill nans in beginning of array with non-nans of end + x[s[:enonan.size]] = enonan + # slice nans away + return np.median(x[:-s.size], overwrite_input=True) + + +def _nanmedian(a, axis=None, out=None, overwrite_input=False): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanmedian for parameter usage + + """ + if axis is None or a.ndim == 1: + part = a.ravel() + if out is None: + return _nanmedian1d(part, overwrite_input) + else: + out[...] = _nanmedian1d(part, overwrite_input) + return out + else: + # for small medians use sort + indexing which is still faster than + # apply_along_axis + if a.shape[axis] < 400: + return _nanmedian_small(a, axis, out, overwrite_input) + result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) + if out is not None: + out[...] = result + return result + +def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): + """ + sort + indexing median, faster for small medians along multiple dimensions + due to the high overhead of apply_along_axis + see nanmedian for parameter usage + """ + a = np.ma.masked_array(a, np.isnan(a)) + m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) + for i in range(np.count_nonzero(m.mask.ravel())): + warnings.warn("All-NaN slice encountered", RuntimeWarning) + if out is not None: + out[...] = m.filled(np.nan) + return out + return m.filled(np.nan) + +def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis, while ignoring NaNs. + + Returns the median of the array elements. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int, optional + Axis along which the medians are computed. The default (axis=None) + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True and the input + is not already an ndarray, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers, or + floats of smaller precision than 64, then the output data-type is + float64. 
Otherwise, the output data-type is the same as that of the
+        input.
+
+    See Also
+    --------
+    mean, median, percentile
+
+    Notes
+    -----
+    Given a vector V of length N, the median of V is the middle value of
+    a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
+    odd. When N is even, it is the average of the two middle values of
+    ``V_sorted``.
+
+    Examples
+    --------
+    >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
+    >>> a[0, 1] = np.nan
+    >>> a
+    array([[ 10.,  nan,   4.],
+           [  3.,   2.,   1.]])
+    >>> np.median(a)
+    nan
+    >>> np.nanmedian(a)
+    3.0
+    >>> np.nanmedian(a, axis=0)
+    array([ 6.5,  2. ,  2.5])
+    >>> np.nanmedian(a, axis=1)
+    array([ 7.,  2.])
+    >>> b = a.copy()
+    >>> np.nanmedian(b, axis=1, overwrite_input=True)
+    array([ 7.,  2.])
+    >>> assert not np.all(a==b)
+    >>> b = a.copy()
+    >>> np.nanmedian(b, axis=None, overwrite_input=True)
+    3.0
+    >>> assert not np.all(a==b)
+
+    """
+    a = np.asanyarray(a)
+    # apply_along_axis in _nanmedian doesn't handle empty arrays well,
+    # so deal with them upfront
+    if a.size == 0:
+        return np.nanmean(a, axis, out=out, keepdims=keepdims)
+
+    r, k = _ureduce(a, func=_nanmedian, axis=axis, out=out,
+                    overwrite_input=overwrite_input)
+    if keepdims:
+        return r.reshape(k)
+    else:
+        return r
+
+
+def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
+                  interpolation='linear', keepdims=False):
+    """
+    Compute the qth percentile of the data along the specified axis, while
+    ignoring NaN values.
+
+    Returns the qth percentile of the array elements.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    q : float in range of [0,100] (or sequence of floats)
+        Percentile to compute, which must be between 0 and 100 inclusive.
+    axis : int or sequence of int, optional
+        Axis along which the percentiles are computed. The default (None)
+        is to compute the percentiles along a flattened version of the array.
+        A sequence of axes is supported since version 1.9.0.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : bool, optional
+        If True, then allow use of memory of input array `a` for
+        calculations. The input array will be modified by the call to
+        percentile. This will save memory when you do not need to preserve
+        the contents of the input array. In this case you should not make
+        any assumptions about the content of the passed in array `a` after
+        this function completes -- treat it as undefined. Default is False.
+        Note that, if the `a` input is not already an array this parameter
+        will have no effect; `a` will be converted to an array internally
+        regardless of the value of this parameter.
+    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+        This optional parameter specifies the interpolation method to use,
+        when the desired quantile lies between two data points `i` and `j`:
+            * linear: `i + (j - i) * fraction`, where `fraction` is the
+              fractional part of the index surrounded by `i` and `j`.
+            * lower: `i`.
+            * higher: `j`.
+            * nearest: `i` or `j` whichever is nearest.
+            * midpoint: (`i` + `j`) / 2.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `arr`.
+
+    Returns
+    -------
+    nanpercentile : scalar or ndarray
+        If a single percentile `q` is given and axis=None a scalar is
+        returned. If multiple percentiles `q` are given an array holding
+        the result is returned. The results are listed in the first axis.
+        (If `out` is specified, that array is returned instead.) If the
+        input contains integers, or floats of smaller precision than 64,
+        then the output data-type is float64. Otherwise, the output
+        data-type is the same as that of the input.
+
+    See Also
+    --------
+    nanmean, nanmedian, percentile, median, mean
+
+    Notes
+    -----
+    Given a vector V of length N, the q-th percentile of V is the q-th ranked
+    value in a sorted copy of V. The values and distances of the two
+    nearest neighbors as well as the `interpolation` parameter will
+    determine the percentile if the normalized ranking does not match q
+    exactly. This function is the same as the median if ``q=50``, the same
+    as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
+
+    Examples
+    --------
+    >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+    >>> a[0][1] = np.nan
+    >>> a
+    array([[ 10.,  nan,   4.],
+           [  3.,   2.,   1.]])
+    >>> np.percentile(a, 50)
+    nan
+    >>> np.nanpercentile(a, 50)
+    3.0
+    >>> np.nanpercentile(a, 50, axis=0)
+    array([ 6.5,  2. ,  2.5])
+    >>> np.nanpercentile(a, 50, axis=1)
+    array([ 7.,  2.])
+    >>> m = np.nanpercentile(a, 50, axis=0)
+    >>> out = np.zeros_like(m)
+    >>> np.nanpercentile(a, 50, axis=0, out=m)
+    array([ 6.5,  2. ,  2.5])
+    >>> m
+    array([ 6.5,  2. ,  2.5])
+    >>> b = a.copy()
+    >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
+    array([ 7.,  2.])
+    >>> assert not np.all(a==b)
+    >>> b = a.copy()
+    >>> np.nanpercentile(b, 50, axis=None, overwrite_input=True)
+    3.0
+
+    """
+
+    a = np.asanyarray(a)
+    q = np.asanyarray(q)
+    # apply_along_axis in _nanpercentile doesn't handle empty arrays well,
+    # so deal with them upfront
+    if a.size == 0:
+        return np.nanmean(a, axis, out=out, keepdims=keepdims)
+
+    r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out,
+                    overwrite_input=overwrite_input,
+                    interpolation=interpolation)
+    if keepdims:
+        if q.ndim == 0:
+            return r.reshape(k)
+        else:
+            return r.reshape([len(q)] + k)
+    else:
+        return r
+
+
+def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
+                   interpolation='linear', keepdims=False):
+    """
+    Private function that doesn't support extended axis or keepdims.
+    These methods are extended to this function using _ureduce
+    See nanpercentile for parameter usage
+
+    """
+    if axis is None:
+        part = a.ravel()
+        result = _nanpercentile1d(part, q, overwrite_input, interpolation)
+    else:
+        result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
+                                     overwrite_input, interpolation)
+
+    if out is not None:
+        out[...] = result
+    return result
+
+
+def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
+    """
+    Private function for rank 1 arrays. Compute percentile ignoring NaNs.
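+    As in _nanmedian1d, NaNs are compacted to the end of a working copy
+    and sliced away before np.percentile is applied to the rest.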
+ See nanpercentile for parameter usage + + """ + c = np.isnan(arr1d) + s = np.where(c)[0] + if s.size == arr1d.size: + warnings.warn("All-NaN slice encountered", RuntimeWarning) + return np.nan + elif s.size == 0: + return np.percentile(arr1d, q, overwrite_input=overwrite_input, + interpolation=interpolation) + else: + if overwrite_input: + x = arr1d + else: + x = arr1d.copy() + # select non-nans at end of array + enonan = arr1d[-s.size:][~c[-s.size:]] + # fill nans in beginning of array with non-nans of end + x[s[:enonan.size]] = enonan + # slice nans away + return np.percentile(x[:-s.size], q, overwrite_input=True, + interpolation=interpolation) + + +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + Returns the variance of the array elements, a measure of the spread of + a distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : int, optional + Axis along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float32`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : int, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + variance : ndarray, see dtype parameter above + If `out` is None, return a new array containing the variance, + otherwise return a reference to the output array. If ddof is >= the + number of non-NaN elements in a slice or the slice contains only + NaNs, then the result for that slice is NaN. + + See Also + -------- + std : Standard deviation + mean : Average + var : Variance while not ignoring NaNs + nanstd, nanmean + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite + population. ``ddof=0`` provides a maximum likelihood estimate of the + variance for normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. 
Depending on the input data, this can cause
+    the results to be inaccurate, especially for `float32` (see example
+    below). Specifying a higher-accuracy accumulator using the ``dtype``
+    keyword can alleviate this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1, np.nan], [3, 4]])
+    >>> np.nanvar(a)
+    1.5555555555555554
+    >>> np.nanvar(a, axis=0)
+    array([ 1.,  0.])
+    >>> np.nanvar(a, axis=1)
+    array([ 0.,  0.25])
+
+    """
+    arr, mask = _replace_nan(a, 0)
+    if mask is None:
+        return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
+                      keepdims=keepdims)
+
+    if dtype is not None:
+        dtype = np.dtype(dtype)
+    if dtype is not None and not issubclass(dtype.type, np.inexact):
+        raise TypeError("If a is inexact, then dtype must be inexact")
+    if out is not None and not issubclass(out.dtype.type, np.inexact):
+        raise TypeError("If a is inexact, then out must be inexact")
+
+    with warnings.catch_warnings():
+        warnings.simplefilter('ignore')
+
+        # Compute mean
+        cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=True)
+        avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=True)
+        avg = _divide_by_count(avg, cnt)
+
+        # Compute squared deviation from mean.
+        arr -= avg
+        arr = _copyto(arr, 0, mask)
+        if issubclass(arr.dtype.type, np.complexfloating):
+            sqr = np.multiply(arr, arr.conj(), out=arr).real
+        else:
+            sqr = np.multiply(arr, arr, out=arr)
+
+        # Compute variance.
+        var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+        if var.ndim < cnt.ndim:
+            # Subclasses of ndarray may ignore keepdims, so check here.
+            cnt = cnt.squeeze(axis)
+        dof = cnt - ddof
+        var = _divide_by_count(var, dof)
+
+    isbad = (dof <= 0)
+    if np.any(isbad):
+        warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning)
+        # NaN, inf, or negative numbers are all possible bad
+        # values, so explicitly replace them with NaN.
+        var = _copyto(var, np.nan, isbad)
+    return var
+
+
+def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+    """
+    Compute the standard deviation along the specified axis, while
+    ignoring NaNs.
+
+    Returns the standard deviation, a measure of the spread of a
+    distribution, of the non-NaN array elements. The standard deviation is
+    computed for the flattened array by default, otherwise over the
+    specified axis.
+
+    For all-NaN slices or slices with zero degrees of freedom, NaN is
+    returned and a `RuntimeWarning` is raised.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    a : array_like
+        Calculate the standard deviation of the non-NaN values.
+    axis : int, optional
+        Axis along which the standard deviation is computed. The default is
+        to compute the standard deviation of the flattened array.
+    dtype : dtype, optional
+        Type to use in computing the standard deviation. For arrays of
+        integer type the default is float64, for arrays of float types it
+        is the same as the array type.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output but the type (of the
+        calculated values) will be cast if necessary.
+    ddof : int, optional
+        "Delta Degrees of Freedom": the divisor used in calculations
+        is ``N - ddof``, where ``N`` represents the number of non-NaN
+        elements. By default `ddof` is zero.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `arr`.
+ + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard + deviation, otherwise return a reference to the output array. If + ddof is >= the number of non-NaN elements in a slice or the slice + contains only NaNs, then the result for that slice is NaN. + + See Also + -------- + var, mean, std + nanvar, nanmean + numpy.doc.ufuncs : Section "Output arguments" + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is + specified, the divisor ``N - ddof`` is used instead. In standard + statistical practice, ``ddof=1`` provides an unbiased estimator of the + variance of the infinite population. ``ddof=0`` provides a maximum + likelihood estimate of the variance for normally distributed variables. + The standard deviation computed in this function is the square root of + the estimated variance, so even with ``ddof=1``, it will not be an + unbiased estimate of the standard deviation per se. + + Note that, for complex numbers, `std` takes the absolute value before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example + below). Specifying a higher-accuracy accumulator using the `dtype` + keyword can alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanstd(a) + 1.247219128924647 + >>> np.nanstd(a, axis=0) + array([ 1., 0.]) + >>> np.nanstd(a, axis=1) + array([ 0., 0.5]) + + """ + var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims) + if isinstance(var, np.ndarray): + std = np.sqrt(var, out=var) + else: + std = var.dtype.type(np.sqrt(var)) + return std diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/npyio.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/npyio.py new file mode 100644 index 0000000000000..138b75510906f --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/npyio.py @@ -0,0 +1,1912 @@ +from __future__ import division, absolute_import, print_function + +import sys +import os +import re +import itertools +import warnings +import weakref +from operator import itemgetter + +import numpy as np +from . import format +from ._datasource import DataSource +from ._compiled_base import packbits, unpackbits +from ._iotools import ( + LineSplitter, NameValidator, StringConverter, ConverterError, + ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields, + flatten_dtype, easy_dtype, _bytes_to_name + ) + +from numpy.compat import ( + asbytes, asstr, asbytes_nested, bytes, basestring, unicode + ) + +if sys.version_info[0] >= 3: + import pickle +else: + import cPickle as pickle + from future_builtins import map + +loads = pickle.loads + +__all__ = [ + 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', + 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', + 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' + ] + + +def seek_gzip_factory(f): + """Use this factory to produce the class so that we can do a lazy + import on gzip. 
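+    The class wraps gzip.GzipFile with a ``seek`` that supports forward
+    seeks from the current offset and emulates backward seeks by
+    rewinding and re-reading, plus a matching ``tell``.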
+ + """ + import gzip + + class GzipFile(gzip.GzipFile): + + def seek(self, offset, whence=0): + # figure out new position (we can only seek forwards) + if whence == 1: + offset = self.offset + offset + + if whence not in [0, 1]: + raise IOError("Illegal argument") + + if offset < self.offset: + # for negative seek, rewind and do positive seek + self.rewind() + count = offset - self.offset + for i in range(count // 1024): + self.read(1024) + self.read(count % 1024) + + def tell(self): + return self.offset + + if isinstance(f, str): + f = GzipFile(f) + elif isinstance(f, gzip.GzipFile): + # cast to our GzipFile if its already a gzip.GzipFile + + try: + name = f.name + except AttributeError: + # Backward compatibility for <= 2.5 + name = f.filename + mode = f.mode + + f = GzipFile(fileobj=f.fileobj, filename=name) + f.mode = mode + + return f + + +class BagObj(object): + """ + BagObj(obj) + + Convert attribute look-ups to getitems on the object passed in. + + Parameters + ---------- + obj : class instance + Object on which attribute look-up is performed. + + Examples + -------- + >>> from numpy.lib.npyio import BagObj as BO + >>> class BagDemo(object): + ... def __getitem__(self, key): # An instance of BagObj(BagDemo) + ... # will call this method when any + ... # attribute look-up is required + ... result = "Doesn't matter what you want, " + ... return result + "you're gonna get this" + ... + >>> demo_obj = BagDemo() + >>> bagobj = BO(demo_obj) + >>> bagobj.hello_there + "Doesn't matter what you want, you're gonna get this" + >>> bagobj.I_can_be_anything + "Doesn't matter what you want, you're gonna get this" + + """ + + def __init__(self, obj): + # Use weakref to make NpzFile objects collectable by refcount + self._obj = weakref.proxy(obj) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, '_obj')[key] + except KeyError: + raise AttributeError(key) + + +def zipfile_factory(*args, **kwargs): + import zipfile + kwargs['allowZip64'] = True + return zipfile.ZipFile(*args, **kwargs) + + +class NpzFile(object): + """ + NpzFile(fid) + + A dictionary-like object with lazy-loading of files in the zipped + archive provided on construction. + + `NpzFile` is used to load files in the NumPy ``.npz`` data archive + format. It assumes that files in the archive have a ``.npy`` extension, + other files are ignored. + + The arrays and file strings are lazily loaded on either + getitem access using ``obj['key']`` or attribute lookup using + ``obj.f.key``. A list of all files (without ``.npy`` extensions) can + be obtained with ``obj.files`` and the ZipFile object itself using + ``obj.zip``. + + Attributes + ---------- + files : list of str + List of all files in the archive with a ``.npy`` extension. + zip : ZipFile instance + The ZipFile object initialized with the zipped archive. + f : BagObj instance + An object on which attribute can be performed as an alternative + to getitem access on the `NpzFile` instance itself. + + Parameters + ---------- + fid : file or str + The zipped archive to open. This is either a file-like object + or a string containing the path to the archive. + own_fid : bool, optional + Whether NpzFile should close the file handle. + Requires that `fid` is a file-like object. 
+ + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + >>> np.savez(outfile, x=x, y=y) + >>> outfile.seek(0) + + >>> npz = np.load(outfile) + >>> isinstance(npz, np.lib.io.NpzFile) + True + >>> npz.files + ['y', 'x'] + >>> npz['x'] # getitem access + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> npz.f.x # attribute lookup + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + + def __init__(self, fid, own_fid=False): + # Import is postponed to here since zipfile depends on gzip, an + # optional component of the so-called standard library. + _zip = zipfile_factory(fid) + self._files = _zip.namelist() + self.files = [] + for x in self._files: + if x.endswith('.npy'): + self.files.append(x[:-4]) + else: + self.files.append(x) + self.zip = _zip + self.f = BagObj(self) + if own_fid: + self.fid = fid + else: + self.fid = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + """ + Close the file. + + """ + if self.zip is not None: + self.zip.close() + self.zip = None + if self.fid is not None: + self.fid.close() + self.fid = None + self.f = None # break reference cycle + + def __del__(self): + self.close() + + def __getitem__(self, key): + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + member = 0 + if key in self._files: + member = 1 + elif key in self.files: + member = 1 + key += '.npy' + if member: + bytes = self.zip.open(key) + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.close() + if magic == format.MAGIC_PREFIX: + bytes = self.zip.open(key) + return format.read_array(bytes) + else: + return self.zip.read(key) + else: + raise KeyError("%s is not a file in the archive" % key) + + def __iter__(self): + return iter(self.files) + + def items(self): + """ + Return a list of tuples, with each tuple (filename, array in file). + + """ + return [(f, self[f]) for f in self.files] + + def iteritems(self): + """Generator that returns tuples (filename, array in file).""" + for f in self.files: + yield (f, self[f]) + + def keys(self): + """Return files in the archive with a ``.npy`` extension.""" + return self.files + + def iterkeys(self): + """Return an iterator over the files in the archive.""" + return self.__iter__() + + def __contains__(self, key): + return self.files.__contains__(key) + + +def load(file, mmap_mode=None): + """ + Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. + + Parameters + ---------- + file : file-like object or string + The file to read. File-like objects must support the + ``seek()`` and ``read()`` methods. Pickled files require that the + file-like object support the ``readline()`` method as well. + mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, then memory-map the file, using the given mode (see + `numpy.memmap` for a detailed description of the modes). A + memory-mapped array is kept on disk. However, it can be accessed + and sliced like any ndarray. Memory mapping is especially useful + for accessing small fragments of large files without reading the + entire file into memory. + + Returns + ------- + result : array, tuple, dict, etc. + Data stored in the file. 
For ``.npz`` files, the returned instance + of NpzFile class must be closed to avoid leaking file descriptors. + + Raises + ------ + IOError + If the input file does not exist or cannot be read. + + See Also + -------- + save, savez, savez_compressed, loadtxt + memmap : Create a memory-map to an array stored in a file on disk. + + Notes + ----- + - If the file contains pickle data, then whatever object is stored + in the pickle is returned. + - If the file is a ``.npy`` file, then a single array is returned. + - If the file is a ``.npz`` file, then a dictionary-like object is + returned, containing ``{filename: array}`` key-value pairs, one for + each file in the archive. + - If the file is a ``.npz`` file, the returned value supports the + context manager protocol in a similar fashion to the open function:: + + with load('foo.npz') as data: + a = data['a'] + + The underlying file descriptor is closed when exiting the 'with' + block. + + Examples + -------- + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) + >>> np.load('/tmp/123.npy') + array([[1, 2, 3], + [4, 5, 6]]) + + Store compressed data to disk, and load it again: + + >>> a=np.array([[1, 2, 3], [4, 5, 6]]) + >>> b=np.array([1, 2]) + >>> np.savez('/tmp/123.npz', a=a, b=b) + >>> data = np.load('/tmp/123.npz') + >>> data['a'] + array([[1, 2, 3], + [4, 5, 6]]) + >>> data['b'] + array([1, 2]) + >>> data.close() + + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + + """ + import gzip + + own_fid = False + if isinstance(file, basestring): + fid = open(file, "rb") + own_fid = True + elif isinstance(file, gzip.GzipFile): + fid = seek_gzip_factory(file) + else: + fid = file + + try: + # Code to distinguish from NumPy binary files and pickles. + _ZIP_PREFIX = asbytes('PK\x03\x04') + N = len(format.MAGIC_PREFIX) + magic = fid.read(N) + fid.seek(-N, 1) # back-up + if magic.startswith(_ZIP_PREFIX): + # zip-file (assume .npz) + # Transfer file ownership to NpzFile + tmp = own_fid + own_fid = False + return NpzFile(fid, own_fid=tmp) + elif magic == format.MAGIC_PREFIX: + # .npy file + if mmap_mode: + return format.open_memmap(file, mode=mmap_mode) + else: + return format.read_array(fid) + else: + # Try a pickle + try: + return pickle.load(fid) + except: + raise IOError( + "Failed to interpret file %s as a pickle" % repr(file)) + finally: + if own_fid: + fid.close() + + +def save(file, arr): + """ + Save an array to a binary file in NumPy ``.npy`` format. + + Parameters + ---------- + file : file or str + File or filename to which the data is saved. If file is a file-object, + then the filename is unchanged. If file is a string, a ``.npy`` + extension will be appended to the file name if it does not already + have one. + arr : array_like + Array data to be saved. + + See Also + -------- + savez : Save several arrays into a ``.npz`` archive + savetxt, load + + Notes + ----- + For a description of the ``.npy`` format, see `format`. 
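+    Only file handles opened here from a filename string are closed on
+    exit; a file object passed in as `file` is left open.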
+ + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + own_fid = False + if isinstance(file, basestring): + if not file.endswith('.npy'): + file = file + '.npy' + fid = open(file, "wb") + own_fid = True + else: + fid = file + + try: + arr = np.asanyarray(arr) + format.write_array(fid, arr) + finally: + if own_fid: + fid.close() + + +def savez(file, *args, **kwds): + """ + Save several arrays into a single file in uncompressed ``.npz`` format. + + If arguments are passed in with no keywords, the corresponding variable + names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword + arguments are given, the corresponding variable names, in the ``.npz`` + file will match the keyword names. + + Parameters + ---------- + file : str or file + Either the file name (string) or an open file (file-like object) + where the data will be saved. If file is a string, the ``.npz`` + extension will be appended to the file name if it is not already there. + args : Arguments, optional + Arrays to save to the file. Since it is not possible for Python to + know the names of the arrays outside `savez`, the arrays will be saved + with names "arr_0", "arr_1", and so on. These arguments can be any + expression. + kwds : Keyword arguments, optional + Arrays to save to the file. Arrays will be saved in the file with the + keyword names. + + Returns + ------- + None + + See Also + -------- + save : Save a single array to a binary file in NumPy format. + savetxt : Save an array to a file as plain text. + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is not compressed and each file + in the archive contains one variable in ``.npy`` format. For a + description of the ``.npy`` format, see `format`. + + When opening the saved ``.npz`` file with `load` a `NpzFile` object is + returned. This is a dictionary-like object which can be queried for + its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + + Using `savez` with \\*args, the arrays are saved with default names. + + >>> np.savez(outfile, x, y) + >>> outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['arr_1', 'arr_0'] + >>> npzfile['arr_0'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using `savez` with \\**kwds, the arrays are saved with the keyword names. + + >>> outfile = TemporaryFile() + >>> np.savez(outfile, x=x, y=y) + >>> outfile.seek(0) + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['y', 'x'] + >>> npzfile['x'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + _savez(file, args, kwds, False) + + +def savez_compressed(file, *args, **kwds): + """ + Save several arrays into a single file in compressed ``.npz`` format. + + If keyword arguments are given, then filenames are taken from the keywords. + If arguments are passed in with no keywords, then stored file names are + arr_0, arr_1, etc. + + Parameters + ---------- + file : str + File name of ``.npz`` file. + args : Arguments + Function arguments. 
+ kwds : Keyword arguments + Keywords. + + See Also + -------- + numpy.savez : Save several arrays into an uncompressed ``.npz`` file format + numpy.load : Load the files created by savez_compressed. + + """ + _savez(file, args, kwds, True) + + +def _savez(file, args, kwds, compress): + # Import is postponed to here since zipfile depends on gzip, an optional + # component of the so-called standard library. + import zipfile + # Import deferred for startup time improvement + import tempfile + + if isinstance(file, basestring): + if not file.endswith('.npz'): + file = file + '.npz' + + namedict = kwds + for i, val in enumerate(args): + key = 'arr_%d' % i + if key in namedict.keys(): + raise ValueError( + "Cannot use un-named variables and keyword %s" % key) + namedict[key] = val + + if compress: + compression = zipfile.ZIP_DEFLATED + else: + compression = zipfile.ZIP_STORED + + zipf = zipfile_factory(file, mode="w", compression=compression) + + # Stage arrays in a temporary file on disk, before writing to zip. + fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy') + os.close(fd) + try: + for key, val in namedict.items(): + fname = key + '.npy' + fid = open(tmpfile, 'wb') + try: + format.write_array(fid, np.asanyarray(val)) + fid.close() + fid = None + zipf.write(tmpfile, arcname=fname) + finally: + if fid: + fid.close() + finally: + os.remove(tmpfile) + + zipf.close() + + +def _getconv(dtype): + """ Find the correct dtype converter. Adapted from matplotlib """ + typ = dtype.type + if issubclass(typ, np.bool_): + return lambda x: bool(int(x)) + if issubclass(typ, np.uint64): + return np.uint64 + if issubclass(typ, np.int64): + return np.int64 + if issubclass(typ, np.integer): + return lambda x: int(float(x)) + elif issubclass(typ, np.floating): + return float + elif issubclass(typ, np.complex): + return complex + elif issubclass(typ, np.bytes_): + return bytes + else: + return str + + +def loadtxt(fname, dtype=float, comments='#', delimiter=None, + converters=None, skiprows=0, usecols=None, unpack=False, + ndmin=0): + """ + Load data from a text file. + + Each row in the text file must have the same number of values. + + Parameters + ---------- + fname : file or str + File, filename, or generator to read. If the filename extension is + ``.gz`` or ``.bz2``, the file is first decompressed. Note that + generators should return byte strings for Python 3k. + dtype : data-type, optional + Data-type of the resulting array; default: float. If this is a + record data-type, the resulting array will be 1-dimensional, and + each row will be interpreted as an element of the array. In this + case, the number of columns used must match the number of fields in + the data-type. + comments : str, optional + The character used to indicate the start of a comment; + default: '#'. + delimiter : str, optional + The string used to separate values. By default, this is any + whitespace. + converters : dict, optional + A dictionary mapping column number to a function that will convert + that column to a float. E.g., if column 0 is a date string: + ``converters = {0: datestr2num}``. Converters can also be used to + provide a default value for missing data (but see also `genfromtxt`): + ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. + skiprows : int, optional + Skip the first `skiprows` lines; default: 0. + usecols : sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. 
The default, None, results in all columns being read.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = loadtxt(...)``. When used with a record
+        data-type, arrays are returned for each field. Default is False.
+    ndmin : int, optional
+        The returned array will have at least `ndmin` dimensions.
+        Otherwise mono-dimensional axes will be squeezed.
+        Legal values: 0 (default), 1 or 2.
+
+        .. versionadded:: 1.6.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file.
+
+    See Also
+    --------
+    load, fromstring, fromregex
+    genfromtxt : Load data with missing values handled as specified.
+    scipy.io.loadmat : reads MATLAB data files
+
+    Notes
+    -----
+    This function aims to be a fast reader for simply formatted files. The
+    `genfromtxt` function provides more sophisticated handling of, e.g.,
+    lines with missing values.
+
+    Examples
+    --------
+    >>> from StringIO import StringIO   # StringIO behaves like a file object
+    >>> c = StringIO("0 1\\n2 3")
+    >>> np.loadtxt(c)
+    array([[ 0.,  1.],
+           [ 2.,  3.]])
+
+    >>> d = StringIO("M 21 72\\nF 35 58")
+    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+    ...                      'formats': ('S1', 'i4', 'f4')})
+    array([('M', 21, 72.0), ('F', 35, 58.0)],
+          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
+
+    >>> c = StringIO("1,0,2\\n3,0,4")
+    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+    >>> x
+    array([ 1.,  3.])
+    >>> y
+    array([ 2.,  4.])
+
+    """
+    # Type conversions for Py3 convenience
+    comments = asbytes(comments)
+    user_converters = converters
+    if delimiter is not None:
+        delimiter = asbytes(delimiter)
+    if usecols is not None:
+        usecols = list(usecols)
+
+    fown = False
+    try:
+        if _is_string_like(fname):
+            fown = True
+            if fname.endswith('.gz'):
+                fh = iter(seek_gzip_factory(fname))
+            elif fname.endswith('.bz2'):
+                import bz2
+                fh = iter(bz2.BZ2File(fname))
+            elif sys.version_info[0] == 2:
+                fh = iter(open(fname, 'U'))
+            else:
+                fh = iter(open(fname))
+        else:
+            fh = iter(fname)
+    except TypeError:
+        raise ValueError('fname must be a string, file handle, or generator')
+    X = []
+
+    def flatten_dtype(dt):
+        """Unpack a structured data-type, and produce re-packing info."""
+        if dt.names is None:
+            # If the dtype is flattened, return.
+            # If the dtype has a shape, the dtype occurs
+            # in the list more than once.
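+            # (A subarray dtype such as ('f8', (2, 2)) contributes
+            # prod(shape) copies of its base type, and the `packing`
+            # recipe built below lets pack_items() rebuild the nesting
+            # from the flat list of converted values.)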
+ shape = dt.shape + if len(shape) == 0: + return ([dt.base], None) + else: + packing = [(shape[-1], list)] + if len(shape) > 1: + for dim in dt.shape[-2::-1]: + packing = [(dim*packing[0][0], packing*dim)] + return ([dt.base] * int(np.prod(dt.shape)), packing) + else: + types = [] + packing = [] + for field in dt.names: + tp, bytes = dt.fields[field] + flat_dt, flat_packing = flatten_dtype(tp) + types.extend(flat_dt) + # Avoid extra nesting for subarrays + if len(tp.shape) > 0: + packing.extend(flat_packing) + else: + packing.append((len(flat_dt), flat_packing)) + return (types, packing) + + def pack_items(items, packing): + """Pack items into nested lists based on re-packing info.""" + if packing is None: + return items[0] + elif packing is tuple: + return tuple(items) + elif packing is list: + return list(items) + else: + start = 0 + ret = [] + for length, subpacking in packing: + ret.append(pack_items(items[start:start+length], subpacking)) + start += length + return tuple(ret) + + def split_line(line): + """Chop off comments, strip, and split at delimiter.""" + line = asbytes(line).split(comments)[0].strip(asbytes('\r\n')) + if line: + return line.split(delimiter) + else: + return [] + + try: + # Make sure we're dealing with a proper dtype + dtype = np.dtype(dtype) + defconv = _getconv(dtype) + + # Skip the first `skiprows` lines + for i in range(skiprows): + next(fh) + + # Read until we find a line with some values, and use + # it to estimate the number of columns, N. + first_vals = None + try: + while not first_vals: + first_line = next(fh) + first_vals = split_line(first_line) + except StopIteration: + # End of lines reached + first_line = '' + first_vals = [] + warnings.warn('loadtxt: Empty input file: "%s"' % fname) + N = len(usecols or first_vals) + + dtype_types, packing = flatten_dtype(dtype) + if len(dtype_types) > 1: + # We're dealing with a structured array, each field of + # the dtype matches a column + converters = [_getconv(dt) for dt in dtype_types] + else: + # All fields have the same dtype + converters = [defconv for i in range(N)] + if N > 1: + packing = [(N, tuple)] + + # By preference, use the converters specified by the user + for i, conv in (user_converters or {}).items(): + if usecols: + try: + i = usecols.index(i) + except ValueError: + # Unused converter specified + continue + converters[i] = conv + + # Parse each line, including the first + for i, line in enumerate(itertools.chain([first_line], fh)): + vals = split_line(line) + if len(vals) == 0: + continue + if usecols: + vals = [vals[i] for i in usecols] + if len(vals) != N: + line_num = i + skiprows + 1 + raise ValueError("Wrong number of columns at line %d" + % line_num) + + # Convert each value according to its column and store + items = [conv(val) for (conv, val) in zip(converters, vals)] + # Then pack it according to the dtype's nesting + items = pack_items(items, packing) + X.append(items) + finally: + if fown: + fh.close() + + X = np.array(X, dtype) + # Multicolumn data are returned with shape (1, N, M), i.e. + # (1, 1, M) for a single row - remove the singleton dimension there + if X.ndim == 3 and X.shape[:2] == (1, 1): + X.shape = (1, -1) + + # Verify that the array has at least dimensions `ndmin`. 
+ # Check correctness of the values of `ndmin` + if ndmin not in [0, 1, 2]: + raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) + # Tweak the size and shape of the arrays - remove extraneous dimensions + if X.ndim > ndmin: + X = np.squeeze(X) + # and ensure we have the minimum number of dimensions asked for + # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 + if X.ndim < ndmin: + if ndmin == 1: + X = np.atleast_1d(X) + elif ndmin == 2: + X = np.atleast_2d(X).T + + if unpack: + if len(dtype_types) > 1: + # For structured arrays, return an array for each field. + return [X[field] for field in dtype.names] + else: + return X.T + else: + return X + + +def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', + footer='', comments='# '): + """ + Save an array to a text file. + + Parameters + ---------- + fname : filename or file handle + If the filename ends in ``.gz``, the file is automatically saved in + compressed gzip format. `loadtxt` understands gzipped files + transparently. + X : array_like + Data to be saved to a text file. + fmt : str or sequence of strs, optional + A single format (%10.5f), a sequence of formats, or a + multi-format string, e.g. 'Iteration %d -- %10.5f', in which + case `delimiter` is ignored. For complex `X`, the legal options + for `fmt` are: + a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted + like `' (%s+%sj)' % (fmt, fmt)` + b) a full string specifying every real and imaginary part, e.g. + `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns + c) a list of specifiers, one per column - in this case, the real + and imaginary part must have separate specifiers, + e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns + delimiter : str, optional + String or character separating columns. + newline : str, optional + String or character separating lines. + + .. versionadded:: 1.5.0 + header : str, optional + String that will be written at the beginning of the file. + + .. versionadded:: 1.7.0 + footer : str, optional + String that will be written at the end of the file. + + .. versionadded:: 1.7.0 + comments : str, optional + String that will be prepended to the ``header`` and ``footer`` strings, + to mark them as comments. Default: '# ', as expected by e.g. + ``numpy.loadtxt``. + + .. versionadded:: 1.7.0 + + + See Also + -------- + save : Save an array to a binary file in NumPy ``.npy`` format + savez : Save several arrays into an uncompressed ``.npz`` archive + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + Further explanation of the `fmt` parameter + (``%[flag]width[.precision]specifier``): + + flags: + ``-`` : left justify + + ``+`` : Forces to precede result with + or -. + + ``0`` : Left pad the number with zeros instead of space (see width). + + width: + Minimum number of characters to be printed. The value is not truncated + if it has more characters. + + precision: + - For integer specifiers (eg. ``d,i,o,x``), the minimum number of + digits. + - For ``e, E`` and ``f`` specifiers, the number of digits to print + after the decimal point. + - For ``g`` and ``G``, the maximum number of significant digits. + - For ``s``, the maximum number of characters. + + specifiers: + ``c`` : character + + ``d`` or ``i`` : signed decimal integer + + ``e`` or ``E`` : scientific notation with ``e`` or ``E``. 
+
+        ``f`` : decimal floating point
+
+        ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+        ``o`` : signed octal
+
+        ``s`` : string of characters
+
+        ``u`` : unsigned decimal integer
+
+        ``x,X`` : unsigned hexadecimal integer
+
+    This explanation of ``fmt`` is not complete; for an exhaustive
+    specification see [1]_.
+
+    References
+    ----------
+    .. [1] `Format Specification Mini-Language
+           <http://docs.python.org/library/string.html#format-specification-mini-language>`_,
+           Python Documentation.
+
+    Examples
+    --------
+    >>> x = y = z = np.arange(0.0,5.0,1.0)
+    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
+    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
+    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
+
+    """
+
+    # Py3 conversions first
+    if isinstance(fmt, bytes):
+        fmt = asstr(fmt)
+    delimiter = asstr(delimiter)
+
+    own_fh = False
+    if _is_string_like(fname):
+        own_fh = True
+        if fname.endswith('.gz'):
+            import gzip
+            fh = gzip.open(fname, 'wb')
+        else:
+            if sys.version_info[0] >= 3:
+                fh = open(fname, 'wb')
+            else:
+                fh = open(fname, 'w')
+    elif hasattr(fname, 'write'):
+        fh = fname
+    else:
+        raise ValueError('fname must be a string or file handle')
+
+    try:
+        X = np.asarray(X)
+
+        # Handle 1-dimensional arrays
+        if X.ndim == 1:
+            # Common case -- 1d array of numbers
+            if X.dtype.names is None:
+                X = np.atleast_2d(X).T
+                ncol = 1
+
+            # Complex dtype -- each field indicates a separate column
+            else:
+                ncol = len(X.dtype.descr)
+        else:
+            ncol = X.shape[1]
+
+        iscomplex_X = np.iscomplexobj(X)
+        # `fmt` can be a string with multiple insertion points or a
+        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
+        if type(fmt) in (list, tuple):
+            if len(fmt) != ncol:
+                raise AttributeError('fmt has wrong shape.  %s' % str(fmt))
+            format = asstr(delimiter).join(map(asstr, fmt))
+        elif isinstance(fmt, str):
+            n_fmt_chars = fmt.count('%')
+            error = ValueError('fmt has wrong number of %% formats:  %s' % fmt)
+            if n_fmt_chars == 1:
+                if iscomplex_X:
+                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
+                else:
+                    fmt = [fmt, ] * ncol
+                format = delimiter.join(fmt)
+            elif iscomplex_X and n_fmt_chars != (2 * ncol):
+                raise error
+            elif ((not iscomplex_X) and n_fmt_chars != ncol):
+                raise error
+            else:
+                format = fmt
+        else:
+            raise ValueError('invalid fmt: %r' % (fmt,))
+
+        if len(header) > 0:
+            header = header.replace('\n', '\n' + comments)
+            fh.write(asbytes(comments + header + newline))
+        if iscomplex_X:
+            for row in X:
+                row2 = []
+                for number in row:
+                    row2.append(number.real)
+                    row2.append(number.imag)
+                fh.write(asbytes(format % tuple(row2) + newline))
+        else:
+            for row in X:
+                fh.write(asbytes(format % tuple(row) + newline))
+        if len(footer) > 0:
+            footer = footer.replace('\n', '\n' + comments)
+            fh.write(asbytes(comments + footer + newline))
+    finally:
+        if own_fh:
+            fh.close()
+
+
+def fromregex(file, regexp, dtype):
+    """
+    Construct an array from a text file, using regular expression parsing.
+
+    The returned array is always a structured array, and is constructed from
+    all matches of the regular expression in the file. Groups in the regular
+    expression are converted to fields of the structured array.
+
+    Parameters
+    ----------
+    file : str or file
+        File name or file object to read.
+    regexp : str or regexp
+        Regular expression used to parse the file.
+        Groups in the regular expression correspond to fields in the dtype.
+    dtype : dtype or list of dtypes
+        Dtype for the structured array.
+
+    Returns
+    -------
+    output : ndarray
+        The output array, containing the part of the content of `file` that
+        was matched by `regexp`. `output` is always a structured array.
+
+    Raises
+    ------
+    TypeError
+        When `dtype` is not a valid dtype for a structured array.
+
+    See Also
+    --------
+    fromstring, loadtxt
+
+    Notes
+    -----
+    Dtypes for structured arrays can be specified in several forms, but all
+    forms specify at least the data type and field name. For details see
+    `doc.structured_arrays`.
+
+    Examples
+    --------
+    >>> f = open('test.dat', 'w')
+    >>> f.write("1312 foo\\n1534  bar\\n444   qux")
+    >>> f.close()
+
+    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
+    >>> output = np.fromregex('test.dat', regexp,
+    ...                       [('num', np.int64), ('key', 'S3')])
+    >>> output
+    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
+          dtype=[('num', '<i8'), ('key', '|S3')])
+    >>> output['num']
+    array([1312, 1534,  444], dtype=int64)
+
+    """
+    own_fh = False
+    if not hasattr(file, "read"):
+        file = open(file, 'rb')
+        own_fh = True
+
+    try:
+        if not hasattr(regexp, 'match'):
+            regexp = re.compile(asbytes(regexp))
+        if not isinstance(dtype, np.dtype):
+            dtype = np.dtype(dtype)
+
+        seq = regexp.findall(file.read())
+        if seq and not isinstance(seq[0], tuple):
+            # Only one group is in the regexp.
+            # Create the new array as a single data-type and then
+            # re-interpret as a single-field structured array.
+            newdtype = np.dtype(dtype[dtype.names[0]])
+            output = np.array(seq, dtype=newdtype)
+            output.dtype = dtype
+        else:
+            output = np.array(seq, dtype=dtype)
+
+        return output
+    finally:
+        if own_fh:
+            file.close()
+
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+               skiprows=0, skip_header=0, skip_footer=0, converters=None,
+               missing='', missing_values=None, filling_values=None,
+               usecols=None, names=None,
+               excludelist=None, deletechars=None, replace_space='_',
+               autostrip=False, case_sensitive=True, defaultfmt="f%i",
+               unpack=None, usemask=False, loose=True, invalid_raise=True):
+    """
+    Load data from a text file, with missing values handled as specified.
+
+    Each line past the first `skip_header` lines is split at the `delimiter`
+    character, and characters following the `comments` character are discarded.
+
+    Parameters
+    ----------
+    fname : file or str
+        File, filename, or generator to read. If the filename extension is
+        `.gz` or `.bz2`, the file is first decompressed. Note that
+        generators must return byte strings in Python 3k.
+    dtype : dtype, optional
+        Data type of the resulting array.
+        If None, the dtypes will be determined by the contents of each
+        column, individually.
+    comments : str, optional
+        The character used to indicate the start of a comment.
+        All the characters occurring on a line after a comment are discarded.
+    delimiter : str, int, or sequence, optional
+        The string used to separate values. By default, any consecutive
+        whitespaces act as delimiter. An integer or sequence of integers
+        can also be provided as width(s) of each field.
+    skip_rows : int, optional
+        `skip_rows` was deprecated in numpy 1.5, and will be removed in
+        numpy 2.0. Please use `skip_header` instead.
+    skip_header : int, optional
+        The number of lines to skip at the beginning of the file.
+    skip_footer : int, optional
+        The number of lines to skip at the end of the file.
+    converters : variable, optional
+        The set of functions that convert the data of a column to a value.
+        The converters can also be used to provide a default value
+        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
+    missing : variable, optional
+        `missing` was deprecated in numpy 1.5, and will be removed in
+        numpy 2.0. Please use `missing_values` instead.
+    missing_values : variable, optional
+        The set of strings corresponding to missing data.
+    filling_values : variable, optional
+        The set of values to be used as default when the data are missing.
+    usecols : sequence, optional
+        Which columns to read, with 0 being the first. For example,
+        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
+    names : {None, True, str, sequence}, optional
+        If `names` is True, the field names are read from the first valid line
+        after the first `skip_header` lines.
+        If `names` is a sequence or a single string of comma-separated names,
+        the names will be used to define the field names in a structured dtype.
+        If `names` is None, the names of the dtype fields will be used, if any.
+    excludelist : sequence, optional
+        A list of names to exclude. This list is appended to the default list
+        ['return','file','print']. An underscore is appended to excluded
+        names: for example, `file` would become `file_`.
+    deletechars : str, optional
+        A string combining invalid characters that must be deleted from the
+        names.
+    defaultfmt : str, optional
+        A format used to define default field names, such as "f%i" or "f_%02i".
+    autostrip : bool, optional
+        Whether to automatically strip white spaces from the variables.
+    replace_space : char, optional
+        Character(s) used in replacement of white spaces in the variables
+        names. By default, use a '_'.
+    case_sensitive : {True, False, 'upper', 'lower'}, optional
+        If True, field names are case sensitive.
+        If False or 'upper', field names are converted to upper case.
+        If 'lower', field names are converted to lower case.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = genfromtxt(...)``.
+    usemask : bool, optional
+        If True, return a masked array.
+        If False, return a regular array.
+    loose : bool, optional
+        If True, do not raise errors for invalid values.
+    invalid_raise : bool, optional
+        If True, an exception is raised if an inconsistency is detected in the
+        number of columns.
+        If False, a warning is emitted and the offending lines are skipped.
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file. If `usemask` is True, this is a
+        masked array.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When the variables are named (either by a flexible dtype or with
+      `names`), there must not be any header in the file (else a ValueError
+      exception is raised).
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove spaces.
+
+    References
+    ----------
+    .. [1] Numpy User Guide, section `I/O with Numpy
+           <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+    Examples
+    --------
+    >>> from StringIO import StringIO
+    >>> import numpy as np
+
+    Comma delimited file with mixed dtype
+
+    >>> s = StringIO("1,1.3,abcde")
+    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+    ...     ('mystring','S5')], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+    Using dtype = None
+
+    >>> s.seek(0) # needed for StringIO example only
+    >>> data = np.genfromtxt(s, dtype=None,
+    ...     names = ['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+    Specifying dtype and names
+
+    >>> s.seek(0)
+    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+    ...     names=['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+
+    An example with fixed-width columns
+
+    >>> s = StringIO("11.3abcde")
+    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+    ...     delimiter=[1,3,5])
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
+
+    """
+        # If the dtype is not None, make sure we update it
+        if (dtype is not None) and (len(dtype) > nbcols):
+            descr = dtype.descr
+            dtype = np.dtype([descr[_] for _ in usecols])
+            names = list(dtype.names)
+        # If `names` is not None, update the names
+        elif (names is not None) and (len(names) > nbcols):
+            names = [names[_] for _ in usecols]
+    elif (names is not None) and (dtype is not None):
+        names = list(dtype.names)
+
+    # Process the missing values ...............................
+    # Rename missing_values for convenience
+    user_missing_values = missing_values or ()
+
+    # Define the list of missing_values (one column: one list)
+    missing_values = [list([asbytes('')]) for _ in range(nbcols)]
+
+    # We have a dictionary: process it field by field
+    if isinstance(user_missing_values, dict):
+        # Loop on the items
+        for (key, val) in user_missing_values.items():
+            # Is the key a string ?
+            if _is_string_like(key):
+                try:
+                    # Transform it into an integer
+                    key = names.index(key)
+                except ValueError:
+                    # We couldn't find it: the name must have been dropped
+                    continue
+            # Redefine the key as needed if it's a column number
+            if usecols:
+                try:
+                    key = usecols.index(key)
+                except ValueError:
+                    pass
+            # Transform the value as a list of string
+            if isinstance(val, (list, tuple)):
+                val = [str(_) for _ in val]
+            else:
+                val = [str(val), ]
+            # Add the value(s) to the current list of missing
+            if key is None:
+                # None acts as default
+                for miss in missing_values:
+                    miss.extend(val)
+            else:
+                missing_values[key].extend(val)
+    # We have a sequence : each item matches a column
+    elif isinstance(user_missing_values, (list, tuple)):
+        for (value, entry) in zip(user_missing_values, missing_values):
+            value = str(value)
+            if value not in entry:
+                entry.append(value)
+    # We have a string : apply it to all entries
+    elif isinstance(user_missing_values, bytes):
+        user_value = user_missing_values.split(asbytes(","))
+        for entry in missing_values:
+            entry.extend(user_value)
+    # We have something else: apply it to all entries
+    else:
+        for entry in missing_values:
+            entry.extend([str(user_missing_values)])
+
+    # Process the deprecated `missing`
+    if missing != asbytes(''):
+        warnings.warn(
+            "The use of `missing` is deprecated, it will be removed in "
+            "Numpy 2.0.\nPlease use `missing_values` instead.",
+            DeprecationWarning)
+        values = [str(_) for _ in missing.split(asbytes(","))]
+        for entry in missing_values:
+            entry.extend(values)
+
+    # Process the filling_values ...............................
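+    # filling_values accepts the same shapes as missing_values above: a dict
+    # keyed by field name or column index, a sequence with one default per
+    # column, or a single scalar broadcast to every column.  For instance
+    # (hypothetical column name), filling_values={'a': -999} replaces missing
+    # entries of column 'a' with -999 and leaves the other columns at None,
+    # i.e. at their converter's own default.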
+ # Rename the input for convenience + user_filling_values = filling_values or [] + # Define the default + filling_values = [None] * nbcols + # We have a dictionary : update each entry individually + if isinstance(user_filling_values, dict): + for (key, val) in user_filling_values.items(): + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped, + continue + # Redefine the key if it's a column number and usecols is defined + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Add the value to the list + filling_values[key] = val + # We have a sequence : update on a one-to-one basis + elif isinstance(user_filling_values, (list, tuple)): + n = len(user_filling_values) + if (n <= nbcols): + filling_values[:n] = user_filling_values + else: + filling_values = user_filling_values[:nbcols] + # We have something else : use it for all entries + else: + filling_values = [user_filling_values] * nbcols + + # Initialize the converters ................................ + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times the same + # ... converter, instead of 3 different converters. + converters = [StringConverter(None, missing_values=miss, default=fill) + for (miss, fill) in zip(missing_values, filling_values)] + else: + dtype_flat = flatten_dtype(dtype, flatten_base=True) + # Initialize the converters + if len(dtype_flat) > 1: + # Flexible type : get a converter from each dtype + zipit = zip(dtype_flat, missing_values, filling_values) + converters = [StringConverter(dt, locked=True, + missing_values=miss, default=fill) + for (dt, miss, fill) in zipit] + else: + # Set to a default converter (but w/ different missing values) + zipit = zip(missing_values, filling_values) + converters = [StringConverter(dtype, locked=True, + missing_values=miss, default=fill) + for (miss, fill) in zipit] + # Update the converters to use the user-defined ones + uc_update = [] + for (j, conv) in user_converters.items(): + # If the converter is specified by column names, use the index instead + if _is_string_like(j): + try: + j = names.index(j) + i = j + except ValueError: + continue + elif usecols: + try: + i = usecols.index(j) + except ValueError: + # Unused converter specified + continue + else: + i = j + # Find the value to test - first_line is not filtered by usecols: + if len(first_line): + testing_value = first_values[j] + else: + testing_value = None + converters[i].update(conv, locked=True, + testing_value=testing_value, + default=filling_values[i], + missing_values=missing_values[i],) + uc_update.append((i, conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Fixme: possible error as following variable never used. + #miss_chars = [_.missing_values for _ in converters] + + # Initialize the output lists ... + # ... rows + rows = [] + append_to_rows = rows.append + # ... masks + if usemask: + masks = [] + append_to_masks = masks.append + # ... 
invalid + invalid = [] + append_to_invalid = invalid.append + + # Parse each line + for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): + values = split_line(line) + nbvalues = len(values) + # Skip an empty line + if nbvalues == 0: + continue + # Select only the columns we need + if usecols: + try: + values = [values[_] for _ in usecols] + except IndexError: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + elif nbvalues != nbcols: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple([v.strip() in m + for (v, m) in zip(values, missing_values)])) + + if own_fhd: + fhd.close() + + # Upgrade the converters (if needed) + if dtype is None: + for (i, converter) in enumerate(converters): + current_column = [itemgetter(i)(_m) for _m in rows] + try: + converter.iterupgrade(current_column) + except ConverterLockError: + errmsg = "Converter #%i is locked and cannot be upgraded: " % i + current_column = map(itemgetter(i), rows) + for (j, value) in enumerate(current_column): + try: + converter.upgrade(value) + except (ConverterError, ValueError): + errmsg += "(occurred line #%i for value '%s')" + errmsg %= (j + 1 + skip_header, value) + raise ConverterError(errmsg) + + # Check that we don't have invalid values + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer + # Construct the error message + template = " Line #%%i (got %%i columns instead of %i)" % nbcols + if skip_footer > 0: + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] + if len(errmsg): + errmsg.insert(0, "Some errors were detected !") + errmsg = "\n".join(errmsg) + # Raise an exception ? + if invalid_raise: + raise ValueError(errmsg) + # Issue a warning ? + else: + warnings.warn(errmsg, ConversionWarning) + + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + rows = list( + zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + else: + rows = list( + zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + column_types = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(column_types) + if v in (type('S'), np.string_)] + # ... and take the largest number of chars. 
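+    # For instance (hypothetical values), a column holding b'ab' and b'abcd'
+    # is widened to '|S4', the length of the longest string seen in that
+    # column, so no value is truncated.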
+ for i in strcolidx: + column_types[i] = "|S%i" % max(len(row[i]) for row in data) + # + if names is None: + # If the dtype is uniform, don't define names, else use '' + base = set([c.type for c in converters if c._checked]) + if len(base) == 1: + (ddtype, mdtype) = (list(base)[0], np.bool) + else: + ddtype = [(defaultfmt % i, dt) + for (i, dt) in enumerate(column_types)] + if usemask: + mdtype = [(defaultfmt % i, np.bool) + for (i, dt) in enumerate(column_types)] + else: + ddtype = list(zip(names, column_types)) + mdtype = list(zip(names, [np.bool] * len(column_types))) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names: + dtype.names = names + # Case 1. We have a structured type + if len(dtype_flat) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if 'O' in (_.char for _ in dtype_flat): + if has_nested_fields(dtype): + raise NotImplementedError( + "Nested fields involving objects are not supported...") + else: + output = np.array(data, dtype=dtype) + else: + rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) + output = rows.view(dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array( + masks, dtype=np.dtype([('', np.bool) for t in dtype_flat])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for i, ttype in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if ttype == np.string_: + ttype = "|S%i" % max(len(row[i]) for row in data) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. + else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names: + mdtype = [(_, np.bool) for _ in dtype.names] + else: + mdtype = np.bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + names = output.dtype.names + if usemask and names: + for (name, conv) in zip(names or (), converters): + missing_values = [conv(_) for _ in conv.missing_values + if _ != asbytes('')] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + if unpack: + return output.squeeze().T + return output.squeeze() + + +def ndfromtxt(fname, **kwargs): + """ + Load ASCII data stored in a file and return it as a single array. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function. + + """ + kwargs['usemask'] = False + return genfromtxt(fname, **kwargs) + + +def mafromtxt(fname, **kwargs): + """ + Load ASCII data stored in a text file and return a masked array. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. 
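+        All keywords are forwarded to `genfromtxt` unchanged, except that
+        ``usemask`` is forced to True.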
+ + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + """ + kwargs['usemask'] = True + return genfromtxt(fname, **kwargs) + + +def recfromtxt(fname, **kwargs): + """ + Load ASCII data from a file and return it in a record array. + + If ``usemask=False`` a standard `recarray` is returned, + if ``usemask=True`` a MaskedRecords array is returned. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + kwargs.setdefault("dtype", None) + usemask = kwargs.get('usemask', False) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, **kwargs): + """ + Load ASCII data stored in a comma-separated file. + + The returned array is a record array (if ``usemask=False``, see + `recarray`) or a masked record array (if ``usemask=True``, + see `ma.mrecords.MaskedRecords`). + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + # Set default kwargs for genfromtxt as relevant to csv import. + kwargs.setdefault("case_sensitive", "lower") + kwargs.setdefault("names", True) + kwargs.setdefault("delimiter", ",") + kwargs.setdefault("dtype", None) + output = genfromtxt(fname, **kwargs) + + usemask = kwargs.get("usemask", False) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/polynomial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/polynomial.py new file mode 100644 index 0000000000000..6a1adc7730806 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/polynomial.py @@ -0,0 +1,1271 @@ +""" +Functions to operate on polynomials. + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', + 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', + 'polyfit', 'RankWarning'] + +import re +import warnings +import numpy.core.numeric as NX + +from numpy.core import isscalar, abs, finfo, atleast_1d, hstack, dot +from numpy.lib.twodim_base import diag, vander +from numpy.lib.function_base import trim_zeros, sort_complex +from numpy.lib.type_check import iscomplex, real, imag +from numpy.linalg import eigvals, lstsq, inv + +class RankWarning(UserWarning): + """ + Issued by `polyfit` when the Vandermonde matrix is rank deficient. + + For more information, a way to suppress the warning, and an example of + `RankWarning` being issued, see `polyfit`. + + """ + pass + +def poly(seq_of_zeros): + """ + Find the coefficients of a polynomial with the given sequence of roots. + + Returns the coefficients of the polynomial whose leading coefficient + is one for the given sequence of zeros (multiple roots must be included + in the sequence as many times as their multiplicity; see Examples). 
+ A square matrix (or array, which will be treated as a matrix) can also + be given, in which case the coefficients of the characteristic polynomial + of the matrix are returned. + + Parameters + ---------- + seq_of_zeros : array_like, shape (N,) or (N, N) + A sequence of polynomial roots, or a square array or matrix object. + + Returns + ------- + c : ndarray + 1D array of polynomial coefficients from highest to lowest degree: + + ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` + where c[0] always equals 1. + + Raises + ------ + ValueError + If input is the wrong shape (the input must be a 1-D or square + 2-D array). + + See Also + -------- + polyval : Evaluate a polynomial at a point. + roots : Return the roots of a polynomial. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + Specifying the roots of a polynomial still leaves one degree of + freedom, typically represented by an undetermined leading + coefficient. [1]_ In the case of this function, that coefficient - + the first one in the returned array - is always taken as one. (If + for some reason you have one other point, the only automatic way + presently to leverage that information is to use ``polyfit``.) + + The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` + matrix **A** is given by + + :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, + + where **I** is the `n`-by-`n` identity matrix. [2]_ + + References + ---------- + .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry, + Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. + + .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," + Academic Press, pg. 182, 1980. + + Examples + -------- + Given a sequence of a polynomial's zeros: + + >>> np.poly((0, 0, 0)) # Multiple root example + array([1, 0, 0, 0]) + + The line above represents z**3 + 0*z**2 + 0*z + 0. + + >>> np.poly((-1./2, 0, 1./2)) + array([ 1. , 0. , -0.25, 0. ]) + + The line above represents z**3 - z/4 + + >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) #random + + Given a square array object: + + >>> P = np.array([[0, 1./3], [-1./2, 0]]) + >>> np.poly(P) + array([ 1. , 0. , 0.16666667]) + + Or a square matrix object: + + >>> np.poly(np.matrix(P)) + array([ 1. , 0. , 0.16666667]) + + Note how in all cases the leading coefficient is always 1. + + """ + seq_of_zeros = atleast_1d(seq_of_zeros) + sh = seq_of_zeros.shape + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: + seq_of_zeros = eigvals(seq_of_zeros) + elif len(sh) == 1: + pass + else: + raise ValueError("input must be 1d or non-empty square 2d array.") + + if len(seq_of_zeros) == 0: + return 1.0 + + a = [1] + for k in range(len(seq_of_zeros)): + a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full') + + if issubclass(a.dtype.type, NX.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = NX.asarray(seq_of_zeros, complex) + pos_roots = sort_complex(NX.compress(roots.imag > 0, roots)) + neg_roots = NX.conjugate(sort_complex( + NX.compress(roots.imag < 0, roots))) + if (len(pos_roots) == len(neg_roots) and + NX.alltrue(neg_roots == pos_roots)): + a = a.real.copy() + + return a + +def roots(p): + """ + Return the roots of a polynomial with coefficients given in p. + + The values in the rank-1 array `p` are coefficients of a polynomial. 
+ If the length of `p` is n+1 then the polynomial is described by:: + + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : array_like + Rank-1 array of polynomial coefficients. + + Returns + ------- + out : ndarray + An array containing the complex roots of the polynomial. + + Raises + ------ + ValueError + When `p` cannot be converted to a rank-1 array. + + See also + -------- + poly : Find the coefficients of a polynomial with a given sequence + of roots. + polyval : Evaluate a polynomial at a point. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + The algorithm relies on computing the eigenvalues of the + companion matrix [1]_. + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. + + Examples + -------- + >>> coeff = [3.2, 2, 1] + >>> np.roots(coeff) + array([-0.3125+0.46351241j, -0.3125-0.46351241j]) + + """ + # If input is scalar, this makes it an array + p = atleast_1d(p) + if len(p.shape) != 1: + raise ValueError("Input must be a rank-1 array.") + + # find non-zero array entries + non_zero = NX.nonzero(NX.ravel(p))[0] + + # Return an empty array if polynomial is all zeros + if len(non_zero) == 0: + return NX.array([]) + + # find the number of trailing zeros -- this is the number of roots at 0. + trailing_zeros = len(p) - non_zero[-1] - 1 + + # strip leading and trailing zeros + p = p[int(non_zero[0]):int(non_zero[-1])+1] + + # casting: if incoming array isn't floating point, make it floating point. + if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): + p = p.astype(float) + + N = len(p) + if N > 1: + # build companion matrix and find its eigenvalues (the roots) + A = diag(NX.ones((N-2,), p.dtype), -1) + A[0,:] = -p[1:] / p[0] + roots = eigvals(A) + else: + roots = NX.array([]) + + # tack any zeros onto the back of the array + roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) + return roots + +def polyint(p, m=1, k=None): + """ + Return an antiderivative (indefinite integral) of a polynomial. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : {array_like, poly1d} + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : {None, list of `m` scalars, scalar}, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + The defining property of the antiderivative: + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + >>> P + poly1d([ 0.33333333, 0.5 , 1. , 0. 
]) + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. Constant of the highest-order polynomial term comes first: + + >>> np.polyder(P, 2)(0) + 6.0 + >>> np.polyder(P, 1)(0) + 5.0 + >>> P(0) + 3.0 + + """ + m = int(m) + if m < 0: + raise ValueError("Order of integral must be positive (see polyder)") + if k is None: + k = NX.zeros(m, float) + k = atleast_1d(k) + if len(k) == 1 and m > 1: + k = k[0]*NX.ones(m, float) + if len(k) < m: + raise ValueError( + "k must be a scalar or a rank-1 array of length 1 or >m.") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + if m == 0: + if truepoly: + return poly1d(p) + return p + else: + # Note: this must work also with object and integer arrays + y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) + val = polyint(y, m - 1, k=k[1:]) + if truepoly: + return poly1d(val) + return val + +def polyder(p, m=1): + """ + Return the derivative of the specified order of a polynomial. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of differentiation (default: 1) + + Returns + ------- + der : poly1d + A new polynomial representing the derivative. + + See Also + -------- + polyint : Anti-derivative of a polynomial. + poly1d : Class for one-dimensional polynomials. + + Examples + -------- + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + + >>> p = np.poly1d([1,1,1,1]) + >>> p2 = np.polyder(p) + >>> p2 + poly1d([3, 2, 1]) + + which evaluates to: + + >>> p2(2.) + 17.0 + + We can verify this, approximating the derivative with + ``(f(x + h) - f(x))/h``: + + >>> (p(2. + 0.001) - p(2.)) / 0.001 + 17.007000999997857 + + The fourth-order derivative of a 3rd-order polynomial is zero: + + >>> np.polyder(p, 2) + poly1d([6, 2]) + >>> np.polyder(p, 3) + poly1d([6]) + >>> np.polyder(p, 4) + poly1d([ 0.]) + + """ + m = int(m) + if m < 0: + raise ValueError("Order of derivative must be positive (see polyint)") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + n = len(p) - 1 + y = p[:-1] * NX.arange(n, 0, -1) + if m == 0: + val = p + else: + val = polyder(y, m - 1) + if truepoly: + val = poly1d(val) + return val + +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Least squares polynomial fit. + + Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + to points `(x, y)`. Returns a vector of coefficients `p` that minimises + the squared error. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. 
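+        For example, with 100 samples of float64 (eps close to 2.2e-16),
+        the default is about ``100 * 2.2e-16 = 2.2e-14``.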
+ full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (M,), optional + weights to apply to the y-coordinates of the sample points. + cov : bool, optional + Return the estimate and the covariance matrix of the estimate + If full is True, then cov is not returned. + + Returns + ------- + p : ndarray, shape (M,) or (M, K) + Polynomial coefficients, highest power first. If `y` was 2-D, the + coefficients for `k`-th data set are in ``p[:,k]``. + + residuals, rank, singular_values, rcond : + Present only if `full` = True. Residuals of the least-squares fit, + the effective rank of the scaled Vandermonde coefficient matrix, + its singular values, and the specified value of `rcond`. For more + details, see `linalg.lstsq`. + + V : ndarray, shape (M,M) or (M,M,K) + Present only if `full` = False and `cov`=True. The covariance + matrix of the polynomial coefficient estimates. The diagonal of + this matrix are the variance estimates for each coefficient. If y + is a 2-D array, then the covariance matrix for the `k`-th data set + are in ``V[:,:,k]`` + + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. + + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + polyval : Computes polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution minimizes the squared error + + .. math :: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] + x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] + ... + x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. + + `polyfit` issues a `RankWarning` when the least-squares fit is badly + conditioned. This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + http://en.wikipedia.org/wiki/Curve_fitting + .. 
[2] Wikipedia, "Polynomial interpolation", + http://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + >>> z + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 + >>> p(3.5) + -0.34732142857143039 + >>> p(10) + 22.579365079365115 + + High-order polynomials may oscillate wildly: + + >>> p30 = np.poly1d(np.polyfit(x, y, 30)) + /... RankWarning: Polyfit may be poorly conditioned... + >>> p30(4) + -0.80000000000000204 + >>> p30(5) + -0.99999999999999445 + >>> p30(4.5) + -0.10547061179440398 + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + (-2, 2) + >>> plt.show() + + """ + order = int(deg) + 1 + x = NX.asarray(x) + 0.0 + y = NX.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if x.shape[0] != y.shape[0]: + raise TypeError("expected x and y to have same length") + + # set rcond + if rcond is None: + rcond = len(x)*finfo(x.dtype).eps + + # set up least squares equation for powers of x + lhs = vander(x, order) + rhs = y + + # apply weighting + if w is not None: + w = NX.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + lhs *= w[:, NX.newaxis] + if rhs.ndim == 2: + rhs *= w[:, NX.newaxis] + else: + rhs *= w + + # scale lhs to improve condition number and solve + scale = NX.sqrt((lhs*lhs).sum(axis=0)) + lhs /= scale + c, resids, rank, s = lstsq(lhs, rhs, rcond) + c = (c.T/scale).T # broadcast scale coefficients + + # warn on rank reduction, which indicates an ill conditioned matrix + if rank != order and not full: + msg = "Polyfit may be poorly conditioned" + warnings.warn(msg, RankWarning) + + if full: + return c, resids, rank, s, rcond + elif cov: + Vbase = inv(dot(lhs.T, lhs)) + Vbase /= NX.outer(scale, scale) + # Some literature ignores the extra -2.0 factor in the denominator, but + # it is included here because the covariance of Multivariate Student-T + # (which is implied by a Bayesian uncertainty analysis) includes it. + # Plus, it gives a slightly more conservative estimate of uncertainty. + fac = resids / (len(x) - order - 2.0) + if y.ndim == 1: + return c, Vbase * fac + else: + return c, Vbase[:,:, NX.newaxis] * fac + else: + return c + + +def polyval(p, x): + """ + Evaluate a polynomial at specific values. + + If `p` is of length N, this function returns the value: + + ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` + + If `x` is a sequence, then `p(x)` is returned for each element of `x`. + If `x` is another polynomial then the composite polynomial `p(x(t))` + is returned. + + Parameters + ---------- + p : array_like or poly1d object + 1D array of polynomial coefficients (including coefficients equal + to zero) from highest degree to the constant term, or an + instance of poly1d. 
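+        For example, ``[3, 0, 1]`` represents ``3*x**2 + 0*x + 1``.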
+ x : array_like or poly1d object + A number, a 1D array of numbers, or an instance of poly1d, "at" + which to evaluate `p`. + + Returns + ------- + values : ndarray or poly1d + If `x` is a poly1d instance, the result is the composition of the two + polynomials, i.e., `x` is "substituted" in `p` and the simplified + result is returned. In addition, the type of `x` - array_like or + poly1d - governs the type of the output: `x` array_like => `values` + array_like, `x` a poly1d object => `values` is also. + + See Also + -------- + poly1d: A polynomial class. + + Notes + ----- + Horner's scheme [1]_ is used to evaluate the polynomial. Even so, + for polynomials of high degree the values may be inaccurate due to + rounding errors. Use carefully. + + References + ---------- + .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. + trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand + Reinhold Co., 1985, pg. 720. + + Examples + -------- + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 + >>> np.polyval([3,0,1], np.poly1d(5)) + poly1d([ 76.]) + >>> np.polyval(np.poly1d([3,0,1]), 5) + 76 + >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) + poly1d([ 76.]) + + """ + p = NX.asarray(p) + if isinstance(x, poly1d): + y = 0 + else: + x = NX.asarray(x) + y = NX.zeros_like(x) + for i in range(len(p)): + y = x * y + p[i] + return y + +def polyadd(a1, a2): + """ + Find the sum of two polynomials. + + Returns the polynomial resulting from the sum of two input polynomials. + Each input must be either a poly1d object or a 1D sequence of polynomial + coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The sum of the inputs. If either input is a poly1d object, then the + output is also a poly1d object. Otherwise, it is a 1D array of + polynomial coefficients from highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + + Examples + -------- + >>> np.polyadd([1, 2], [9, 5, 4]) + array([9, 6, 6]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2]) + >>> p2 = np.poly1d([9, 5, 4]) + >>> print p1 + 1 x + 2 + >>> print p2 + 2 + 9 x + 5 x + 4 + >>> print np.polyadd(p1, p2) + 2 + 9 x + 6 x + 6 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 + a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) + a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 + NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + `a1` and `a2` can be either array_like sequences of the polynomials' + coefficients (including coefficients equal to zero), or `poly1d` objects. + + Parameters + ---------- + a1, a2 : array_like or poly1d + Minuend and subtrahend polynomials, respectively. + + Returns + ------- + out : ndarray or poly1d + Array or `poly1d` object of the difference polynomial's coefficients. + + See Also + -------- + polyval, polydiv, polymul, polyadd + + Examples + -------- + .. 
math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
+
+    >>> np.polysub([2, 10, -2], [3, 10, -4])
+    array([-1, 0, 2])
+
+    """
+    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+    a1 = atleast_1d(a1)
+    a2 = atleast_1d(a2)
+    diff = len(a2) - len(a1)
+    if diff == 0:
+        val = a1 - a2
+    elif diff > 0:
+        zr = NX.zeros(diff, a1.dtype)
+        val = NX.concatenate((zr, a1)) - a2
+    else:
+        zr = NX.zeros(abs(diff), a2.dtype)
+        val = a1 - NX.concatenate((zr, a2))
+    if truepoly:
+        val = poly1d(val)
+    return val
+
+
+def polymul(a1, a2):
+    """
+    Find the product of two polynomials.
+
+    Finds the polynomial resulting from the multiplication of the two input
+    polynomials. Each input must be either a poly1d object or a 1D sequence
+    of polynomial coefficients, from highest to lowest degree.
+
+    Parameters
+    ----------
+    a1, a2 : array_like or poly1d object
+        Input polynomials.
+
+    Returns
+    -------
+    out : ndarray or poly1d object
+        The polynomial resulting from the multiplication of the inputs. If
+        either input is a poly1d object, then the output is also a poly1d
+        object. Otherwise, it is a 1D array of polynomial coefficients from
+        highest to lowest degree.
+
+    See Also
+    --------
+    poly1d : A one-dimensional polynomial class.
+    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
+    polyval
+    convolve : Array convolution. Same output as polymul, but has parameter
+               for overlap mode.
+
+    Examples
+    --------
+    >>> np.polymul([1, 2, 3], [9, 5, 1])
+    array([ 9, 23, 38, 17, 3])
+
+    Using poly1d objects:
+
+    >>> p1 = np.poly1d([1, 2, 3])
+    >>> p2 = np.poly1d([9, 5, 1])
+    >>> print p1
+       2
+    1 x + 2 x + 3
+    >>> print p2
+       2
+    9 x + 5 x + 1
+    >>> print np.polymul(p1, p2)
+       4      3      2
+    9 x + 23 x + 38 x + 17 x + 3
+
+    """
+    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+    a1, a2 = poly1d(a1), poly1d(a2)
+    val = NX.convolve(a1, a2)
+    if truepoly:
+        val = poly1d(val)
+    return val
+
+def polydiv(u, v):
+    """
+    Returns the quotient and remainder of polynomial division.
+
+    The input arrays are the coefficients (including any coefficients
+    equal to zero) of the "numerator" (dividend) and "denominator"
+    (divisor) polynomials, respectively.
+
+    Parameters
+    ----------
+    u : array_like or poly1d
+        Dividend polynomial's coefficients.
+
+    v : array_like or poly1d
+        Divisor polynomial's coefficients.
+
+    Returns
+    -------
+    q : ndarray
+        Coefficients, including those equal to zero, of the quotient.
+    r : ndarray
+        Coefficients, including those equal to zero, of the remainder.
+
+    See Also
+    --------
+    poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
+    polyval
+
+    Notes
+    -----
+    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
+    not equal `v.ndim`. In other words, all four possible combinations -
+    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
+    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
+
+    Examples
+    --------
+    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
+
+    >>> x = np.array([3.0, 5.0, 2.0])
+    >>> y = np.array([2.0, 1.0])
+    >>> np.polydiv(x, y)
+    (array([ 1.5 , 1.75]), array([ 0.25]))
+
+    """
+    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
+    u = atleast_1d(u) + 0.0
+    v = atleast_1d(v) + 0.0
+    # w has the common type
+    w = u[0] + v[0]
+    m = len(u) - 1
+    n = len(v) - 1
+    scale = 1. 
/ v[0] + q = NX.zeros((max(m - n + 1, 1),), w.dtype) + r = u.copy() + for k in range(0, m-n+1): + d = scale * r[k] + q[k] = d + r[k:k+n+1] -= d*v + while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + if truepoly: + return poly1d(q), poly1d(r) + return q, r + +_poly_mat = re.compile(r"[*][*]([0-9]*)") +def _raise_power(astr, wrap=70): + n = 0 + line1 = '' + line2 = '' + output = ' ' + while True: + mat = _poly_mat.search(astr, n) + if mat is None: + break + span = mat.span() + power = mat.groups()[0] + partstr = astr[n:span[0]] + n = span[1] + toadd2 = partstr + ' '*(len(power)-1) + toadd1 = ' '*(len(partstr)-1) + power + if ((len(line2) + len(toadd2) > wrap) or + (len(line1) + len(toadd1) > wrap)): + output += line1 + "\n" + line2 + "\n " + line1 = toadd1 + line2 = toadd2 + else: + line2 += partstr + ' '*(len(power)-1) + line1 += ' '*(len(partstr)-1) + power + output += line1 + "\n" + line2 + return output + astr[n:] + + +class poly1d(object): + """ + A one-dimensional polynomial class. + + A convenience class, used to encapsulate "natural" operations on + polynomials so that said operations may take on their customary + form in code (see Examples). + + Parameters + ---------- + c_or_r : array_like + The polynomial's coefficients, in decreasing powers, or if + the value of the second parameter is True, the polynomial's + roots (values where the polynomial evaluates to 0). For example, + ``poly1d([1, 2, 3])`` returns an object that represents + :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns + one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. + r : bool, optional + If True, `c_or_r` specifies the polynomial's roots; the default + is False. + variable : str, optional + Changes the variable used when printing `p` from `x` to `variable` + (see Examples). 
+ + Examples + -------- + Construct the polynomial :math:`x^2 + 2x + 3`: + + >>> p = np.poly1d([1, 2, 3]) + >>> print np.poly1d(p) + 2 + 1 x + 2 x + 3 + + Evaluate the polynomial at :math:`x = 0.5`: + + >>> p(0.5) + 4.25 + + Find the roots: + + >>> p.r + array([-1.+1.41421356j, -1.-1.41421356j]) + >>> p(p.r) + array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) + + These numbers in the previous line represent (0, 0) to machine precision + + Show the coefficients: + + >>> p.c + array([1, 2, 3]) + + Display the order (the leading zero-coefficients are removed): + + >>> p.order + 2 + + Show the coefficient of the k-th power in the polynomial + (which is equivalent to ``p.c[-(i+1)]``): + + >>> p[1] + 2 + + Polynomials can be added, subtracted, multiplied, and divided + (returns quotient and remainder): + + >>> p * p + poly1d([ 1, 4, 10, 12, 9]) + + >>> (p**3 + 4) / p + (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) + + ``asarray(p)`` gives the coefficient array, so polynomials can be + used in all functions that accept arrays: + + >>> p**2 # square of polynomial + poly1d([ 1, 4, 10, 12, 9]) + + >>> np.square(p) # square of individual coefficients + array([1, 4, 9]) + + The variable used in the string representation of `p` can be modified, + using the `variable` parameter: + + >>> p = np.poly1d([1,2,3], variable='z') + >>> print p + 2 + 1 z + 2 z + 3 + + Construct a polynomial from its roots: + + >>> np.poly1d([1, 2], True) + poly1d([ 1, -3, 2]) + + This is the same polynomial as obtained by: + + >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) + poly1d([ 1, -3, 2]) + + """ + coeffs = None + order = None + variable = None + __hash__ = None + + def __init__(self, c_or_r, r=0, variable=None): + if isinstance(c_or_r, poly1d): + for key in c_or_r.__dict__.keys(): + self.__dict__[key] = c_or_r.__dict__[key] + if variable is not None: + self.__dict__['variable'] = variable + return + if r: + c_or_r = poly(c_or_r) + c_or_r = atleast_1d(c_or_r) + if len(c_or_r.shape) > 1: + raise ValueError("Polynomial must be 1d only.") + c_or_r = trim_zeros(c_or_r, trim='f') + if len(c_or_r) == 0: + c_or_r = NX.array([0.]) + self.__dict__['coeffs'] = c_or_r + self.__dict__['order'] = len(c_or_r) - 1 + if variable is None: + variable = 'x' + self.__dict__['variable'] = variable + + def __array__(self, t=None): + if t: + return NX.asarray(self.coeffs, t) + else: + return NX.asarray(self.coeffs) + + def __repr__(self): + vals = repr(self.coeffs) + vals = vals[6:-1] + return "poly1d(%s)" % vals + + def __len__(self): + return self.order + + def __str__(self): + thestr = "0" + var = self.variable + + # Remove leading zeros + coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] + N = len(coeffs)-1 + + def fmt_float(q): + s = '%.4g' % q + if s.endswith('.0000'): + s = s[:-5] + return s + + for k in range(len(coeffs)): + if not iscomplex(coeffs[k]): + coefstr = fmt_float(real(coeffs[k])) + elif real(coeffs[k]) == 0: + coefstr = '%sj' % fmt_float(imag(coeffs[k])) + else: + coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), + fmt_float(imag(coeffs[k]))) + + power = (N-k) + if power == 0: + if coefstr != '0': + newstr = '%s' % (coefstr,) + else: + if k == 0: + newstr = '0' + else: + newstr = '' + elif power == 1: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = var + else: + newstr = '%s %s' % (coefstr, var) + else: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) + else: + newstr = '%s %s**%d' % (coefstr, var, power) + + if k > 0: + if newstr != '': + 
if newstr.startswith('-'): + thestr = "%s - %s" % (thestr, newstr[1:]) + else: + thestr = "%s + %s" % (thestr, newstr) + else: + thestr = newstr + return _raise_power(thestr) + + def __call__(self, val): + return polyval(self.coeffs, val) + + def __neg__(self): + return poly1d(-self.coeffs) + + def __pos__(self): + return self + + def __mul__(self, other): + if isscalar(other): + return poly1d(self.coeffs * other) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __rmul__(self, other): + if isscalar(other): + return poly1d(other * self.coeffs) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __add__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __radd__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __pow__(self, val): + if not isscalar(val) or int(val) != val or val < 0: + raise ValueError("Power to non-negative integers only.") + res = [1] + for _ in range(val): + res = polymul(self.coeffs, res) + return poly1d(res) + + def __sub__(self, other): + other = poly1d(other) + return poly1d(polysub(self.coeffs, other.coeffs)) + + def __rsub__(self, other): + other = poly1d(other) + return poly1d(polysub(other.coeffs, self.coeffs)) + + def __div__(self, other): + if isscalar(other): + return poly1d(self.coeffs/other) + else: + other = poly1d(other) + return polydiv(self, other) + + __truediv__ = __div__ + + def __rdiv__(self, other): + if isscalar(other): + return poly1d(other/self.coeffs) + else: + other = poly1d(other) + return polydiv(other, self) + + __rtruediv__ = __rdiv__ + + def __eq__(self, other): + if self.coeffs.shape != other.coeffs.shape: + return False + return (self.coeffs == other.coeffs).all() + + def __ne__(self, other): + return not self.__eq__(other) + + def __setattr__(self, key, val): + raise ValueError("Attributes cannot be changed this way.") + + def __getattr__(self, key): + if key in ['r', 'roots']: + return roots(self.coeffs) + elif key in ['c', 'coef', 'coefficients']: + return self.coeffs + elif key in ['o']: + return self.order + else: + try: + return self.__dict__[key] + except KeyError: + raise AttributeError( + "'%s' has no attribute '%s'" % (self.__class__, key)) + + def __getitem__(self, val): + ind = self.order - val + if val > self.order: + return 0 + if val < 0: + return 0 + return self.coeffs[ind] + + def __setitem__(self, key, val): + ind = self.order - key + if key < 0: + raise ValueError("Does not support negative powers.") + if key > self.order: + zr = NX.zeros(key-self.order, self.coeffs.dtype) + self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs)) + self.__dict__['order'] = key + ind = 0 + self.__dict__['coeffs'][ind] = val + return + + def __iter__(self): + return iter(self.coeffs) + + def integ(self, m=1, k=0): + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + + """ + return poly1d(polyint(self.coeffs, m=m, k=k)) + + def deriv(self, m=1): + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. 
+
+        See Also
+        --------
+        polyder : equivalent function
+
+        """
+        return poly1d(polyder(self.coeffs, m=m))
+
+# Stuff to do on module import
+
+warnings.simplefilter('always', RankWarning)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/recfunctions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/recfunctions.py
new file mode 100644
index 0000000000000..a61b1749b566f
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/recfunctions.py
@@ -0,0 +1,1003 @@
+"""
+Collection of utilities to manipulate structured arrays.
+
+Most of these functions were initially implemented by John Hunter for
+matplotlib. They have been rewritten and extended for convenience.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import itertools
+import numpy as np
+import numpy.ma as ma
+from numpy import ndarray, recarray
+from numpy.ma import MaskedArray
+from numpy.ma.mrecords import MaskedRecords
+from numpy.lib._iotools import _is_string_like
+from numpy.compat import basestring
+
+if sys.version_info[0] < 3:
+    from future_builtins import zip
+
+_check_fill_value = np.ma.core._check_fill_value
+
+
+__all__ = [
+    'append_fields', 'drop_fields', 'find_duplicates',
+    'get_fieldstructure', 'join_by', 'merge_arrays',
+    'rec_append_fields', 'rec_drop_fields', 'rec_join',
+    'recursive_fill_fields', 'rename_fields', 'stack_arrays',
+    ]
+
+
+def recursive_fill_fields(input, output):
+    """
+    Fills fields from output with fields from input,
+    with support for nested structures.
+
+    Parameters
+    ----------
+    input : ndarray
+        Input array.
+    output : ndarray
+        Output array.
+
+    Notes
+    -----
+    * `output` should be at least the same size as `input`
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
+    >>> b = np.zeros((3,), dtype=a.dtype)
+    >>> rfn.recursive_fill_fields(a, b)
+    array([(1, 10.0), (2, 20.0), (0, 0.0)],
+          dtype=[('A', '<i4'), ('B', '<f8')])
+
+    """
+    newdtype = output.dtype
+    for field in newdtype.names:
+        try:
+            current = input[field]
+        except ValueError:
+            continue
+        if current.dtype.names:
+            recursive_fill_fields(current, output[field])
+        else:
+            output[field][:len(current)] = current
+    return output
+
+
+def get_names(adtype):
+    """
+    Returns the field names of the input datatype as a tuple.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names(np.empty((1,), dtype=int)) is None
+    True
+    >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names(adtype)
+    ('a', ('b', ('ba', 'bb')))
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names:
+            listnames.append((name, tuple(get_names(current))))
+        else:
+            listnames.append(name)
+    return tuple(listnames) or None
+
+
+def get_names_flat(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Nested
+    structures are flattened beforehand.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
+    True
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names_flat(adtype)
+    ('a', 'b', 'ba', 'bb')
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        listnames.append(name)
+        current = adtype[name]
+        if current.names:
+            listnames.extend(get_names_flat(current))
+    return tuple(listnames) or None
+
+
+def flatten_descr(ndtype):
+    """
+    Flatten a structured data-type description.
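+
+    Parameters
+    ----------
+    ndtype : dtype
+        Input datatype, possibly with nested fields.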
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
+    >>> rfn.flatten_descr(ndtype)
+    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
+
+    """
+    names = ndtype.names
+    if names is None:
+        return ndtype.descr
+    else:
+        descr = []
+        for field in names:
+            (typ, _) = ndtype.fields[field]
+            if typ.names:
+                descr.extend(flatten_descr(typ))
+            else:
+                descr.append((field, typ))
+        return tuple(descr)
+
+
+def zip_descr(seqarrays, flatten=False):
+    """
+    Combine the dtype description of a series of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays
+    flatten : {boolean}, optional
+        Whether to collapse nested descriptions.
+    """
+    newdtype = []
+    if flatten:
+        for a in seqarrays:
+            newdtype.extend(flatten_descr(a.dtype))
+    else:
+        for a in seqarrays:
+            current = a.dtype
+            names = current.names or ()
+            if len(names) > 1:
+                newdtype.append(('', current.descr))
+            else:
+                newdtype.extend(current.descr)
+    return np.dtype(newdtype).descr
+
+
+def get_fieldstructure(adtype, lastname=None, parents=None,):
+    """
+    Returns a dictionary with fields indexing lists of their parent fields.
+
+    This function is used to simplify access to fields nested in other fields.
+
+    Parameters
+    ----------
+    adtype : np.dtype
+        Input datatype
+    lastname : optional
+        Last processed field name (used internally during recursion).
+    parents : dictionary
+        Dictionary of parent fields (used internally during recursion).
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('A', int),
+    ...                    ('B', [('BA', int),
+    ...                           ('BB', [('BBA', int), ('BBB', int)])])])
+    >>> rfn.get_fieldstructure(ndtype)
+    ... # XXX: possible regression, order of BBA and BBB is swapped
+    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+
+    """
+    if parents is None:
+        parents = {}
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names:
+            if lastname:
+                parents[name] = [lastname, ]
+            else:
+                parents[name] = []
+            parents.update(get_fieldstructure(current, name, parents))
+        else:
+            lastparent = [_ for _ in (parents.get(lastname, []) or [])]
+            if lastparent:
+                lastparent.append(lastname)
+            elif lastname:
+                lastparent = [lastname, ]
+            parents[name] = lastparent or []
+    return parents or None
+
+
+def _izip_fields_flat(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays,
+    collapsing any nested structure.
+
+    """
+    for element in iterable:
+        if isinstance(element, np.void):
+            for f in _izip_fields_flat(tuple(element)):
+                yield f
+        else:
+            yield element
+
+
+def _izip_fields(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays.
+
+    """
+    for element in iterable:
+        if (hasattr(element, '__iter__') and
+                not isinstance(element, basestring)):
+            for f in _izip_fields(element):
+                yield f
+        elif isinstance(element, np.void) and len(tuple(element)) == 1:
+            for f in _izip_fields(element):
+                yield f
+        else:
+            yield element
+
+
+def izip_records(seqarrays, fill_value=None, flatten=True):
+    """
+    Returns an iterator of concatenated items from a sequence of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays.
+    fill_value : {None, integer}
+        Value used to pad shorter iterables.
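+        For example, with ``fill_value=-1`` the shorter inputs keep yielding
+        -1 until the longest iterable is exhausted.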
+ flatten : {True, False}, + Whether to + """ + # OK, that's a complete ripoff from Python2.6 itertools.izip_longest + def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop): + "Yields the fill_value or raises IndexError" + yield counter() + # + fillers = itertools.repeat(fill_value) + iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays] + # Should we flatten the items, or just use a nested approach + if flatten: + zipfunc = _izip_fields_flat + else: + zipfunc = _izip_fields + # + try: + for tup in zip(*iters): + yield tuple(zipfunc(tup)) + except IndexError: + pass + + +def _fix_output(output, usemask=True, asrecarray=False): + """ + Private function: return a recarray, a ndarray, a MaskedArray + or a MaskedRecords depending on the input parameters + """ + if not isinstance(output, MaskedArray): + usemask = False + if usemask: + if asrecarray: + output = output.view(MaskedRecords) + else: + output = ma.filled(output) + if asrecarray: + output = output.view(recarray) + return output + + +def _fix_defaults(output, defaults=None): + """ + Update the fill_value and masked data of `output` + from the default given in a dictionary defaults. + """ + names = output.dtype.names + (data, mask, fill_value) = (output.data, output.mask, output.fill_value) + for (k, v) in (defaults or {}).items(): + if k in names: + fill_value[k] = v + data[k][mask[k]] = v + return output + + +def merge_arrays(seqarrays, fill_value=-1, flatten=False, + usemask=False, asrecarray=False): + """ + Merge arrays field by field. + + Parameters + ---------- + seqarrays : sequence of ndarrays + Sequence of arrays + fill_value : {float}, optional + Filling value used to pad missing data on the shorter arrays. + flatten : {False, True}, optional + Whether to collapse nested fields. + usemask : {False, True}, optional + Whether to return a masked array or not. + asrecarray : {False, True}, optional + Whether to return a recarray (MaskedRecords) or not. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) + masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)], + mask = [(False, False) (False, False) (True, False)], + fill_value = (999999, 1e+20), + dtype = [('f0', '>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])), + ... usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]), + ... np.array([10., 20., 30.])), + ... usemask=False, asrecarray=True) + rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + ... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + >>> rfn.drop_fields(a, 'a') + array([((2.0, 3),), ((5.0, 6),)], + dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') + array([(1, (3,)), (4, (6,))], + dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) + array([(1,), (4,)], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) + >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) + array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))], + dtype=[('A', ' 1: + data = merge_arrays(data, flatten=True, usemask=usemask, + fill_value=fill_value) + else: + data = data.pop() + # + output = ma.masked_all(max(len(base), len(data)), + dtype=base.dtype.descr + data.dtype.descr) + output = recursive_fill_fields(base, output) + output = recursive_fill_fields(data, output) + # + return _fix_output(output, usemask=usemask, asrecarray=asrecarray) + + +def rec_append_fields(base, names, data, dtypes=None): + """ + Add new fields to an existing array. + + The names of the fields are given with the `names` arguments, + the corresponding values with the `data` arguments. + If a single field is appended, `names`, `data` and `dtypes` do not have + to be lists but just values. + + Parameters + ---------- + base : array + Input array to extend. + names : string, sequence + String or sequence of strings corresponding to the names + of the new fields. + data : array or sequence of arrays + Array or sequence of arrays storing the fields to add to the base. + dtypes : sequence of datatypes, optional + Datatype or sequence of datatypes. + If None, the datatypes are estimated from the `data`. + + See Also + -------- + append_fields + + Returns + ------- + appended_array : np.recarray + """ + return append_fields(base, names, data=data, dtypes=dtypes, + asrecarray=True, usemask=False) + + +def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, + autoconvert=False): + """ + Superposes arrays fields by fields + + Parameters + ---------- + seqarrays : array or sequence + Sequence of input arrays. + defaults : dictionary, optional + Dictionary mapping field names to the corresponding default values. + usemask : {True, False}, optional + Whether to return a MaskedArray (or MaskedRecords is + `asrecarray==True`) or a ndarray. + asrecarray : {False, True}, optional + Whether to return a recarray (or MaskedRecords if `usemask==True`) + or just a flexible-type ndarray. + autoconvert : {False, True}, optional + Whether automatically cast the type of the field to the maximum. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> x = np.array([1, 2,]) + >>> rfn.stack_arrays(x) is x + True + >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) + >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + ... 
dtype=[('A', '|S3'), ('B', float), ('C', float)]) + >>> test = rfn.stack_arrays((z,zz)) + >>> test + masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0) + ('c', 30.0, 300.0)], + mask = [(False, False, True) (False, False, True) (False, False, False) + (False, False, False) (False, False, False)], + fill_value = ('N/A', 1e+20, 1e+20), + dtype = [('A', '|S3'), ('B', ' np.dtype(current_descr[-1]): + current_descr = list(current_descr) + current_descr[-1] = descr[1] + newdescr[nameidx] = tuple(current_descr) + elif descr[1] != current_descr[-1]: + raise TypeError("Incompatible type '%s' <> '%s'" % + (dict(newdescr)[name], descr[1])) + # Only one field: use concatenate + if len(newdescr) == 1: + output = ma.concatenate(seqarrays) + else: + # + output = ma.masked_all((np.sum(nrecords),), newdescr) + offset = np.cumsum(np.r_[0, nrecords]) + seen = [] + for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): + names = a.dtype.names + if names is None: + output['f%i' % len(seen)][i:j] = a + else: + for name in n: + output[name][i:j] = a[name] + if name not in seen: + seen.append(name) + # + return _fix_output(_fix_defaults(output, defaults), + usemask=usemask, asrecarray=asrecarray) + + +def find_duplicates(a, key=None, ignoremask=True, return_index=False): + """ + Find the duplicates in a structured array along a given key + + Parameters + ---------- + a : array-like + Input array + key : {string, None}, optional + Name of the fields along which to check the duplicates. + If None, the search is performed by records + ignoremask : {True, False}, optional + Whether masked data should be discarded or considered as duplicates. + return_index : {False, True}, optional + Whether to return the indices of the duplicated values. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = [('a', int)] + >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], + ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) + ... # XXX: judging by the output, the ignoremask flag has no effect + """ + a = np.asanyarray(a).ravel() + # Get a dictionary of fields + fields = get_fieldstructure(a.dtype) + # Get the sorting data (by selecting the corresponding field) + base = a + if key: + for f in fields[key]: + base = base[f] + base = base[key] + # Get the sorting indices and the sorted data + sortidx = base.argsort() + sortedbase = base[sortidx] + sorteddata = sortedbase.filled() + # Compare the sorting data + flag = (sorteddata[:-1] == sorteddata[1:]) + # If masked data must be ignored, set the flag to false where needed + if ignoremask: + sortedmask = sortedbase.recordmask + flag[sortedmask[1:]] = False + flag = np.concatenate(([False], flag)) + # We need to take the point on the left as well (else we're missing it) + flag[:-1] = flag[:-1] + flag[1:] + duplicates = a[sortidx][flag] + if return_index: + return (duplicates, sortidx[flag]) + else: + return duplicates + + +def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None, usemask=True, asrecarray=False): + """ + Join arrays `r1` and `r2` on key `key`. + + The key should be either a string or a sequence of string corresponding + to the fields used to join the array. An exception is raised if the + `key` field cannot be found in the two input arrays. Neither `r1` nor + `r2` should have any duplicates along `key`: the presence of duplicates + will make the output quite unreliable. 
Note that duplicates are not
+    looked for by the algorithm.
+
+    Parameters
+    ----------
+    key : {string, sequence}
+        A string or a sequence of strings corresponding to the fields used
+        for comparison.
+    r1, r2 : arrays
+        Structured arrays.
+    jointype : {'inner', 'outer', 'leftouter'}, optional
+        If 'inner', returns the elements common to both r1 and r2.
+        If 'outer', returns the common elements as well as the elements of
+        r1 not in r2 and the elements of r2 not in r1.
+        If 'leftouter', returns the common elements and the elements of r1
+        not in r2.
+    r1postfix : string, optional
+        String appended to the names of the fields of r1 that are present
+        in r2 but absent from the key.
+    r2postfix : string, optional
+        String appended to the names of the fields of r2 that are present
+        in r1 but absent from the key.
+    defaults : {dictionary}, optional
+        Dictionary mapping field names to the corresponding default values.
+    usemask : {True, False}, optional
+        Whether to return a MaskedArray (or MaskedRecords if
+        `asrecarray==True`) or an ndarray.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray (or MaskedRecords if `usemask==True`)
+        or just a flexible-type ndarray.
+
+    Notes
+    -----
+    * The output is sorted along the key.
+    * A temporary array is formed by dropping the fields not in the key for
+      the two arrays and concatenating the result. This array is then
+      sorted, and the common entries selected. The output is constructed by
+      filling the fields with the selected entries. Matching is not
+      preserved if there are duplicates...
+
+    """
+    # Check jointype
+    if jointype not in ('inner', 'outer', 'leftouter'):
+        raise ValueError(
+                "The 'jointype' argument should be in 'inner', "
+                "'outer' or 'leftouter' (got '%s' instead)" % jointype
+                )
+    # If we have a single key, put it in a tuple
+    if isinstance(key, basestring):
+        key = (key,)
+
+    # Check the keys
+    for name in key:
+        if name not in r1.dtype.names:
+            raise ValueError('r1 does not have key field %s' % name)
+        if name not in r2.dtype.names:
+            raise ValueError('r2 does not have key field %s' % name)
+
+    # Make sure we work with ravelled arrays
+    r1 = r1.ravel()
+    r2 = r2.ravel()
+    # Fixme: nb2 below is never used. Commenting out for pyflakes.
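+    # Only nb1, the length of r1, is needed: in the concatenated key array
+    # built below, indices smaller than nb1 come from r1, the rest from r2.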
+
+    # (nb1, nb2) = (len(r1), len(r2))
+    nb1 = len(r1)
+    (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
+
+    # Check the names for collision
+    if (set.intersection(set(r1names), set(r2names)).difference(key) and
+            not (r1postfix or r2postfix)):
+        msg = "r1 and r2 contain common names, r1postfix and r2postfix "
+        msg += "can't be empty"
+        raise ValueError(msg)
+
+    # Make temporary arrays of just the keys
+    r1k = drop_fields(r1, [n for n in r1names if n not in key])
+    r2k = drop_fields(r2, [n for n in r2names if n not in key])
+
+    # Concatenate the two arrays for comparison
+    aux = ma.concatenate((r1k, r2k))
+    idx_sort = aux.argsort(order=key)
+    aux = aux[idx_sort]
+    #
+    # Get the common keys
+    flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
+    flag_in[:-1] = flag_in[1:] + flag_in[:-1]
+    idx_in = idx_sort[flag_in]
+    idx_1 = idx_in[(idx_in < nb1)]
+    idx_2 = idx_in[(idx_in >= nb1)] - nb1
+    (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
+    if jointype == 'inner':
+        (r1spc, r2spc) = (0, 0)
+    elif jointype == 'outer':
+        idx_out = idx_sort[~flag_in]
+        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+        idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
+        (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
+    elif jointype == 'leftouter':
+        idx_out = idx_sort[~flag_in]
+        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+        (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
+    # Select the entries from each input
+    (s1, s2) = (r1[idx_1], r2[idx_2])
+    #
+    # Build the new description of the output array
+    # Start with the key fields
+    ndtype = [list(_) for _ in r1k.dtype.descr]
+    # Add the other fields
+    ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
+    for desc in r2.dtype.descr:
+        desc = list(desc)
+        name = desc[0]
+        # Rebuild the list of names on each pass: suffixing or inserting
+        # entries below modifies `ndtype`, so the list may differ from
+        # r1names.
+        names = list(_[0] for _ in ndtype)
+        # Have we seen the current name already?
+        if name in names:
+            nameidx = names.index(name)
+            current = ndtype[nameidx]
+            # The current field is part of the key: take the largest dtype
+            if name in key:
+                current[-1] = max(desc[1], current[-1])
+            # The current field is not part of the key: add the suffixes
+            else:
+                current[0] += r1postfix
+                desc[0] += r2postfix
+                ndtype.insert(nameidx + 1, desc)
+        # ... we haven't seen it yet: just add the description to the list
+        else:
+            ndtype.append(desc)
+    # Revert the elements to tuples
+    ndtype = [tuple(_) for _ in ndtype]
+    # Find the largest number of common fields:
+    # r1cmn and r2cmn should be equal, but...
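+    # duplicate keys in either input can make them differ, so take the larger.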
+ cmn = max(r1cmn, r2cmn) + # Construct an empty array + output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) + names = output.dtype.names + for f in r1names: + selected = s1[f] + if f not in names or (f in r2names and not r2postfix and f not in key): + f += r1postfix + current = output[f] + current[:r1cmn] = selected[:r1cmn] + if jointype in ('outer', 'leftouter'): + current[cmn:cmn + r1spc] = selected[r1cmn:] + for f in r2names: + selected = s2[f] + if f not in names or (f in r1names and not r1postfix and f not in key): + f += r2postfix + current = output[f] + current[:r2cmn] = selected[:r2cmn] + if (jointype == 'outer') and r2spc: + current[-r2spc:] = selected[r2cmn:] + # Sort and finalize the output + output.sort(order=key) + kwargs = dict(usemask=usemask, asrecarray=asrecarray) + return _fix_output(_fix_defaults(output, defaults), **kwargs) + + +def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None): + """ + Join arrays `r1` and `r2` on keys. + Alternative to join_by, that always returns a np.recarray. + + See Also + -------- + join_by : equivalent function + """ + kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, + defaults=defaults, usemask=False, asrecarray=True) + return join_by(key, r1, r2, **kwargs) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/scimath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/scimath.py new file mode 100644 index 0000000000000..e07caf805ed27 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/scimath.py @@ -0,0 +1,566 @@ +""" +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> from numpy.lib import scimath + >>> scimath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions are +correctly handled. See their respective docstrings for specific examples. + +""" +from __future__ import division, absolute_import, print_function + +import numpy.core.numeric as nx +import numpy.core.numerictypes as nt +from numpy.core.numeric import asarray, any +from numpy.lib.type_check import isreal + + +__all__ = [ + 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', + 'arctanh' + ] + + +_ln2 = nx.log(2.0) + + +def _tocomplex(arr): + """Convert its input `arr` to a complex array. + + The input is returned as a complex array of the smallest type that will fit + the original data: types like single, byte, short, etc. become csingle, + while others become cdouble. + + A copy of the input is always made. + + Parameters + ---------- + arr : array + + Returns + ------- + array + An array with the same input data as the input but in complex form. 
+ + Examples + -------- + + First, consider an input of type short: + + >>> a = np.array([1,2,3],np.short) + + >>> ac = np.lib.scimath._tocomplex(a); ac + array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> ac.dtype + dtype('complex64') + + If the input is of type double, the output is correspondingly of the + complex double type as well: + + >>> b = np.array([1,2,3],np.double) + + >>> bc = np.lib.scimath._tocomplex(b); bc + array([ 1.+0.j, 2.+0.j, 3.+0.j]) + + >>> bc.dtype + dtype('complex128') + + Note that even if the input was complex to begin with, a copy is still + made, since the astype() method always copies: + + >>> c = np.array([1,2,3],np.csingle) + + >>> cc = np.lib.scimath._tocomplex(c); cc + array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> c *= 2; c + array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + + >>> cc + array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + """ + if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, + nt.ushort, nt.csingle)): + return arr.astype(nt.csingle) + else: + return arr.astype(nt.cdouble) + +def _fix_real_lt_zero(x): + """Convert `x` to complex if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_real_lt_zero([-1,2]) + array([-1.+0.j, 2.+0.j]) + + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = _tocomplex(x) + return x + +def _fix_int_lt_zero(x): + """Convert `x` to double if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_int_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_int_lt_zero([-1,2]) + array([-1., 2.]) + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = x * 1.0 + return x + +def _fix_real_abs_gt_1(x): + """Convert `x` to complex if it has real components x_i with abs(x_i)>1. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) + array([0, 1]) + + >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) + array([ 0.+0.j, 2.+0.j]) + """ + x = asarray(x) + if any(isreal(x) & (abs(x) > 1)): + x = _tocomplex(x) + return x + +def sqrt(x): + """ + Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike `numpy.sqrt` which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of `x`. If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.sqrt + + Examples + -------- + For real, non-negative inputs this works just like `numpy.sqrt`: + + >>> np.lib.scimath.sqrt(1) + 1.0 + >>> np.lib.scimath.sqrt([1, 4]) + array([ 1., 2.]) + + But it automatically handles negative inputs: + + >>> np.lib.scimath.sqrt(-1) + (0.0+1.0j) + >>> np.lib.scimath.sqrt([-1,4]) + array([ 0.+1.j, 2.+0.j]) + + """ + x = _fix_real_lt_zero(x) + return nx.sqrt(x) + +def log(x): + """ + Compute the natural logarithm of `x`. + + Return the "principal value" (for a description of this, see `numpy.log`) + of :math:`log_e(x)`. 
For real `x > 0`, this is a real number (``log(0)`` + returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the + complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log is (are) required. + + Returns + ------- + out : ndarray or scalar + The log of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log + + Notes + ----- + For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` + (note, however, that otherwise `numpy.log` and this `log` are identical, + i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, + notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + >>> np.emath.log(np.exp(1)) + 1.0 + + Negative arguments are handled "correctly" (recall that + ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): + + >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) + True + + """ + x = _fix_real_lt_zero(x) + return nx.log(x) + +def log10(x): + """ + Compute the logarithm base 10 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this + is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` + returns ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose log base 10 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array object is returned. + + See Also + -------- + numpy.log10 + + Notes + ----- + For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` + (note, however, that otherwise `numpy.log10` and this `log10` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + + (We set the printing precision so the example can be auto-tested) + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log10(10**1) + 1.0 + + >>> np.emath.log10([-10**1, -10**2, 10**2]) + array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log10(x) + +def logn(n, x): + """ + Take log base n of x. + + If `x` contains negative inputs, the answer is computed and returned in the + complex domain. + + Parameters + ---------- + n : int + The base in which the log is taken. + x : array_like + The value(s) whose log base `n` is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base `n` of the `x` value(s). If `x` was a scalar, so is + `out`, otherwise an array is returned. + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.lib.scimath.logn(2, [4, 8]) + array([ 2., 3.]) + >>> np.lib.scimath.logn(2, [-4, -8, 8]) + array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + n = _fix_real_lt_zero(n) + return nx.log(x)/nx.log(n) + +def log2(x): + """ + Compute the logarithm base 2 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is + a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns + ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log base 2 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 2 of the `x` value(s). 
If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log2 + + Notes + ----- + For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` + (note, however, that otherwise `numpy.log2` and this `log2` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + We set the printing precision so the example can be auto-tested: + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log2(8) + 3.0 + >>> np.emath.log2([-4, -8, 8]) + array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log2(x) + +def power(x, p): + """ + Return x to the power p, (x**p). + + If `x` contains negative values, the output is converted to the + complex domain. + + Parameters + ---------- + x : array_like + The input value(s). + p : array_like of ints + The power(s) to which `x` is raised. If `x` contains multiple values, + `p` has to either be a scalar, or contain the same number of values + as `x`. In the latter case, the result is + ``x[0]**p[0], x[1]**p[1], ...``. + + Returns + ------- + out : ndarray or scalar + The result of ``x**p``. If `x` and `p` are scalars, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.power + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.lib.scimath.power([2, 4], 2) + array([ 4, 16]) + >>> np.lib.scimath.power([2, 4], -2) + array([ 0.25 , 0.0625]) + >>> np.lib.scimath.power([-2, 4], 2) + array([ 4.+0.j, 16.+0.j]) + + """ + x = _fix_real_lt_zero(x) + p = _fix_int_lt_zero(p) + return nx.power(x, p) + +def arccos(x): + """ + Compute the inverse cosine of x. + + Return the "principal value" (for a description of this, see + `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[0, \\pi]`. Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arccos is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arccos + + Notes + ----- + For an arccos() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arccos`. + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.arccos(1) # a scalar is returned + 0.0 + + >>> np.emath.arccos([1,2]) + array([ 0.-0.j , 0.+1.317j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arccos(x) + +def arcsin(x): + """ + Compute the inverse sine of x. + + Return the "principal value" (for a description of this, see + `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is + returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arcsin is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse sine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arcsin + + Notes + ----- + For an arcsin() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arcsin`. 
+ + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.arcsin(0) + 0.0 + + >>> np.emath.arcsin([0,1]) + array([ 0. , 1.5708]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arcsin(x) + +def arctanh(x): + """ + Compute the inverse hyperbolic tangent of `x`. + + Return the "principal value" (for a description of this, see + `numpy.arctanh`) of `arctanh(x)`. For real `x` such that + `abs(x) < 1`, this is a real number. If `abs(x) > 1`, or if `x` is + complex, the result is complex. Finally, `x = 1` returns``inf`` and + `x=-1` returns ``-inf``. + + Parameters + ---------- + x : array_like + The value(s) whose arctanh is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was + a scalar so is `out`, otherwise an array is returned. + + + See Also + -------- + numpy.arctanh + + Notes + ----- + For an arctanh() that returns ``NAN`` when real `x` is not in the + interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does + return +/-inf for `x = +/-1`). + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.arctanh(np.matrix(np.eye(2))) + array([[ Inf, 0.], + [ 0., Inf]]) + >>> np.emath.arctanh([1j]) + array([ 0.+0.7854j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arctanh(x) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/setup.py new file mode 100644 index 0000000000000..68d99c33a78e2 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/setup.py @@ -0,0 +1,23 @@ +from __future__ import division, print_function + +from os.path import join + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('lib', parent_package, top_path) + + config.add_include_dirs(join('..', 'core', 'include')) + + config.add_extension('_compiled_base', + sources=[join('src', '_compiled_base.c')] + ) + + config.add_data_dir('benchmarks') + config.add_data_dir('tests') + + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/shape_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/shape_base.py new file mode 100644 index 0000000000000..70fa3ab032c99 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/shape_base.py @@ -0,0 +1,865 @@ +from __future__ import division, absolute_import, print_function + +import warnings + +import numpy.core.numeric as _nx +from numpy.core.numeric import ( + asarray, zeros, outer, concatenate, isscalar, array, asanyarray + ) +from numpy.core.fromnumeric import product, reshape +from numpy.core import vstack, atleast_3d + + +__all__ = [ + 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', + 'apply_along_axis', 'kron', 'tile', 'get_array_wrap' + ] + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Apply a function to 1-D slices along the given axis. + + Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a` + is a 1-D slice of `arr` along `axis`. + + Parameters + ---------- + func1d : function + This function should accept 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. + axis : integer + Axis along which `arr` is sliced. + arr : ndarray + Input array. + args : any + Additional arguments to `func1d`. 
+ kwargs: any + Additional named arguments to `func1d`. + + .. versionadded:: 1.9.0 + + + Returns + ------- + apply_along_axis : ndarray + The output array. The shape of `outarr` is identical to the shape of + `arr`, except along the `axis` dimension, where the length of `outarr` + is equal to the size of the return value of `func1d`. If `func1d` + returns a scalar `outarr` will have one fewer dimensions than `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([ 4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([ 2., 5., 8.]) + + For a function that doesn't return a scalar, the number of dimensions in + `outarr` is the same as `arr`. + + >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) + >>> np.apply_along_axis(sorted, 1, b) + array([[1, 7, 8], + [3, 4, 9], + [2, 5, 6]]) + + """ + arr = asarray(arr) + nd = arr.ndim + if axis < 0: + axis += nd + if (axis >= nd): + raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." + % (axis, nd)) + ind = [0]*(nd-1) + i = zeros(nd, 'O') + indlist = list(range(nd)) + indlist.remove(axis) + i[axis] = slice(None, None) + outshape = asarray(arr.shape).take(indlist) + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + # if res is a number, then we have a smaller output array + if isscalar(res): + outarr = zeros(outshape, asarray(res).dtype) + outarr[tuple(ind)] = res + Ntot = product(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1-nd)): + ind[n-1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + k += 1 + return outarr + else: + Ntot = product(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = len(res) + outarr = zeros(outshape, asarray(res).dtype) + outarr[tuple(i.tolist())] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1-nd)): + ind[n-1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(i.tolist())] = res + k += 1 + return outarr + + +def apply_over_axes(func, a, axes): + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first + element of `axes`. The result `res` of the function call must have + either the same dimensions as `a` or one less dimension. If `res` + has one less dimension than `a`, a dimension is inserted before + `axis`. The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function must take two arguments, `func(a, axis)`. + a : array_like + Input array. + axes : array_like + Axes over which `func` is applied; the elements must be integers. + + Returns + ------- + apply_over_axis : ndarray + The output array. The number of dimensions is the same as `a`, + but the shape can be different. This depends on whether `func` + changes the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. 
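+    expand_dims :
+        Insert a new axis, corresponding to a given position in the
+        array shape.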
+ + Notes + ------ + This function is equivalent to tuple axis arguments to reorderable ufuncs + with keepdims=True. Tuple axis arguments to ufuncs have been availabe since + version 1.7.0. + + Examples + -------- + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.sum(a, axis=(0,2), keepdims=True) + array([[[ 60], + [ 92], + [124]]]) + + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + +def expand_dims(a, axis): + """ + Expand the shape of an array. + + Insert a new axis, corresponding to a given position in the array shape. + + Parameters + ---------- + a : array_like + Input array. + axis : int + Position (amongst axes) where new axis is to be inserted. + + Returns + ------- + res : ndarray + Output array. The number of dimensions is one greater than that of + the input array. + + See Also + -------- + doc.indexing, atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> x = np.array([1,2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis] + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + Note that some examples may use ``None`` instead of ``np.newaxis``. These + are the same objects: + + >>> np.newaxis is None + True + + """ + a = asarray(a) + shape = a.shape + if axis < 0: + axis = axis + len(shape) + 1 + return a.reshape(shape[:axis] + (1,) + shape[axis:]) + +row_stack = vstack + +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + arrays = [] + for v in tup: + arr = array(v, copy=False, subok=True) + if arr.ndim < 2: + arr = array(arr, copy=False, subok=True, ndmin=2).T + arrays.append(arr) + return _nx.concatenate(arrays, 1) + +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). + + Takes a sequence of arrays and stack them along the third axis + to make a single array. Rebuilds arrays divided by `dsplit`. + This is a simple way to stack 2D arrays (images) into a single + 3D array for processing. + + Parameters + ---------- + tup : sequence of arrays + Arrays to stack. 
All of them must have the same shape along all + but the third axis. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays. + + See Also + -------- + vstack : Stack along first axis. + hstack : Stack along second axis. + concatenate : Join arrays. + dsplit : Split array along third axis. + + Notes + ----- + Equivalent to ``np.concatenate(tup, axis=2)``. + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.dstack((a,b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.dstack((a,b)) + array([[[1, 2]], + [[2, 3]], + [[3, 4]]]) + + """ + return _nx.concatenate([atleast_3d(_m) for _m in tup], 2) + +def _replace_zero_by_x_arrays(sub_arys): + for i in range(len(sub_arys)): + if len(_nx.shape(sub_arys[i])) == 0: + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + return sub_arys + +def array_split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Please refer to the ``split`` documentation. The only difference + between these functions is that ``array_split`` allows + `indices_or_sections` to be an integer that does *not* equally + divide the axis. + + See Also + -------- + split : Split array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] + + """ + try: + Ntotal = ary.shape[axis] + except AttributeError: + Ntotal = len(ary) + try: + # handle scalar case. + Nsections = len(indices_or_sections) + 1 + div_points = [0] + list(indices_or_sections) + [Ntotal] + except TypeError: + # indices_or_sections is a scalar, not an array. + Nsections = int(indices_or_sections) + if Nsections <= 0: + raise ValueError('number sections must be larger than 0.') + Neach_section, extras = divmod(Ntotal, Nsections) + section_sizes = ([0] + + extras * [Neach_section+1] + + (Nsections-extras) * [Neach_section]) + div_points = _nx.array(section_sizes).cumsum() + + sub_arys = [] + sary = _nx.swapaxes(ary, axis, 0) + for i in range(Nsections): + st = div_points[i] + end = div_points[i + 1] + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) + + # This "kludge" was introduced here to replace arrays shaped (0, 10) + # or similar with an array shaped (0,). + # There seems no need for this, so give a FutureWarning to remove later. + if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1: + warnings.warn("in the future np.array_split will retain the shape of " + "arrays with a zero size, instead of replacing them by " + "`array([])`, which always has a shape of (0,).", + FutureWarning) + sub_arys = _replace_zero_by_x_arrays(sub_arys) + + return sub_arys + +def split(ary,indices_or_sections,axis=0): + """ + Split an array into multiple sub-arrays. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections : int or 1-D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1-D array of sorted integers, the entries + indicate where along `axis` the array is split. 
For example, + ``[2, 3]`` would, for ``axis=0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. + + Returns + ------- + sub-arrays : list of ndarrays + A list of sub-arrays. + + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join arrays together. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + [array([ 0., 1., 2.]), + array([ 3., 4.]), + array([ 5.]), + array([ 6., 7.]), + array([], dtype=float64)] + + """ + try: + len(indices_or_sections) + except TypeError: + sections = indices_or_sections + N = ary.shape[axis] + if N % sections: + raise ValueError( + 'array split does not result in an equal division') + res = array_split(ary, indices_or_sections, axis) + return res + +def hsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays horizontally (column-wise). + + Please refer to the `split` documentation. `hsplit` is equivalent + to `split` with ``axis=1``, the array is always split along the second + axis regardless of the array dimension. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]]) + >>> np.hsplit(x, 2) + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [ 12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [ 10., 11.], + [ 14., 15.]])] + >>> np.hsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [ 12., 13., 14.]]), + array([[ 3.], + [ 7.], + [ 11.], + [ 15.]]), + array([], dtype=float64)] + + With a higher dimensional array the split is still along the second axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[ 0., 1.], + [ 2., 3.]], + [[ 4., 5.], + [ 6., 7.]]]) + >>> np.hsplit(x, 2) + [array([[[ 0., 1.]], + [[ 4., 5.]]]), + array([[[ 2., 3.]], + [[ 6., 7.]]])] + + """ + if len(_nx.shape(ary)) == 0: + raise ValueError('hsplit only works on arrays of 1 or more dimensions') + if len(ary.shape) > 1: + return split(ary, indices_or_sections, 1) + else: + return split(ary, indices_or_sections, 0) + +def vsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays vertically (row-wise). + + Please refer to the ``split`` documentation. ``vsplit`` is equivalent + to ``split`` with `axis=0` (default), the array is always split along the + first axis regardless of the array dimension. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. 
+ + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]]) + >>> np.vsplit(x, 2) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]]), + array([[ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]])] + >>> np.vsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + array([[ 12., 13., 14., 15.]]), + array([], dtype=float64)] + + With a higher dimensional array the split is still along the first axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[ 0., 1.], + [ 2., 3.]], + [[ 4., 5.], + [ 6., 7.]]]) + >>> np.vsplit(x, 2) + [array([[[ 0., 1.], + [ 2., 3.]]]), + array([[[ 4., 5.], + [ 6., 7.]]])] + + """ + if len(_nx.shape(ary)) < 2: + raise ValueError('vsplit only works on arrays of 2 or more dimensions') + return split(ary, indices_or_sections, 0) + +def dsplit(ary, indices_or_sections): + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Please refer to the `split` documentation. `dsplit` is equivalent + to `split` with ``axis=2``, the array is always split along the third + axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> x + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [ 12., 13., 14., 15.]]]) + >>> np.dsplit(x, 2) + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [ 12., 13.]]]), + array([[[ 2., 3.], + [ 6., 7.]], + [[ 10., 11.], + [ 14., 15.]]])] + >>> np.dsplit(x, np.array([3, 6])) + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [ 12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[ 11.], + [ 15.]]]), + array([], dtype=float64)] + + """ + if len(_nx.shape(ary)) < 3: + raise ValueError('dsplit only works on arrays of 3 or more dimensions') + return split(ary, indices_or_sections, 2) + +def get_array_prepare(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None + """ + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_prepare__) for i, x in enumerate(args) + if hasattr(x, '__array_prepare__')) + if wrappers: + return wrappers[-1][-1] + return None + +def get_array_wrap(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None + """ + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_wrap__) for i, x in enumerate(args) + if hasattr(x, '__array_wrap__')) + if wrappers: + return wrappers[-1][-1] + return None + +def kron(a, b): + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + outer : The outer product + + Notes + ----- + The function assumes that the number of dimenensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. + If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, + the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. 
+ The elements are products of elements from `a` and `b`, organized + explicitly by:: + + kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] + + where:: + + kt = it * st + jt, t = 0,...,N + + In the common 2-D case (N=1), the block structure can be visualized:: + + [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], + [ ... ... ], + [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] + + + Examples + -------- + >>> np.kron([1,10,100], [5,6,7]) + array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) + >>> np.kron([5,6,7], [1,10,100]) + array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) + + >>> np.kron(np.eye(2), np.ones((2,2))) + array([[ 1., 1., 0., 0.], + [ 1., 1., 0., 0.], + [ 0., 0., 1., 1.], + [ 0., 0., 1., 1.]]) + + >>> a = np.arange(100).reshape((2,5,2,5)) + >>> b = np.arange(24).reshape((2,3,4)) + >>> c = np.kron(a,b) + >>> c.shape + (2, 10, 6, 20) + >>> I = (1,3,0,2) + >>> J = (0,2,1) + >>> J1 = (0,) + J # extend to ndim=4 + >>> S1 = (1,) + b.shape + >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) + >>> c[K] == a[I]*b[J] + True + + """ + b = asanyarray(b) + a = array(a, copy=False, subok=True, ndmin=b.ndim) + ndb, nda = b.ndim, a.ndim + if (nda == 0 or ndb == 0): + return _nx.multiply(a, b) + as_ = a.shape + bs = b.shape + if not a.flags.contiguous: + a = reshape(a, as_) + if not b.flags.contiguous: + b = reshape(b, bs) + nd = ndb + if (ndb != nda): + if (ndb > nda): + as_ = (1,)*(ndb-nda) + as_ + else: + bs = (1,)*(nda-ndb) + bs + nd = nda + result = outer(a, b).reshape(as_+bs) + axis = nd-1 + for _ in range(nd): + result = concatenate(result, axis=axis) + wrapper = get_array_prepare(a, b) + if wrapper is not None: + result = wrapper(result) + wrapper = get_array_wrap(a, b) + if wrapper is not None: + result = wrapper(result) + return result + + +def tile(A, reps): + """ + Construct an array by repeating A the number of times given by reps. + + If `reps` has length ``d``, the result will have dimension of + ``max(d, A.ndim)``. + + If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, + or shape (1, 1, 3) for 3-D replication. If this is not the desired + behavior, promote `A` to d-dimensions manually before calling this + function. + + If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. + Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as + (1, 1, 2, 2). + + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. + + Returns + ------- + c : ndarray + The tiled output array. + + See Also + -------- + repeat : Repeat elements of an array. 
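+
+    Notes
+    -----
+    Although `tile` may be used for broadcasting, it is strongly
+    recommended to use numpy's broadcasting operations and functions
+    where possible.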
+ + Examples + -------- + >>> a = np.array([0, 1, 2]) + >>> np.tile(a, 2) + array([0, 1, 2, 0, 1, 2]) + >>> np.tile(a, (2, 2)) + array([[0, 1, 2, 0, 1, 2], + [0, 1, 2, 0, 1, 2]]) + >>> np.tile(a, (2, 1, 2)) + array([[[0, 1, 2, 0, 1, 2]], + [[0, 1, 2, 0, 1, 2]]]) + + >>> b = np.array([[1, 2], [3, 4]]) + >>> np.tile(b, 2) + array([[1, 2, 1, 2], + [3, 4, 3, 4]]) + >>> np.tile(b, (2, 1)) + array([[1, 2], + [3, 4], + [1, 2], + [3, 4]]) + + """ + try: + tup = tuple(reps) + except TypeError: + tup = (reps,) + d = len(tup) + c = _nx.array(A, copy=False, subok=True, ndmin=d) + shape = list(c.shape) + n = max(c.size, 1) + if (d < c.ndim): + tup = (1,)*(c.ndim-d) + tup + for i, nrep in enumerate(tup): + if nrep != 1: + c = c.reshape(-1, n).repeat(nrep, 0) + dim_in = shape[i] + dim_out = dim_in*nrep + shape[i] = dim_out + n //= max(dim_in, 1) + return c.reshape(shape) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/stride_tricks.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/stride_tricks.py new file mode 100644 index 0000000000000..12f8bbf131e39 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/stride_tricks.py @@ -0,0 +1,123 @@ +""" +Utilities that manipulate strides to achieve desirable effects. + +An explanation of strides can be found in the "ndarray.rst" file in the +NumPy reference guide. + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np + +__all__ = ['broadcast_arrays'] + +class DummyArray(object): + """Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. + """ + + def __init__(self, interface, base=None): + self.__array_interface__ = interface + self.base = base + +def as_strided(x, shape=None, strides=None): + """ Make an ndarray from the given array with the given shape and strides. + """ + interface = dict(x.__array_interface__) + if shape is not None: + interface['shape'] = tuple(shape) + if strides is not None: + interface['strides'] = tuple(strides) + array = np.asarray(DummyArray(interface, base=x)) + # Make sure dtype is correct in case of custom dtype + if array.dtype.kind == 'V': + array.dtype = x.dtype + return array + +def broadcast_arrays(*args): + """ + Broadcast any number of arrays against each other. + + Parameters + ---------- + `*args` : array_likes + The arrays to broadcast. + + Returns + ------- + broadcasted : list of arrays + These arrays are views on the original arrays. They are typically + not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. If you + need to write to the arrays, make copies first. + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> y = np.array([[1],[2],[3]]) + >>> np.broadcast_arrays(x, y) + [array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]), array([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]])] + + Here is a useful idiom for getting contiguous copies instead of + non-contiguous views. + + >>> [np.array(a) for a in np.broadcast_arrays(x, y)] + [array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]), array([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]])] + + """ + args = [np.asarray(_m) for _m in args] + shapes = [x.shape for x in args] + if len(set(shapes)) == 1: + # Common case where nothing needs to be broadcasted. 
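+        # All shapes are identical, so each input already has the common
+        # shape; the original views can be returned without stride tricks.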
+ return args + shapes = [list(s) for s in shapes] + strides = [list(x.strides) for x in args] + nds = [len(s) for s in shapes] + biggest = max(nds) + # Go through each array and prepend dimensions of length 1 to each of + # the shapes in order to make the number of dimensions equal. + for i in range(len(args)): + diff = biggest - nds[i] + if diff > 0: + shapes[i] = [1] * diff + shapes[i] + strides[i] = [0] * diff + strides[i] + # Chech each dimension for compatibility. A dimension length of 1 is + # accepted as compatible with any other length. + common_shape = [] + for axis in range(biggest): + lengths = [s[axis] for s in shapes] + unique = set(lengths + [1]) + if len(unique) > 2: + # There must be at least two non-1 lengths for this axis. + raise ValueError("shape mismatch: two or more arrays have " + "incompatible dimensions on axis %r." % (axis,)) + elif len(unique) == 2: + # There is exactly one non-1 length. The common shape will take + # this value. + unique.remove(1) + new_length = unique.pop() + common_shape.append(new_length) + # For each array, if this axis is being broadcasted from a + # length of 1, then set its stride to 0 so that it repeats its + # data. + for i in range(len(args)): + if shapes[i][axis] == 1: + shapes[i][axis] = new_length + strides[i][axis] = 0 + else: + # Every array has a length of 1 on this axis. Strides can be + # left alone as nothing is broadcasted. + common_shape.append(1) + + # Construct the new arrays. + broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in + zip(args, shapes, strides)] + return broadcasted diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__datasource.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__datasource.py new file mode 100644 index 0000000000000..090f71f670c92 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__datasource.py @@ -0,0 +1,351 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +from tempfile import mkdtemp, mkstemp, NamedTemporaryFile +from shutil import rmtree + +from numpy.compat import asbytes +from numpy.testing import ( + run_module_suite, TestCase, assert_ + ) +import numpy.lib._datasource as datasource + +if sys.version_info[0] >= 3: + import urllib.request as urllib_request + from urllib.parse import urlparse + from urllib.error import URLError +else: + import urllib2 as urllib_request + from urlparse import urlparse + from urllib2 import URLError + + +def urlopen_stub(url, data=None): + '''Stub to replace urlopen for testing.''' + if url == valid_httpurl(): + tmpfile = NamedTemporaryFile(prefix='urltmp_') + return tmpfile + else: + raise URLError('Name or service not known') + +# setup and teardown +old_urlopen = None + + +def setup(): + global old_urlopen + + old_urlopen = urllib_request.urlopen + urllib_request.urlopen = urlopen_stub + + +def teardown(): + urllib_request.urlopen = old_urlopen + +# A valid website for more robust testing +http_path = 'http://www.google.com/' +http_file = 'index.html' + +http_fakepath = 'http://fake.abc.web/site/' +http_fakefile = 'fake.txt' + +malicious_files = ['/etc/shadow', '../../shadow', + '..\\system.dat', 'c:\\windows\\system.dat'] + +magic_line = asbytes('three is the magic number') + + +# Utility functions used by many TestCases +def valid_textfile(filedir): + # Generate and return a valid temporary file. 
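+    # mkstemp returns an already-open OS-level descriptor along with the
+    # path; it is closed immediately below so the file can be reopened.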
+ fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) + os.close(fd) + return path + + +def invalid_textfile(filedir): + # Generate and return an invalid filename. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) + os.close(fd) + os.remove(path) + return path + + +def valid_httpurl(): + return http_path+http_file + + +def invalid_httpurl(): + return http_fakepath+http_fakefile + + +def valid_baseurl(): + return http_path + + +def invalid_baseurl(): + return http_fakepath + + +def valid_httpfile(): + return http_file + + +def invalid_httpfile(): + return http_fakefile + + +class TestDataSourceOpen(TestCase): + def setUp(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def tearDown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + fh = self.ds.open(valid_httpurl()) + assert_(fh) + fh.close() + + def test_InvalidHTTP(self): + url = invalid_httpurl() + self.assertRaises(IOError, self.ds.open, url) + try: + self.ds.open(url) + except IOError as e: + # Regression test for bug fixed in r4342. + assert_(e.errno is None) + + def test_InvalidHTTPCacheURLError(self): + self.assertRaises(URLError, self.ds._cache, invalid_httpurl()) + + def test_ValidFile(self): + local_file = valid_textfile(self.tmpdir) + fh = self.ds.open(local_file) + assert_(fh) + fh.close() + + def test_InvalidFile(self): + invalid_file = invalid_textfile(self.tmpdir) + self.assertRaises(IOError, self.ds.open, invalid_file) + + def test_ValidGzipFile(self): + try: + import gzip + except ImportError: + # We don't have the gzip capabilities to test. + import nose + raise nose.SkipTest + # Test datasource's internal file_opener for Gzip files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + fp = gzip.open(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + self.assertEqual(magic_line, result) + + def test_ValidBz2File(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + import nose + raise nose.SkipTest + # Test datasource's internal file_opener for BZip2 files. 
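+        # Write a known payload through bz2 and check that DataSource
+        # decompresses it transparently on open().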
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + self.assertEqual(magic_line, result) + + +class TestDataSourceExists(TestCase): + def setUp(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def tearDown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + assert_(self.ds.exists(valid_httpurl())) + + def test_InvalidHTTP(self): + self.assertEqual(self.ds.exists(invalid_httpurl()), False) + + def test_ValidFile(self): + # Test valid file in destpath + tmpfile = valid_textfile(self.tmpdir) + assert_(self.ds.exists(tmpfile)) + # Test valid local file not in destpath + localdir = mkdtemp() + tmpfile = valid_textfile(localdir) + assert_(self.ds.exists(tmpfile)) + rmtree(localdir) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + self.assertEqual(self.ds.exists(tmpfile), False) + + +class TestDataSourceAbspath(TestCase): + def setUp(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.ds = datasource.DataSource(self.tmpdir) + + def tearDown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + self.assertEqual(local_path, self.ds.abspath(valid_httpurl())) + + def test_ValidFile(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + self.assertEqual(tmpfile, self.ds.abspath(tmpfilename)) + # Test filename with complete path + self.assertEqual(tmpfile, self.ds.abspath(tmpfile)) + + def test_InvalidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl())) + + def test_InvalidFile(self): + invalidfile = valid_textfile(self.tmpdir) + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename)) + # Test filename with complete path + self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile)) + + def test_sandboxing(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + + tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + + assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(tmpfile).startswith(self.tmpdir)) + assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_ValidFile() + self.test_InvalidHTTP() + self.test_InvalidFile() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryAbspath(TestCase): + def setUp(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def tearDown(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.repos._destpath, netloc, + 
upath.strip(os.sep).strip('/')) + filepath = self.repos.abspath(valid_httpfile()) + self.assertEqual(local_path, filepath) + + def test_sandboxing(self): + tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) + assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryExists(TestCase): + def setUp(self): + self.tmpdir = mkdtemp() + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def tearDown(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidFile(self): + # Create local temp file + tmpfile = valid_textfile(self.tmpdir) + assert_(self.repos.exists(tmpfile)) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + self.assertEqual(self.repos.exists(tmpfile), False) + + def test_RemoveHTTPFile(self): + assert_(self.repos.exists(valid_httpurl())) + + def test_CachedHTTPFile(self): + localfile = valid_httpurl() + # Create a locally cached temp file with an URL based + # directory structure. This is similar to what Repository.open + # would do. + scheme, netloc, upath, pms, qry, frg = urlparse(localfile) + local_path = os.path.join(self.repos._destpath, netloc) + os.mkdir(local_path, 0o0700) + tmpfile = valid_textfile(local_path) + assert_(self.repos.exists(tmpfile)) + + +class TestOpenFunc(TestCase): + def setUp(self): + self.tmpdir = mkdtemp() + + def tearDown(self): + rmtree(self.tmpdir) + + def test_DataSourceOpen(self): + local_file = valid_textfile(self.tmpdir) + # Test case where destpath is passed in + fp = datasource.open(local_file, destpath=self.tmpdir) + assert_(fp) + fp.close() + # Test case where default destpath is used + fp = datasource.open(local_file) + assert_(fp) + fp.close() + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__iotools.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__iotools.py new file mode 100644 index 0000000000000..4db19382a71ca --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__iotools.py @@ -0,0 +1,326 @@ +from __future__ import division, absolute_import, print_function + +import sys +import time +from datetime import date + +import numpy as np +from numpy.compat import asbytes, asbytes_nested +from numpy.testing import ( + run_module_suite, TestCase, assert_, assert_equal + ) +from numpy.lib._iotools import ( + LineSplitter, NameValidator, StringConverter, + has_nested_fields, easy_dtype, flatten_dtype + ) + + +class TestLineSplitter(TestCase): + "Tests the LineSplitter class." 
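+    # Each test feeds one raw byte string through a LineSplitter built
+    # with a different delimiter (none, whitespace, tab, comma, or fixed
+    # column widths) and checks the fields it yields.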
+ + def test_no_delimiter(self): + "Test LineSplitter w/o delimiter" + strg = asbytes(" 1 2 3 4 5 # test") + test = LineSplitter()(strg) + assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5'])) + test = LineSplitter('')(strg) + assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5'])) + + def test_space_delimiter(self): + "Test space delimiter" + strg = asbytes(" 1 2 3 4 5 # test") + test = LineSplitter(asbytes(' '))(strg) + assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) + test = LineSplitter(asbytes(' '))(strg) + assert_equal(test, asbytes_nested(['1 2 3 4', '5'])) + + def test_tab_delimiter(self): + "Test tab delimiter" + strg = asbytes(" 1\t 2\t 3\t 4\t 5 6") + test = LineSplitter(asbytes('\t'))(strg) + assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5 6'])) + strg = asbytes(" 1 2\t 3 4\t 5 6") + test = LineSplitter(asbytes('\t'))(strg) + assert_equal(test, asbytes_nested(['1 2', '3 4', '5 6'])) + + def test_other_delimiter(self): + "Test LineSplitter on delimiter" + strg = asbytes("1,2,3,4,,5") + test = LineSplitter(asbytes(','))(strg) + assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) + # + strg = asbytes(" 1,2,3,4,,5 # test") + test = LineSplitter(asbytes(','))(strg) + assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) + + def test_constant_fixed_width(self): + "Test LineSplitter w/ fixed-width fields" + strg = asbytes(" 1 2 3 4 5 # test") + test = LineSplitter(3)(strg) + assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5', ''])) + # + strg = asbytes(" 1 3 4 5 6# test") + test = LineSplitter(20)(strg) + assert_equal(test, asbytes_nested(['1 3 4 5 6'])) + # + strg = asbytes(" 1 3 4 5 6# test") + test = LineSplitter(30)(strg) + assert_equal(test, asbytes_nested(['1 3 4 5 6'])) + + def test_variable_fixed_width(self): + strg = asbytes(" 1 3 4 5 6# test") + test = LineSplitter((3, 6, 6, 3))(strg) + assert_equal(test, asbytes_nested(['1', '3', '4 5', '6'])) + # + strg = asbytes(" 1 3 4 5 6# test") + test = LineSplitter((6, 6, 9))(strg) + assert_equal(test, asbytes_nested(['1', '3 4', '5 6'])) + +#------------------------------------------------------------------------------- + + +class TestNameValidator(TestCase): + + def test_case_sensitivity(self): + "Test case sensitivity" + names = ['A', 'a', 'b', 'c'] + test = NameValidator().validate(names) + assert_equal(test, ['A', 'a', 'b', 'c']) + test = NameValidator(case_sensitive=False).validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='upper').validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='lower').validate(names) + assert_equal(test, ['a', 'a_1', 'b', 'c']) + + def test_excludelist(self): + "Test excludelist" + names = ['dates', 'data', 'Other Data', 'mask'] + validator = NameValidator(excludelist=['dates', 'data', 'mask']) + test = validator.validate(names) + assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) + + def test_missing_names(self): + "Test validate missing names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist), ['a', 'b', 'c']) + namelist = ('', 'b', 'c') + assert_equal(validator(namelist), ['f0', 'b', 'c']) + namelist = ('a', 'b', '') + assert_equal(validator(namelist), ['a', 'b', 'f0']) + namelist = ('', 'f0', '') + assert_equal(validator(namelist), ['f1', 'f0', 'f2']) + + def test_validate_nb_names(self): + "Test validate nb names" + namelist = ('a', 'b', 'c') + validator = NameValidator() 
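+        # nbfields truncates or extends the validated name list to the
+        # requested length; extra names are synthesized from defaultfmt.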
+ assert_equal(validator(namelist, nbfields=1), ('a',)) + assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), + ['a', 'b', 'c', 'g0', 'g1']) + + def test_validate_wo_names(self): + "Test validate no names" + namelist = None + validator = NameValidator() + assert_(validator(namelist) is None) + assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) + +#------------------------------------------------------------------------------- + + +def _bytes_to_date(s): + if sys.version_info[0] >= 3: + return date(*time.strptime(s.decode('latin1'), "%Y-%m-%d")[:3]) + else: + return date(*time.strptime(s, "%Y-%m-%d")[:3]) + + +class TestStringConverter(TestCase): + "Test StringConverter" + + def test_creation(self): + "Test creation of a StringConverter" + converter = StringConverter(int, -99999) + assert_equal(converter._status, 1) + assert_equal(converter.default, -99999) + + def test_upgrade(self): + "Tests the upgrade method." + converter = StringConverter() + assert_equal(converter._status, 0) + converter.upgrade(asbytes('0')) + assert_equal(converter._status, 1) + converter.upgrade(asbytes('0.')) + assert_equal(converter._status, 2) + converter.upgrade(asbytes('0j')) + assert_equal(converter._status, 3) + converter.upgrade(asbytes('a')) + assert_equal(converter._status, len(converter._mapper) - 1) + + def test_missing(self): + "Tests the use of missing values." + converter = StringConverter(missing_values=(asbytes('missing'), + asbytes('missed'))) + converter.upgrade(asbytes('0')) + assert_equal(converter(asbytes('0')), 0) + assert_equal(converter(asbytes('')), converter.default) + assert_equal(converter(asbytes('missing')), converter.default) + assert_equal(converter(asbytes('missed')), converter.default) + try: + converter('miss') + except ValueError: + pass + + def test_upgrademapper(self): + "Tests updatemapper" + dateparser = _bytes_to_date + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert(asbytes('2001-01-01')) + assert_equal(test, date(2001, 1, 1)) + test = convert(asbytes('2009-01-01')) + assert_equal(test, date(2009, 1, 1)) + test = convert(asbytes('')) + assert_equal(test, date(2000, 1, 1)) + + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + conv = StringConverter(_bytes_to_date) + assert_equal(conv._mapper[-2][0](0), 0j) + assert_(hasattr(conv, 'default')) + + def test_keep_default(self): + "Make sure we don't lose an explicit default" + converter = StringConverter(None, missing_values=asbytes(''), + default=-999) + converter.upgrade(asbytes('3.14159265')) + assert_equal(converter.default, -999) + assert_equal(converter.type, np.dtype(float)) + # + converter = StringConverter( + None, missing_values=asbytes(''), default=0) + converter.upgrade(asbytes('3.14159265')) + assert_equal(converter.default, 0) + assert_equal(converter.type, np.dtype(float)) + + def test_keep_default_zero(self): + "Check that we don't lose a default of 0" + converter = StringConverter(int, default=0, + missing_values=asbytes("N/A")) + assert_equal(converter.default, 0) + + def test_keep_missing_values(self): + "Check that we're not losing missing values" + converter = StringConverter(int, default=0, + missing_values=asbytes("N/A")) + assert_equal( + converter.missing_values, set(asbytes_nested(['', 'N/A']))) + + def test_int64_dtype(self): + "Check that int64 integer types can be specified" + converter = StringConverter(np.int64, default=0) + val = 
asbytes("-9223372036854775807") + assert_(converter(val) == -9223372036854775807) + val = asbytes("9223372036854775807") + assert_(converter(val) == 9223372036854775807) + + def test_uint64_dtype(self): + "Check that uint64 integer types can be specified" + converter = StringConverter(np.uint64, default=0) + val = asbytes("9223372043271415339") + assert_(converter(val) == 9223372043271415339) + + +class TestMiscFunctions(TestCase): + + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(np.float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + + def test_easy_dtype(self): + "Test ndtype on dtypes" + # Simple case + ndtype = float + assert_equal(easy_dtype(ndtype), np.dtype(float)) + # As string w/o names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', "i4"), ('f1', "f8")])) + # As string w/o names but different default format + assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), + np.dtype([('field_000', "i4"), ('field_001', "f8")])) + # As string w/ names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (too many) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (not enough) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names=", b"), + np.dtype([('f0', "i4"), ('b', "f8")])) + # ... (with different default format) + assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), + np.dtype([('a', "i4"), ('f00', "f8")])) + # As list of tuples w/o names + ndtype = [('A', int), ('B', float)] + assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) + # As list of tuples w/ names + assert_equal(easy_dtype(ndtype, names="a,b"), + np.dtype([('a', int), ('b', float)])) + # As list of tuples w/ not enough names + assert_equal(easy_dtype(ndtype, names="a"), + np.dtype([('a', int), ('f0', float)])) + # As list of tuples w/ too many names + assert_equal(easy_dtype(ndtype, names="a,b,c"), + np.dtype([('a', int), ('b', float)])) + # As list of types w/o names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', int), ('f1', float), ('f2', float)])) + # As list of types w names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', int), ('b', float), ('c', float)])) + # As simple dtype w/ names + ndtype = np.dtype(float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([(_, float) for _ in ('a', 'b', 'c')])) + # As simple dtype w/o names (but multiple fields) + ndtype = np.dtype(float) + assert_equal( + easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), + np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = 
flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__version.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__version.py new file mode 100644 index 0000000000000..bbafe68eb3554 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__version.py @@ -0,0 +1,57 @@ +"""Tests for the NumpyVersion class. + +""" +from __future__ import division, absolute_import, print_function + +from numpy.testing import assert_, run_module_suite, assert_raises +from numpy.lib import NumpyVersion + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. + assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraypad.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraypad.py new file mode 100644 index 0000000000000..f8ba8643abddc --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraypad.py @@ -0,0 +1,560 @@ +"""Tests for the pad functions. 
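+
+Each case pads a small, fully known array and compares the result with a
+hand-computed expected array, grouped into one test class per family of
+pad modes.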
+ +""" +from __future__ import division, absolute_import, print_function + +from numpy.testing import TestCase, run_module_suite, assert_array_equal +from numpy.testing import assert_raises, assert_array_almost_equal +import numpy as np +from numpy.lib import pad + + +class TestStatistic(TestCase): + def test_check_mean_stat_length(self): + a = np.arange(100).astype('f') + a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) + b = np.array( + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. + ]) + assert_array_equal(a, b) + + def test_check_maximum_1(self): + a = np.arange(100) + a = pad(a, (25, 20), 'maximum') + b = np.array( + [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] + ) + assert_array_equal(a, b) + + def test_check_maximum_2(self): + a = np.arange(100) + 1 + a = pad(a, (25, 20), 'maximum') + b = np.array( + [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_minimum_1(self): + a = np.arange(100) + a = pad(a, (25, 20), 'minimum') + b = np.array( + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_minimum_2(self): + a = np.arange(100) + 2 + a = pad(a, (25, 20), 
'minimum') + b = np.array( + [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + assert_array_equal(a, b) + + def test_check_median(self): + a = np.arange(100).astype('f') + a = pad(a, (25, 20), 'median') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + def test_check_median_01(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = pad(a, 1, 'median') + b = np.array( + [[4, 4, 5, 4, 4], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [4, 4, 5, 4, 4]] + ) + assert_array_equal(a, b) + + def test_check_median_02(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = pad(a.T, 1, 'median').T + b = np.array( + [[5, 4, 5, 4, 5], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [5, 4, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_mean_shape_one(self): + a = [[4, 5, 6]] + a = pad(a, (5, 7), 'mean', stat_length=2) + b = np.array( + [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_mean_2(self): + a = np.arange(100).astype('f') + a = pad(a, (25, 20), 'mean') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + +class TestConstant(TestCase): + def test_check_constant(self): + a = np.arange(100) + a = pad(a, (25, 20), 'constant', constant_values=(10, 20)) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] + ) + assert_array_equal(a, b) + + +class TestLinearRamp(TestCase): + def test_check_simple(self): + a = np.arange(100).astype('f') + a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) + b = np.array( + [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, + 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, + 0.80, 0.64, 0.48, 0.32, 0.16, + + 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, + 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, + 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, + 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, + 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, + 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, + 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, + 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, + 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, + + 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, + 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] 
+ ) + assert_array_almost_equal(a, b, decimal=5) + + +class TestReflect(TestCase): + def test_check_simple(self): + a = np.arange(100) + a = pad(a, (25, 20), 'reflect') + b = np.array( + [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = pad(a, (5, 7), 'reflect') + b = np.array( + [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = pad(a, (5, 7), 'reflect') + b = np.array( + [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = pad([1, 2, 3], 2, 'reflect') + b = np.array([3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_02(self): + a = pad([1, 2, 3], 3, 'reflect') + b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_03(self): + a = pad([1, 2, 3], 4, 'reflect') + b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestWrap(TestCase): + def test_check_simple(self): + a = np.arange(100) + a = pad(a, (25, 20), 'wrap') + b = np.array( + [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ) + 
assert_array_equal(a, b) + + def test_check_large_pad(self): + a = np.arange(12) + a = np.reshape(a, (3, 4)) + a = pad(a, (10, 12), 'wrap') + b = np.array( + [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = pad([1, 2, 3], 3, 'wrap') + b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_02(self): + a = pad([1, 2, 3], 4, 'wrap') + b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) + assert_array_equal(a, b) + + +class TestStatLen(TestCase): + def test_check_simple(self): + a = np.arange(30) + a = np.reshape(a, (6, 5)) + a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) + b = np.array( + [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + + [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], + [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], + + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] + ) + assert_array_equal(a, b) + + +class TestEdge(TestCase): + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = pad(a, ((2, 3), (3, 2)), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + 
[0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + +class TestZeroPadWidth(TestCase): + def test_zero_pad_width(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + for pad_width in (0, (0, 0), ((0, 0), (0, 0))): + assert_array_equal(arr, pad(arr, pad_width, mode='constant')) + + +class ValueError1(TestCase): + def test_check_simple(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(3, )) + assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)), + **kwargs) + + def test_check_negative_stat_length(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(-3, )) + assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)), + **kwargs) + + def test_check_negative_pad_width(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(3, )) + assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), + **kwargs) + + +class ValueError2(TestCase): + def test_check_simple(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(3, )) + assert_raises(ValueError, pad, arr, ((2, 3, 4), (3, 2)), + **kwargs) + + +class ValueError3(TestCase): + def test_check_simple(self): + arr = np.arange(30) + arr = np.reshape(arr, (6, 5)) + kwargs = dict(mode='mean', stat_length=(3, )) + assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), + **kwargs) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraysetops.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraysetops.py new file mode 100644 index 0000000000000..e83f8552e2663 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraysetops.py @@ -0,0 +1,301 @@ +"""Test functions for 1D array set operations. 
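+
+Expected outputs are small hand-constructed arrays, so each operation
+(unique, intersect1d, setxor1d, ediff1d, in1d, union1d, setdiff1d) is
+checked against results that can be verified by inspection.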
+ +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import ( + run_module_suite, TestCase, assert_array_equal + ) +from numpy.lib.arraysetops import ( + ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d + ) + + +class TestSetOps(TestCase): + + def test_unique(self): + + def check_all(a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, 1, 0, 0) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, 0, 1, 0) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, 0, 0, 1) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, msg) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, 1, 1, 0) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, 1, 0, 1) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, 0, 1, 1) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, 1, 1, 1) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + + a = [5, 7, 1, 2, 1, 5, 7]*10 + b = [1, 2, 5, 7] + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3]*10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + for dt in types: + aa = np.array(a, dt) + bb = np.array(b, dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for object arrays + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + check_all(aa, bb, i1, i2, c, dt) + + # test for structured arrays + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for ticket #2799 + aa = [1. + 0.j, 1 - 1.j, 1] + assert_array_equal(np.unique(aa), [1. - 1.j, 1. 
+ 0.j])
+
+        # test for ticket #4785
+        a = [(1, 2), (1, 2), (2, 3)]
+        unq = [1, 2, 3]
+        inv = [0, 1, 0, 1, 1, 2]
+        a1 = unique(a)
+        assert_array_equal(a1, unq)
+        a2, a2_inv = unique(a, return_inverse=True)
+        assert_array_equal(a2, unq)
+        assert_array_equal(a2_inv, inv)
+
+    def test_intersect1d(self):
+        # unique inputs
+        a = np.array([5, 7, 1, 2])
+        b = np.array([2, 4, 3, 1, 5])
+
+        ec = np.array([1, 2, 5])
+        c = intersect1d(a, b, assume_unique=True)
+        assert_array_equal(c, ec)
+
+        # non-unique inputs
+        a = np.array([5, 5, 7, 1, 2])
+        b = np.array([2, 1, 4, 3, 3, 1, 5])
+
+        ed = np.array([1, 2, 5])
+        c = intersect1d(a, b)
+        assert_array_equal(c, ed)
+
+        assert_array_equal([], intersect1d([], []))
+
+    def test_setxor1d(self):
+        a = np.array([5, 7, 1, 2])
+        b = np.array([2, 4, 3, 1, 5])
+
+        ec = np.array([3, 4, 7])
+        c = setxor1d(a, b)
+        assert_array_equal(c, ec)
+
+        a = np.array([1, 2, 3])
+        b = np.array([6, 5, 4])
+
+        ec = np.array([1, 2, 3, 4, 5, 6])
+        c = setxor1d(a, b)
+        assert_array_equal(c, ec)
+
+        a = np.array([1, 8, 2, 3])
+        b = np.array([6, 5, 4, 8])
+
+        ec = np.array([1, 2, 3, 4, 5, 6])
+        c = setxor1d(a, b)
+        assert_array_equal(c, ec)
+
+        assert_array_equal([], setxor1d([], []))
+
+    def test_ediff1d(self):
+        zero_elem = np.array([])
+        one_elem = np.array([1])
+        two_elem = np.array([1, 2])
+
+        assert_array_equal([], ediff1d(zero_elem))
+        assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
+        assert_array_equal([0], ediff1d(zero_elem, to_end=0))
+        assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
+        assert_array_equal([], ediff1d(one_elem))
+        assert_array_equal([1], ediff1d(two_elem))
+
+    def test_in1d(self):
+        # We use two different sizes for the b array here to test the
+        # two different paths in in1d().
+        for mult in (1, 10):
+            # One check without np.array, to make sure lists are handled
+            # correctly.
+            a = [5, 7, 1, 2]
+            b = [2, 4, 3, 1, 5] * mult
+            ec = np.array([True, False, True, True])
+            c = in1d(a, b, assume_unique=True)
+            assert_array_equal(c, ec)
+
+            a[0] = 8
+            ec = np.array([False, False, True, True])
+            c = in1d(a, b, assume_unique=True)
+            assert_array_equal(c, ec)
+
+            a[0], a[3] = 4, 8
+            ec = np.array([True, False, True, False])
+            c = in1d(a, b, assume_unique=True)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+            b = [2, 3, 4] * mult
+            ec = [False, True, False, True, True, True, True, True, True, False,
+                  True, False, False, False]
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            b = b + [5, 5, 4] * mult
+            ec = [True, True, True, True, True, True, True, True, True, True,
+                  True, False, True, True]
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 2])
+            b = np.array([2, 4, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True])
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 1, 2])
+            b = np.array([2, 4, 3, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True, True])
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 5])
+            b = np.array([2, 2] * mult)
+            ec = np.array([False, False])
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+        a = np.array([5])
+        b = np.array([2])
+        ec = np.array([False])
+        c = in1d(a, b)
+        assert_array_equal(c, ec)
+
+        assert_array_equal(in1d([], []), [])
+
+    def test_in1d_char_array(self):
+        a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
+        b = np.array(['a', 'c'])
+
+        ec = np.array([True, False, True, False, False, True, False, False])
+        c = in1d(a, b)
+
+        assert_array_equal(c, ec)
+
+    def test_in1d_invert(self):
+        "Test in1d's invert parameter"
+        # We use two different sizes for the b array here to test the
+        # two different paths in in1d().
+        for mult in (1, 10):
+            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+            b = [2, 3, 4] * mult
+            assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+    def test_in1d_ravel(self):
+        # Test that in1d ravels its input arrays. This is not documented
+        # behavior, however; the test ensures consistency.
+        a = np.arange(6).reshape(2, 3)
+        b = np.arange(3, 9).reshape(3, 2)
+        long_b = np.arange(3, 63).reshape(30, 2)
+        ec = np.array([False, False, False, True, True, True])
+
+        assert_array_equal(in1d(a, b, assume_unique=True), ec)
+        assert_array_equal(in1d(a, b, assume_unique=False), ec)
+        assert_array_equal(in1d(a, long_b, assume_unique=True), ec)
+        assert_array_equal(in1d(a, long_b, assume_unique=False), ec)
+
+    def test_union1d(self):
+        a = np.array([5, 4, 7, 1, 2])
+        b = np.array([2, 4, 3, 3, 2, 1, 5])
+
+        ec = np.array([1, 2, 3, 4, 5, 7])
+        c = union1d(a, b)
+        assert_array_equal(c, ec)
+
+        assert_array_equal([], union1d([], []))
+
+    def test_setdiff1d(self):
+        a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
+        b = np.array([2, 4, 3, 3, 2, 1, 5])
+
+        ec = np.array([6, 7])
+        c = setdiff1d(a, b)
+        assert_array_equal(c, ec)
+
+        a = np.arange(21)
+        b = np.arange(19)
+        ec = np.array([19, 20])
+        c = setdiff1d(a, b)
+        assert_array_equal(c, ec)
+
+        assert_array_equal([], setdiff1d([], []))
+
+    def test_setdiff1d_char_array(self):
+        a = np.array(['a', 'b', 'c'])
+        b = np.array(['a', 'b', 's'])
+        assert_array_equal(setdiff1d(a, b), np.array(['c']))
+
+    def test_manyways(self):
+        a = np.array([5, 7, 1, 2, 8])
+        b = np.array([9, 8, 2, 4, 3, 1, 5])
+
+        c1 = setxor1d(a, b)
+        aux1 = intersect1d(a, b)
+        aux2 = union1d(a, b)
+        c2 = setdiff1d(aux2, aux1)
+        assert_array_equal(c1, c2)
+
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arrayterator.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arrayterator.py
new file mode 100644
index 0000000000000..64ad7f4de4b53
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arrayterator.py
@@ -0,0 +1,52 @@
+from __future__ import division, absolute_import, print_function
+
+from operator import mul
+from functools import reduce
+
+import numpy as np
+from numpy.random import randint
+from numpy.lib import Arrayterator
+from numpy.testing import assert_
+
+
+def test():
+    np.random.seed(np.arange(10))
+
+    # Create a random array
+    ndims = randint(5)+1
+    shape = tuple(randint(10)+1 for dim in range(ndims))
+    els = reduce(mul, shape)
+    a = np.arange(els)
+    a.shape = shape
+
+    buf_size = randint(2*els)
+    b = Arrayterator(a, buf_size)
+
+    # Check that each block has at most ``buf_size`` elements
+    for block in b:
+        assert_(len(block.flat) <= (buf_size or els))
+
+    # Check that all elements are iterated correctly
+    assert_(list(b.flat) == list(a.flat))
+
+    # Slice arrayterator
+    start = [randint(dim) for dim in shape]
+    stop = [randint(dim)+1 for dim in shape]
+    step = [randint(dim)+1 for dim in shape]
+    slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+    c = b[slice_]
+    d = a[slice_]
+
+    # Check that each block has at most ``buf_size`` elements
+    for block in c:
+        assert_(len(block.flat) <= (buf_size or els))
+
+    # Check that the arrayterator is sliced correctly
+    assert_(np.all(c.__array__() == d))
+
+    # Check that all elements are iterated correctly
+    assert_(list(c.flat) == list(d.flat))
+
+if __name__ ==
'__main__': + from numpy.testing import run_module_suite + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_financial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_financial.py new file mode 100644 index 0000000000000..a4b9cfe2ed32a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_financial.py @@ -0,0 +1,160 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import ( + run_module_suite, TestCase, assert_, assert_almost_equal + ) + + +class TestFinancial(TestCase): + def test_rate(self): + assert_almost_equal(np.rate(10, 0, -3500, 10000), + 0.1107, 4) + + def test_irr(self): + v = [-150000, 15000, 25000, 35000, 45000, 60000] + assert_almost_equal(np.irr(v), + 0.0524, 2) + v = [-100, 0, 0, 74] + assert_almost_equal(np.irr(v), + -0.0955, 2) + v = [-100, 39, 59, 55, 20] + assert_almost_equal(np.irr(v), + 0.28095, 2) + v = [-100, 100, 0, -7] + assert_almost_equal(np.irr(v), + -0.0833, 2) + v = [-100, 100, 0, 7] + assert_almost_equal(np.irr(v), + 0.06206, 2) + v = [-5, 10.5, 1, -8, 1] + assert_almost_equal(np.irr(v), + 0.0886, 2) + + def test_pv(self): + assert_almost_equal(np.pv(0.07, 20, 12000, 0), + -127128.17, 2) + + def test_fv(self): + assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), + 86609.36, 2) + + def test_pmt(self): + assert_almost_equal(np.pmt(0.08/12, 5*12, 15000), + -304.146, 3) + + def test_ppmt(self): + np.round(np.ppmt(0.1/12, 1, 60, 55000), 2) == 710.25 + + def test_ipmt(self): + np.round(np.ipmt(0.1/12, 1, 24, 2000), 2) == 16.67 + + def test_nper(self): + assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), + 21.54, 2) + + def test_nper2(self): + assert_almost_equal(np.nper(0.0, -2000, 0, 100000.), + 50.0, 1) + + def test_npv(self): + assert_almost_equal( + np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]), + 122.89, 2) + + def test_mirr(self): + val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000] + assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) + + val = [-120000, 39000, 30000, 21000, 37000, 46000] + assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6) + + val = [100, 200, -50, 300, -200] + assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4) + + val = [39000, 30000, 21000, 37000, 46000] + assert_(np.isnan(np.mirr(val, 0.10, 0.12))) + + def test_when(self): + #begin + assert_almost_equal(np.rate(10, 20, -3500, 10000, 1), + np.rate(10, 20, -3500, 10000, 'begin'), 4) + #end + assert_almost_equal(np.rate(10, 20, -3500, 10000), + np.rate(10, 20, -3500, 10000, 'end'), 4) + assert_almost_equal(np.rate(10, 20, -3500, 10000, 0), + np.rate(10, 20, -3500, 10000, 'end'), 4) + + # begin + assert_almost_equal(np.pv(0.07, 20, 12000, 0, 1), + np.pv(0.07, 20, 12000, 0, 'begin'), 2) + # end + assert_almost_equal(np.pv(0.07, 20, 12000, 0), + np.pv(0.07, 20, 12000, 0, 'end'), 2) + assert_almost_equal(np.pv(0.07, 20, 12000, 0, 0), + np.pv(0.07, 20, 12000, 0, 'end'), 2) + + # begin + assert_almost_equal(np.fv(0.075, 20, -2000, 0, 1), + np.fv(0.075, 20, -2000, 0, 'begin'), 4) + # end + assert_almost_equal(np.fv(0.075, 20, -2000, 0), + np.fv(0.075, 20, -2000, 0, 'end'), 4) + assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), + np.fv(0.075, 20, -2000, 0, 'end'), 4) + + # begin + assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 1), + np.pmt(0.08/12, 5*12, 15000., 0, 'begin'), 4) + # end + assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0), + np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4) + 
assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 0), + np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4) + + # begin + assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 1), + np.ppmt(0.1/12, 1, 60, 55000, 0, 'begin'), 4) + # end + assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0), + np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4) + assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 0), + np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4) + + # begin + assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 1), + np.ipmt(0.1/12, 1, 24, 2000, 0, 'begin'), 4) + # end + assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0), + np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4) + assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 0), + np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4) + + # begin + assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 1), + np.nper(0.075, -2000, 0, 100000., 'begin'), 4) + # end + assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), + np.nper(0.075, -2000, 0, 100000., 'end'), 4) + assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 0), + np.nper(0.075, -2000, 0, 100000., 'end'), 4) + + def test_broadcast(self): + assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]), + [21.5449442, 20.76156441], 4) + + assert_almost_equal(np.ipmt(0.1/12, list(range(5)), 24, 2000), + [-17.29165168, -16.66666667, -16.03647345, + -15.40102862, -14.76028842], 4) + + assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000), + [-74.998201, -75.62318601, -76.25337923, + -76.88882405, -77.52956425], 4) + + assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000, 0, + [0, 0, 1, 'end', 'begin']), + [-74.998201, -75.62318601, -75.62318601, + -76.88882405, -76.88882405], 4) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_format.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_format.py new file mode 100644 index 0000000000000..c09386789fbee --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_format.py @@ -0,0 +1,706 @@ +from __future__ import division, absolute_import, print_function + +r''' Test the .npy file format. + +Set up: + + >>> import sys + >>> from io import BytesIO + >>> from numpy.lib import format + >>> + >>> scalars = [ + ... np.uint8, + ... np.int8, + ... np.uint16, + ... np.int16, + ... np.uint32, + ... np.int32, + ... np.uint64, + ... np.int64, + ... np.float32, + ... np.float64, + ... np.complex64, + ... np.complex128, + ... object, + ... ] + >>> + >>> basic_arrays = [] + >>> + >>> for scalar in scalars: + ... for endian in '<>': + ... dtype = np.dtype(scalar).newbyteorder(endian) + ... basic = np.arange(15).astype(dtype) + ... basic_arrays.extend([ + ... np.array([], dtype=dtype), + ... np.array(10, dtype=dtype), + ... basic, + ... basic.reshape((3,5)), + ... basic.reshape((3,5)).T, + ... basic.reshape((3,5))[::-1,::2], + ... ]) + ... + >>> + >>> Pdescr = [ + ... ('x', 'i4', (2,)), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> PbufferT = [ + ... ([3,2], [[6.,4.],[6.,4.]], 8), + ... ([4,3], [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> Ndescr = [ + ... ('x', 'i4', (2,)), + ... ('Info', [ + ... ('value', 'c16'), + ... ('y2', 'f8'), + ... ('Info2', [ + ... ('name', 'S2'), + ... ('value', 'c16', (2,)), + ... ('y3', 'f8', (2,)), + ... ('z3', 'u4', (2,))]), + ... ('name', 'S2'), + ... ('z2', 'b1')]), + ... ('color', 'S2'), + ... ('info', [ + ... ('Name', 'U8'), + ... ('Value', 'c16')]), + ... ('y', 'f8', (2, 2)), + ... 
('z', 'u1')] + >>> + >>> + >>> NbufferT = [ + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> record_arrays = [ + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + ... ] + +Test the magic string writing. + + >>> format.magic(1, 0) + '\x93NUMPY\x01\x00' + >>> format.magic(0, 0) + '\x93NUMPY\x00\x00' + >>> format.magic(255, 255) + '\x93NUMPY\xff\xff' + >>> format.magic(2, 5) + '\x93NUMPY\x02\x05' + +Test the magic string reading. + + >>> format.read_magic(BytesIO(format.magic(1, 0))) + (1, 0) + >>> format.read_magic(BytesIO(format.magic(0, 0))) + (0, 0) + >>> format.read_magic(BytesIO(format.magic(255, 255))) + (255, 255) + >>> format.read_magic(BytesIO(format.magic(2, 5))) + (2, 5) + +Test the header writing. + + >>> for arr in basic_arrays + record_arrays: + ... f = BytesIO() + ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... print repr(f.getvalue()) + ... + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i2', 
'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" + 
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" + "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" +''' + +import sys +import os +import shutil +import tempfile +import warnings +from io import BytesIO + +import numpy as np +from numpy.compat import asbytes, asbytes_nested +from numpy.testing import ( + run_module_suite, assert_, assert_array_equal, assert_raises, raises, + dec + ) +from numpy.lib import format + + +tempdir = None + +# Module-level setup. + + +def setup_module(): + global tempdir + tempdir = tempfile.mkdtemp() + + +def teardown_module(): + global tempdir + if tempdir is not None and os.path.isdir(tempdir): + shutil.rmtree(tempdir) + tempdir = None + + +# Generate some basic arrays to test with. +scalars = [ + np.uint8, + np.int8, + np.uint16, + np.int16, + np.uint32, + np.int32, + np.uint64, + np.int64, + np.float32, + np.float64, + np.complex64, + np.complex128, + object, +] +basic_arrays = [] +for scalar in scalars: + for endian in '<>': + dtype = np.dtype(scalar).newbyteorder(endian) + basic = np.arange(1500).astype(dtype) + basic_arrays.extend([ + # Empty + np.array([], dtype=dtype), + # Rank-0 + np.array(10, dtype=dtype), + # 1-D + basic, + # 2-D C-contiguous + basic.reshape((30, 50)), + # 2-D F-contiguous + basic.reshape((30, 50)).T, + # 2-D non-contiguous + basic.reshape((30, 50))[::-1, ::2], + ]) + +# More complicated record arrays. 
+# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), + 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), + 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + +record_arrays = [ + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), +] + + +#BytesIO that reads a random number of bytes at a time +class BytesIOSRandomSize(BytesIO): + def read(self, size=None): + import random + size = random.randint(1, size) + return super(BytesIOSRandomSize, self).read(size) + + +def roundtrip(arr): + f = BytesIO() + format.write_array(f, arr) + f2 = BytesIO(f.getvalue()) + arr2 = format.read_array(f2) + return arr2 + + +def roundtrip_randsize(arr): + f = BytesIO() + format.write_array(f, arr) + f2 = BytesIOSRandomSize(f.getvalue()) + arr2 = format.read_array(f2) + return arr2 + + +def roundtrip_truncated(arr): + f = BytesIO() + format.write_array(f, arr) + #BytesIO is one byte short + f2 = BytesIO(f.getvalue()[0:-1]) + arr2 = format.read_array(f2) + return arr2 + + +def assert_equal_(o1, o2): + assert_(o1 == o2) + + +def test_roundtrip(): + for arr in basic_arrays + record_arrays: + arr2 = roundtrip(arr) + yield assert_array_equal, arr, arr2 + + +def test_roundtrip_randsize(): + for arr in basic_arrays + record_arrays: + if arr.dtype != object: + arr2 = roundtrip_randsize(arr) + yield assert_array_equal, arr, arr2 + + +def test_roundtrip_truncated(): + for arr in basic_arrays: + if arr.dtype != object: + yield assert_raises, ValueError, roundtrip_truncated, arr + + +def test_long_str(): + # check items larger than internal buffer size, gh-4027 + long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1))) + long_str_arr2 = roundtrip(long_str_arr) + assert_array_equal(long_str_arr, long_str_arr2) + + +@dec.slow +def test_memmap_roundtrip(): + # Fixme: test crashes nose on windows. + if not (sys.platform == 'win32' or sys.platform == 'cygwin'): + for arr in basic_arrays + record_arrays: + if arr.dtype.hasobject: + # Skip these since they can't be mmap'ed. 
+ continue + # Write it out normally and through mmap. + nfn = os.path.join(tempdir, 'normal.npy') + mfn = os.path.join(tempdir, 'memmap.npy') + fp = open(nfn, 'wb') + try: + format.write_array(fp, arr) + finally: + fp.close() + + fortran_order = ( + arr.flags.f_contiguous and not arr.flags.c_contiguous) + ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype, + shape=arr.shape, fortran_order=fortran_order) + ma[...] = arr + del ma + + # Check that both of these files' contents are the same. + fp = open(nfn, 'rb') + normal_bytes = fp.read() + fp.close() + fp = open(mfn, 'rb') + memmap_bytes = fp.read() + fp.close() + yield assert_equal_, normal_bytes, memmap_bytes + + # Check that reading the file using memmap works. + ma = format.open_memmap(nfn, mode='r') + del ma + + +def test_compressed_roundtrip(): + arr = np.random.rand(200, 200) + npz_file = os.path.join(tempdir, 'compressed.npz') + np.savez_compressed(npz_file, arr=arr) + arr1 = np.load(npz_file)['arr'] + assert_array_equal(arr, arr1) + + +def test_version_2_0(): + f = BytesIO() + # requires more than 2 byte for header + dt = [(("%d" % i) * 100, float) for i in range(500)] + d = np.ones(1000, dtype=dt) + + format.write_array(f, d, version=(2, 0)) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', UserWarning) + format.write_array(f, d) + assert_(w[0].category is UserWarning) + + f.seek(0) + n = format.read_array(f) + assert_array_equal(d, n) + + # 1.0 requested but data cannot be saved this way + assert_raises(ValueError, format.write_array, f, d, (1, 0)) + + +def test_version_2_0_memmap(): + # requires more than 2 byte for header + dt = [(("%d" % i) * 100, float) for i in range(500)] + d = np.ones(1000, dtype=dt) + tf = tempfile.mktemp('', 'mmap', dir=tempdir) + + # 1.0 requested but data cannot be saved this way + assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype, + shape=d.shape, version=(1, 0)) + + ma = format.open_memmap(tf, mode='w+', dtype=d.dtype, + shape=d.shape, version=(2, 0)) + ma[...] = d + del ma + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', UserWarning) + ma = format.open_memmap(tf, mode='w+', dtype=d.dtype, + shape=d.shape, version=None) + assert_(w[0].category is UserWarning) + ma[...] = d + del ma + + ma = format.open_memmap(tf, mode='r') + assert_array_equal(ma, d) + + +def test_write_version(): + f = BytesIO() + arr = np.arange(1) + # These should pass. + format.write_array(f, arr, version=(1, 0)) + format.write_array(f, arr) + + format.write_array(f, arr, version=None) + format.write_array(f, arr) + + format.write_array(f, arr, version=(2, 0)) + format.write_array(f, arr) + + # These should all fail. 
+ bad_versions = [ + (1, 1), + (0, 0), + (0, 1), + (2, 2), + (255, 255), + ] + for version in bad_versions: + try: + format.write_array(f, arr, version=version) + except ValueError: + pass + else: + raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,)) + + +bad_version_magic = asbytes_nested([ + '\x93NUMPY\x01\x01', + '\x93NUMPY\x00\x00', + '\x93NUMPY\x00\x01', + '\x93NUMPY\x02\x00', + '\x93NUMPY\x02\x02', + '\x93NUMPY\xff\xff', +]) +malformed_magic = asbytes_nested([ + '\x92NUMPY\x01\x00', + '\x00NUMPY\x01\x00', + '\x93numpy\x01\x00', + '\x93MATLB\x01\x00', + '\x93NUMPY\x01', + '\x93NUMPY', + '', +]) + + +def test_read_magic_bad_magic(): + for magic in malformed_magic: + f = BytesIO(magic) + yield raises(ValueError)(format.read_magic), f + + +def test_read_version_1_0_bad_magic(): + for magic in bad_version_magic + malformed_magic: + f = BytesIO(magic) + yield raises(ValueError)(format.read_array), f + + +def test_bad_magic_args(): + assert_raises(ValueError, format.magic, -1, 1) + assert_raises(ValueError, format.magic, 256, 1) + assert_raises(ValueError, format.magic, 1, -1) + assert_raises(ValueError, format.magic, 1, 256) + + +def test_large_header(): + s = BytesIO() + d = {'a': 1, 'b': 2} + format.write_array_header_1_0(s, d) + + s = BytesIO() + d = {'a': 1, 'b': 2, 'c': 'x'*256*256} + assert_raises(ValueError, format.write_array_header_1_0, s, d) + + +def test_bad_header(): + # header of length less than 2 should fail + s = BytesIO() + assert_raises(ValueError, format.read_array_header_1_0, s) + s = BytesIO(asbytes('1')) + assert_raises(ValueError, format.read_array_header_1_0, s) + + # header shorter than indicated size should fail + s = BytesIO(asbytes('\x01\x00')) + assert_raises(ValueError, format.read_array_header_1_0, s) + + # headers without the exact keys required should fail + d = {"shape": (1, 2), + "descr": "x"} + s = BytesIO() + format.write_array_header_1_0(s, d) + assert_raises(ValueError, format.read_array_header_1_0, s) + + d = {"shape": (1, 2), + "fortran_order": False, + "descr": "x", + "extrakey": -1} + s = BytesIO() + format.write_array_header_1_0(s, d) + assert_raises(ValueError, format.read_array_header_1_0, s) + + +def test_large_file_support(): + from nose import SkipTest + if (sys.platform == 'win32' or sys.platform == 'cygwin'): + raise SkipTest("Unknown if Windows has sparse filesystems") + # try creating a large sparse file + tf_name = os.path.join(tempdir, 'sparse_file') + try: + # seek past end would work too, but linux truncate somewhat + # increases the chances that we have a sparse filesystem and can + # avoid actually writing 5GB + import subprocess as sp + sp.check_call(["truncate", "-s", "5368709120", tf_name]) + except: + raise SkipTest("Could not create 5GB large file") + # write a small array to the end + with open(tf_name, "wb") as f: + f.seek(5368709120) + d = np.arange(5) + np.save(f, d) + # read it back + with open(tf_name, "rb") as f: + f.seek(5368709120) + r = np.load(f) + assert_array_equal(r, d) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_function_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_function_base.py new file mode 100644 index 0000000000000..624b5f3eb58e3 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_function_base.py @@ -0,0 +1,2131 @@ +from __future__ import division, absolute_import, print_function + +import warnings + +import numpy as np +from numpy.testing 
import ( + run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, + assert_almost_equal, assert_array_almost_equal, assert_raises, + assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex + ) +from numpy.random import rand +from numpy.lib import * +from numpy.compat import long + + +class TestAny(TestCase): + def test_basic(self): + y1 = [0, 0, 1, 0] + y2 = [0, 0, 0, 0] + y3 = [1, 0, 1, 0] + assert_(np.any(y1)) + assert_(np.any(y3)) + assert_(not np.any(y2)) + + def test_nd(self): + y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]] + assert_(np.any(y1)) + assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0]) + assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1]) + + +class TestAll(TestCase): + def test_basic(self): + y1 = [0, 1, 1, 0] + y2 = [0, 0, 0, 0] + y3 = [1, 1, 1, 1] + assert_(not np.all(y1)) + assert_(np.all(y3)) + assert_(not np.all(y2)) + assert_(np.all(~np.array(y2))) + + def test_nd(self): + y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]] + assert_(not np.all(y1)) + assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1]) + assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1]) + + +class TestCopy(TestCase): + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + a_copy = np.copy(a) + assert_array_equal(a, a_copy) + a_copy[0, 0] = 10 + assert_equal(a[0, 0], 1) + assert_equal(a_copy[0, 0], 10) + + def test_order(self): + # It turns out that people rely on np.copy() preserving order by + # default; changing this broke scikit-learn: + # https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 + a = np.array([[1, 2], [3, 4]]) + assert_(a.flags.c_contiguous) + assert_(not a.flags.f_contiguous) + a_fort = np.array([[1, 2], [3, 4]], order="F") + assert_(not a_fort.flags.c_contiguous) + assert_(a_fort.flags.f_contiguous) + a_copy = np.copy(a) + assert_(a_copy.flags.c_contiguous) + assert_(not a_copy.flags.f_contiguous) + a_fort_copy = np.copy(a_fort) + assert_(not a_fort_copy.flags.c_contiguous) + assert_(a_fort_copy.flags.f_contiguous) + + +class TestAverage(TestCase): + def test_basic(self): + y1 = np.array([1, 2, 3]) + assert_(average(y1, axis=0) == 2.) + y2 = np.array([1., 2., 3.]) + assert_(average(y2, axis=0) == 2.) + y3 = [0., 0., 0.] + assert_(average(y3, axis=0) == 0.) + + y4 = np.ones((4, 4)) + y4[0, 1] = 0 + y4[1, 0] = 2 + assert_almost_equal(y4.mean(0), average(y4, 0)) + assert_almost_equal(y4.mean(1), average(y4, 1)) + + y5 = rand(5, 5) + assert_almost_equal(y5.mean(0), average(y5, 0)) + assert_almost_equal(y5.mean(1), average(y5, 1)) + + y6 = np.matrix(rand(5, 5)) + assert_array_equal(y6.mean(0), average(y6, 0)) + + def test_weights(self): + y = np.arange(10) + w = np.arange(10) + actual = average(y, weights=w) + desired = (np.arange(10) ** 2).sum()*1. / np.arange(10).sum() + assert_almost_equal(actual, desired) + + y1 = np.array([[1, 2, 3], [4, 5, 6]]) + w0 = [1, 2] + actual = average(y1, weights=w0, axis=0) + desired = np.array([3., 4., 5.]) + assert_almost_equal(actual, desired) + + w1 = [0, 0, 1] + actual = average(y1, weights=w1, axis=1) + desired = np.array([3., 6.]) + assert_almost_equal(actual, desired) + + # This should raise an error. Can we test for that ? + # assert_equal(average(y1, weights=w1), 9./2.) + + # 2D Case + w2 = [[0, 0, 1], [0, 0, 2]] + desired = np.array([3., 6.]) + assert_array_equal(average(y1, weights=w2, axis=1), desired) + assert_equal(average(y1, weights=w2), 5.) 
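+
+    # Illustrative sketch (hypothetical helper method, not part of the
+    # original suite) of the rule the weighted cases above rely on:
+    # average(a, weights=w, axis=0) == (w[:, None] * a).sum(axis=0) / w.sum()
+    def _weighted_average_identity_sketch(self):
+        y = np.array([[1., 2., 3.], [4., 5., 6.]])
+        w = np.array([1., 2.])
+        expected = (w[:, None] * y).sum(axis=0) / w.sum()
+        assert_almost_equal(average(y, weights=w, axis=0), expected)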
+
+    def test_returned(self):
+        y = np.array([[1, 2, 3], [4, 5, 6]])
+
+        # No weights
+        avg, scl = average(y, returned=True)
+        assert_equal(scl, 6.)
+
+        avg, scl = average(y, 0, returned=True)
+        assert_array_equal(scl, np.array([2., 2., 2.]))
+
+        avg, scl = average(y, 1, returned=True)
+        assert_array_equal(scl, np.array([3., 3.]))
+
+        # With weights
+        w0 = [1, 2]
+        avg, scl = average(y, weights=w0, axis=0, returned=True)
+        assert_array_equal(scl, np.array([3., 3., 3.]))
+
+        w1 = [1, 2, 3]
+        avg, scl = average(y, weights=w1, axis=1, returned=True)
+        assert_array_equal(scl, np.array([6., 6.]))
+
+        w2 = [[0, 0, 1], [1, 2, 3]]
+        avg, scl = average(y, weights=w2, axis=1, returned=True)
+        assert_array_equal(scl, np.array([1., 6.]))
+
+
+class TestSelect(TestCase):
+    choices = [np.array([1, 2, 3]),
+               np.array([4, 5, 6]),
+               np.array([7, 8, 9])]
+    conditions = [np.array([False, False, False]),
+                  np.array([False, True, False]),
+                  np.array([False, False, True])]
+
+    def _select(self, cond, values, default=0):
+        output = []
+        for m in range(len(cond)):
+            output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
+        return output
+
+    def test_basic(self):
+        choices = self.choices
+        conditions = self.conditions
+        assert_array_equal(select(conditions, choices, default=15),
+                           self._select(conditions, choices, default=15))
+
+        assert_equal(len(choices), 3)
+        assert_equal(len(conditions), 3)
+
+    def test_broadcasting(self):
+        conditions = [np.array(True), np.array([False, True, False])]
+        choices = [1, np.arange(12).reshape(4, 3)]
+        assert_array_equal(select(conditions, choices), np.ones((4, 3)))
+        # default can broadcast too:
+        assert_equal(select([True], [0], default=[0]).shape, (1,))
+
+    def test_return_dtype(self):
+        assert_equal(select(self.conditions, self.choices, 1j).dtype,
+                     np.complex_)
+        # But the conditions need to be stronger than the scalar default
+        # if it is scalar.
+        choices = [choice.astype(np.int8) for choice in self.choices]
+        assert_equal(select(self.conditions, choices).dtype, np.int8)
+
+        d = np.array([1, 2, 3, np.nan, 5, 7])
+        m = np.isnan(d)
+        assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
+
+    def test_deprecated_empty(self):
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always")
+            assert_equal(select([], [], 3j), 3j)
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("always")
+            assert_warns(DeprecationWarning, select, [], [])
+            warnings.simplefilter("error")
+            assert_raises(DeprecationWarning, select, [], [])
+
+    def test_non_bool_deprecation(self):
+        choices = self.choices
+        conditions = self.conditions[:]
+        with warnings.catch_warnings():
+            warnings.filterwarnings("always")
+            conditions[0] = conditions[0].astype(np.int_)
+            assert_warns(DeprecationWarning, select, conditions, choices)
+            conditions[0] = conditions[0].astype(np.uint8)
+            assert_warns(DeprecationWarning, select, conditions, choices)
+            warnings.filterwarnings("error")
+            assert_raises(DeprecationWarning, select, conditions, choices)
+
+    def test_many_arguments(self):
+        # This used to be limited by NPY_MAXARGS == 32
+        conditions = [np.array([False])] * 100
+        choices = [np.array([1])] * 100
+        select(conditions, choices)
+
+
+class TestInsert(TestCase):
+    def test_basic(self):
+        a = [1, 2, 3]
+        assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
+        assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
+        assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
+        assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])
+        assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])
+        assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])
+        assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])
+        b = np.array([0, 1], dtype=np.float64)
+        assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
+        assert_equal(insert(b, [], []), b)
+        # Bools will be treated differently in the future:
+        #assert_equal(insert(a, np.array([True]*4), 9), [9,1,9,2,9,3,9])
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', FutureWarning)
+            assert_equal(
+                insert(a, np.array([True]*4), 9), [1, 9, 9, 9, 9, 2, 3])
+            assert_(w[0].category is FutureWarning)
+
+    def test_multidim(self):
+        a = [[1, 1, 1]]
+        r = [[2, 2, 2],
+             [1, 1, 1]]
+        assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])
+        assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
+        assert_equal(insert(a, 0, 2, axis=0), r)
+        assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
+
+        a = np.array([[1, 1], [2, 2], [3, 3]])
+        b = np.arange(1, 4).repeat(3).reshape(3, 3)
+        c = np.concatenate(
+            (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,
+             a[:, 1:2]), axis=1)
+        assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)
+        assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
+        # scalars behave differently, in this case exactly opposite:
+        assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
+        assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c)
+
+        a = np.arange(4).reshape(2, 2)
+        assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)
+        assert_equal(insert(a[:1, :], 1, a[1, :], axis=0), a)
+
+        # negative axis value
+        a = np.arange(24).reshape((2, 3, 4))
+        assert_equal(insert(a, 1, a[:, :, 3], axis=-1),
+                     insert(a, 1, a[:, :, 3], axis=2))
+        assert_equal(insert(a, 1, a[:, 2, :], axis=-2),
+                     insert(a, 1, a[:, 2, :], axis=1))
+
+        # invalid axis value
+        assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=3)
+        assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=-4)
+
+    def test_0d(self):
+        # This is an error in the future
+        a = np.array(1)
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', DeprecationWarning)
+            assert_equal(insert(a, [], 2, axis=0), np.array(2))
+            assert_(w[0].category is DeprecationWarning)
+
+    def test_subclass(self):
+        class SubClass(np.ndarray):
+            pass
+        a = np.arange(10).view(SubClass)
+        assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+        assert_(isinstance(np.insert(a, [], []), SubClass))
+        assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))
+        assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))
+        assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))
+        # This is an error in the future:
+        a = np.array(1).view(SubClass)
+        assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+
+    def test_index_array_copied(self):
+        x = np.array([1, 1, 1])
+        np.insert([0, 1, 2], x, [3, 4, 5])
+        assert_equal(x, np.array([1, 1, 1]))
+
+    def test_structured_array(self):
+        a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],
+                     dtype=[('foo', 'i'), ('bar', 'a1')])
+        val = (4, 'd')
+        b = np.insert(a, 0, val)
+        assert_array_equal(b[0], np.array(val, dtype=b.dtype))
+        val = [(4, 'd')] * 2
+        b = np.insert(a, [0, 2], val)
+        assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
+
+
+class TestAmax(TestCase):
+    def test_basic(self):
+        a = [3, 4, 5, 10, -3, -5, 6.0]
+        assert_equal(np.amax(a), 10.0)
+        b = [[3, 6.0, 9.0],
+             [4, 10.0, 5.0],
+             [8, 3.0, 2.0]]
+        assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0])
+        assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
+
+
+class TestAmin(TestCase):
+    def test_basic(self):
+        a = [3, 4, 5, 10, -3, -5, 6.0]
+        assert_equal(np.amin(a), -5.0)
+        b = [[3, 6.0, 9.0],
+             [4, 10.0, 5.0],
+             [8, 3.0, 2.0]]
+        assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0])
+        assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
+
+
+class TestPtp(TestCase):
+    def test_basic(self):
+        a = [3, 4, 5, 10, -3, -5, 6.0]
+        assert_equal(np.ptp(a, axis=0), 15.0)
+        b = [[3, 6.0, 9.0],
+             [4, 10.0, 5.0],
+             [8, 3.0, 2.0]]
+        assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0])
+        assert_equal(np.ptp(b, axis=-1), [6.0, 6.0, 6.0])
+
+
+class TestCumsum(TestCase):
+    def test_basic(self):
+        ba = [1, 2, 10, 11, 6, 5, 4]
+        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+        for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
+                      np.uint32, np.float32, np.float64, np.complex64, np.complex128]:
+            a = np.array(ba, ctype)
+            a2 = np.array(ba2, ctype)
+
+            tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
+            assert_array_equal(np.cumsum(a, axis=0), tgt)
+
+            tgt = np.array(
+                [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
+            assert_array_equal(np.cumsum(a2, axis=0), tgt)
+
+            tgt = np.array(
+                [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
+            assert_array_equal(np.cumsum(a2, axis=1), tgt)
+
+
+class TestProd(TestCase):
+    def test_basic(self):
+        ba = [1, 2, 10, 11, 6, 5, 4]
+        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+        for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+                      np.float32, np.float64, np.complex64, np.complex128]:
+            a = np.array(ba, ctype)
+            a2 = np.array(ba2, ctype)
+            if ctype in ['1', 'b']:
+                self.assertRaises(ArithmeticError, prod, a)
+                self.assertRaises(ArithmeticError, prod, a2, 1)
+                self.assertRaises(ArithmeticError, prod, a)
+            else:
+                
assert_equal(np.prod(a, axis=0), 26400) + assert_array_equal(np.prod(a2, axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(np.prod(a2, axis=-1), + np.array([24, 1890, 600], ctype)) + + +class TestCumprod(TestCase): + def test_basic(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + self.assertRaises(ArithmeticError, cumprod, a) + self.assertRaises(ArithmeticError, cumprod, a2, 1) + self.assertRaises(ArithmeticError, cumprod, a) + else: + assert_array_equal(np.cumprod(a, axis=-1), + np.array([1, 2, 20, 220, + 1320, 6600, 26400], ctype)) + assert_array_equal(np.cumprod(a2, axis=0), + np.array([[1, 2, 3, 4], + [5, 12, 21, 36], + [50, 36, 84, 180]], ctype)) + assert_array_equal(np.cumprod(a2, axis=-1), + np.array([[1, 2, 6, 24], + [5, 30, 210, 1890], + [10, 30, 120, 600]], ctype)) + + +class TestDiff(TestCase): + def test_basic(self): + x = [1, 4, 6, 7, 12] + out = np.array([3, 2, 1, 5]) + out2 = np.array([-1, -1, 4]) + out3 = np.array([0, 5]) + assert_array_equal(diff(x), out) + assert_array_equal(diff(x, n=2), out2) + assert_array_equal(diff(x, n=3), out3) + + def test_nd(self): + x = 20 * rand(10, 20, 30) + out1 = x[:, :, 1:] - x[:, :, :-1] + out2 = out1[:, :, 1:] - out1[:, :, :-1] + out3 = x[1:, :, :] - x[:-1, :, :] + out4 = out3[1:, :, :] - out3[:-1, :, :] + assert_array_equal(diff(x), out1) + assert_array_equal(diff(x, n=2), out2) + assert_array_equal(diff(x, axis=0), out3) + assert_array_equal(diff(x, n=2, axis=0), out4) + + +class TestDelete(TestCase): + def setUp(self): + self.a = np.arange(5) + self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + + def _check_inverse_of_slicing(self, indices): + a_del = delete(self.a, indices) + nd_a_del = delete(self.nd_a, indices, axis=1) + msg = 'Delete failed for obj: %r' % indices + # NOTE: The cast should be removed after warning phase for bools + if not isinstance(indices, (slice, int, long, np.integer)): + indices = np.asarray(indices, dtype=np.intp) + indices = indices[(indices >= 0) & (indices < 5)] + assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, + err_msg=msg) + xor = setxor1d(nd_a_del[0, :, 0], self.nd_a[0, indices, 0]) + assert_array_equal(xor, self.nd_a[0, :, 0], err_msg=msg) + + def test_slices(self): + lims = [-6, -2, 0, 1, 2, 4, 5] + steps = [-3, -1, 1, 3] + for start in lims: + for stop in lims: + for step in steps: + s = slice(start, stop, step) + self._check_inverse_of_slicing(s) + + def test_fancy(self): + # Deprecation/FutureWarning tests should be kept after change. 
+ self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) + with warnings.catch_warnings(): + warnings.filterwarnings('error', category=DeprecationWarning) + assert_raises(DeprecationWarning, delete, self.a, [100]) + assert_raises(DeprecationWarning, delete, self.a, [-100]) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', category=FutureWarning) + self._check_inverse_of_slicing([0, -1, 2, 2]) + obj = np.array([True, False, False], dtype=bool) + self._check_inverse_of_slicing(obj) + assert_(w[0].category is FutureWarning) + assert_(w[1].category is FutureWarning) + + def test_single(self): + self._check_inverse_of_slicing(0) + self._check_inverse_of_slicing(-4) + + def test_0d(self): + a = np.array(1) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', DeprecationWarning) + assert_equal(delete(a, [], axis=0), a) + assert_(w[0].category is DeprecationWarning) + + def test_subclass(self): + class SubClass(np.ndarray): + pass + a = self.a.view(SubClass) + assert_(isinstance(delete(a, 0), SubClass)) + assert_(isinstance(delete(a, []), SubClass)) + assert_(isinstance(delete(a, [0, 1]), SubClass)) + assert_(isinstance(delete(a, slice(1, 2)), SubClass)) + assert_(isinstance(delete(a, slice(1, -2)), SubClass)) + + +class TestGradient(TestCase): + def test_basic(self): + v = [[1, 1], [3, 4]] + x = np.array(v) + dx = [np.array([[2., 3.], [2., 3.]]), + np.array([[0., 0.], [1., 1.]])] + assert_array_equal(gradient(x), dx) + assert_array_equal(gradient(v), dx) + + def test_badargs(self): + # for 2D array, gradient can take 0, 1, or 2 extra args + x = np.array([[1, 1], [3, 4]]) + assert_raises(SyntaxError, gradient, x, np.array([1., 1.]), + np.array([1., 1.]), np.array([1., 1.])) + + def test_masked(self): + # Make sure that gradient supports subclasses like masked arrays + x = np.ma.array([[1, 1], [3, 4]]) + assert_equal(type(gradient(x)[0]), type(x)) + + def test_datetime64(self): + # Make sure gradient() can handle special types like datetime64 + x = np.array( + ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12', + '1910-10-12', '1910-12-12', '1912-12-12'], + dtype='datetime64[D]') + dx = np.array( + [-7, -3, 0, 31, 61, 396, 1066], + dtype='timedelta64[D]') + assert_array_equal(gradient(x), dx) + assert_(dx.dtype == np.dtype('timedelta64[D]')) + + def test_timedelta64(self): + # Make sure gradient() can handle special types like timedelta64 + x = np.array( + [-5, -3, 10, 12, 61, 321, 300], + dtype='timedelta64[D]') + dx = np.array( + [-3, 7, 7, 25, 154, 119, -161], + dtype='timedelta64[D]') + assert_array_equal(gradient(x), dx) + assert_(dx.dtype == np.dtype('timedelta64[D]')) + + def test_second_order_accurate(self): + # Testing that the relative numerical error is less that 3% for + # this example problem. This corresponds to second order + # accurate finite differences for all interior and boundary + # points. 
+ x = np.linspace(0, 1, 10) + dx = x[1] - x[0] + y = 2 * x ** 3 + 4 * x ** 2 + 2 * x + analytical = 6 * x ** 2 + 8 * x + 2 + num_error = np.abs((np.gradient(y, dx) / analytical) - 1) + assert_(np.all(num_error < 0.03) == True) + + +class TestAngle(TestCase): + def test_basic(self): + x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2, + 1, 1j, -1, -1j, 1 - 3j, -1 + 3j] + y = angle(x) + yo = [ + np.arctan(3.0 / 1.0), + np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0, + -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)] + z = angle(x, deg=1) + zo = np.array(yo) * 180 / np.pi + assert_array_almost_equal(y, yo, 11) + assert_array_almost_equal(z, zo, 11) + + +class TestTrimZeros(TestCase): + """ only testing for integer splits. + """ + def test_basic(self): + a = np.array([0, 0, 1, 2, 3, 4, 0]) + res = trim_zeros(a) + assert_array_equal(res, np.array([1, 2, 3, 4])) + + def test_leading_skip(self): + a = np.array([0, 0, 1, 0, 2, 3, 4, 0]) + res = trim_zeros(a) + assert_array_equal(res, np.array([1, 0, 2, 3, 4])) + + def test_trailing_skip(self): + a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0]) + res = trim_zeros(a) + assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4])) + + +class TestExtins(TestCase): + def test_basic(self): + a = np.array([1, 3, 2, 1, 2, 3, 3]) + b = extract(a > 1, a) + assert_array_equal(b, [3, 2, 2, 3, 3]) + + def test_place(self): + a = np.array([1, 4, 3, 2, 5, 8, 7]) + place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) + assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) + + def test_both(self): + a = rand(10) + mask = a > 0.5 + ac = a.copy() + c = extract(mask, a) + place(a, mask, 0) + place(a, mask, c) + assert_array_equal(a, ac) + + +class TestVectorize(TestCase): + def test_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_scalar(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], 5) + assert_array_equal(r, [5, 8, 1, 4]) + + def test_large(self): + x = np.linspace(-3, 2, 10000) + f = vectorize(lambda x: x) + y = f(x) + assert_array_equal(y, x) + + def test_ufunc(self): + import math + f = vectorize(math.cos) + args = np.array([0, 0.5*np.pi, np.pi, 1.5*np.pi, 2*np.pi]) + r1 = f(args) + r2 = np.cos(args) + assert_array_equal(r1, r2) + + def test_keywords(self): + import math + + def foo(a, b=1): + return a + b + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. 
+ import random + try: + f = vectorize(random.randrange) + except: + raise AssertionError() + + def test_keywords2_ticket_2100(self): + r"""Test kwarg support: enhancement ticket 2100""" + import math + + def foo(a, b=1): + return a + b + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(a=args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(b=1, a=args) + assert_array_equal(r1, r2) + r1 = f(args, b=2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords3_ticket_2100(self): + """Test excluded with mixed positional and kwargs: ticket 2100""" + def mypolyval(x, p): + _p = list(p) + res = _p.pop(0) + while _p: + res = res*x + _p.pop(0) + return res + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) + ans = [3, 6] + assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) + + def test_keywords4_ticket_2100(self): + """Test vectorizing function with no positional args.""" + @vectorize + def f(**kw): + res = 1.0 + for _k in kw: + res *= kw[_k] + return res + assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) + + def test_keywords5_ticket_2100(self): + """Test vectorizing function with no kwargs args.""" + @vectorize + def f(*v): + return np.prod(v) + assert_array_equal(f([1, 2], [3, 4]), [3, 8]) + + def test_coverage1_ticket_2100(self): + def foo(): + return 1 + f = vectorize(foo) + assert_array_equal(f(), 1) + + def test_assigning_docstring(self): + def foo(x): + return x + doc = "Provided documentation" + f = vectorize(foo, doc=doc) + assert_equal(f.__doc__, doc) + + def test_UnboundMethod_ticket_1156(self): + """Regression test for issue 1156""" + class Foo: + b = 2 + + def bar(self, a): + return a**self.b + assert_array_equal(vectorize(Foo().bar)(np.arange(9)), + np.arange(9)**2) + assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), + np.arange(9)**2) + + def test_execution_order_ticket_1487(self): + """Regression test for dependence on execution order: issue 1487""" + f1 = vectorize(lambda x: x) + res1a = f1(np.arange(3)) + res1b = f1(np.arange(0.1, 3)) + f2 = vectorize(lambda x: x) + res2b = f2(np.arange(0.1, 3)) + res2a = f2(np.arange(3)) + assert_equal(res1a, res2a) + assert_equal(res1b, res2b) + + def test_string_ticket_1892(self): + """Test vectorization over strings: issue 1892.""" + f = np.vectorize(lambda x: x) + s = '0123456789'*10 + assert_equal(s, f(s)) + #z = f(np.array([s,s])) + #assert_array_equal([s,s], f(s)) + + def test_cache(self): + """Ensure that vectorized func called exactly once per argument.""" + _calls = [0] + + @vectorize + def f(x): + _calls[0] += 1 + return x**2 + f.cache = True + x = np.arange(5) + assert_array_equal(f(x), x*x) + assert_equal(_calls[0], len(x)) + + def test_otypes(self): + f = np.vectorize(lambda x: x) + f.otypes = 'i' + x = np.arange(5) + assert_array_equal(f(x), x) + + +class TestDigitize(TestCase): + def test_forward(self): + x = np.arange(-6, 5) + bins = np.arange(-5, 5) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(5, -5, -1) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_random(self): + x = rand(10) + bin = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bin) != 0)) + + def test_right_basic(self): + x = [1, 5, 4, 10, 8, 11, 0] + bins = [1, 5, 10] + default_answer = [1, 2, 1, 3, 2, 3, 0] + assert_array_equal(digitize(x, bins), default_answer) + 
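+        # The indexing rule being exercised here (illustrative sketch; the
+        # nested helper is hypothetical, not part of the original suite):
+        # with right=False, x lands in bin i when bins[i-1] <= x < bins[i];
+        # right=True closes the upper edge instead, as the 'right_answer'
+        # checks below show.
+        def _digitize_rule_sketch():
+            edges = np.array([1, 5, 10])
+            for value in [0, 1, 4, 5, 8, 10, 11]:
+                i = np.digitize([value], edges)[0]
+                if 0 < i < len(edges):
+                    assert_(edges[i - 1] <= value < edges[i])
+        _digitize_rule_sketch()
+
+        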
right_answer = [0, 1, 1, 2, 2, 3, 0]
+        assert_array_equal(digitize(x, bins, True), right_answer)
+
+    def test_right_open(self):
+        x = np.arange(-6, 5)
+        bins = np.arange(-6, 4)
+        assert_array_equal(digitize(x, bins, True), np.arange(11))
+
+    def test_right_open_reverse(self):
+        x = np.arange(5, -6, -1)
+        bins = np.arange(4, -6, -1)
+        assert_array_equal(digitize(x, bins, True), np.arange(11))
+
+    def test_right_open_random(self):
+        x = rand(10)
+        bins = np.linspace(x.min(), x.max(), 10)
+        assert_(np.all(digitize(x, bins, True) != 10))
+
+    def test_monotonic(self):
+        x = [-1, 0, 1, 2]
+        bins = [0, 0, 1]
+        assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
+        assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
+        bins = [1, 1, 0]
+        assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
+        assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
+        bins = [1, 1, 1, 1]
+        assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
+        assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
+        bins = [0, 0, 1, 0]
+        assert_raises(ValueError, digitize, x, bins)
+        bins = [1, 1, 0, 1]
+        assert_raises(ValueError, digitize, x, bins)
+
+
+class TestUnwrap(TestCase):
+    def test_simple(self):
+        # check that unwrap removes jumps greater than 2*pi
+        assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
+        # check that unwrap maintains continuity
+        assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
+
+
+class TestFilterwindows(TestCase):
+    def test_hanning(self):
+        # check symmetry
+        w = hanning(10)
+        assert_array_almost_equal(w, flipud(w), 7)
+        # check known value
+        assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
+
+    def test_hamming(self):
+        # check symmetry
+        w = hamming(10)
+        assert_array_almost_equal(w, flipud(w), 7)
+        # check known value
+        assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
+
+    def test_bartlett(self):
+        # check symmetry
+        w = bartlett(10)
+        assert_array_almost_equal(w, flipud(w), 7)
+        # check known value
+        assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
+
+    def test_blackman(self):
+        # check symmetry
+        w = blackman(10)
+        assert_array_almost_equal(w, flipud(w), 7)
+        # check known value
+        assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
+
+
+class TestTrapz(TestCase):
+    def test_simple(self):
+        x = np.arange(-10, 10, .1)
+        r = trapz(np.exp(-.5*x**2) / np.sqrt(2*np.pi), dx=0.1)
+        # check that the integral of the normal density equals 1
+        assert_almost_equal(r, 1, 7)
+
+    def test_ndim(self):
+        x = np.linspace(0, 1, 3)
+        y = np.linspace(0, 2, 8)
+        z = np.linspace(0, 3, 13)
+
+        wx = np.ones_like(x) * (x[1] - x[0])
+        wx[0] /= 2
+        wx[-1] /= 2
+        wy = np.ones_like(y) * (y[1] - y[0])
+        wy[0] /= 2
+        wy[-1] /= 2
+        wz = np.ones_like(z) * (z[1] - z[0])
+        wz[0] /= 2
+        wz[-1] /= 2
+
+        q = x[:, None, None] + y[None, :, None] + z[None, None, :]
+
+        qx = (q * wx[:, None, None]).sum(axis=0)
+        qy = (q * wy[None, :, None]).sum(axis=1)
+        qz = (q * wz[None, None, :]).sum(axis=2)
+
+        # n-d `x`
+        r = trapz(q, x=x[:, None, None], axis=0)
+        assert_almost_equal(r, qx)
+        r = trapz(q, x=y[None, :, None], axis=1)
+        assert_almost_equal(r, qy)
+        r = trapz(q, x=z[None, None, :], axis=2)
+        assert_almost_equal(r, qz)
+
+        # 1-d `x`
+        r = trapz(q, x=x, axis=0)
+        assert_almost_equal(r, qx)
+        r = trapz(q, x=y, axis=1)
+        assert_almost_equal(r, qy)
+        r = trapz(q, x=z, axis=2)
+        assert_almost_equal(r, qz)
+
+    def test_masked(self):
+        # Testing that masked arrays behave as if the function is 0 where
+        # masked
+        x = np.arange(5)
+        y = x * x
+        mask = x == 2
+        ym = np.ma.array(y, mask=mask)
+        r = 13.0  # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
+        
assert_almost_equal(trapz(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapz(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapz(y, xm), r) + + def test_matrix(self): + #Test to make sure matrices give the same answer as ndarrays + x = np.linspace(0, 5) + y = x * x + r = trapz(y, x) + mx = np.matrix(x) + my = np.matrix(y) + mr = trapz(my, mx) + assert_almost_equal(mr, r) + + +class TestSinc(TestCase): + def test_simple(self): + assert_(sinc(0) == 1) + w = sinc(np.linspace(-1, 1, 100)) + #check symmetry + assert_array_almost_equal(w, flipud(w), 7) + + def test_array_like(self): + x = [0, 0.5] + y1 = sinc(np.array(x)) + y2 = sinc(list(x)) + y3 = sinc(tuple(x)) + assert_array_equal(y1, y2) + assert_array_equal(y1, y3) + + +class TestHistogram(TestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + def test_simple(self): + n = 100 + v = rand(n) + (a, b) = histogram(v) + #check if the sum of the bins equals the number of samples + assert_equal(np.sum(a, axis=0), n) + #check that the bin counts are evenly spaced when the data is from a + # linear function + (a, b) = histogram(np.linspace(0, 10, 100)) + assert_array_equal(a, 10) + + def test_one_bin(self): + # Ticket 632 + hist, edges = histogram([1, 2, 3, 4], [1, 2]) + assert_array_equal(hist, [2, ]) + assert_array_equal(edges, [1, 2]) + assert_raises(ValueError, histogram, [1, 2], bins=0) + h, e = histogram([1, 2], bins=1) + assert_equal(h, np.array([2])) + assert_allclose(e, np.array([1., 2.])) + + def test_normed(self): + # Check that the integral of the density equals 1. + n = 100 + v = rand(n) + a, b = histogram(v, normed=True) + area = np.sum(a * diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths (buggy but backwards compatible) + v = np.arange(10) + bins = [0, 1, 5, 9, 10] + a, b = histogram(v, bins, normed=True) + area = np.sum(a * diff(b)) + assert_almost_equal(area, 1) + + def test_density(self): + # Check that the integral of the density equals 1. + n = 100 + v = rand(n) + a, b = histogram(v, density=True) + area = np.sum(a * diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths + v = np.arange(10) + bins = [0, 1, 3, 6, 10] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, .1) + assert_equal(np.sum(a*diff(b)), 1) + + # Variale bin widths are especially useful to deal with + # infinities. + v = np.arange(10) + bins = [0, 1, 3, 6, np.inf] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, [.1, .1, .1, 0.]) + + # Taken from a bug report from N. Becker on the numpy-discussion + # mailing list Aug. 6, 2010. 
+ counts, dmy = np.histogram( + [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) + assert_equal(counts, [.25, 0]) + + def test_outliers(self): + # Check that outliers are not tallied + a = np.arange(10) + .5 + + # Lower outliers + h, b = histogram(a, range=[0, 9]) + assert_equal(h.sum(), 9) + + # Upper outliers + h, b = histogram(a, range=[1, 10]) + assert_equal(h.sum(), 9) + + # Normalization + h, b = histogram(a, range=[1, 9], normed=True) + assert_almost_equal((h * diff(b)).sum(), 1, decimal=15) + + # Weights + w = np.arange(10) + .5 + h, b = histogram(a, range=[1, 9], weights=w, normed=True) + assert_equal((h * diff(b)).sum(), 1) + + h, b = histogram(a, bins=8, range=[1, 9], weights=w) + assert_equal(h, w[1:-1]) + + def test_type(self): + # Check the type of the returned histogram + a = np.arange(10) + .5 + h, b = histogram(a) + assert_(issubdtype(h.dtype, int)) + + h, b = histogram(a, normed=True) + assert_(issubdtype(h.dtype, float)) + + h, b = histogram(a, weights=np.ones(10, int)) + assert_(issubdtype(h.dtype, int)) + + h, b = histogram(a, weights=np.ones(10, float)) + assert_(issubdtype(h.dtype, float)) + + def test_f32_rounding(self): + # gh-4799, check that the rounding of the edges works with float32 + x = np.array([276.318359 , -69.593948 , 21.329449], dtype=np.float32) + y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) + counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) + assert_equal(counts_hist.sum(), 3.) + + def test_weights(self): + v = rand(100) + w = np.ones(100) * 5 + a, b = histogram(v) + na, nb = histogram(v, normed=True) + wa, wb = histogram(v, weights=w) + nwa, nwb = histogram(v, weights=w, normed=True) + assert_array_almost_equal(a * 5, wa) + assert_array_almost_equal(na, nwa) + + # Check weights are properly applied. + v = np.linspace(0, 10, 10) + w = np.concatenate((np.zeros(5), np.ones(5))) + wa, wb = histogram(v, bins=np.arange(11), weights=w) + assert_array_almost_equal(wa, w) + + # Check with integer weights + wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) + assert_array_equal(wa, [4, 5, 0, 1]) + wa, wb = histogram( + [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True) + assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) + + # Check weights with non-uniform bin widths + a, b = histogram( + np.arange(9), [0, 1, 3, 6, 10], + weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) + assert_almost_equal(a, [.2, .1, .1, .075]) + + def test_empty(self): + a, b = histogram([], bins=([0, 1])) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + +class TestHistogramdd(TestCase): + def test_simple(self): + x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], + [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) + H, edges = histogramdd(x, (2, 3, 3), + range=[[-1, 1], [0, 3], [0, 3]]) + answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], + [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) + assert_array_equal(H, answer) + + # Check normalization + ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] + H, edges = histogramdd(x, bins=ed, normed=True) + assert_(np.all(H == answer / 12.)) + + # Check that H has the correct shape. + H, edges = histogramdd(x, (2, 3, 4), + range=[[-1, 1], [0, 3], [0, 4]], + normed=True) + answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], + [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) + assert_array_almost_equal(H, answer / 6., 4) + # Check that a sequence of arrays is accepted and H has the correct + # shape. 
+ z = [np.squeeze(y) for y in split(x, 3, axis=1)] + H, edges = histogramdd( + z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) + answer = np.array([[[0, 0], [0, 0], [0, 0]], + [[0, 1], [0, 0], [1, 0]], + [[0, 1], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0]]]) + assert_array_equal(H, answer) + + Z = np.zeros((5, 5, 5)) + Z[list(range(5)), list(range(5)), list(range(5))] = 1. + H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) + assert_array_equal(H, Z) + + def test_shape_3d(self): + # All possible permutations for bins of different lengths in 3D. + bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), + (4, 5, 6)) + r = rand(10, 3) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_shape_4d(self): + # All possible permutations for bins of different lengths in 4D. + bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), + (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), + (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), + (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), + (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), + (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) + + r = rand(10, 4) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_weights(self): + v = rand(100, 2) + hist, edges = histogramdd(v) + n_hist, edges = histogramdd(v, normed=True) + w_hist, edges = histogramdd(v, weights=np.ones(100)) + assert_array_equal(w_hist, hist) + w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True) + assert_array_equal(w_hist, n_hist) + w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) + assert_array_equal(w_hist, 2 * hist) + + def test_identical_samples(self): + x = np.zeros((10, 2), int) + hist, edges = histogramdd(x, bins=2) + assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) + + def test_empty(self): + a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, np.array([[0.]])) + a, b = np.histogramdd([[], [], []], bins=2) + assert_array_max_ulp(a, np.zeros((2, 2, 2))) + + def test_bins_errors(self): + """There are two ways to specify bins. Check for the right errors when + mixing those.""" + x = np.arange(8).reshape(2, 4) + assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) + assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) + assert_raises( + ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]]) + assert_raises( + ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) + assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) + + def test_inf_edges(self): + """Test using +/-inf bin edges works. See #1788.""" + with np.errstate(invalid='ignore'): + x = np.arange(6).reshape(3, 2) + expected = np.array([[1, 0], [0, 1], [0, 1]]) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) + assert_allclose(h, expected) + + def test_rightmost_binedge(self): + """Test event very close to rightmost binedge. + See Github issue #4266""" + x = [0.9999999995] + bins = [[0.,0.5,1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0] + bins = [[0.,0.5,1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) 
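+
+        # Edge convention in one place (illustrative sketch; the nested
+        # helper is hypothetical, not part of the original suite): interior
+        # edges are half-open, while the rightmost edge is closed, which is
+        # why 1.0 above lands in the last bin.
+        def _edge_convention_sketch():
+            edges = [[0., 0.5, 1.0]]
+            h, _ = histogramdd([[0.5]], bins=edges)
+            assert_(h[1] == 1.0)  # 0.5 opens the second bin: edges half-open
+            h, _ = histogramdd([[1.0]], bins=edges)
+            assert_(h[1] == 1.0)  # 1.0 still counted: last bin is closed
+        _edge_convention_sketch()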
+ x = [1.0000000001] + bins = [[0.,0.5,1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0001] + bins = [[0.,0.5,1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + + +class TestUnique(TestCase): + def test_simple(self): + x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) + assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) + assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) + x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] + assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) + x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) + assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) + + +class TestCheckFinite(TestCase): + def test_simple(self): + a = [1, 2, 3] + b = [1, 2, np.inf] + c = [1, 2, np.nan] + np.lib.asarray_chkfinite(a) + assert_raises(ValueError, np.lib.asarray_chkfinite, b) + assert_raises(ValueError, np.lib.asarray_chkfinite, c) + + def test_dtype_order(self): + """Regression test for missing dtype and order arguments""" + a = [1, 2, 3] + a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64) + assert_(a.dtype == np.float64) + + +class TestCorrCoef(TestCase): + A = np.array( + [[0.15391142, 0.18045767, 0.14197213], + [0.70461506, 0.96474128, 0.27906989], + [0.9297531, 0.32296769, 0.19267156]]) + B = np.array( + [[0.10377691, 0.5417086, 0.49807457], + [0.82872117, 0.77801674, 0.39226705], + [0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array( + [[1., 0.9379533, -0.04931983], + [0.9379533, 1., 0.30007991], + [-0.04931983, 0.30007991, 1.]]) + res2 = np.array( + [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], + [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], + [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], + [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], + [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], + [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) + + def test_non_array(self): + assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), + [[1., -1.], [-1., 1.]]) + + def test_simple(self): + assert_almost_equal(corrcoef(self.A), self.res1) + assert_almost_equal(corrcoef(self.A, self.B), self.res2) + + def test_ddof(self): + assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + assert_allclose(corrcoef(x), np.array([[1., -1.j], [1.j, 1.]])) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(np.array([])), np.nan) + assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + x = np.array([[0, 2], [1, 1], [2, 0]]).T + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(x, ddof=5), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + +class TestCov(TestCase): + def test_basic(self): + x = np.array([[0, 2], [1, 1], [2, 0]]).T + assert_allclose(cov(x), np.array([[1., -1.], [-1., 1.]])) + + def test_complex(self): + x 
= np.array([[1, 2, 3], [1j, 2j, 3j]]) + assert_allclose(cov(x), np.array([[1., -1.j], [1.j, 1.]])) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(np.array([])), np.nan) + assert_array_equal(cov(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(cov(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + x = np.array([[0, 2], [1, 1], [2, 0]]).T + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(x, ddof=5), + np.array([[np.inf, -np.inf], [-np.inf, np.inf]])) + + +class Test_I0(TestCase): + def test_simple(self): + assert_almost_equal( + i0(0.5), + np.array(1.0634833707413234)) + + A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549]) + assert_almost_equal( + i0(A), + np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049])) + + B = np.array([[0.827002, 0.99959078], + [0.89694769, 0.39298162], + [0.37954418, 0.05206293], + [0.36465447, 0.72446427], + [0.48164949, 0.50324519]]) + assert_almost_equal( + i0(B), + np.array([[1.17843223, 1.26583466], + [1.21147086, 1.03898290], + [1.03633899, 1.00067775], + [1.03352052, 1.13557954], + [1.05884290, 1.06432317]])) + + +class TestKaiser(TestCase): + def test_simple(self): + assert_(np.isfinite(kaiser(1, 1.0))) + assert_almost_equal(kaiser(0, 1.0), + np.array([])) + assert_almost_equal(kaiser(2, 1.0), + np.array([0.78984831, 0.78984831])) + assert_almost_equal(kaiser(5, 1.0), + np.array([0.78984831, 0.94503323, 1., + 0.94503323, 0.78984831])) + assert_almost_equal(kaiser(5, 1.56789), + np.array([0.58285404, 0.88409679, 1., + 0.88409679, 0.58285404])) + + def test_int_beta(self): + kaiser(3, 4) + + +class TestMsort(TestCase): + def test_simple(self): + A = np.array([[0.44567325, 0.79115165, 0.54900530], + [0.36844147, 0.37325583, 0.96098397], + [0.64864341, 0.52929049, 0.39172155]]) + assert_almost_equal( + msort(A), + np.array([[0.36844147, 0.37325583, 0.39172155], + [0.44567325, 0.52929049, 0.54900530], + [0.64864341, 0.79115165, 0.96098397]])) + + +class TestMeshgrid(TestCase): + def test_simple(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) + assert_array_equal(X, np.array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]])) + assert_array_equal(Y, np.array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]])) + + def test_single_input(self): + [X] = meshgrid([1, 2, 3, 4]) + assert_array_equal(X, np.array([1, 2, 3, 4])) + + def test_no_input(self): + args = [] + assert_array_equal([], meshgrid(*args)) + + def test_indexing(self): + x = [1, 2, 3] + y = [4, 5, 6, 7] + [X, Y] = meshgrid(x, y, indexing='ij') + assert_array_equal(X, np.array([[1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) + assert_array_equal(Y, np.array([[4, 5, 6, 7], + [4, 5, 6, 7], + [4, 5, 6, 7]])) + + # Test expected shapes: + z = [8, 9] + assert_(meshgrid(x, y)[0].shape == (4, 3)) + assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) + assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) + assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) + + assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') + + def test_sparse(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + assert_array_equal(X, np.array([[1, 2, 3]])) + assert_array_equal(Y, 
np.array([[4], [5], [6], [7]])) + + def test_invalid_arguments(self): + # Test that meshgrid complains about invalid arguments + # Regression test for issue #4755: + # https://github.com/numpy/numpy/issues/4755 + assert_raises(TypeError, meshgrid, + [1, 2, 3], [4, 5, 6, 7], indices='ij') + + +class TestPiecewise(TestCase): + def test_simple(self): + # Condition is single bool list + x = piecewise([0, 0], [True, False], [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: single bool list + x = piecewise([0, 0], [[True, False]], [1]) + assert_array_equal(x, [1, 0]) + + # Conditions is single bool array + x = piecewise([0, 0], np.array([True, False]), [1]) + assert_array_equal(x, [1, 0]) + + # Condition is single int array + x = piecewise([0, 0], np.array([1, 0]), [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: int array + x = piecewise([0, 0], [np.array([1, 0])], [1]) + assert_array_equal(x, [1, 0]) + + x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + assert_array_equal(x, [0, -1]) + + def test_two_conditions(self): + x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + assert_array_equal(x, [3, 4]) + + def test_default(self): + # No value specified for x[1], should be 0 + x = piecewise([1, 2], [True, False], [2]) + assert_array_equal(x, [2, 0]) + + # Should set x[1] to 3 + x = piecewise([1, 2], [True, False], [2, 3]) + assert_array_equal(x, [2, 3]) + + def test_0d(self): + x = np.array(3) + y = piecewise(x, x > 3, [4, 0]) + assert_(y.ndim == 0) + assert_(y == 0) + + x = 5 + y = piecewise(x, [[True], [False]], [1, 0]) + assert_(y.ndim == 0) + assert_(y == 1) + + def test_0d_comparison(self): + x = 3 + y = piecewise(x, [x <= 3, x > 3], [4, 0]) + + +class TestBincount(TestCase): + def test_simple(self): + y = np.bincount(np.arange(4)) + assert_array_equal(y, np.ones(4)) + + def test_simple2(self): + y = np.bincount(np.array([1, 5, 2, 4, 1])) + assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) + + def test_simple_weight(self): + x = np.arange(4) + w = np.array([0.2, 0.3, 0.5, 0.1]) + y = np.bincount(x, w) + assert_array_equal(y, w) + + def test_simple_weight2(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + + def test_with_minlength(self): + x = np.array([0, 1, 0, 1, 1]) + y = np.bincount(x, minlength=3) + assert_array_equal(y, np.array([2, 3, 0])) + + def test_with_minlength_smaller_than_maxvalue(self): + x = np.array([0, 1, 1, 2, 2, 3, 3]) + y = np.bincount(x, minlength=2) + assert_array_equal(y, np.array([1, 2, 2, 2])) + + def test_with_minlength_and_weights(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w, 8) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) + + def test_empty(self): + x = np.array([], dtype=int) + y = np.bincount(x) + assert_array_equal(x, y) + + def test_empty_with_minlength(self): + x = np.array([], dtype=int) + y = np.bincount(x, minlength=5) + assert_array_equal(y, np.zeros(5, dtype=int)) + + def test_with_incorrect_minlength(self): + x = np.array([], dtype=int) + assert_raises_regex(TypeError, "an integer is required", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, "must be positive", + lambda: np.bincount(x, minlength=-1)) + assert_raises_regex(ValueError, "must be positive", + lambda: np.bincount(x, minlength=0)) + + x = np.arange(5) + assert_raises_regex(TypeError, "an integer is required", + 
lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, "minlength must be positive", + lambda: np.bincount(x, minlength=-1)) + assert_raises_regex(ValueError, "minlength must be positive", + lambda: np.bincount(x, minlength=0)) + + +class TestInterp(TestCase): + def test_exceptions(self): + assert_raises(ValueError, interp, 0, [], []) + assert_raises(ValueError, interp, 0, [0], [1, 2]) + + def test_basic(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_right_left_behavior(self): + assert_equal(interp([-1, 0, 1], [0], [1]), [1, 1, 1]) + assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0, 1, 1]) + assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1, 1, 0]) + assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0, 1, 0]) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.nan + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.array(.3, dtype=object) + assert_almost_equal(np.interp(x0, x, y), .3) + + def test_if_len_x_is_small(self): + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + + +def compare_results(res, desired): + for i in range(len(desired)): + assert_array_equal(res[i], desired[i]) + + +class TestScoreatpercentile(TestCase): + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, 0), 0.) 
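+        # x is [0.0, 0.5, ..., 3.5]: the end points are exact and the
+        # median interpolates halfway between the middle values 1.5 and 2.0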
+        assert_equal(np.percentile(x, 100), 3.5)
+        assert_equal(np.percentile(x, 50), 1.75)
+
+    def test_api(self):
+        d = np.ones(5)
+        np.percentile(d, 5, None, None, False)
+        np.percentile(d, 5, None, None, False, 'linear')
+        o = np.ones((1,))
+        np.percentile(d, 5, None, o, False, 'linear')
+
+    def test_2D(self):
+        x = np.array([[1, 1, 1],
+                      [1, 1, 1],
+                      [4, 4, 3],
+                      [1, 1, 1],
+                      [1, 1, 1]])
+        assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
+
+    def test_linear(self):
+
+        # Test defaults
+        assert_equal(np.percentile(range(10), 50), 4.5)
+
+        # explicitly specify interpolation 'linear' (the default)
+        assert_equal(np.percentile(range(10), 50,
+                                   interpolation='linear'), 4.5)
+
+    def test_lower_higher(self):
+
+        # interpolation 'lower'/'higher'
+        assert_equal(np.percentile(range(10), 50,
+                                   interpolation='lower'), 4)
+        assert_equal(np.percentile(range(10), 50,
+                                   interpolation='higher'), 5)
+
+    def test_midpoint(self):
+        assert_equal(np.percentile(range(10), 51,
+                                   interpolation='midpoint'), 4.5)
+
+    def test_nearest(self):
+        assert_equal(np.percentile(range(10), 51,
+                                   interpolation='nearest'), 5)
+        assert_equal(np.percentile(range(10), 49,
+                                   interpolation='nearest'), 4)
+
+    def test_sequence(self):
+        x = np.arange(8) * 0.5
+        assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75])
+
+    def test_axis(self):
+        x = np.arange(12).reshape(3, 4)
+
+        assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0])
+
+        r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
+        assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0)
+
+        r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]]
+        assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T)
+
+        # ensure qth axis is always first as with np.array(old_percentile(..))
+        x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        assert_equal(np.percentile(x, (25, 50)).shape, (2,))
+        assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,))
+        assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6))
+        assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6))
+        assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6))
+        assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5))
+        assert_equal(np.percentile(x, (25, 50, 75), axis=1).shape,
+                     (3, 3, 5, 6))
+        assert_equal(np.percentile(x, (25, 50),
+                                   interpolation="higher").shape, (2,))
+        assert_equal(np.percentile(x, (25, 50, 75),
+                                   interpolation="higher").shape, (3,))
+        assert_equal(np.percentile(x, (25, 50), axis=0,
+                                   interpolation="higher").shape,
+                     (2, 4, 5, 6))
+        assert_equal(np.percentile(x, (25, 50), axis=1,
+                                   interpolation="higher").shape,
+                     (2, 3, 5, 6))
+        assert_equal(np.percentile(x, (25, 50), axis=2,
+                                   interpolation="higher").shape,
+                     (2, 3, 4, 6))
+        assert_equal(np.percentile(x, (25, 50), axis=3,
+                                   interpolation="higher").shape,
+                     (2, 3, 4, 5))
+        assert_equal(np.percentile(x, (25, 50, 75), axis=1,
+                                   interpolation="higher").shape,
+                     (3, 3, 5, 6))
+
+    def test_scalar_q(self):
+        # test for no empty dimensions for compatibility with old percentile
+        x = np.arange(12).reshape(3, 4)
+        assert_equal(np.percentile(x, 50), 5.5)
+        self.assertTrue(np.isscalar(np.percentile(x, 50)))
+        r0 = np.array([4., 5., 6., 7.])
+        assert_equal(np.percentile(x, 50, axis=0), r0)
+        assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
+        r1 = np.array([1.5, 5.5, 9.5])
+        assert_almost_equal(np.percentile(x, 50, axis=1), r1)
+        assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape)
+
+        out = np.empty(1)
+        assert_equal(np.percentile(x, 50, out=out), 5.5)
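+        # np.percentile writes the result into `out` and also returns it:
+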
assert_equal(out, 5.5) + out = np.empty(4) + assert_equal(np.percentile(x, 50, axis=0, out=out), r0) + assert_equal(out, r0) + out = np.empty(3) + assert_equal(np.percentile(x, 50, axis=1, out=out), r1) + assert_equal(out, r1) + + # test for no empty dimensions for compatiblity with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) + self.assertTrue(np.isscalar(np.percentile(x, 50))) + r0 = np.array([ 4., 5., 6., 7.]) + c0 = np.percentile(x, 50, interpolation='lower', axis=0) + assert_equal(c0, r0) + assert_equal(c0.shape, r0.shape) + r1 = np.array([ 1., 5., 9.]) + c1 = np.percentile(x, 50, interpolation='lower', axis=1) + assert_almost_equal(c1, r1) + assert_equal(c1.shape, r1.shape) + + out = np.empty((), dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', out=out) + assert_equal(c, 5) + assert_equal(out, 5) + out = np.empty(4, dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + out = np.empty(3, dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_exception(self): + assert_raises(ValueError, np.percentile, [1, 2], 56, + interpolation='foobar') + assert_raises(ValueError, np.percentile, [1], 101) + assert_raises(ValueError, np.percentile, [1], -1) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) + + def test_percentile_list(self): + assert_equal(np.percentile([1, 2, 3], 0), 1) + + def test_percentile_out(self): + x = np.array([1, 2, 3]) + y = np.zeros((3,)) + p = (1, 2, 3) + np.percentile(x, p, out=y) + assert_equal(y, np.percentile(x, p)) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + + y = np.zeros((3, 3)) + np.percentile(x, p, axis=0, out=y) + assert_equal(y, np.percentile(x, p, axis=0)) + + y = np.zeros((3, 2)) + np.percentile(x, p, axis=1, out=y) + assert_equal(y, np.percentile(x, p, axis=1)) + + x = np.arange(12).reshape(3, 4) + # q.dim > 1, float + r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) + out = np.empty((2, 4)) + assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0) + assert_equal(out, r0) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + out = np.empty((2, 3)) + assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) + assert_equal(out, r1) + + # q.dim > 1, int + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + out = np.empty((2, 4), dtype=x.dtype) + c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) + out = np.empty((2, 3), dtype=x.dtype) + c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_percentile_empty_dim(self): + # empty dims are preserved + d = np.arange(11*2).reshape(11, 1, 2, 1) + assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) + + 
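+        # the reduced shapes do not depend on the interpolation method:
+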
assert_array_equal(np.percentile(d, 50, axis=2, + interpolation='midpoint').shape, + (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-2, + interpolation='midpoint').shape, + (11, 1, 1)) + + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, + (2, 1, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, + (2, 11, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, + (2, 11, 1, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, + (2, 11, 1, 2)) + + + def test_percentile_no_overwrite(self): + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50], overwrite_input=False) + assert_equal(a, np.array([2, 3, 4, 1])) + + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50]) + assert_equal(a, np.array([2, 3, 4, 1])) + + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + + def test_percentile_overwrite(self): + a = np.array([2, 3, 4, 1]) + b = np.percentile(a, [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) + x = np.rollaxis(x, -1, 0) + assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + + assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), + np.percentile(x, [25, 60], axis=None)) + assert_equal(np.percentile(x, [25, 60], axis=(0,)), + np.percentile(x, [25, 60], axis=0)) + + d = np.arange(3 * 5 * 7 * 11).reshape(3, 5, 7, 11) + np.random.shuffle(d) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:, :, :, 0].flatten(), 25)) + assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], + np.percentile(d[:, :, 1, :].flatten(), [10, 90])) + assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], + np.percentile(d[:, :, 2, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], + np.percentile(d[2, :, :, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], + np.percentile(d[2, 1, :, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], + np.percentile(d[2, :, :, 1].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], + np.percentile(d[2, :, 2, :].flatten(), 25)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(IndexError, np.percentile, d, axis=-5, q=25) + assert_raises(IndexError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(IndexError, np.percentile, d, axis=4, q=25) + assert_raises(IndexError, np.percentile, d, axis=(0, 4), q=25) + assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + 
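+        # each reduced axis is kept as a length-1 dimension:
+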
assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), + keepdims=True).shape, (2, 1, 1, 7, 1)) + assert_equal(np.percentile(d, [1, 7], axis=(0, 3), + keepdims=True).shape, (2, 1, 5, 7, 1)) + + +class TestMedian(TestCase): + def test_basic(self): + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_equal(np.median(a0), 1) + assert_allclose(np.median(a1), 0.5) + assert_allclose(np.median(a2), 2.5) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_equal(np.median(a2, axis=1), [1, 4]) + assert_allclose(np.median(a2, axis=None), 2.5) + + a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) + assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) + a = np.array([0.0463301, 0.0444502, 0.141249]) + assert_equal(a[0], np.median(a)) + a = np.array([0.0444502, 0.141249, 0.0463301]) + assert_equal(a[-1], np.median(a)) + # check array scalar result + assert_equal(np.median(a).ndim, 0) + a[1] = np.nan + assert_equal(np.median(a).ndim, 0) + + def test_axis_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: + orig = a.copy() + np.median(a, axis=None) + for ax in range(a.ndim): + np.median(a, axis=ax) + assert_array_equal(a, orig) + + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3), 3.5) + assert_allclose(np.median(a3, axis=None), 3.5) + assert_allclose(np.median(a3.T), 3.5) + + def test_overwrite_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) + assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), + [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) + assert_allclose( + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), + [3, 4]) + + a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) + map(np.random.shuffle, a4) + assert_allclose(np.median(a4, axis=None), + np.median(a4.copy(), axis=None, overwrite_input=True)) + assert_allclose(np.median(a4, axis=0), + np.median(a4.copy(), axis=0, overwrite_input=True)) + assert_allclose(np.median(a4, axis=1), + np.median(a4.copy(), axis=1, overwrite_input=True)) + assert_allclose(np.median(a4, axis=2), + np.median(a4.copy(), axis=2, overwrite_input=True)) + + def test_array_like(self): + x = [1, 2, 3] + assert_almost_equal(np.median(x), 2) + x2 = [x] + assert_almost_equal(np.median(x2), 2) + assert_allclose(np.median(x2, axis=0), x) + + def test_subclass(self): + # gh-3846 + class MySubClass(np.ndarray): + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj + + def mean(self, axis=None, dtype=None, out=None): + return -7 + + a = MySubClass([1,2,3]) + assert_equal(np.median(a), -7) + + def test_object(self): + o = 
np.arange(7.)
+        assert_(type(np.median(o.astype(object))) is float)
+        o[2] = np.nan
+        assert_(type(np.median(o.astype(object))) is float)
+
+    def test_extended_axis(self):
+        o = np.random.normal(size=(71, 23))
+        x = np.dstack([o] * 10)
+        assert_equal(np.median(x, axis=(0, 1)), np.median(o))
+        x = np.rollaxis(x, -1, 0)
+        assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
+        x = x.swapaxes(0, 1).copy()
+        assert_equal(np.median(x, axis=(0, -1)), np.median(o))
+
+        assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
+        assert_equal(np.median(x, axis=(0,)), np.median(x, axis=0))
+        assert_equal(np.median(x, axis=(-1,)), np.median(x, axis=-1))
+
+        d = np.arange(3 * 5 * 7 * 11).reshape(3, 5, 7, 11)
+        np.random.shuffle(d)
+        assert_equal(np.median(d, axis=(0, 1, 2))[0],
+                     np.median(d[:, :, :, 0].flatten()))
+        assert_equal(np.median(d, axis=(0, 1, 3))[1],
+                     np.median(d[:, :, 1, :].flatten()))
+        assert_equal(np.median(d, axis=(3, 1, -4))[2],
+                     np.median(d[:, :, 2, :].flatten()))
+        assert_equal(np.median(d, axis=(3, 1, 2))[2],
+                     np.median(d[2, :, :, :].flatten()))
+        assert_equal(np.median(d, axis=(3, 2))[2, 1],
+                     np.median(d[2, 1, :, :].flatten()))
+        assert_equal(np.median(d, axis=(1, -2))[2, 1],
+                     np.median(d[2, :, :, 1].flatten()))
+        assert_equal(np.median(d, axis=(1, 3))[2, 2],
+                     np.median(d[2, :, 2, :].flatten()))
+
+    def test_extended_axis_invalid(self):
+        d = np.ones((3, 5, 7, 11))
+        assert_raises(IndexError, np.median, d, axis=-5)
+        assert_raises(IndexError, np.median, d, axis=(0, -5))
+        assert_raises(IndexError, np.median, d, axis=4)
+        assert_raises(IndexError, np.median, d, axis=(0, 4))
+        assert_raises(ValueError, np.median, d, axis=(1, 1))
+
+    def test_keepdims(self):
+        d = np.ones((3, 5, 7, 11))
+        assert_equal(np.median(d, axis=None, keepdims=True).shape,
+                     (1, 1, 1, 1))
+        assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
+                     (1, 1, 7, 11))
+        assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
+                     (1, 5, 7, 1))
+        assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
+                     (3, 1, 7, 11))
+        assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
+                     (1, 1, 1, 1))
+        assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
+                     (1, 1, 7, 1))
+
+
+class TestAdd_newdoc_ufunc(TestCase):
+
+    def test_ufunc_arg(self):
+        assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
+        assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah")
+
+    def test_string_arg(self):
+        assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
+
+
+class TestAdd_newdoc(TestCase):
+    def test_add_doc(self):
+        # test np.add_newdoc
+        tgt = "Current flat index into the array."
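+        # np.add_newdoc attaches docstrings at import time; verify a
+        # known one by its prefix: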
+ self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt) + self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300) + self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_index_tricks.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_index_tricks.py new file mode 100644 index 0000000000000..97047c53aa388 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_index_tricks.py @@ -0,0 +1,289 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import ( + run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, + assert_almost_equal, assert_array_almost_equal, assert_raises + ) +from numpy.lib.index_tricks import ( + mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, + index_exp, ndindex, r_, s_ + ) + + +class TestRavelUnravelIndex(TestCase): + def test_basic(self): + assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal( + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal( + np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal( + np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), + [[3, 6, 6], [4, 5, 1]]) + assert_equal( + np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) + + def test_dtypes(self): + # Test with different data types + for dtype in [np.int16, np.uint16, np.int32, + np.uint32, np.int64, np.uint64]: + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) + uncoords = 8*coords[0]+coords[1] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*coords[1] + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + dtype=dtype) + shape = (5, 8, 10) + uncoords = 10*(8*coords[0]+coords[1])+coords[2] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*(coords[1]+8*coords[2]) + assert_equal( + 
np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + def test_clipmodes(self): + # Test clipmodes + assert_equal( + np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=( + 'wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises( + ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) + + +class TestGrid(TestCase): + def test_basic(self): + a = mgrid[-1:1:10j] + b = mgrid[-1:1:0.1] + assert_(a.shape == (10,)) + assert_(b.shape == (20,)) + assert_(a[0] == -1) + assert_almost_equal(a[-1], 1) + assert_(b[0] == -1) + assert_almost_equal(b[1]-b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0]+19*0.1, 11) + assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) + + def test_linspace_equivalence(self): + y, st = np.linspace(2, 10, retstep=1) + assert_almost_equal(st, 8/49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) + + def test_nd(self): + c = mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0, :], -np.ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) + assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) + assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], + 0.1*np.ones(20, 'd'), 11) + assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], + 0.2*np.ones(20, 'd'), 11) + + +class TestConcatenator(TestCase): + def test_1d(self): + assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) + b = np.ones(5) + c = r_[b, 0, 0, b] + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + + def test_mixed_type(self): + g = r_[10.1, 1:10] + assert_(g.dtype == 'f8') + + def test_more_mixed_type(self): + g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] + assert_(g.dtype == 'f8') + + def test_2d(self): + b = np.random.rand(5, 5) + c = np.random.rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b) + assert_array_equal(d[5:, :], c) + + +class TestNdenumerate(TestCase): + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(list(ndenumerate(a)), + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) + + +class TestIndexExpression(TestCase): + def test_regression_1(self): + # ticket #1196 + a = np.arange(2) + assert_equal(a[:-1], a[s_[:-1]]) + assert_equal(a[:-1], a[index_exp[:-1]]) + + def test_simple_1(self): + a = np.random.rand(4, 5, 6) + + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) + + +def test_c_(): + a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] + assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) + + +def test_fill_diagonal(): + a = np.zeros((3, 3), int) + fill_diagonal(a, 5) + yield (assert_array_equal, a, + np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]])) + + #Test tall matrix + a = np.zeros((10, 3), int) + fill_diagonal(a, 5) + yield (assert_array_equal, a, + np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]])) + + #Test tall matrix wrap + a = np.zeros((10, 3), int) + 
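+    # The third argument is wrap=True: once the diagonal runs off the end
+    # of a tall matrix it restarts below, as the expected array shows.
+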
fill_diagonal(a, 5, True) + yield (assert_array_equal, a, + np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0]])) + + #Test wide matrix + a = np.zeros((3, 10), int) + fill_diagonal(a, 5) + yield (assert_array_equal, a, + np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])) + + # The same function can operate on a 4-d array: + a = np.zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = np.array([0, 1, 2]) + yield (assert_equal, np.where(a != 0), (i, i, i, i)) + + +def test_diag_indices(): + di = diag_indices(4) + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + yield (assert_array_equal, a, + np.array([[100, 2, 3, 4], + [5, 100, 7, 8], + [9, 10, 100, 12], + [13, 14, 15, 100]])) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = np.zeros((2, 2, 2), int) + a[d3] = 1 + yield (assert_array_equal, a, + np.array([[[1, 0], + [0, 0]], + + [[0, 0], + [0, 1]]])) + + +def test_diag_indices_from(): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + +def test_ndindex(): + x = list(ndindex(1, 2, 3)) + expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] + assert_array_equal(x, expected) + + x = list(ndindex((1, 2, 3))) + assert_array_equal(x, expected) + + # Test use of scalars and tuples + x = list(ndindex((3,))) + assert_array_equal(x, list(ndindex(3))) + + # Make sure size argument is optional + x = list(ndindex()) + assert_equal(x, [()]) + + x = list(ndindex(())) + assert_equal(x, [()]) + + # Make sure 0-sized ndindex works correctly + x = list(ndindex(*[0])) + assert_equal(x, []) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_io.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_io.py new file mode 100644 index 0000000000000..c11cd004149c0 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_io.py @@ -0,0 +1,1736 @@ +from __future__ import division, absolute_import, print_function + +import sys +import gzip +import os +import threading +from tempfile import mkstemp, NamedTemporaryFile +import time +import warnings +import gc +from io import BytesIO +from datetime import datetime + +import numpy as np +import numpy.ma as ma +from numpy.lib._iotools import (ConverterError, ConverterLockError, + ConversionWarning) +from numpy.compat import asbytes, asbytes_nested, bytes, asstr +from nose import SkipTest +from numpy.ma.testutils import ( + TestCase, assert_equal, assert_array_equal, + assert_raises, assert_raises_regex, run_module_suite +) +from numpy.testing import assert_warns, assert_, build_err_msg +from numpy.testing.utils import tempdir + + +class TextIO(BytesIO): + """Helper IO class. + + Writes encode strings to bytes if needed, reads return bytes. + This makes it easier to emulate files opened in binary mode + without needing to explicitly convert strings to bytes in + setting up the test data. 
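+
+    A small usage sketch (illustrative only)::
+
+        t = TextIO("1 2\n3 4")
+        t.read()  # -> b'1 2\n3 4'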
+ + """ + def __init__(self, s=""): + BytesIO.__init__(self, asbytes(s)) + + def write(self, s): + BytesIO.write(self, asbytes(s)) + + def writelines(self, lines): + BytesIO.writelines(self, [asbytes(s) for s in lines]) + + +MAJVER, MINVER = sys.version_info[:2] +IS_64BIT = sys.maxsize > 2**32 + + +def strptime(s, fmt=None): + """This function is available in the datetime module only + from Python >= 2.5. + + """ + if sys.version_info[0] >= 3: + return datetime(*time.strptime(s.decode('latin1'), fmt)[:3]) + else: + return datetime(*time.strptime(s, fmt)[:3]) + + +class RoundtripTest(object): + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. + + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile(delete=False) + load_file = target_file.name + else: + target_file = BytesIO() + load_file = target_file + + try: + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + if sys.platform == 'win32' and not isinstance(target_file, BytesIO): + target_file.close() + + arr_reloaded = np.load(load_file, **load_kwds) + + self.arr = arr + self.arr_reloaded = arr_reloaded + finally: + if not isinstance(target_file, BytesIO): + target_file.close() + # holds an open file descriptor so it can't be deleted on win + if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): + os.remove(target_file.name) + + def check_roundtrips(self, a): + self.roundtrip(a) + self.roundtrip(a, file_on_disk=True) + self.roundtrip(np.asfortranarray(a)) + self.roundtrip(np.asfortranarray(a), file_on_disk=True) + if a.shape[0] > 1: + # neither C nor Fortran contiguous for 2D arrays or more + self.roundtrip(np.asfortranarray(a)[1:]) + self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) + + def test_array(self): + a = np.array([], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], int) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) + self.check_roundtrips(a) + + def test_array_object(self): + if sys.version_info[:2] >= (2, 7): + a = np.array([], object) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], object) + self.check_roundtrips(a) + # Fails with UnpicklingError: could not find MARK on Python 2.6 + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + + @np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32") + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + a = np.asfortranarray([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + self.check_roundtrips(a) + + def test_format_2_0(self): + dt = [(("%d" % i) * 100, float) for i in range(500)] + a = np.ones(1000, dtype=dt) + with warnings.catch_warnings(record=True): + 
warnings.filterwarnings('always', '', UserWarning) + self.check_roundtrips(a) + + +class TestSaveLoad(RoundtripTest, TestCase): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(self.arr[0], self.arr_reloaded) + assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) + assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + + +class TestSavezLoad(RoundtripTest, TestCase): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + try: + for n, arr in enumerate(self.arr): + reloaded = self.arr_reloaded['arr_%d' % n] + assert_equal(arr, reloaded) + assert_equal(arr.dtype, reloaded.dtype) + assert_equal(arr.flags.fnc, reloaded.flags.fnc) + finally: + # delete tempfile, must be done here on windows + if self.arr_reloaded.fid: + self.arr_reloaded.fid.close() + os.remove(self.arr_reloaded.fid.name) + + @np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems") + @np.testing.dec.slow + def test_big_arrays(self): + L = (1 << 31) + 100000 + a = np.empty(L, dtype=np.uint8) + with tempdir(prefix="numpy_test_big_arrays_") as tmpdir: + tmp = os.path.join(tmpdir, "file.npz") + np.savez(tmp, a=a) + del a + npfile = np.load(tmp) + a = npfile['a'] + npfile.close() + + def test_multiple_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + self.roundtrip(a, b) + + def test_named_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(a, l['file_a']) + assert_equal(b, l['file_b']) + + def test_savez_filename_clashes(self): + # Test that issue #852 is fixed + # and savez functions in multithreaded environment + + def writer(error_list): + fd, tmp = mkstemp(suffix='.npz') + os.close(fd) + try: + arr = np.random.randn(500, 500) + try: + np.savez(tmp, arr=arr) + except OSError as err: + error_list.append(err) + finally: + os.remove(tmp) + + errors = [] + threads = [threading.Thread(target=writer, args=(errors,)) + for j in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + + if errors: + raise AssertionError(errors) + + def test_not_closing_opened_fid(self): + # Test that issue #2178 is fixed: + # verify could seek on 'loaded' file + + fd, tmp = mkstemp(suffix='.npz') + os.close(fd) + try: + fp = open(tmp, 'wb') + np.savez(fp, data='LOVELY LOAD') + fp.close() + + fp = open(tmp, 'rb', 10000) + fp.seek(0) + assert_(not fp.closed) + _ = np.load(fp)['data'] + assert_(not fp.closed) + # must not get closed by .load(opened fp) + fp.seek(0) + assert_(not fp.closed) + + finally: + fp.close() + os.remove(tmp) + + def test_closing_fid(self): + # Test that issue #1517 (too many opened files) remains closed + # It might be a "weak" test since failed to get triggered on + # e.g. Debian sid of 2012 Jul 05 but was reported to + # trigger the failure on Ubuntu 10.04: + # http://projects.scipy.org/numpy/ticket/1517#comment:2 + fd, tmp = mkstemp(suffix='.npz') + os.close(fd) + + try: + fp = open(tmp, 'wb') + np.savez(fp, data='LOVELY LOAD') + fp.close() + # We need to check if the garbage collector can properly close + # numpy npz file returned by np.load when their reference count + # goes to zero. Python 3 running in debug mode raises a + # ResourceWarning when file closing is left to the garbage + # collector, so we catch the warnings. 
Because ResourceWarning + # is unknown in Python < 3.x, we take the easy way out and + # catch all warnings. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + for i in range(1, 1025): + try: + np.load(tmp)["data"] + except Exception as e: + msg = "Failed to load data from a file: %s" % e + raise AssertionError(msg) + finally: + os.remove(tmp) + + def test_closing_zipfile_after_load(self): + # Check that zipfile owns file and can close it. + # This needs to pass a file name to load for the + # test. + with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir: + fd, tmp = mkstemp(suffix='.npz', dir=tmpdir) + os.close(fd) + np.savez(tmp, lab='place holder') + data = np.load(tmp) + fp = data.zip.fp + data.close() + assert_(fp.closed) + + +class TestSaveTxt(TestCase): + def test_array(self): + a = np.array([[1, 2], [3, 4]], float) + fmt = "%.18e" + c = BytesIO() + np.savetxt(c, a, fmt=fmt) + c.seek(0) + assert_equal(c.readlines(), + [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), + asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) + + a = np.array([[1, 2], [3, 4]], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_delimiter(self): + a = np.array([[1., 2.], [3., 4.]]) + c = BytesIO() + np.savetxt(c, a, delimiter=',', fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) + + def test_format(self): + a = np.array([(1, 2), (3, 4)]) + c = BytesIO() + # Sequence of formats + np.savetxt(c, a, fmt=['%02d', '%3.1f']) + c.seek(0) + assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) + + # A single multiformat string + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Specify delimiter, should be overiden + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Bad fmt, should raise a ValueError + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, a, fmt=99) + + def test_header_footer(self): + """ + Test the functionality of the header and footer keyword argument. 
+ """ + c = BytesIO() + a = np.array([(1, 2), (3, 4)], dtype=np.int) + test_header_footer = 'Test header / footer' + # Test the header keyword argument + np.savetxt(c, a, fmt='%1d', header=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) + # Test the footer keyword argument + c = BytesIO() + np.savetxt(c, a, fmt='%1d', footer=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) + # Test the commentstr keyword argument used on the header + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + header=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) + # Test the commentstr keyword argument used on the footer + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + footer=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) + + def test_file_roundtrip(self): + f, name = mkstemp() + os.close(f) + try: + a = np.array([(1, 2), (3, 4)]) + np.savetxt(name, a) + b = np.loadtxt(name) + assert_array_equal(a, b) + finally: + os.unlink(name) + + def test_complex_arrays(self): + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re + 1.0j * im + + # One format only + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', + b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) + + # One format for each real and imaginary part + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', + b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) + + # One format for each complex number + c = BytesIO() + np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', + b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) + + def test_custom_writer(self): + + class CustomWriter(list): + def write(self, text): + self.extend(text.split(b'\n')) + + w = CustomWriter() + a = np.array([(1, 2), (3, 4)]) + np.savetxt(w, a) + b = np.loadtxt(w) + assert_array_equal(a, b) + + +class TestLoadTxt(TestCase): + def test_record(self): + c = TextIO() + c.write('1 2\n3 4') + c.seek(0) + x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_array_equal(x, a) + + d = TextIO() + d.write('M 64.0 75.0\nF 25.0 60.0') + d.seek(0) + mydescriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + b = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=mydescriptor) + y = np.loadtxt(d, dtype=mydescriptor) + assert_array_equal(y, b) + + def test_array(self): + c = TextIO() + c.write('1 2\n3 4') + + c.seek(0) + x = np.loadtxt(c, dtype=np.int) + a = np.array([[1, 2], [3, 4]], int) + assert_array_equal(x, a) + + c.seek(0) + x = np.loadtxt(c, dtype=float) + a = np.array([[1, 2], [3, 4]], float) + assert_array_equal(x, a) + + def test_1D(self): + c = TextIO() + c.write('1\n2\n3\n4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + c = TextIO() + 
c.write('1,2,3,4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',') + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + def test_missing(self): + c = TextIO() + c.write('1,2,3,,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + a = np.array([1, 2, 3, -999, 5], int) + assert_array_equal(x, a) + + def test_converters_with_usecols(self): + c = TextIO() + c.write('1,2,3,,5\n6,7,8,9,10\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) + assert_array_equal(x, a) + + def test_comments(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_skiprows(self): + c = TextIO() + c.write('comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_usecols(self): + a = np.array([[1, 2], [3, 4]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1,)) + assert_array_equal(x, a[:, 1]) + + a = np.array([[1, 2, 3], [3, 4, 5]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) + + # Testing with arrays instead of tuples. + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) + + # Checking with dtypes defined converters. 
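+        # (usecols picks columns 0 and 2; each selected column takes its
+        # dtype from the zipped names/formats pairs below)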
+ data = '''JOE 70.1 25.3 + BOB 60.5 27.9 + ''' + c = TextIO(data) + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(arr['stid'], [b"JOE", b"BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) + + def test_fancy_dtype(self): + c = TextIO() + c.write('1,2,3.0\n4,5,6.0\n') + c.seek(0) + dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + x = np.loadtxt(c, dtype=dt, delimiter=',') + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) + assert_array_equal(x, a) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_3d_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + assert_array_equal(x, a) + + def test_empty_file(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", + message="loadtxt: Empty input file:") + c = TextIO() + x = np.loadtxt(c) + assert_equal(x.shape, (0,)) + x = np.loadtxt(c, dtype=np.int64) + assert_equal(x.shape, (0,)) + assert_(x.dtype == np.int64) + + def test_unused_converter(self): + c = TextIO() + c.writelines(['1 21\n', '3 42\n']) + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_array_equal(data, [21, 42]) + + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_array_equal(data, [33, 66]) + + def test_dtype_with_object(self): + "Test using an explicit dtype with an object" + from datetime import date + import time + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', np.object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + def test_uint64_type(self): + tgt = (9223372043271415339, 9223372043271415853) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.uint64) + assert_equal(res, tgt) + + def test_int64_type(self): + tgt = (-9223372036854775807, 9223372036854775807) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.int64) + assert_equal(res, tgt) + + def test_universal_newline(self): + f, name = mkstemp() + os.write(f, b'1 21\r3 42\r') + os.close(f) + + try: + data = np.loadtxt(name) + assert_array_equal(data, [[1, 21], [3, 42]]) + finally: + os.unlink(name) + + def test_empty_field_after_tab(self): + c = TextIO() + c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') + c.seek(0) + dt = {'names': ('x', 'y', 'z', 'comment'), + 'formats': ('= 3: + # python 3k is known to fail for '\r' + linesep = ('\n', '\r\n') + else: + linesep = ('\n', '\r\n', '\r') + + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + f, name = mkstemp() + # We can't use NamedTemporaryFile on windows, because we cannot + # reopen the file. 
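+            # (mkstemp returns a raw file descriptor plus a path that can
+            # be reopened by name, which avoids that limitation)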
+ try: + os.write(f, asbytes(data)) + assert_array_equal(np.genfromtxt(name), wanted) + finally: + os.close(f) + os.unlink(name) + + def test_gft_using_generator(self): + # gft doesn't work with unicode. + def count(): + for i in range(10): + yield asbytes("%d" % i) + + res = np.genfromtxt(count()) + assert_array_equal(res, np.arange(10)) + + +def test_gzip_load(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + +def test_gzip_loadtxt(): + # Thanks to another windows brokeness, we can't use + # NamedTemporaryFile: a file created from this function cannot be + # reopened by another open call. So we first put the gzipped string + # of the test reference array, write it to a securely opened file, + # which is then read from by the loadtxt function + s = BytesIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(b'1 2 3\n') + g.close() + s.seek(0) + + f, name = mkstemp(suffix='.gz') + try: + os.write(f, s.read()) + s.close() + assert_array_equal(np.loadtxt(name), [1, 2, 3]) + finally: + os.close(f) + os.unlink(name) + + +def test_gzip_loadtxt_from_string(): + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + f.write(b'1 2 3\n') + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.loadtxt(f), [1, 2, 3]) + + +def test_npzfile_dict(): + s = BytesIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert_('x' in z) + assert_('y' in z) + assert_('x' in z.keys()) + assert_('y' in z.keys()) + + for f, a in z.items(): + assert_(f in ['x', 'y']) + assert_equal(a.shape, (3, 3)) + + assert_(len(z.items()) == 2) + + for f in z: + assert_(f in ['x', 'y']) + + assert_('x' in z.keys()) + + +def test_load_refcount(): + # Check that objects returned by np.load are directly freed based on + # their refcount, rather than needing the gc to collect them. + + f = BytesIO() + np.savez(f, [1, 2, 3]) + f.seek(0) + + gc.collect() + n_before = len(gc.get_objects()) + np.load(f) + n_after = len(gc.get_objects()) + + assert_equal(n_before, n_after) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_nanfunctions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_nanfunctions.py new file mode 100644 index 0000000000000..3da6b51490f65 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_nanfunctions.py @@ -0,0 +1,758 @@ +from __future__ import division, absolute_import, print_function + +import warnings + +import numpy as np +from numpy.testing import ( + run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal, + assert_raises, assert_array_equal + ) + + +# Test data +_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], + [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], + [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], + [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) + + +# Rows of _ndat with nans removed +_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), + np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), + np.array([0.1042, -0.5954]), + np.array([0.1610, 0.1859, 0.3146])] + + +class TestNanFunctions_MinMax(TestCase): + + nanfuncs = [np.nanmin, np.nanmax] + stdfuncs = [np.min, np.max] + + def test_mutation(self): + # Check that passed array is not modified. 
+        ndat = _ndat.copy()
+        for f in self.nanfuncs:
+            f(ndat)
+        assert_equal(ndat, _ndat)
+
+    def test_keepdims(self):
+        mat = np.eye(3)
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            for axis in [None, 0, 1]:
+                tgt = rf(mat, axis=axis, keepdims=True)
+                res = nf(mat, axis=axis, keepdims=True)
+                assert_(res.ndim == tgt.ndim)
+
+    def test_out(self):
+        mat = np.eye(3)
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            resout = np.zeros(3)
+            tgt = rf(mat, axis=1)
+            res = nf(mat, axis=1, out=resout)
+            assert_almost_equal(res, resout)
+            assert_almost_equal(res, tgt)
+
+    def test_dtype_from_input(self):
+        codes = 'efdgFDG'
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            for c in codes:
+                mat = np.eye(3, dtype=c)
+                tgt = rf(mat, axis=1).dtype.type
+                res = nf(mat, axis=1).dtype.type
+                assert_(res is tgt)
+                # scalar case
+                tgt = rf(mat, axis=None).dtype.type
+                res = nf(mat, axis=None).dtype.type
+                assert_(res is tgt)
+
+    def test_result_values(self):
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            tgt = [rf(d) for d in _rdat]
+            res = nf(_ndat, axis=1)
+            assert_almost_equal(res, tgt)
+
+    def test_allnans(self):
+        mat = np.array([np.nan]*9).reshape(3, 3)
+        for f in self.nanfuncs:
+            for axis in [None, 0, 1]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_(np.isnan(f(mat, axis=axis)).all())
+                    assert_(len(w) == 1, 'no warning raised')
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+            # Check scalars
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                assert_(np.isnan(f(np.nan)))
+                assert_(len(w) == 1, 'no warning raised')
+                assert_(issubclass(w[0].category, RuntimeWarning))
+
+    def test_masked(self):
+        mat = np.ma.fix_invalid(_ndat)
+        msk = mat._mask.copy()
+        for f in [np.nanmin]:
+            res = f(mat, axis=1)
+            tgt = f(_ndat, axis=1)
+            assert_equal(res, tgt)
+            assert_equal(mat._mask, msk)
+            assert_(not np.isinf(mat).any())
+
+    def test_scalar(self):
+        for f in self.nanfuncs:
+            assert_(f(0.) == 0.)
+
+    def test_matrices(self):
+        # Check that it works and that type and
+        # shape are preserved
+        mat = np.matrix(np.eye(3))
+        for f in self.nanfuncs:
+            res = f(mat, axis=0)
+            assert_(isinstance(res, np.matrix))
+            assert_(res.shape == (1, 3))
+            res = f(mat, axis=1)
+            assert_(isinstance(res, np.matrix))
+            assert_(res.shape == (3, 1))
+            res = f(mat)
+            assert_(np.isscalar(res))
+        # check that rows of nan are dealt with for subclasses (#4628)
+        mat[1] = np.nan
+        for f in self.nanfuncs:
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                res = f(mat, axis=0)
+                assert_(isinstance(res, np.matrix))
+                assert_(not np.any(np.isnan(res)))
+                assert_(len(w) == 0)
+
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                res = f(mat, axis=1)
+                assert_(isinstance(res, np.matrix))
+                assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
+                        and not np.isnan(res[2, 0]))
+                assert_(len(w) == 1, 'no warning raised')
+                assert_(issubclass(w[0].category, RuntimeWarning))
+
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                res = f(mat)
+                assert_(np.isscalar(res))
+                # comparing against np.nan is always False; use isnan
+                assert_(not np.isnan(res))
+                assert_(len(w) == 0)
+
+
+class TestNanFunctions_ArgminArgmax(TestCase):
+
+    nanfuncs = [np.nanargmin, np.nanargmax]
+
+    def test_mutation(self):
+        # Check that passed array is not modified.
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_result_values(self): + for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): + for row in _ndat: + with warnings.catch_warnings(record=True): + warnings.simplefilter('always') + ind = f(row) + val = row[ind] + # comparing with NaN is tricky as the result + # is always false except for NaN != NaN + assert_(not np.isnan(val)) + assert_(not fcmp(val, row).any()) + assert_(not np.equal(val, row[:ind]).any()) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for f in self.nanfuncs: + for axis in [None, 0, 1]: + assert_raises(ValueError, f, mat, axis=axis) + assert_raises(ValueError, f, np.nan) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + assert_raises(ValueError, f, mat, axis=axis) + for axis in [1]: + res = f(mat, axis=axis) + assert_equal(res, np.zeros(0)) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_matrices(self): + # Check that it works and that type and + # shape are preserved + mat = np.matrix(np.eye(3)) + for f in self.nanfuncs: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + +class TestNanFunctions_IntTypes(TestCase): + + int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, + np.uint16, np.uint32, np.uint64) + + mat = np.array([127, 39, 93, 87, 46]) + + def integer_arrays(self): + for dtype in self.int_types: + yield self.mat.astype(dtype) + + def test_nanmin(self): + tgt = np.min(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmin(mat), tgt) + + def test_nanmax(self): + tgt = np.max(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmax(mat), tgt) + + def test_nanargmin(self): + tgt = np.argmin(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanargmin(mat), tgt) + + def test_nanargmax(self): + tgt = np.argmax(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanargmax(mat), tgt) + + def test_nansum(self): + tgt = np.sum(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nansum(mat), tgt) + + def test_nanmean(self): + tgt = np.mean(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmean(mat), tgt) + + def test_nanvar(self): + tgt = np.var(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanvar(mat), tgt) + + tgt = np.var(mat, ddof=1) + for mat in self.integer_arrays(): + assert_equal(np.nanvar(mat, ddof=1), tgt) + + def test_nanstd(self): + tgt = np.std(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanstd(mat), tgt) + + tgt = np.std(self.mat, ddof=1) + for mat in self.integer_arrays(): + assert_equal(np.nanstd(mat, ddof=1), tgt) + + +class TestNanFunctions_Sum(TestCase): + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + np.nansum(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.sum(mat, axis=axis, keepdims=True) + res = np.nansum(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + resout = np.zeros(3) + tgt = np.sum(mat, axis=1) + res = np.nansum(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + codes = 'efdgFDG' + for c in codes: + tgt = np.sum(mat, dtype=np.dtype(c), axis=1).dtype.type + res = np.nansum(mat, dtype=np.dtype(c), axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = np.sum(mat, dtype=np.dtype(c), axis=None).dtype.type + res = np.nansum(mat, dtype=np.dtype(c), axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_char(self): + mat = np.eye(3) + codes = 'efdgFDG' + for c in codes: + tgt = np.sum(mat, dtype=c, axis=1).dtype.type + res = np.nansum(mat, dtype=c, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = np.sum(mat, dtype=c, axis=None).dtype.type + res = np.nansum(mat, dtype=c, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for c in codes: + mat = np.eye(3, dtype=c) + tgt = np.sum(mat, axis=1).dtype.type + res = np.nansum(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = np.sum(mat, axis=None).dtype.type + res = np.nansum(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + tgt = [np.sum(d) for d in _rdat] + res = np.nansum(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_allnans(self): + # Check for FutureWarning + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = np.nansum([np.nan]*3, axis=None) + assert_(res == 0, 'result is not 0') + assert_(len(w) == 0, 'warning raised') + # Check scalar + res = np.nansum(np.nan) + assert_(res == 0, 'result is not 0') + assert_(len(w) == 0, 'warning raised') + # Check there is no warning for not all-nan + np.nansum([0]*3, axis=None) + assert_(len(w) == 0, 'unwanted warning raised') + + def test_empty(self): + mat = np.zeros((0, 3)) + tgt = [0]*3 + res = np.nansum(mat, axis=0) + assert_equal(res, tgt) + tgt = [] + res = np.nansum(mat, axis=1) + assert_equal(res, tgt) + tgt = 0 + res = np.nansum(mat, axis=None) + assert_equal(res, tgt) + + def test_scalar(self): + assert_(np.nansum(0.) == 0.) + + def test_matrices(self): + # Check that it works and that type and + # shape are preserved + mat = np.matrix(np.eye(3)) + res = np.nansum(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = np.nansum(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = np.nansum(mat) + assert_(np.isscalar(res)) + + +class TestNanFunctions_MeanVarStd(TestCase): + + nanfuncs = [np.nanmean, np.nanvar, np.nanstd] + stdfuncs = [np.mean, np.var, np.std] + + def test_mutation(self): + # Check that passed array is not modified. 
+        ndat = _ndat.copy()
+        for f in self.nanfuncs:
+            f(ndat)
+        assert_equal(ndat, _ndat)
+
+    def test_dtype_error(self):
+        for f in self.nanfuncs:
+            for dtype in [np.bool_, np.int_, np.object]:
+                assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
+
+    def test_out_dtype_error(self):
+        for f in self.nanfuncs:
+            for dtype in [np.bool_, np.int_, np.object]:
+                out = np.empty(_ndat.shape[0], dtype=dtype)
+                assert_raises(TypeError, f, _ndat, axis=1, out=out)
+
+    def test_keepdims(self):
+        mat = np.eye(3)
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            for axis in [None, 0, 1]:
+                tgt = rf(mat, axis=axis, keepdims=True)
+                res = nf(mat, axis=axis, keepdims=True)
+                assert_(res.ndim == tgt.ndim)
+
+    def test_out(self):
+        mat = np.eye(3)
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            resout = np.zeros(3)
+            tgt = rf(mat, axis=1)
+            res = nf(mat, axis=1, out=resout)
+            assert_almost_equal(res, resout)
+            assert_almost_equal(res, tgt)
+
+    def test_dtype_from_dtype(self):
+        mat = np.eye(3)
+        codes = 'efdgFDG'
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            for c in codes:
+                tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
+                res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
+                assert_(res is tgt)
+                # scalar case
+                tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
+                res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
+                assert_(res is tgt)
+
+    def test_dtype_from_char(self):
+        mat = np.eye(3)
+        codes = 'efdgFDG'
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            for c in codes:
+                tgt = rf(mat, dtype=c, axis=1).dtype.type
+                res = nf(mat, dtype=c, axis=1).dtype.type
+                assert_(res is tgt)
+                # scalar case
+                tgt = rf(mat, dtype=c, axis=None).dtype.type
+                res = nf(mat, dtype=c, axis=None).dtype.type
+                assert_(res is tgt)
+
+    def test_dtype_from_input(self):
+        codes = 'efdgFDG'
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            for c in codes:
+                mat = np.eye(3, dtype=c)
+                tgt = rf(mat, axis=1).dtype.type
+                res = nf(mat, axis=1).dtype.type
+                assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
+                # scalar case
+                tgt = rf(mat, axis=None).dtype.type
+                res = nf(mat, axis=None).dtype.type
+                assert_(res is tgt)
+
+    def test_ddof(self):
+        nanfuncs = [np.nanvar, np.nanstd]
+        stdfuncs = [np.var, np.std]
+        for nf, rf in zip(nanfuncs, stdfuncs):
+            for ddof in [0, 1]:
+                tgt = [rf(d, ddof=ddof) for d in _rdat]
+                res = nf(_ndat, axis=1, ddof=ddof)
+                assert_almost_equal(res, tgt)
+
+    def test_ddof_too_big(self):
+        nanfuncs = [np.nanvar, np.nanstd]
+        stdfuncs = [np.var, np.std]
+        dsize = [len(d) for d in _rdat]
+        for nf, rf in zip(nanfuncs, stdfuncs):
+            for ddof in range(5):
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    tgt = [ddof >= d for d in dsize]
+                    res = nf(_ndat, axis=1, ddof=ddof)
+                    assert_equal(np.isnan(res), tgt)
+                    if any(tgt):
+                        assert_(len(w) == 1)
+                        assert_(issubclass(w[0].category, RuntimeWarning))
+                    else:
+                        assert_(len(w) == 0)
+
+    def test_result_values(self):
+        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+            tgt = [rf(d) for d in _rdat]
+            res = nf(_ndat, axis=1)
+            assert_almost_equal(res, tgt)
+
+    def test_allnans(self):
+        mat = np.array([np.nan]*9).reshape(3, 3)
+        for f in self.nanfuncs:
+            for axis in [None, 0, 1]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_(np.isnan(f(mat, axis=axis)).all())
+                    assert_(len(w) == 1)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+                    # Check scalar
+                    assert_(np.isnan(f(np.nan)))
+                    assert_(len(w) == 2)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+
+
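The dtype and ddof contracts exercised by TestNanFunctions_MeanVarStd are easier to see in isolation. A minimal sketch of the underlying invariant, assuming NumPy 1.8 or later (where np.nanvar is available): a row mixing NaNs and numbers behaves like the NaN-stripped row, while an all-NaN row yields NaN plus a RuntimeWarning.

    import warnings
    import numpy as np

    x = np.array([[1.0, np.nan, 3.0],
                  [np.nan, np.nan, np.nan]])

    # nanvar skips NaNs row-wise: var([1, 3], ddof=1) == 2.0.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        res = np.nanvar(x, axis=1, ddof=1)

    assert res[0] == np.var([1.0, 3.0], ddof=1)  # 2.0
    assert np.isnan(res[1])                      # all-NaN row -> NaN
    assert any(issubclass(w.category, RuntimeWarning) for w in caught)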
def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_matrices(self): + # Check that it works and that type and + # shape are preserved + mat = np.matrix(np.eye(3)) + for f in self.nanfuncs: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + +class TestNanFunctions_Median(TestCase): + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + np.nanmedian(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) + res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', RuntimeWarning) + res = np.nanmedian(d, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanmedian(d, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.median(mat, axis=1) + res = np.nanmedian(nan_mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.median(mat, axis=None) + res = np.nanmedian(nan_mat, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_small_large(self): + # test the small and large code paths, current cutoff 400 elements + for s in [5, 20, 51, 200, 1000]: + d = np.random.randn(4, s) + # Randomly set some elements to NaN: + w = np.random.randint(0, d.size, size=d.size // 5) + d.ravel()[w] = np.nan + d[:,0] = 1. 
# ensure at least one good value + # use normal median without nans to compare + tgt = [] + for x in d: + nonan = np.compress(~np.isnan(x), x) + tgt.append(np.median(nonan, overwrite_input=True)) + + assert_array_equal(np.nanmedian(d, axis=-1), tgt) + + def test_result_values(self): + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for axis in [None, 0, 1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + if axis is None: + assert_(len(w) == 1) + else: + assert_(len(w) == 3) + assert_(issubclass(w[0].category, RuntimeWarning)) + # Check scalar + assert_(np.isnan(np.nanmedian(np.nan))) + if axis is None: + assert_(len(w) == 2) + else: + assert_(len(w) == 4) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_(np.nanmedian(0.) == 0.) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(IndexError, np.nanmedian, d, axis=-5) + assert_raises(IndexError, np.nanmedian, d, axis=(0, -5)) + assert_raises(IndexError, np.nanmedian, d, axis=4) + assert_raises(IndexError, np.nanmedian, d, axis=(0, 4)) + assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + + +class TestNanFunctions_Percentile(TestCase): + + def test_mutation(self): + # Check that passed array is not modified. 
+        ndat = _ndat.copy()
+        np.nanpercentile(ndat, 30)
+        assert_equal(ndat, _ndat)
+
+    def test_keepdims(self):
+        mat = np.eye(3)
+        for axis in [None, 0, 1]:
+            tgt = np.percentile(mat, 70, axis=axis, out=None,
+                                overwrite_input=False)
+            res = np.nanpercentile(mat, 70, axis=axis, out=None,
+                                   overwrite_input=False)
+            assert_(res.ndim == tgt.ndim)
+
+        d = np.ones((3, 5, 7, 11))
+        # Randomly set some elements to NaN:
+        w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+        w = w.astype(np.intp)
+        d[tuple(w)] = np.nan
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always', RuntimeWarning)
+            res = np.nanpercentile(d, 90, axis=None, keepdims=True)
+            assert_equal(res.shape, (1, 1, 1, 1))
+            res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
+            assert_equal(res.shape, (1, 1, 7, 11))
+            res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
+            assert_equal(res.shape, (1, 5, 7, 1))
+            res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
+            assert_equal(res.shape, (3, 1, 7, 11))
+            res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
+            assert_equal(res.shape, (1, 1, 1, 1))
+            res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
+            assert_equal(res.shape, (1, 1, 7, 1))
+
+    def test_out(self):
+        mat = np.random.rand(3, 3)
+        nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
+        resout = np.zeros(3)
+        tgt = np.percentile(mat, 42, axis=1)
+        res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
+        assert_almost_equal(res, resout)
+        assert_almost_equal(res, tgt)
+        # 0-d output:
+        resout = np.zeros(())
+        tgt = np.percentile(mat, 42, axis=None)
+        res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
+        assert_almost_equal(res, resout)
+        assert_almost_equal(res, tgt)
+        res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
+        assert_almost_equal(res, resout)
+        assert_almost_equal(res, tgt)
+
+    def test_result_values(self):
+        tgt = [np.percentile(d, 28) for d in _rdat]
+        res = np.nanpercentile(_ndat, 28, axis=1)
+        assert_almost_equal(res, tgt)
+        tgt = [np.percentile(d, (28, 98)) for d in _rdat]
+        res = np.nanpercentile(_ndat, (28, 98), axis=1)
+        assert_almost_equal(res, tgt)
+
+    def test_allnans(self):
+        mat = np.array([np.nan]*9).reshape(3, 3)
+        for axis in [None, 0, 1]:
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all())
+                if axis is None:
+                    assert_(len(w) == 1)
+                else:
+                    assert_(len(w) == 3)
+                assert_(issubclass(w[0].category, RuntimeWarning))
+                # Check scalar
+                assert_(np.isnan(np.nanpercentile(np.nan, 60)))
+                if axis is None:
+                    assert_(len(w) == 2)
+                else:
+                    assert_(len(w) == 4)
+                assert_(issubclass(w[0].category, RuntimeWarning))
+
+    def test_empty(self):
+        mat = np.zeros((0, 3))
+        for axis in [0, None]:
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
+                assert_(len(w) == 1)
+                assert_(issubclass(w[0].category, RuntimeWarning))
+        for axis in [1]:
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                assert_equal(np.nanpercentile(mat, 40, axis=axis),
+                             np.zeros([]))
+                assert_(len(w) == 0)
+
+    def test_scalar(self):
+        assert_(np.nanpercentile(0., 100) == 0.)
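The invariant behind test_result_values above can be stated directly: np.nanpercentile of a row agrees with np.percentile of the same row with NaNs removed. A short self-contained check using only public NumPy API (the sample row mirrors the _ndat fixture):

    import numpy as np

    row = np.array([0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170])
    clean = row[~np.isnan(row)]

    # nanpercentile(row, q) should match percentile(clean, q) for any q.
    for q in (28, 60, 98):
        assert np.isclose(np.nanpercentile(row, q), np.percentile(clean, q))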
+ + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(IndexError, np.nanpercentile, d, q=5, axis=-5) + assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, -5)) + assert_raises(IndexError, np.nanpercentile, d, q=5, axis=4) + assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4)) + assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_polynomial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_polynomial.py new file mode 100644 index 0000000000000..02faa02839230 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_polynomial.py @@ -0,0 +1,177 @@ +from __future__ import division, absolute_import, print_function + +''' +>>> p = np.poly1d([1.,2,3]) +>>> p +poly1d([ 1., 2., 3.]) +>>> print(p) + 2 +1 x + 2 x + 3 +>>> q = np.poly1d([3.,2,1]) +>>> q +poly1d([ 3., 2., 1.]) +>>> print(q) + 2 +3 x + 2 x + 1 +>>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j])) + 3 2 +(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j) +>>> print(np.poly1d([-3, -2, -1])) + 2 +-3 x - 2 x - 1 + +>>> p(0) +3.0 +>>> p(5) +38.0 +>>> q(0) +1.0 +>>> q(5) +86.0 + +>>> p * q +poly1d([ 3., 8., 14., 8., 3.]) +>>> p / q +(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667])) +>>> p + q +poly1d([ 4., 4., 4.]) +>>> p - q +poly1d([-2., 0., 2.]) +>>> p ** 4 +poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.]) + +>>> p(q) +poly1d([ 9., 12., 16., 8., 6.]) +>>> q(p) +poly1d([ 3., 12., 32., 40., 34.]) + +>>> np.asarray(p) +array([ 1., 2., 3.]) +>>> len(p) +2 + +>>> p[0], p[1], p[2], p[3] +(3.0, 2.0, 1.0, 0) + +>>> p.integ() +poly1d([ 0.33333333, 1. , 3. , 0. ]) +>>> p.integ(1) +poly1d([ 0.33333333, 1. , 3. , 0. ]) +>>> p.integ(5) +poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. , + 0. , 0. , 0. 
]) +>>> p.deriv() +poly1d([ 2., 2.]) +>>> p.deriv(2) +poly1d([ 2.]) + +>>> q = np.poly1d([1.,2,3], variable='y') +>>> print(q) + 2 +1 y + 2 y + 3 +>>> q = np.poly1d([1.,2,3], variable='lambda') +>>> print(q) + 2 +1 lambda + 2 lambda + 3 + +>>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1])) +(poly1d([ 1., -1.]), poly1d([ 0.])) + +''' +import numpy as np +from numpy.testing import ( + run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, + assert_almost_equal, rundocs + ) + + +class TestDocs(TestCase): + def test_doctests(self): + return rundocs() + + def test_roots(self): + assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + + def test_str_leading_zeros(self): + p = np.poly1d([4, 3, 2, 1]) + p[3] = 0 + assert_equal(str(p), + " 2\n" + "3 x + 2 x + 1") + + p = np.poly1d([1, 2]) + p[0] = 0 + p[1] = 0 + assert_equal(str(p), " \n0") + + def test_polyfit(self): + c = np.array([3., 2., 1.]) + x = np.linspace(0, 2, 7) + y = np.polyval(c, x) + err = [1, -1, 1, -1, 1, -1, 1] + weights = np.arange(8, 1, -1)**2/7.0 + + # check 1D case + m, cov = np.polyfit(x, y+err, 2, cov=True) + est = [3.8571, 0.2857, 1.619] + assert_almost_equal(est, m, decimal=4) + val0 = [[2.9388, -5.8776, 1.6327], + [-5.8776, 12.7347, -4.2449], + [1.6327, -4.2449, 2.3220]] + assert_almost_equal(val0, cov, decimal=4) + + m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) + assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) + val = [[8.7929, -10.0103, 0.9756], + [-10.0103, 13.6134, -1.8178], + [0.9756, -1.8178, 0.6674]] + assert_almost_equal(val, cov2, decimal=4) + + # check 2D (n,1) case + y = y[:, np.newaxis] + c = c[:, np.newaxis] + assert_almost_equal(c, np.polyfit(x, y, 2)) + # check 2D (n,2) case + yy = np.concatenate((y, y), axis=1) + cc = np.concatenate((c, c), axis=1) + assert_almost_equal(cc, np.polyfit(x, yy, 2)) + + m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) + assert_almost_equal(est, m[:, 0], decimal=4) + assert_almost_equal(est, m[:, 1], decimal=4) + assert_almost_equal(val0, cov[:, :, 0], decimal=4) + assert_almost_equal(val0, cov[:, :, 1], decimal=4) + + def test_objects(self): + from decimal import Decimal + p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) + p2 = p * Decimal('1.333333333333333') + assert_(p2[1] == Decimal("3.9999999999999990")) + p2 = p.deriv() + assert_(p2[1] == Decimal('8.0')) + p2 = p.integ() + assert_(p2[3] == Decimal("1.333333333333333333333333333")) + assert_(p2[2] == Decimal('1.5')) + assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + + def test_complex(self): + p = np.poly1d([3j, 2j, 1j]) + p2 = p.integ() + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) + p2 = p.deriv() + assert_((p2.coeffs == [6j, 2j]).all()) + + def test_integ_coeffs(self): + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + assert_( + (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) + + def test_zero_dims(self): + try: + np.poly(np.zeros((0, 0))) + except ValueError: + pass + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_recfunctions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_recfunctions.py new file mode 100644 index 0000000000000..51a2077eb0765 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_recfunctions.py @@ -0,0 +1,705 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.ma as ma +from numpy.ma.mrecords import MaskedRecords +from numpy.ma.testutils 
import ( + run_module_suite, TestCase, assert_, assert_equal + ) +from numpy.lib.recfunctions import ( + drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, + find_duplicates, merge_arrays, append_fields, stack_arrays, join_by + ) +get_names = np.lib.recfunctions.get_names +get_names_flat = np.lib.recfunctions.get_names_flat +zip_descr = np.lib.recfunctions.zip_descr + + +class TestRecFunctions(TestCase): + # Misc tests + + def setUp(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array([('A', 1.), ('B', 2.)], + dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_zip_descr(self): + # Test zip_descr + (w, x, y, z) = self.data + + # Std array + test = zip_descr((x, x), flatten=True) + assert_equal(test, + np.dtype([('', int), ('', int)])) + test = zip_descr((x, x), flatten=False) + assert_equal(test, + np.dtype([('', int), ('', int)])) + + # Std & flexible-dtype + test = zip_descr((x, z), flatten=True) + assert_equal(test, + np.dtype([('', int), ('A', '|S3'), ('B', float)])) + test = zip_descr((x, z), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('A', '|S3'), ('B', float)])])) + + # Standard & nested dtype + test = zip_descr((x, w), flatten=True) + assert_equal(test, + np.dtype([('', int), + ('a', int), + ('ba', float), ('bb', int)])) + test = zip_descr((x, w), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('a', int), + ('b', [('ba', float), ('bb', int)])])])) + + def test_drop_fields(self): + # Test drop_fields + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # A basic field + test = drop_fields(a, 'a') + control = np.array([((2, 3.0),), ((5, 6.0),)], + dtype=[('b', [('ba', float), ('bb', int)])]) + assert_equal(test, control) + + # Another basic field (but nesting two fields) + test = drop_fields(a, 'b') + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # A nested sub-field + test = drop_fields(a, ['ba', ]) + control = np.array([(1, (3.0,)), (4, (6.0,))], + dtype=[('a', int), ('b', [('bb', int)])]) + assert_equal(test, control) + + # All the nested sub-field from a field: zap that field + test = drop_fields(a, ['ba', 'bb']) + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + test = drop_fields(a, ['a', 'b']) + assert_(test is None) + + def test_rename_fields(self): + # Test rename fields + a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + dtype=[('a', int), + ('b', [('ba', float), ('bb', (float, 2))])]) + test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) + newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] + control = a.view(newdtype) + assert_equal(test.dtype, newdtype) + assert_equal(test, control) + + def test_get_names(self): + # Test get_names + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ('ba', 'bb')))) + + def test_get_names_flat(self): + # Test get_names_flat + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names_flat(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b', 'ba', 'bb')) 
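For readers new to numpy.lib.recfunctions, the helpers exercised by TestRecFunctions are small dtype-manipulation utilities. A brief usage sketch, with field names chosen to mirror the fixtures above:

    import numpy as np
    from numpy.lib import recfunctions as rfn

    a = np.array([(1, (2.0, 3)), (4, (5.0, 6))],
                 dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])

    # get_names keeps the nesting, get_names_flat flattens it.
    assert rfn.get_names(a.dtype) == ('a', ('b', ('ba', 'bb')))
    assert rfn.get_names_flat(a.dtype) == ('a', 'b', 'ba', 'bb')

    # drop_fields returns a new array without the (possibly nested) field.
    b = rfn.drop_fields(a, 'ba')
    assert rfn.get_names_flat(b.dtype) == ('a', 'b', 'bb')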
+ + def test_get_fieldstructure(self): + # Test get_fieldstructure + + # No nested fields + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': []}) + + # One 1-nested field + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) + + # One 2-nested fields + ndtype = np.dtype([('A', int), + ('B', [('BA', int), + ('BB', [('BBA', int), ('BBB', int)])])]) + test = get_fieldstructure(ndtype) + control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], + 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + assert_equal(test, control) + + def test_find_duplicates(self): + # Test find_duplicates + a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), + (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], + mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), + (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], + dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 2] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='A', return_index=True) + control = [0, 1, 2, 3, 5] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='B', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BA', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BB', return_index=True) + control = [0, 1, 2, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_find_duplicates_ignoremask(self): + # Test the ignoremask option of find_duplicates + ndtype = [('a', int)] + a = ma.array([1, 1, 1, 2, 2, 3, 3], + mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + test = find_duplicates(a, ignoremask=True, return_index=True) + control = [0, 1, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 1, 2, 3, 4, 6] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + +class TestRecursiveFillFields(TestCase): + # Test recursive_fill_fields. 
+ def test_simple_flexible(self): + # Test recursive_fill_fields on flexible-array + a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + b = np.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = np.array([(1, 10.), (2, 20.), (0, 0.)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + def test_masked_flexible(self): + # Test recursive_fill_fields on masked flexible-array + a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], + dtype=[('A', int), ('B', float)]) + b = ma.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = ma.array([(1, 10.), (2, 20.), (0, 0.)], + mask=[(0, 1), (1, 0), (0, 0)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + +class TestMergeArrays(TestCase): + # Test merge_arrays + + def setUp(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array( + [(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test merge_arrays on a single array. + (_, x, _, z) = self.data + + test = merge_arrays(x) + control = np.array([(1,), (2,)], dtype=[('f0', int)]) + assert_equal(test, control) + test = merge_arrays((x,)) + assert_equal(test, control) + + test = merge_arrays(z, flatten=False) + assert_equal(test, z) + test = merge_arrays(z, flatten=True) + assert_equal(test, z) + + def test_solo_w_flatten(self): + # Test merge_arrays on a single array w & w/o flattening + w = self.data[0] + test = merge_arrays(w, flatten=False) + assert_equal(test, w) + + test = merge_arrays(w, flatten=True) + control = np.array([(1, 2, 3.0), (4, 5, 6.0)], + dtype=[('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + def test_standard(self): + # Test standard & standard + # Test merge arrays + (_, x, y, _) = self.data + test = merge_arrays((x, y), usemask=False) + control = np.array([(1, 10), (2, 20), (-1, 30)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + + test = merge_arrays((x, y), usemask=True) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_flatten(self): + # Test standard & flexible + (_, x, _, z) = self.data + test = merge_arrays((x, z), flatten=True) + control = np.array([(1, 'A', 1.), (2, 'B', 2.)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + + test = merge_arrays((x, z), flatten=False) + control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], + dtype=[('f0', int), + ('f1', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + def test_flatten_wflexible(self): + # Test flatten standard & nested + (w, x, _, _) = self.data + test = merge_arrays((x, w), flatten=True) + control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], + dtype=[('f0', int), + ('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + test = merge_arrays((x, w), flatten=False) + controldtype = [('f0', int), + ('f1', [('a', int), + ('b', [('ba', float), ('bb', int)])])] + control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))], + dtype=controldtype) + assert_equal(test, control) + + def test_wmasked_arrays(self): + # Test merge_arrays masked arrays + (_, x, _, _) = self.data + mx = ma.array([1, 2, 3], mask=[1, 0, 0]) + test = merge_arrays((x, mx), usemask=True) + control = 
ma.array([(1, 1), (2, 2), (-1, 3)], + mask=[(0, 1), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + test = merge_arrays((x, mx), usemask=True, asrecarray=True) + assert_equal(test, control) + assert_(isinstance(test, MaskedRecords)) + + def test_w_singlefield(self): + # Test single field + test = merge_arrays((np.array([1, 2]).view([('a', int)]), + np.array([10., 20., 30.])),) + control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('a', int), ('f1', float)]) + assert_equal(test, control) + + def test_w_shorter_flex(self): + # Test merge_arrays w/ a shorter flexndarray. + z = self.data[-1] + + # Fixme, this test looks incomplete and broken + #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + # dtype=[('A', '|S3'), ('B', float), ('C', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes warnings about unused variables + merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + dtype=[('A', '|S3'), ('B', float), ('C', int)]) + + def test_singlerecord(self): + (_, x, y, z) = self.data + test = merge_arrays((x[0], y[0], z[0]), usemask=False) + control = np.array([(1, 10, ('A', 1))], + dtype=[('f0', int), + ('f1', int), + ('f2', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + +class TestAppendFields(TestCase): + # Test append_fields + + def setUp(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_append_single(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, 'A', data=[10, 20, 30]) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('A', int)],) + assert_equal(test, control) + + def test_append_double(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) + control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], + mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], + dtype=[('f0', int), ('A', int), ('B', int)],) + assert_equal(test, control) + + def test_append_on_flex(self): + # Test append_fields on flexible type arrays + z = self.data[-1] + test = append_fields(z, 'C', data=[10, 20, 30]) + control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], + mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('C', int)],) + assert_equal(test, control) + + def test_append_on_nested(self): + # Test append_fields on nested fields + w = self.data[0] + test = append_fields(w, 'C', data=[10, 20, 30]) + control = ma.array([(1, (2, 3.0), 10), + (4, (5, 6.0), 20), + (-1, (-1, -1.), 30)], + mask=[( + 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], + dtype=[('a', int), + ('b', [('ba', float), ('bb', int)]), + ('C', int)],) + assert_equal(test, control) + + +class TestStackArrays(TestCase): + # Test stack_arrays + def setUp(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test stack_arrays on 
single arrays + (_, x, _, _) = self.data + test = stack_arrays((x,)) + assert_equal(test, x) + self.assertTrue(test is x) + + test = stack_arrays(x) + assert_equal(test, x) + self.assertTrue(test is x) + + def test_unnamed_fields(self): + # Tests combinations of arrays w/o named fields + (_, x, y, _) = self.data + + test = stack_arrays((x, x), usemask=False) + control = np.array([1, 2, 1, 2]) + assert_equal(test, control) + + test = stack_arrays((x, y), usemask=False) + control = np.array([1, 2, 10, 20, 30]) + assert_equal(test, control) + + test = stack_arrays((y, x), usemask=False) + control = np.array([10, 20, 30, 1, 2]) + assert_equal(test, control) + + def test_unnamed_and_named_fields(self): + # Test combination of arrays w/ & w/o named fields + (_, x, _, z) = self.data + + test = stack_arrays((x, z)) + control = ma.array([(1, -1, -1), (2, -1, -1), + (-1, 'A', 1), (-1, 'B', 2)], + mask=[(0, 1, 1), (0, 1, 1), + (1, 0, 0), (1, 0, 0)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + + def test_matching_named_fields(self): + # Test combination of arrays w/ matching field names + (_, x, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + test = stack_arrays((z, zz)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, zz, x)) + ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] + control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), + ('a', 10., 100., -1), ('b', 20., 200., -1), + ('c', 30., 300., -1), + (-1, -1, -1, 1), (-1, -1, -1, 2)], + dtype=ndtype, + mask=[(0, 0, 1, 1), (0, 0, 1, 1), + (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), + (1, 1, 1, 0), (1, 1, 1, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_defaults(self): + # Test defaults: no exception raised if keys of defaults are not fields. 
+ (_, _, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} + test = stack_arrays((z, zz), defaults=defaults) + control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_autoconversion(self): + # Tests autoconversion + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + try: + test = stack_arrays((a, b), autoconvert=False) + except TypeError: + pass + else: + raise AssertionError + + def test_checktitles(self): + # Test using titles in the field names + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + +class TestJoinBy(TestCase): + def setUp(self): + self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + + def test_inner_join(self): + # Basic test of join_by + a, b = self.a, self.b + + test = join_by('a', a, b, jointype='inner') + control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), + (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), + (9, 59, 69, 109, 104)], + dtype=[('a', int), ('b1', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_join(self): + a, b = self.a, self.b + + # Fixme, this test is broken + #test = join_by(('a', 'b'), a, b) + #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), + # (7, 57, 107, 102), (8, 58, 108, 103), + # (9, 59, 109, 104)], + # dtype=[('a', int), ('b', int), + # ('c', int), ('d', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes unused variable warnings + join_by(('a', 'b'), a, b) + np.array([(5, 55, 105, 100), (6, 56, 106, 101), + (7, 57, 107, 102), (8, 58, 108, 103), + (9, 59, 109, 104)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + + def test_outer_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'outer') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (5, 65, -1, 100), (6, 56, 106, -1), + (6, 66, -1, 101), (7, 57, 107, -1), + (7, 67, -1, 102), (8, 58, 108, -1), + (8, 68, -1, 103), (9, 59, 109, -1), + (9, 69, -1, 104), (10, 70, -1, 105), + (11, 71, -1, 106), (12, 72, -1, 107), + (13, 73, -1, 108), (14, 74, -1, 109)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 
1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_leftouter_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'leftouter') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (6, 56, 106, -1), (7, 57, 107, -1), + (8, 58, 108, -1), (9, 59, 109, -1)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1)], + dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) + assert_equal(test, control) + + +class TestJoinBy2(TestCase): + @classmethod + def setUp(cls): + cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + + def test_no_r1postfix(self): + # Basic test of join_by no_r1postfix + a, b = self.a, self.b + + test = join_by( + 'a', a, b, r1postfix='', r2postfix='2', jointype='inner') + control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101), + (2, 52, 67, 102, 102), (3, 53, 68, 103, 103), + (4, 54, 69, 104, 104), (5, 55, 70, 105, 105), + (6, 56, 71, 106, 106), (7, 57, 72, 107, 107), + (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)], + dtype=[('a', int), ('b', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_no_postfix(self): + self.assertRaises(ValueError, join_by, 'a', self.a, self.b, + r1postfix='', r2postfix='') + + def test_no_r2postfix(self): + # Basic test of join_by no_r2postfix + a, b = self.a, self.b + + test = join_by( + 'a', a, b, r1postfix='1', r2postfix='', jointype='inner') + control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101), + (2, 52, 67, 102, 102), (3, 53, 68, 103, 103), + (4, 54, 69, 104, 104), (5, 55, 70, 105, 105), + (6, 56, 71, 106, 106), (7, 57, 72, 107, 107), + (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)], + dtype=[('a', int), ('b1', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_two_keys_two_vars(self): + a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), + np.arange(50, 60), np.arange(10, 20))), + dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) + + b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), + np.arange(65, 75), np.arange(0, 10))), + dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) + + control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1), + (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3), + (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5), + (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7), + (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)], + dtype=[('k', int), ('a', int), ('b1', int), + ('b2', int), ('c1', int), ('c2', int)]) + test = join_by( + ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner') + assert_equal(test.dtype, control.dtype) + assert_equal(test, control) + + +if __name__ == '__main__': + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_regression.py 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_regression.py new file mode 100644 index 0000000000000..00fa3f195a5d5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_regression.py @@ -0,0 +1,265 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys + +import numpy as np +from numpy.testing import ( + run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, + assert_array_almost_equal, assert_raises + ) +from numpy.testing.utils import _assert_valid_refcount +from numpy.compat import unicode + +rlevel = 1 + + +class TestRegression(TestCase): + def test_poly1d(self, level=rlevel): + # Ticket #28 + assert_equal(np.poly1d([1]) - np.poly1d([1, 0]), + np.poly1d([-1, 1])) + + def test_cov_parameters(self, level=rlevel): + # Ticket #91 + x = np.random.random((3, 3)) + y = x.copy() + np.cov(x, rowvar=1) + np.cov(y, rowvar=0) + assert_array_equal(x, y) + + def test_mem_digitize(self, level=rlevel): + # Ticket #95 + for i in range(100): + np.digitize([1, 2, 3, 4], [1, 3]) + np.digitize([0, 1, 2, 3, 4], [1, 3]) + + def test_unique_zero_sized(self, level=rlevel): + # Ticket #205 + assert_array_equal([], np.unique(np.array([]))) + + def test_mem_vectorise(self, level=rlevel): + # Ticket #325 + vt = np.vectorize(lambda *args: args) + vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2))) + vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, + 1, 2)), np.zeros((2, 2))) + + def test_mgrid_single_element(self, level=rlevel): + # Ticket #339 + assert_array_equal(np.mgrid[0:0:1j], [0]) + assert_array_equal(np.mgrid[0:0], []) + + def test_refcount_vectorize(self, level=rlevel): + # Ticket #378 + def p(x, y): + return 123 + v = np.vectorize(p) + _assert_valid_refcount(v) + + def test_poly1d_nan_roots(self, level=rlevel): + # Ticket #396 + p = np.poly1d([np.nan, np.nan, 1], r=0) + self.assertRaises(np.linalg.LinAlgError, getattr, p, "r") + + def test_mem_polymul(self, level=rlevel): + # Ticket #448 + np.polymul([], [1.]) + + def test_mem_string_concat(self, level=rlevel): + # Ticket #469 + x = np.array([]) + np.append(x, 'asdasd\tasdasd') + + def test_poly_div(self, level=rlevel): + # Ticket #553 + u = np.poly1d([1, 2, 3]) + v = np.poly1d([1, 2, 3, 4, 5]) + q, r = np.polydiv(u, v) + assert_equal(q*v + r, u) + + def test_poly_eq(self, level=rlevel): + # Ticket #554 + x = np.poly1d([1, 2, 3]) + y = np.poly1d([3, 4]) + assert_(x != y) + assert_(x == x) + + def test_mem_insert(self, level=rlevel): + # Ticket #572 + np.lib.place(1, 1, 1) + + def test_polyfit_build(self): + # Ticket #628 + ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01, + 9.95368241e+00, -3.14526520e+02] + x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176] + y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0, + 6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0, + 13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0, + 7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0, + 6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0, + 6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0, + 8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 
7.0] + tested = np.polyfit(x, y, 4) + assert_array_almost_equal(ref, tested) + + def test_polydiv_type(self): + # Make polydiv work for complex types + msg = "Wrong type, should be complex" + x = np.ones(3, dtype=np.complex) + q, r = np.polydiv(x, x) + assert_(q.dtype == np.complex, msg) + msg = "Wrong type, should be float" + x = np.ones(3, dtype=np.int) + q, r = np.polydiv(x, x) + assert_(q.dtype == np.float, msg) + + def test_histogramdd_too_many_bins(self): + # Ticket 928. + assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10) + + def test_polyint_type(self): + # Ticket #944 + msg = "Wrong type, should be complex" + x = np.ones(3, dtype=np.complex) + assert_(np.polyint(x).dtype == np.complex, msg) + msg = "Wrong type, should be float" + x = np.ones(3, dtype=np.int) + assert_(np.polyint(x).dtype == np.float, msg) + + def test_ndenumerate_crash(self): + # Ticket 1140 + # Shouldn't crash: + list(np.ndenumerate(np.array([[]]))) + + def test_asfarray_none(self, level=rlevel): + # Test for changeset r5065 + assert_array_equal(np.array([np.nan]), np.asfarray([None])) + + def test_large_fancy_indexing(self, level=rlevel): + # Large enough to fail on 64-bit. + nbits = np.dtype(np.intp).itemsize * 8 + thesize = int((2**nbits)**(1.0/5.0)+1) + + def dp(): + n = 3 + a = np.ones((n,)*5) + i = np.random.randint(0, n, size=thesize) + a[np.ix_(i, i, i, i, i)] = 0 + + def dp2(): + n = 3 + a = np.ones((n,)*5) + i = np.random.randint(0, n, size=thesize) + a[np.ix_(i, i, i, i, i)] + + self.assertRaises(ValueError, dp) + self.assertRaises(ValueError, dp2) + + def test_void_coercion(self, level=rlevel): + dt = np.dtype([('a', 'f4'), ('b', 'i4')]) + x = np.zeros((1,), dt) + assert_(np.r_[x, x].dtype == dt) + + def test_who_with_0dim_array(self, level=rlevel): + # ticket #1243 + import os + import sys + + oldstdout = sys.stdout + sys.stdout = open(os.devnull, 'w') + try: + try: + np.who({'foo': np.array(1)}) + except: + raise AssertionError("ticket #1243") + finally: + sys.stdout.close() + sys.stdout = oldstdout + + def test_include_dirs(self): + # As a sanity check, just test that get_include + # includes something reasonable. Somewhat + # related to ticket #1405. 
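+ # np.get_include() returns the directory that contains numpy's C headers
+ # (typically something like .../numpy/core/include; the exact path varies
+ # by install, so only the type and non-emptiness are checked below).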
+ include_dirs = [np.get_include()] + for path in include_dirs: + assert_(isinstance(path, (str, unicode))) + assert_(path != '') + + def test_polyder_return_type(self): + # Ticket #1249 + assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d)) + assert_(isinstance(np.polyder([1], 0), np.ndarray)) + assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d)) + assert_(isinstance(np.polyder([1], 1), np.ndarray)) + + def test_append_fields_dtype_list(self): + # Ticket #1676 + from numpy.lib.recfunctions import append_fields + + base = np.array([1, 2, 3], dtype=np.int32) + names = ['a', 'b', 'c'] + data = np.eye(3).astype(np.int32) + dlist = [np.float64, np.int32, np.int32] + try: + append_fields(base, names, data, dlist) + except: + raise AssertionError() + + def test_loadtxt_fields_subarrays(self): + # For ticket #1936 + if sys.version_info[0] >= 3: + from io import StringIO + else: + from StringIO import StringIO + + dt = [("a", 'u1', 2), ("b", 'u1', 2)] + x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) + + dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])] + x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt)) + + dt = [("a", 'u1', (2, 2))] + x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt)) + + dt = [("a", 'u1', (2, 3, 2))] + x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt) + data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)] + assert_equal(x, np.array(data, dtype=dt)) + + def test_nansum_with_boolean(self): + # gh-2978 + a = np.zeros(2, dtype=np.bool) + try: + np.nansum(a) + except: + raise AssertionError() + + def test_py3_compat(self): + # gh-2561 + # Test if the oldstyle class test is bypassed in python3 + class C(): + """Old-style class in python2, normal class in python3""" + pass + + out = open(os.devnull, 'w') + try: + np.info(C(), output=out) + except AttributeError: + raise AssertionError() + finally: + out.close() + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_shape_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_shape_base.py new file mode 100644 index 0000000000000..23f3edfbe2dca --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_shape_base.py @@ -0,0 +1,368 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.lib.shape_base import ( + apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit, + vsplit, dstack, kron, tile + ) +from numpy.testing import ( + run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, + assert_raises, assert_warns + ) + + +class TestApplyAlongAxis(TestCase): + def test_simple(self): + a = np.ones((20, 10), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + + def test_simple101(self, level=11): + a = np.ones((10, 101), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + + def test_3d(self): + a = np.arange(27).reshape((3, 3, 3)) + assert_array_equal(apply_along_axis(np.sum, 0, a), + [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) + + +class TestApplyOverAxes(TestCase): + def test_simple(self): + a = np.arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(np.sum, a, [0, 2]) + assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) + + +class TestArraySplit(TestCase): + def 
test_integer_0_split(self): + a = np.arange(10) + assert_raises(ValueError, array_split, a, 0) + + def test_integer_split(self): + a = np.arange(10) + res = array_split(a, 1) + desired = [np.arange(10)] + compare_results(res, desired) + + res = array_split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), + np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 5) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), + np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), + np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) + desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), + np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), + np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10), np.array([])] + compare_results(res, desired) + + def test_integer_split_2D_rows(self): + a = np.array([np.arange(10), np.arange(10)]) + res = assert_warns(FutureWarning, array_split, a, 3, axis=0) + + # After removing the FutureWarning, the last should be zeros((0, 10)) + desired = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.array([])] + compare_results(res, desired) + assert_(a.dtype.type is res[-1].dtype.type) + + def test_integer_split_2D_cols(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=-1) + desired = [np.array([np.arange(4), np.arange(4)]), + np.array([np.arange(4, 7), np.arange(4, 7)]), + np.array([np.arange(7, 10), np.arange(7, 10)])] + compare_results(res, desired) + + def test_integer_split_2D_default(self): + """ This will fail if we change default axis + """ + a = np.array([np.arange(10), np.arange(10)]) + res = assert_warns(FutureWarning, array_split, a, 3) + + # After removing the FutureWarning, the last should be zeros((0, 10)) + desired = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.array([])] + compare_results(res, desired) + assert_(a.dtype.type is res[-1].dtype.type) + # perhaps should check higher dimensions + + def test_index_split_simple(self): + a = np.arange(10) + indices = [1, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_low_bound(self): + a = np.arange(10) + 
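+ # A split index of 0 makes the first slice a[0:0], so the leading
+ # sub-array is expected to come back empty.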
indices = [0, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_high_bound(self): + a = np.arange(10) + indices = [0, 5, 7, 10, 12] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10), np.array([]), np.array([])] + compare_results(res, desired) + + +class TestSplit(TestCase): + # The split function is essentially the same as array_split, + # except that it tests if splitting will result in an + # equal split. Only test for this case. + + def test_equal_split(self): + a = np.arange(10) + res = split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + def test_unequal_split(self): + a = np.arange(10) + assert_raises(ValueError, split, a, 3) + + +class TestDstack(TestCase): + def test_0D_array(self): + a = np.array(1) + b = np.array(2) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = np.array([1]) + b = np.array([2]) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = np.array([[1], [2]]) + b = np.array([[1], [2]]) + res = dstack([a, b]) + desired = np.array([[[1, 1]], [[2, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = np.array([1, 2]) + b = np.array([1, 2]) + res = dstack([a, b]) + desired = np.array([[[1, 1], [2, 2]]]) + assert_array_equal(res, desired) + + +# array_split has more comprehensive tests of splitting. +# Only do simple tests on hsplit, vsplit, and dsplit. +class TestHsplit(TestCase): + """Only testing for integer splits. + + """ + def test_0D_array(self): + a = np.array(1) + try: + hsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + res = hsplit(a, 2) + desired = [np.array([1, 2]), np.array([3, 4])] + compare_results(res, desired) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = hsplit(a, 2) + desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] + compare_results(res, desired) + + +class TestVsplit(TestCase): + """Only testing for integer splits. + + """ + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + try: + vsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] + compare_results(res, desired) + + +class TestDsplit(TestCase): + # Only testing for integer splits.
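+ # dsplit always splits along the third axis, so 2-D input
+ # (as in test_2D_array below) must raise ValueError.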
+ + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + try: + dsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_3D_array(self): + a = np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) + + +class TestSqueeze(TestCase): + def test_basic(self): + from numpy.random import rand + + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) + assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) + assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) + + # Squeezing to 0-dim should still give an ndarray + a = [[[1.5]]] + res = np.squeeze(a) + assert_equal(res, 1.5) + assert_equal(res.ndim, 0) + assert_equal(type(res), np.ndarray) + + +class TestKron(TestCase): + def test_return_type(self): + a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(m, m)), np.matrix) + assert_equal(type(kron(a, m)), np.matrix) + assert_equal(type(kron(m, a)), np.matrix) + + class myarray(np.ndarray): + __array_priority__ = 0.0 + + ma = myarray(a.shape, a.dtype, a.data) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), np.ndarray) + assert_equal(type(kron(ma, a)), myarray) + + +class TestTile(TestCase): + def test_basic(self): + a = np.array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) + + def test_empty(self): + a = np.array([[[]]]) + d = tile(a, (3, 2, 5)).shape + assert_equal(d, (3, 2, 0)) + + def test_kroncompare(self): + from numpy.random import randint + + reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] + for s in shape: + b = randint(0, 10, size=s) + for r in reps: + a = np.ones(r, b.dtype) + large = tile(b, r) + klarge = kron(a, b) + assert_equal(large, klarge) + + +class TestMayShareMemory(TestCase): + def test_basic(self): + d = np.ones((50, 60)) + d2 = np.ones((30, 60, 6)) + self.assertTrue(np.may_share_memory(d, d)) + self.assertTrue(np.may_share_memory(d, d[::-1])) + self.assertTrue(np.may_share_memory(d, d[::2])) + self.assertTrue(np.may_share_memory(d, d[1:, ::-1])) + + self.assertFalse(np.may_share_memory(d[::-1], d2)) + self.assertFalse(np.may_share_memory(d[::2], d2)) + self.assertFalse(np.may_share_memory(d[1:, ::-1], d2)) + self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2)) + + +# Utility +def compare_results(res, desired): + for i in range(len(desired)): + assert_array_equal(res[i], desired[i]) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_stride_tricks.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_stride_tricks.py new file mode 100644 index 0000000000000..cd0973300052c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_stride_tricks.py @@ -0,0 +1,238 @@ +from __future__ import 
division, absolute_import, print_function + +import numpy as np +from numpy.testing import ( + run_module_suite, assert_equal, assert_array_equal, + assert_raises + ) +from numpy.lib.stride_tricks import as_strided, broadcast_arrays + + +def assert_shapes_correct(input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = broadcast_arrays(*inarrays) + outshapes = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + assert_equal(outshapes, expected) + + +def assert_incompatible_shapes_raise(input_shapes): + # Broadcast a list of arrays with the given (incompatible) input shapes + # and check that they raise a ValueError. + + inarrays = [np.zeros(s) for s in input_shapes] + assert_raises(ValueError, broadcast_arrays, *inarrays) + + +def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): + # Broadcast two shapes against each other and check that the data layout + # is the same as if a ufunc did the broadcasting. + + x0 = np.zeros(shape0, dtype=int) + # Note that multiply.reduce's identity element is 1.0, so when shape1==(), + # this gives the desired n==1. + n = int(np.multiply.reduce(shape1)) + x1 = np.arange(n).reshape(shape1) + if transposed: + x0 = x0.T + x1 = x1.T + if flipped: + x0 = x0[::-1] + x1 = x1[::-1] + # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the + # result should be exactly the same as the broadcasted view of x1. + y = x0 + x1 + b0, b1 = broadcast_arrays(x0, x1) + assert_array_equal(y, b1) + + +def test_same(): + x = np.arange(10) + y = np.arange(10) + bx, by = broadcast_arrays(x, y) + assert_array_equal(x, bx) + assert_array_equal(y, by) + + +def test_one_off(): + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) + bx, by = broadcast_arrays(x, y) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + by0 = bx0.T + assert_array_equal(bx0, bx) + assert_array_equal(by0, by) + + +def test_same_input_shapes(): + # Check that the final shape is just the input shape. + + data = [ + (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + assert_shapes_correct(input_shapes, shape) + # Double input. + input_shapes2 = [shape, shape] + assert_shapes_correct(input_shapes2, shape) + # Triple input. + input_shapes3 = [shape, shape, shape] + assert_shapes_correct(input_shapes3, shape) + + +def test_two_compatible_by_ones_input_shapes(): + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_two_compatible_by_prepending_ones_input_shapes(): + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. 
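+ # For example, (3,) against (3, 3) behaves like (1, 3) against (3, 3):
+ # the shorter shape is padded with leading 1s and then stretched,
+ # giving (3, 3).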
+ + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_incompatible_shapes_raise_valueerror(): + # Check that a ValueError is raised for incompatible shapes. + + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + ] + for input_shapes in data: + assert_incompatible_shapes_raise(input_shapes) + # Reverse the input shapes since broadcasting should be symmetric. + assert_incompatible_shapes_raise(input_shapes[::-1]) + + +def test_same_as_ufunc(): + # Check that the data layout is the same as if a ufunc did the operation. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_same_as_ufunc(input_shapes[0], input_shapes[1]) + # Reverse the input shapes since broadcasting should be symmetric. + assert_same_as_ufunc(input_shapes[1], input_shapes[0]) + # Try them transposed, too. + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) + # ... and flipped for non-rank-0 inputs in order to test negative + # strides.
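+ # Flipping with [::-1] requires at least one axis, so shape-() inputs
+ # are skipped here.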
+ if () not in input_shapes: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) + +def test_as_strided(): + a = np.array([None]) + a_view = as_strided(a) + expected = np.array([None]) + assert_array_equal(a_view, np.array([None])) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + expected = np.array([1, 3]) + assert_array_equal(a_view, expected) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) + expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert_array_equal(a_view, expected) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_twodim_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_twodim_base.py new file mode 100644 index 0000000000000..739061a5df49d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_twodim_base.py @@ -0,0 +1,504 @@ +"""Test functions for matrix module + +""" +from __future__ import division, absolute_import, print_function + +from numpy.testing import ( + TestCase, run_module_suite, assert_equal, assert_array_equal, + assert_array_max_ulp, assert_array_almost_equal, assert_raises, rand, + ) + +from numpy import ( + arange, rot90, add, fliplr, flipud, zeros, ones, eye, array, diag, + histogram2d, tri, mask_indices, triu_indices, triu_indices_from, + tril_indices, tril_indices_from, vander, + ) + +import numpy as np +from numpy.compat import asbytes_nested + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +class TestEye(TestCase): + def test_basic(self): + assert_equal(eye(4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + + assert_equal(eye(4, dtype='f'), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + + assert_equal(eye(3) == 1, + eye(3, dtype=bool)) + + def test_diag(self): + assert_equal(eye(4, k=1), + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, k=-1), + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_2d(self): + assert_equal(eye(4, 3), + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + + assert_equal(eye(3, 4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_diag2d(self): + assert_equal(eye(3, 4, k=2), + array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, 3, k=-2), + array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) + + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), + asbytes_nested([['1', ''], ['', '1']])) + + def test_bool(self): + assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + + +class TestDiag(TestCase): + def test_vector(self): + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) + for k in range(5): + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = zeros((7, 
7)) + c = b.copy() + for k in range(5): + b[k, k + 2] = vals[k] + c[k + 2, k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) + + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') + b = zeros((5,)) + for k in range(5): + b[k] = vals[k, k] + assert_equal(diag(vals), b) + b = b * 0 + for k in range(3): + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) + for k in range(3): + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + self.assertRaises(ValueError, diag, [[[1]]]) + + +class TestFliplr(TestCase): + def test_basic(self): + self.assertRaises(ValueError, fliplr, ones(4)) + a = get_mat(4) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) + + +class TestFlipud(TestCase): + def test_basic(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) + + +class TestRot90(TestCase): + def test_basic(self): + self.assertRaises(ValueError, rot90, ones(4)) + + a = [[0, 1, 2], + [3, 4, 5]] + b1 = [[2, 5], + [1, 4], + [0, 3]] + b2 = [[5, 4, 3], + [2, 1, 0]] + b3 = [[3, 0], + [4, 1], + [5, 2]] + b4 = [[0, 1, 2], + [3, 4, 5]] + + for k in range(-3, 13, 4): + assert_equal(rot90(a, k=k), b1) + for k in range(-2, 13, 4): + assert_equal(rot90(a, k=k), b2) + for k in range(-1, 13, 4): + assert_equal(rot90(a, k=k), b3) + for k in range(0, 13, 4): + assert_equal(rot90(a, k=k), b4) + + def test_axes(self): + a = ones((50, 40, 3)) + assert_equal(rot90(a).shape, (40, 50, 3)) + + +class TestHistogram2d(TestCase): + def test_simple(self): + x = array( + [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) + y = array( + [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) + H = histogram2d(x, y, (xedges, yedges))[0] + answer = array( + [[0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + assert_array_equal(H.T, answer) + H = histogram2d(x, y, xedges)[0] + assert_array_equal(H.T, answer) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) + + def test_asym(self): + x = array([1, 1, 2, 3, 4, 4, 4, 5]) + y = array([1, 3, 2, 0, 1, 2, 3, 4]) + H, xed, yed = histogram2d( + x, y, (6, 5), range=[[0, 6], [0, 5]], normed=True) + answer = array( + [[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + assert_array_almost_equal(H, answer/8., 3) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) + + def test_norm(self): + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + 
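+ # The third bin along each axis spans [3, 5) and is twice as wide as
+ # the others, so with normed=True its density is halved (hence the
+ # .5 and .25 entries in `answer` below).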
y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d( + x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True) + answer = array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]])/9. + assert_array_almost_equal(H, answer, 3) + + def test_all_outliers(self): + r = rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6 + H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) + assert_array_equal(H, 0) + + def test_empty(self): + a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, array([[0.]])) + + a, edge1, edge2 = histogram2d([], [], bins=4) + assert_array_max_ulp(a, np.zeros((4, 4))) + + +class TestTri(TestCase): + def test_dtype(self): + out = array([[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]) + assert_array_equal(tri(3), out) + assert_array_equal(tri(3, dtype=bool), out.astype(bool)) + + +def test_tril_triu_ndim2(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.ones((2, 2), dtype=dtype) + b = np.tril(a) + c = np.triu(a) + yield assert_array_equal, b, [[1, 0], [1, 1]] + yield assert_array_equal, c, b.T + # should return the same dtype as the original array + yield assert_equal, b.dtype, a.dtype + yield assert_equal, c.dtype, a.dtype + + +def test_tril_triu_ndim3(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.array([ + [[1, 1], [1, 1]], + [[1, 1], [1, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_tril_desired = np.array([ + [[1, 0], [1, 1]], + [[1, 0], [1, 0]], + [[1, 0], [0, 0]], + ], dtype=dtype) + a_triu_desired = np.array([ + [[1, 1], [0, 1]], + [[1, 1], [0, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_triu_observed = np.triu(a) + a_tril_observed = np.tril(a) + yield assert_array_equal, a_triu_observed, a_triu_desired + yield assert_array_equal, a_tril_observed, a_tril_desired + yield assert_equal, a_triu_observed.dtype, a.dtype + yield assert_equal, a_tril_observed.dtype, a.dtype + +def test_tril_triu_with_inf(): + # Issue 4859 + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3,3), dtype='f4,f4') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + +def test_mask_indices(): + # simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + yield (assert_array_equal, a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, k=2) + il3 = tril_indices(4, m=5) + il4 = tril_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # indexing: + yield (assert_array_equal, a[il1], + 
array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) + yield (assert_array_equal, b[il3], + array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) + + # And for assigning values: + a[il1] = -1 + yield (assert_array_equal, a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]])) + b[il3] = -1 + yield (assert_array_equal, b, + array([[-1, 2, 3, 4, 5], + [-1, -1, 8, 9, 10], + [-1, -1, -1, 14, 15], + [-1, -1, -1, -1, 20]])) + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + yield (assert_array_equal, a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]])) + b[il4] = -10 + yield (assert_array_equal, b, + array([[-10, -10, -10, 4, 5], + [-10, -10, -10, -10, 10], + [-10, -10, -10, -10, -10], + [-10, -10, -10, -10, -10]])) + + +class TestTriuIndices(object): + def test_triu_indices(self): + iu1 = triu_indices(4) + iu2 = triu_indices(4, k=2) + iu3 = triu_indices(4, m=5) + iu4 = triu_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # Both for indexing: + yield (assert_array_equal, a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + yield (assert_array_equal, b[iu3], + array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20])) + + # And for assigning values: + a[iu1] = -1 + yield (assert_array_equal, a, + array([[-1, -1, -1, -1], + [5, -1, -1, -1], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu3] = -1 + yield (assert_array_equal, b, + array([[-1, -1, -1, -1, -1], + [6, -1, -1, -1, -1], + [11, 12, -1, -1, -1], + [16, 17, 18, -1, -1]])) + + # These cover almost the whole array (two diagonals right of the + # main one): + a[iu2] = -10 + yield (assert_array_equal, a, + array([[-1, -1, -10, -10], + [5, -1, -1, -10], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu4] = -10 + yield (assert_array_equal, b, + array([[-1, -1, -10, -10, -10], + [6, -1, -1, -10, -10], + [11, 12, -1, -1, -10], + [16, 17, 18, -1, -1]])) + + +class TestTrilIndicesFrom(object): + def test_exceptions(self): + assert_raises(ValueError, tril_indices_from, np.ones((2,))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) + + +class TestTriuIndicesFrom(object): + def test_exceptions(self): + assert_raises(ValueError, triu_indices_from, np.ones((2,))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) + + +class TestVander(object): + def test_basic(self): + c = np.array([0, 1, -2, 3]) + v = vander(c) + powers = np.array([[0, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + [16, -8, 4, -2, 1], + [81, 27, 9, 3, 1]]) + # Check default value of N: + yield (assert_array_equal, v, powers[:, 1:]) + # Check a range of N values, including 0 and 5 (greater than default) + m = powers.shape[1] + for n in range(6): + v = vander(c, N=n) + yield (assert_array_equal, v, powers[:, m-n:m]) + + def test_dtypes(self): + c = array([11, -12, 13], dtype=np.int8) + v = vander(c) + expected = np.array([[121, 11, 1], + [144, -12, 1], + [169, 13, 1]]) + yield (assert_array_equal, v, expected) + + c = array([1.0+1j, 1.0-1j]) + v = vander(c, N=3) + expected = np.array([[2j, 1+1j, 1], + [-2j, 1-1j, 1]]) + # The data is floating point, but the values are small integers, + # so assert_array_equal *should* be safe here (rather than, say, + # assert_array_almost_equal). 
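+ # Worked check: (1+1j)**2 == 2j and (1-1j)**2 == -2j, matching the
+ # first (x**2) column of `expected`.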
+ yield (assert_array_equal, v, expected) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_type_check.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_type_check.py new file mode 100644 index 0000000000000..3931f95e5fb9d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_type_check.py @@ -0,0 +1,328 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.compat import long +from numpy.testing import ( + TestCase, assert_, assert_equal, assert_array_equal, run_module_suite + ) +from numpy.lib.type_check import ( + common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, + nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close + ) + + +def assert_all(x): + assert_(np.all(x), x) + + +class TestCommonType(TestCase): + def test_basic(self): + ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) + af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) + af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) + acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) + acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) + assert_(common_type(ai32) == np.float64) + assert_(common_type(af32) == np.float32) + assert_(common_type(af64) == np.float64) + assert_(common_type(acs) == np.csingle) + assert_(common_type(acd) == np.cdouble) + + +class TestMintypecode(TestCase): + + def test_default_1(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') + + def test_default_2(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype+'f'), 'f') + assert_equal(mintypecode(itype+'d'), 'd') + assert_equal(mintypecode(itype+'F'), 'F') + assert_equal(mintypecode(itype+'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') + #assert_equal(mintypecode('dF',savespace=1),'F') + assert_equal(mintypecode('dF'), 'D') + assert_equal(mintypecode('dD'), 'D') + assert_equal(mintypecode('Ff'), 'F') + #assert_equal(mintypecode('Fd',savespace=1),'F') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') + + def test_default_3(self): + assert_equal(mintypecode('fdF'), 'D') + #assert_equal(mintypecode('fdF',savespace=1),'F') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') + #assert_equal(mintypecode('idF',savespace=1),'F') + assert_equal(mintypecode('idD'), 'D') + + +class TestIsscalar(TestCase): + + def test_basic(self): + assert_(np.isscalar(3)) + assert_(not np.isscalar([3])) + assert_(not np.isscalar((3,))) + assert_(np.isscalar(3j)) + assert_(np.isscalar(long(10))) + assert_(np.isscalar(4.0)) + + +class TestReal(TestCase): + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(y, np.real(y)) + + def test_cmplx(self): + y = 
np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.real, np.real(y)) + + +class TestImag(TestCase): + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(0, np.imag(y)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.imag, np.imag(y)) + + +class TestIscomplex(TestCase): + + def test_fail(self): + z = np.array([-1, 0, 1]) + res = iscomplex(z) + assert_(not np.sometrue(res, axis=0)) + + def test_pass(self): + z = np.array([-1j, 1, 0]) + res = iscomplex(z) + assert_array_equal(res, [1, 0, 0]) + + +class TestIsreal(TestCase): + + def test_pass(self): + z = np.array([-1, 0, 1j]) + res = isreal(z) + assert_array_equal(res, [1, 1, 0]) + + def test_fail(self): + z = np.array([-1j, 1, 0]) + res = isreal(z) + assert_array_equal(res, [0, 1, 1]) + + +class TestIscomplexobj(TestCase): + + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(not iscomplexobj(z)) + z = np.array([-1j, 0, -1]) + assert_(iscomplexobj(z)) + + +class TestIsrealobj(TestCase): + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(isrealobj(z)) + z = np.array([-1j, 0, -1]) + assert_(not isrealobj(z)) + + +class TestIsnan(TestCase): + + def test_goodvalues(self): + z = np.array((-1., 0., 1.)) + res = np.isnan(z) == 0 + assert_all(np.all(res, axis=0)) + + def test_posinf(self): + with np.errstate(divide='ignore'): + assert_all(np.isnan(np.array((1.,))/0.) == 0) + + def test_neginf(self): + with np.errstate(divide='ignore'): + assert_all(np.isnan(np.array((-1.,))/0.) == 0) + + def test_ind(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isnan(np.array((0.,))/0.) == 1) + + def test_integer(self): + assert_all(np.isnan(1) == 0) + + def test_complex(self): + assert_all(np.isnan(1+1j) == 0) + + def test_complex1(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isnan(np.array(0+0j)/0.) == 1) + + +class TestIsfinite(TestCase): + # Fixme, wrong place, isfinite now ufunc + + def test_goodvalues(self): + z = np.array((-1., 0., 1.)) + res = np.isfinite(z) == 1 + assert_all(np.all(res, axis=0)) + + def test_posinf(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isfinite(np.array((1.,))/0.) == 0) + + def test_neginf(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isfinite(np.array((-1.,))/0.) == 0) + + def test_ind(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isfinite(np.array((0.,))/0.) == 0) + + def test_integer(self): + assert_all(np.isfinite(1) == 1) + + def test_complex(self): + assert_all(np.isfinite(1+1j) == 1) + + def test_complex1(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isfinite(np.array(1+1j)/0.) == 0) + + +class TestIsinf(TestCase): + # Fixme, wrong place, isinf now ufunc + + def test_goodvalues(self): + z = np.array((-1., 0., 1.)) + res = np.isinf(z) == 0 + assert_all(np.all(res, axis=0)) + + def test_posinf(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isinf(np.array((1.,))/0.) == 1) + + def test_posinf_scalar(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isinf(np.array(1.,)/0.) == 1) + + def test_neginf(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isinf(np.array((-1.,))/0.) == 1) + + def test_neginf_scalar(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isinf(np.array(-1.)/0.) 
== 1) + + def test_ind(self): + with np.errstate(divide='ignore', invalid='ignore'): + assert_all(np.isinf(np.array((0.,))/0.) == 0) + + +class TestIsposinf(TestCase): + + def test_generic(self): + with np.errstate(divide='ignore', invalid='ignore'): + vals = isposinf(np.array((-1., 0, 1))/0.) + assert_(vals[0] == 0) + assert_(vals[1] == 0) + assert_(vals[2] == 1) + + +class TestIsneginf(TestCase): + + def test_generic(self): + with np.errstate(divide='ignore', invalid='ignore'): + vals = isneginf(np.array((-1., 0, 1))/0.) + assert_(vals[0] == 1) + assert_(vals[1] == 0) + assert_(vals[2] == 0) + + +class TestNanToNum(TestCase): + + def test_generic(self): + with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1))/0.) + assert_all(vals[0] < -1e10) + assert_all(np.isfinite(vals[0])) + assert_(vals[1] == 0) + assert_all(vals[2] > 1e10) + assert_all(np.isfinite(vals[2])) + + def test_integer(self): + vals = nan_to_num(1) + assert_all(vals == 1) + + def test_complex_good(self): + vals = nan_to_num(1+1j) + assert_all(vals == 1+1j) + + def test_complex_bad(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(0+1.j)/0. + vals = nan_to_num(v) + # !! This is actually (unexpectedly) zero + assert_all(np.isfinite(vals)) + + def test_complex_bad2(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(-1+1.j)/0. + vals = nan_to_num(v) + assert_all(np.isfinite(vals)) + # Fixme + #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) + # !! This is actually (unexpectedly) positive + # !! inf. Comment out for now, and see if it + # !! changes + #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) + + +class TestRealIfClose(TestCase): + + def test_basic(self): + a = np.random.rand(10) + b = real_if_close(a+1e-15j) + assert_all(isrealobj(b)) + assert_array_equal(a, b) + b = real_if_close(a+1e-7j) + assert_all(iscomplexobj(b)) + b = real_if_close(a+1e-7j, tol=1e-6) + assert_all(isrealobj(b)) + + +class TestArrayConversion(TestCase): + + def test_asfarray(self): + a = asfarray(np.array([1, 2, 3])) + assert_equal(a.__class__, np.ndarray) + assert_(np.issubdtype(a.dtype, np.float)) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_ufunclike.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_ufunclike.py new file mode 100644 index 0000000000000..97d608ecfa801 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_ufunclike.py @@ -0,0 +1,65 @@ +from __future__ import division, absolute_import, print_function + +import numpy.core as nx +import numpy.lib.ufunclike as ufl +from numpy.testing import ( + run_module_suite, TestCase, assert_, assert_equal, assert_array_equal + ) + + +class TestUfunclike(TestCase): + + def test_isposinf(self): + a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) + out = nx.zeros(a.shape, bool) + tgt = nx.array([True, False, False, False, False, False]) + + res = ufl.isposinf(a) + assert_equal(res, tgt) + res = ufl.isposinf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + def test_isneginf(self): + a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) + out = nx.zeros(a.shape, bool) + tgt = nx.array([False, True, False, False, False, False]) + + res = ufl.isneginf(a) + assert_equal(res, tgt) + res = ufl.isneginf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + def test_fix(self): + a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0,
-1.1, -1.5, -1.8]]) + out = nx.zeros(a.shape, float) + tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) + + res = ufl.fix(a) + assert_equal(res, tgt) + res = ufl.fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(ufl.fix(3.14), 3) + + def test_fix_with_subclass(self): + class MyArray(nx.ndarray): + def __new__(cls, data, metadata=None): + res = nx.array(data, copy=True).view(cls) + res.metadata = metadata + return res + + def __array_wrap__(self, obj, context=None): + obj.metadata = self.metadata + return obj + + a = nx.array([1.1, -1.1]) + m = MyArray(a, metadata='foo') + f = ufl.fix(m) + assert_array_equal(f, nx.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_utils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_utils.py new file mode 100644 index 0000000000000..fcb37f98a3e72 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_utils.py @@ -0,0 +1,65 @@ +from __future__ import division, absolute_import, print_function + +import sys +from numpy.core import arange +from numpy.testing import ( + run_module_suite, assert_, assert_equal + ) +from numpy.lib import deprecate +import numpy.lib.utils as utils + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + + +def test_lookfor(): + out = StringIO() + utils.lookfor('eigenvalue', module='numpy', output=out, + import_modules=False) + out = out.getvalue() + assert_('numpy.linalg.eig' in out) + + +@deprecate +def old_func(self, x): + return x + + +@deprecate(message="Rather use new_func2") +def old_func2(self, x): + return x + + +def old_func3(self, x): + return x +new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3") + + +def test_deprecate_decorator(): + assert_('deprecated' in old_func.__doc__) + + +def test_deprecate_decorator_message(): + assert_('Rather use new_func2' in old_func2.__doc__) + + +def test_deprecate_fn(): + assert_('old_func3' in new_func3.__doc__) + assert_('new_func3' in new_func3.__doc__) + + +def test_safe_eval_nameconstant(): + # Test if safe_eval supports Python 3.4 _ast.NameConstant + utils.safe_eval('None') + + +def test_byte_bounds(): + a = arange(12).reshape(3, 4) + low, high = utils.byte_bounds(a) + assert_equal(high - low, a.size * a.itemsize) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/twodim_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/twodim_base.py new file mode 100644 index 0000000000000..40a140b6b09c5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/twodim_base.py @@ -0,0 +1,1003 @@ +""" Basic functions for manipulating 2d arrays + +""" +from __future__ import division, absolute_import, print_function + +from numpy.core.numeric import ( + asanyarray, arange, zeros, greater_equal, multiply, ones, asarray, + where, int8, int16, int32, int64, empty, promote_types + ) +from numpy.core import iinfo + + +__all__ = [ + 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu', + 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', + 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] + + +i1 = iinfo(int8) +i2 = iinfo(int16) +i4 = iinfo(int32) +def _min_int(low, high): + """ get small int that fits the range """ + if high <= i1.max and low >= i1.min: + return int8 + if high <= i2.max and low >= i2.min: + 
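+ # int8 spans [-128, 127], so e.g. _min_int(0, 1000) falls through
+ # to this int16 branch.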
return int16 + if high <= i4.max and low >= i4.min: + return int32 + return int64 + + +def fliplr(m): + """ + Flip array in the left/right direction. + + Flip the entries in each row in the left/right direction. + Columns are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array, must be at least 2-D. + + Returns + ------- + f : ndarray + A view of `m` with the columns reversed. Since a view + is returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + flipud : Flip array in the up/down direction. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``A[:,::-1]``. Requires the array to be at least 2-D. + + Examples + -------- + >>> A = np.diag([1.,2.,3.]) + >>> A + array([[ 1., 0., 0.], + [ 0., 2., 0.], + [ 0., 0., 3.]]) + >>> np.fliplr(A) + array([[ 0., 0., 1.], + [ 0., 2., 0.], + [ 3., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.fliplr(A)==A[:,::-1,...]) + True + + """ + m = asanyarray(m) + if m.ndim < 2: + raise ValueError("Input must be >= 2-d.") + return m[:, ::-1] + + +def flipud(m): + """ + Flip array in the up/down direction. + + Flip the entries in each column in the up/down direction. + Rows are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + out : array_like + A view of `m` with the rows reversed. Since a view is + returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + fliplr : Flip array in the left/right direction. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``A[::-1,...]``. + Does not require the array to be two-dimensional. + + Examples + -------- + >>> A = np.diag([1.0, 2, 3]) + >>> A + array([[ 1., 0., 0.], + [ 0., 2., 0.], + [ 0., 0., 3.]]) + >>> np.flipud(A) + array([[ 0., 0., 3.], + [ 0., 2., 0.], + [ 1., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.flipud(A)==A[::-1,...]) + True + + >>> np.flipud([1,2]) + array([2, 1]) + + """ + m = asanyarray(m) + if m.ndim < 1: + raise ValueError("Input must be >= 1-d.") + return m[::-1, ...] + + +def rot90(m, k=1): + """ + Rotate an array by 90 degrees in the counter-clockwise direction. + + The first two dimensions are rotated; therefore, the array must be at + least 2-D. + + Parameters + ---------- + m : array_like + Array of two or more dimensions. + k : integer + Number of times the array is rotated by 90 degrees. + + Returns + ------- + y : ndarray + Rotated array. + + See Also + -------- + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. + + Examples + -------- + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + + """ + m = asanyarray(m) + if m.ndim < 2: + raise ValueError("Input must be >= 2-d.") + k = k % 4 + if k == 0: + return m + elif k == 1: + return fliplr(m).swapaxes(0, 1) + elif k == 2: + return fliplr(flipud(m)) + else: + # k == 3 + return fliplr(m.swapaxes(0, 1)) + + +def eye(N, M=None, k=0, dtype=float): + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`.
+ k : int, optional + Index of the diagonal: 0 (the default) refers to the main diagonal, + a positive value refers to an upper diagonal, and a negative value + to a lower diagonal. + dtype : data-type, optional + Data-type of the returned array. + + Returns + ------- + I : ndarray of shape (N,M) + An array where all elements are equal to zero, except for the `k`-th + diagonal, whose values are equal to one. + + See Also + -------- + identity : (almost) equivalent function + diag : diagonal 2-D array from a 1-D array specified by the user. + + Examples + -------- + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[ 0., 1., 0.], + [ 0., 0., 1.], + [ 0., 0., 0.]]) + + """ + if M is None: + M = N + m = zeros((N, M), dtype=dtype) + if k >= M: + return m + if k >= 0: + i = k + else: + i = (-k) * M + m[:M-k].flat[i::M+1] = 1 + return m + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + See the more detailed documentation for ``numpy.diagonal`` if you use this + function to extract a diagonal and wish to write to the resulting array; + whether it returns a copy or a view depends on what version of numpy you + are using. + + Parameters + ---------- + v : array_like + If `v` is a 2-D array, return a copy of its `k`-th diagonal. + If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th + diagonal. + k : int, optional + Diagonal in question. The default is 0. Use `k>0` for diagonals + above the main diagonal, and `k<0` for diagonals below the main + diagonal. + + Returns + ------- + out : ndarray + The extracted diagonal or constructed diagonal array. + + See Also + -------- + diagonal : Return specified diagonals. + diagflat : Create a 2-D array with the flattened input as a diagonal. + trace : Sum along diagonals. + triu : Upper triangle of an array. + tril : Lower triangle of an array. + + Examples + -------- + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(x, k=1) + array([1, 5]) + >>> np.diag(x, k=-1) + array([3, 7]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + v = asarray(v) + s = v.shape + if len(s) == 1: + n = s[0]+abs(k) + res = zeros((n, n), v.dtype) + if k >= 0: + i = k + else: + i = (-k) * n + res[:n-k].flat[i::n+1] = v + return res + elif len(s) == 2: + return v.diagonal(k) + else: + raise ValueError("Input must be 1- or 2-d.") + + +def diagflat(v, k=0): + """ + Create a two-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set; 0, the default, corresponds to the "main" diagonal, + a positive (negative) `k` giving the number of the diagonal above + (below) the main. + + Returns + ------- + out : ndarray + The 2-D output array. + + See Also + -------- + diag : MATLAB work-alike for 1-D and 2-D arrays. + diagonal : Return specified diagonals. + trace : Sum along diagonals. 
+ + Examples + -------- + >>> np.diagflat([[1,2], [3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) + + """ + try: + wrap = v.__array_wrap__ + except AttributeError: + wrap = None + v = asarray(v).ravel() + s = len(v) + n = s + abs(k) + res = zeros((n, n), v.dtype) + if (k >= 0): + i = arange(0, n-k) + fi = i+k+i*n + else: + i = arange(0, n+k) + fi = i+(i-k)*n + res.flat[fi] = v + if not wrap: + return res + return wrap(res) + + +def tri(N, M=None, k=0, dtype=float): + """ + An array with ones at and below the given diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken equal to `N`. + k : int, optional + The sub-diagonal at and below which the array is filled. + `k` = 0 is the main diagonal, while `k` < 0 is below it, + and `k` > 0 is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is float. + + Returns + ------- + tri : ndarray of shape (N, M) + Array with its lower triangle filled with ones and zero elsewhere; + in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise. + + Examples + -------- + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[ 0., 0., 0., 0., 0.], + [ 1., 0., 0., 0., 0.], + [ 1., 1., 0., 0., 0.]]) + + """ + if M is None: + M = N + + m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), + arange(-k, M-k, dtype=_min_int(-k, M - k))) + + # Avoid making a copy if the requested type is already bool + m = m.astype(dtype, copy=False) + + return m + + +def tril(m, k=0): + """ + Lower triangle of an array. + + Return a copy of an array with elements above the `k`-th diagonal zeroed. + + Parameters + ---------- + m : array_like, shape (M, N) + Input array. + k : int, optional + Diagonal above which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + tril : ndarray, shape (M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu : same thing, only for the upper triangle + + Examples + -------- + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) + + +def triu(m, k=0): + """ + Upper triangle of an array. + + Return a copy of a matrix with the elements below the `k`-th diagonal + zeroed. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k-1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) + + +# Originally borrowed from John Hunter and matplotlib +def vander(x, N=None, increasing=False): + """ + Generate a Vandermonde matrix. + + The columns of the output matrix are powers of the input vector. The + order of the powers is determined by the `increasing` boolean argument. + Specifically, when `increasing` is False, the `i`-th output column is + the input vector raised element-wise to the power of ``N - i - 1``. 
Such + a matrix with a geometric progression in each row is named for Alexandre- + Theophile Vandermonde. + + Parameters + ---------- + x : array_like + 1-D input array. + N : int, optional + Number of columns in the output. If `N` is not specified, a square + array is returned (``N = len(x)``). + increasing : bool, optional + Order of the powers of the columns. If True, the powers increase + from left to right, if False (the default) they are reversed. + + .. versionadded:: 1.9.0 + + Returns + ------- + out : ndarray + Vandermonde matrix. If `increasing` is False, the first column is + ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is + True, the columns are ``x^0, x^1, ..., x^(N-1)``. + + See Also + -------- + polynomial.polynomial.polyvander + + Examples + -------- + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> x = np.array([1, 2, 3, 5]) + >>> np.vander(x) + array([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) + array([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + + The determinant of a square Vandermonde matrix is the product + of the differences between the values of the input vector: + + >>> np.linalg.det(np.vander(x)) + 48.000000000000043 + >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) + 48 + + """ + x = asarray(x) + if x.ndim != 1: + raise ValueError("x must be a one-dimensional array or sequence.") + if N is None: + N = len(x) + + v = empty((len(x), N), dtype=promote_types(x.dtype, int)) + tmp = v[:, ::-1] if not increasing else v + + if N > 0: + tmp[:, 0] = 1 + if N > 1: + tmp[:, 1:] = x[:, None] + multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) + + return v + + +def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): + """ + Compute the bi-dimensional histogram of two data samples. + + Parameters + ---------- + x : array_like, shape (N,) + An array containing the x coordinates of the points to be + histogrammed. + y : array_like, shape (N,) + An array containing the y coordinates of the points to be + histogrammed. + bins : int or [int, int] or array_like or [array, array], optional + The bin specification: + + * If int, the number of bins for the two dimensions (nx=ny=bins). + * If [int, int], the number of bins in each dimension + (nx, ny = bins). + * If array_like, the bin edges for the two dimensions + (x_edges=y_edges=bins). + * If [array, array], the bin edges in each dimension + (x_edges, y_edges = bins). + + range : array_like, shape(2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range + will be considered outliers and not tallied in the histogram. + normed : bool, optional + If False, returns the number of samples in each bin. If True, + returns the bin density ``bin_count / sample_count / bin_area``. + weights : array_like, shape(N,), optional + An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. + Weights are normalized to 1 if `normed` is True. If `normed` is + False, the values of the returned histogram are equal to the sum of + the weights belonging to the samples falling into each bin. 
+ + Returns + ------- + H : ndarray, shape(nx, ny) + The bi-dimensional histogram of samples `x` and `y`. Values in `x` + are histogrammed along the first dimension and values in `y` are + histogrammed along the second dimension. + xedges : ndarray, shape(nx,) + The bin edges along the first dimension. + yedges : ndarray, shape(ny,) + The bin edges along the second dimension. + + See Also + -------- + histogram : 1D histogram + histogramdd : Multidimensional histogram + + Notes + ----- + When `normed` is True, then the returned histogram is the sample + density, defined such that the sum over bins of the product + ``bin_value * bin_area`` is 1. + + Please note that the histogram does not follow the Cartesian convention + where `x` values are on the abscissa and `y` values on the ordinate + axis. Rather, `x` is histogrammed along the first dimension of the + array (vertical), and `y` along the second dimension of the array + (horizontal). This ensures compatibility with `histogramdd`. + + Examples + -------- + >>> import matplotlib as mpl + >>> import matplotlib.pyplot as plt + + Construct a 2D-histogram with variable bin width. First define the bin + edges: + + >>> xedges = [0, 1, 1.5, 3, 5] + >>> yedges = [0, 2, 3, 4, 6] + + Next we create a histogram H with random bin content: + + >>> x = np.random.normal(3, 1, 100) + >>> y = np.random.normal(1, 1, 100) + >>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges)) + + Or we fill the histogram H with a determined bin content: + + >>> H = np.ones((4, 4)).cumsum().reshape(4, 4) + >>> print H[::-1] # This shows the bin content in the order as plotted + [[ 13. 14. 15. 16.] + [ 9. 10. 11. 12.] + [ 5. 6. 7. 8.] + [ 1. 2. 3. 4.]] + + Imshow can only do an equidistant representation of bins: + + >>> fig = plt.figure(figsize=(7, 3)) + >>> ax = fig.add_subplot(131) + >>> ax.set_title('imshow: equidistant') + >>> im = plt.imshow(H, interpolation='nearest', origin='low', + extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) + + pcolormesh can display exact bin edges: + + >>> ax = fig.add_subplot(132) + >>> ax.set_title('pcolormesh: exact bin edges') + >>> X, Y = np.meshgrid(xedges, yedges) + >>> ax.pcolormesh(X, Y, H) + >>> ax.set_aspect('equal') + + NonUniformImage displays exact bin edges with interpolation: + + >>> ax = fig.add_subplot(133) + >>> ax.set_title('NonUniformImage: interpolated') + >>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear') + >>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1]) + >>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1]) + >>> im.set_data(xcenters, ycenters, H) + >>> ax.images.append(im) + >>> ax.set_xlim(xedges[0], xedges[-1]) + >>> ax.set_ylim(yedges[0], yedges[-1]) + >>> ax.set_aspect('equal') + >>> plt.show() + + """ + from numpy import histogramdd + + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + xedges = yedges = asarray(bins, float) + bins = [xedges, yedges] + hist, edges = histogramdd([x, y], bins, range, normed, weights) + return hist, edges[0], edges[1] + + +def mask_indices(n, mask_func, k=0): + """ + Return the indices to access (n, n) arrays, given a masking function. + + Assume `mask_func` is a function that, for a square array a of size + ``(n, n)`` with a possible offset argument `k`, when called as + ``mask_func(a, k)`` returns a new array with zeros in certain locations + (functions like `triu` or `tril` do precisely this). Then this function + returns the indices where the non-zero values would be located. 
+ + Parameters + ---------- + n : int + The returned indices will be valid to access arrays of shape (n, n). + mask_func : callable + A function whose call signature is similar to that of `triu`, `tril`. + That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. + `k` is an optional argument to the function. + k : scalar + An optional argument which is passed through to `mask_func`. Functions + like `triu`, `tril` take a second argument that is interpreted as an + offset. + + Returns + ------- + indices : tuple of arrays. + The `n` arrays of indices corresponding to the locations where + ``mask_func(np.ones((n, n)), k)`` is True. + + See Also + -------- + triu, tril, triu_indices, tril_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + These are the indices that would allow you to access the upper triangular + part of any 3x3 array: + + >>> iu = np.mask_indices(3, np.triu) + + For example, if `a` is a 3x3 array: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> a[iu] + array([0, 1, 2, 4, 5, 8]) + + An offset can be passed also to the masking function. This gets us the + indices starting on the first diagonal right of the main one: + + >>> iu1 = np.mask_indices(3, np.triu, 1) + + with which we now extract only three elements: + + >>> a[iu1] + array([1, 2, 5]) + + """ + m = ones((n, n), int) + a = mask_func(m, k) + return where(a != 0) + + +def tril_indices(n, k=0, m=None): + """ + Return the indices for the lower-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The row dimension of the arrays for which the returned + indices will be valid. + k : int, optional + Diagonal offset (see `tril` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple of arrays + The indices for the triangle. The returned tuple contains two arrays, + each with the indices along one dimension of the array. + + See also + -------- + triu_indices : similar function, for upper-triangular. + mask_indices : generic function accepting an arbitrary mask function. + tril, triu + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + lower triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> il1 = np.tril_indices(4) + >>> il2 = np.tril_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[il1] + array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) + + And for assigning values: + + >>> a[il1] = -1 + >>> a + array([[-1, 1, 2, 3], + [-1, -1, 6, 7], + [-1, -1, -1, 11], + [-1, -1, -1, -1]]) + + These cover almost the whole array (two diagonals right of the main one): + + >>> a[il2] = -10 + >>> a + array([[-10, -10, -10, 3], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]]) + + """ + return where(tri(n, m, k=k, dtype=bool)) + + +def tril_indices_from(arr, k=0): + """ + Return the indices for the lower-triangle of arr. + + See `tril_indices` for full details. + + Parameters + ---------- + arr : array_like + The indices will be valid for square arrays whose dimensions are + the same as arr. 
+ k : int, optional + Diagonal offset (see `tril` for details). + + See Also + -------- + tril_indices, tril + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) + + +def triu_indices(n, k=0, m=None): + """ + Return the indices for the upper-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The size of the arrays for which the returned indices will + be valid. + k : int, optional + Diagonal offset (see `triu` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple, shape(2) of ndarrays, shape(`n`) + The indices for the triangle. The returned tuple contains two arrays, + each with the indices along one dimension of the array. Can be used + to slice a ndarray of shape(`n`, `n`). + + See also + -------- + tril_indices : similar function, for lower-triangular. + mask_indices : generic function accepting an arbitrary mask function. + triu, tril + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + upper triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> iu1 = np.triu_indices(4) + >>> iu2 = np.triu_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[iu1] + array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) + + And for assigning values: + + >>> a[iu1] = -1 + >>> a + array([[-1, -1, -1, -1], + [ 4, -1, -1, -1], + [ 8, 9, -1, -1], + [12, 13, 14, -1]]) + + These cover only a small part of the whole array (two diagonals right + of the main one): + + >>> a[iu2] = -10 + >>> a + array([[ -1, -1, -10, -10], + [ 4, -1, -1, -10], + [ 8, 9, -1, -1], + [ 12, 13, 14, -1]]) + + """ + return where(~tri(n, m, k=k-1, dtype=bool)) + + +def triu_indices_from(arr, k=0): + """ + Return the indices for the upper-triangle of arr. + + See `triu_indices` for full details. + + Parameters + ---------- + arr : ndarray, shape(N, N) + The indices will be valid for square arrays. + k : int, optional + Diagonal offset (see `triu` for details). + + Returns + ------- + triu_indices_from : tuple, shape(2) of ndarray, shape(N) + Indices for the upper-triangle of `arr`. + + See Also + -------- + triu_indices, triu + + Notes + ----- + .. 
versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/type_check.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/type_check.py new file mode 100644 index 0000000000000..a45d0bd865c30 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/type_check.py @@ -0,0 +1,605 @@ +"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py + +""" +from __future__ import division, absolute_import, print_function + +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'asfarray', 'mintypecode', 'asscalar', + 'common_type'] + +import numpy.core.numeric as _nx +from numpy.core.numeric import asarray, asanyarray, array, isnan, \ + obj2sctype, zeros +from .ufunclike import isneginf, isposinf + +_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' + +def mintypecode(typechars,typeset='GDFgdf',default='d'): + """ + Return the character for the minimum-size type to which given types can + be safely cast. + + The returned type character must represent the smallest size dtype such + that an array of the returned type can handle the data from an array of + all types in `typechars` (or if `typechars` is an array, then its + dtype.char). + + Parameters + ---------- + typechars : list of str or array_like + If a list of strings, each string should represent a dtype. + If array_like, the character representation of the array dtype is used. + typeset : str or list of str, optional + The set of characters that the returned character is chosen from. + The default set is 'GDFgdf'. + default : str, optional + The default character, this is returned if none of the characters in + `typechars` matches a character in `typeset`. + + Returns + ------- + typechar : str + The character representing the minimum-size type that was found. + + See Also + -------- + dtype, sctype2char, maximum_sctype + + Examples + -------- + >>> np.mintypecode(['d', 'f', 'S']) + 'd' + >>> x = np.array([1.1, 2-3.j]) + >>> np.mintypecode(x) + 'D' + + >>> np.mintypecode('abceh', default='G') + 'G' + + """ + typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char + for t in typechars] + intersection = [t for t in typecodes if t in typeset] + if not intersection: + return default + if 'F' in intersection and 'd' in intersection: + return 'D' + l = [] + for t in intersection: + i = _typecodes_by_elsize.index(t) + l.append((i, t)) + l.sort() + return l[0][1] + +def asfarray(a, dtype=_nx.float_): + """ + Return an array converted to a float type. + + Parameters + ---------- + a : array_like + The input array. + dtype : str or dtype object, optional + Float type code to coerce input array `a`. If `dtype` is one of the + 'int' dtypes, it is replaced with float64. + + Returns + ------- + out : ndarray + The input `a` as a float ndarray. + + Examples + -------- + >>> np.asfarray([2, 3]) + array([ 2., 3.]) + >>> np.asfarray([2, 3], dtype='float') + array([ 2., 3.]) + >>> np.asfarray([2, 3], dtype='int8') + array([ 2., 3.]) + + """ + dtype = _nx.obj2sctype(dtype) + if not issubclass(dtype, _nx.inexact): + dtype = _nx.float_ + return asarray(a, dtype=dtype) + +def real(val): + """ + Return the real part of the elements of the array. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray + Output array. If `val` is real, the type of `val` is used for the + output. 
If `val` has complex elements, the returned type is float.
+
+    See Also
+    --------
+    real_if_close, imag, angle
+
+    Examples
+    --------
+    >>> a = np.array([1+2j, 3+4j, 5+6j])
+    >>> a.real
+    array([ 1.,  3.,  5.])
+    >>> a.real = 9
+    >>> a
+    array([ 9.+2.j,  9.+4.j,  9.+6.j])
+    >>> a.real = np.array([9, 8, 7])
+    >>> a
+    array([ 9.+2.j,  8.+4.j,  7.+6.j])
+
+    """
+    return asanyarray(val).real
+
+def imag(val):
+    """
+    Return the imaginary part of the elements of the array.
+
+    Parameters
+    ----------
+    val : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array. If `val` is real, the type of `val` is used for the
+        output. If `val` has complex elements, the returned type is float.
+
+    See Also
+    --------
+    real, angle, real_if_close
+
+    Examples
+    --------
+    >>> a = np.array([1+2j, 3+4j, 5+6j])
+    >>> a.imag
+    array([ 2.,  4.,  6.])
+    >>> a.imag = np.array([8, 10, 12])
+    >>> a
+    array([  1. +8.j,   3.+10.j,   5.+12.j])
+
+    """
+    return asanyarray(val).imag
+
+def iscomplex(x):
+    """
+    Returns a bool array, where True if the input element is complex.
+
+    What is tested is whether the input has a non-zero imaginary part, not if
+    the input type is complex.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray of bools
+        Output array.
+
+    See Also
+    --------
+    isreal
+    iscomplexobj : Return True if x is a complex type or an array of complex
+                   numbers.
+
+    Examples
+    --------
+    >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
+    array([ True, False, False, False, False,  True], dtype=bool)
+
+    """
+    ax = asanyarray(x)
+    if issubclass(ax.dtype.type, _nx.complexfloating):
+        return ax.imag != 0
+    res = zeros(ax.shape, bool)
+    return +res  # convert to array scalar if needed
+
+def isreal(x):
+    """
+    Returns a bool array, where True if the input element is real.
+
+    If the element has a complex type with zero imaginary part, the return
+    value for that element is True.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Boolean array of same shape as `x`.
+
+    See Also
+    --------
+    iscomplex
+    isrealobj : Return True if x is not a complex type.
+
+    Examples
+    --------
+    >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j])
+    array([False,  True,  True,  True,  True, False], dtype=bool)
+
+    """
+    return imag(x) == 0
+
+def iscomplexobj(x):
+    """
+    Check for a complex type or an array of complex numbers.
+
+    The type of the input is checked, not the value. Even if the input
+    has an imaginary part equal to zero, `iscomplexobj` evaluates to True.
+
+    Parameters
+    ----------
+    x : any
+        The input can be of any type and shape.
+
+    Returns
+    -------
+    iscomplexobj : bool
+        The return value, True if `x` is of a complex type or has at least
+        one complex element.
+
+    See Also
+    --------
+    isrealobj, iscomplex
+
+    Examples
+    --------
+    >>> np.iscomplexobj(1)
+    False
+    >>> np.iscomplexobj(1+0j)
+    True
+    >>> np.iscomplexobj([3, 1+0j, True])
+    True
+
+    """
+    return issubclass(asarray(x).dtype.type, _nx.complexfloating)
+
+def isrealobj(x):
+    """
+    Return True if x is neither a complex type nor an array of complex
+    numbers.
+
+    The type of the input is checked, not the value. So even if the input
+    has an imaginary part equal to zero, `isrealobj` evaluates to False
+    if the data type is complex.
+
+    Parameters
+    ----------
+    x : any
+        The input can be of any type and shape.
+
+    Returns
+    -------
+    y : bool
+        The return value, False if `x` is of a complex type.
+
+    See Also
+    --------
+    iscomplexobj, isreal
+
+    Examples
+    --------
+    >>> np.isrealobj(1)
+    True
+    >>> np.isrealobj(1+0j)
+    False
+    >>> np.isrealobj([3, 1+0j, True])
+    False
+
+    """
+    return not issubclass(asarray(x).dtype.type, _nx.complexfloating)
+
+#-----------------------------------------------------------------------------
+
+def _getmaxmin(t):
+    from numpy.core import getlimits
+    f = getlimits.finfo(t)
+    return f.max, f.min
+
+def nan_to_num(x):
+    """
+    Replace nan with zero and inf with finite numbers.
+
+    Returns an array or scalar replacing Not a Number (NaN) with zero,
+    (positive) infinity with a very large number and negative infinity
+    with a very small (or negative) number.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data.
+
+    Returns
+    -------
+    out : ndarray, float
+        Array with the same shape as `x` and dtype of the element in `x` with
+        the greatest precision. NaN is replaced by zero, and infinity
+        (-infinity) is replaced by the largest (smallest or most negative)
+        floating point value that fits in the output dtype. All finite numbers
+        are upcast to the output dtype (default float64).
+
+    See Also
+    --------
+    isinf : Shows which elements are positive or negative infinity.
+    isneginf : Shows which elements are negative infinity.
+    isposinf : Shows which elements are positive infinity.
+    isnan : Shows which elements are Not a Number (NaN).
+    isfinite : Shows which elements are finite (not NaN, not infinity)
+
+    Notes
+    -----
+    Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=8)
+    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+    >>> np.nan_to_num(x)
+    array([  1.79769313e+308,  -1.79769313e+308,   0.00000000e+000,
+            -1.28000000e+002,   1.28000000e+002])
+
+    """
+    try:
+        t = x.dtype.type
+    except AttributeError:
+        t = obj2sctype(type(x))
+    if issubclass(t, _nx.complexfloating):
+        return nan_to_num(x.real) + 1j * nan_to_num(x.imag)
+    else:
+        try:
+            y = x.copy()
+        except AttributeError:
+            y = array(x)
+        if not issubclass(t, _nx.integer):
+            if not y.shape:
+                y = array([x])
+                scalar = True
+            else:
+                scalar = False
+            are_inf = isposinf(y)
+            are_neg_inf = isneginf(y)
+            are_nan = isnan(y)
+            maxf, minf = _getmaxmin(y.dtype.type)
+            y[are_nan] = 0
+            y[are_inf] = maxf
+            y[are_neg_inf] = minf
+            if scalar:
+                y = y[0]
+    return y
+
+#-----------------------------------------------------------------------------
+
+def real_if_close(a,tol=100):
+    """
+    If input is complex with all imaginary parts close to zero, return
+    real parts.
+
+    "Close to zero" is defined as `tol` * (machine epsilon of the type for
+    `a`).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    tol : float
+        Tolerance in machine epsilons for the complex part of the elements
+        in the array.
+
+    Returns
+    -------
+    out : ndarray
+        If `a` is real, the type of `a` is used for the output. If `a`
+        has complex elements, the returned type is float.
+
+    See Also
+    --------
+    real, imag, angle
+
+    Notes
+    -----
+    Machine epsilon varies from machine to machine and between data types
+    but Python floats on most platforms have a machine epsilon equal to
+    2.2204460492503131e-16. You can use 'np.finfo(np.float).eps' to print
+    out the machine epsilon for floats.
+ + Examples + -------- + >>> np.finfo(np.float).eps + 2.2204460492503131e-16 + + >>> np.real_if_close([2.1 + 4e-14j], tol=1000) + array([ 2.1]) + >>> np.real_if_close([2.1 + 4e-13j], tol=1000) + array([ 2.1 +4.00000000e-13j]) + + """ + a = asanyarray(a) + if not issubclass(a.dtype.type, _nx.complexfloating): + return a + if tol > 1: + from numpy.core import getlimits + f = getlimits.finfo(a.dtype.type) + tol = f.eps * tol + if _nx.allclose(a.imag, 0, atol=tol): + a = a.real + return a + + +def asscalar(a): + """ + Convert an array of size 1 to its scalar equivalent. + + Parameters + ---------- + a : ndarray + Input array of size 1. + + Returns + ------- + out : scalar + Scalar representation of `a`. The output data type is the same type + returned by the input's `item` method. + + Examples + -------- + >>> np.asscalar(np.array([24])) + 24 + + """ + return a.item() + +#----------------------------------------------------------------------------- + +_namefromtype = {'S1': 'character', + '?': 'bool', + 'b': 'signed char', + 'B': 'unsigned char', + 'h': 'short', + 'H': 'unsigned short', + 'i': 'integer', + 'I': 'unsigned integer', + 'l': 'long integer', + 'L': 'unsigned long integer', + 'q': 'long long integer', + 'Q': 'unsigned long long integer', + 'f': 'single precision', + 'd': 'double precision', + 'g': 'long precision', + 'F': 'complex single precision', + 'D': 'complex double precision', + 'G': 'complex long double precision', + 'S': 'string', + 'U': 'unicode', + 'V': 'void', + 'O': 'object' + } + +def typename(char): + """ + Return a description for the given data type code. + + Parameters + ---------- + char : str + Data type code. + + Returns + ------- + out : str + Description of the input data type code. + + See Also + -------- + dtype, typecodes + + Examples + -------- + >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', + ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] + >>> for typechar in typechars: + ... print typechar, ' : ', np.typename(typechar) + ... + S1 : character + ? : bool + B : unsigned char + D : complex double precision + G : complex long double precision + F : complex single precision + I : unsigned integer + H : unsigned short + L : unsigned long integer + O : object + Q : unsigned long long integer + S : string + U : unicode + V : void + b : signed char + d : double precision + g : long precision + f : single precision + i : integer + h : short + l : long integer + q : long long integer + + """ + return _namefromtype[char] + +#----------------------------------------------------------------------------- + +#determine the "minimum common type" for a group of arrays. +array_type = [[_nx.single, _nx.double, _nx.longdouble], + [_nx.csingle, _nx.cdouble, _nx.clongdouble]] +array_precision = {_nx.single: 0, + _nx.double: 1, + _nx.longdouble: 2, + _nx.csingle: 0, + _nx.cdouble: 1, + _nx.clongdouble: 2} +def common_type(*arrays): + """ + Return a scalar type which is common to the input arrays. + + The return type will always be an inexact (i.e. floating point) scalar + type, even if all the arrays are integer arrays. If one of the inputs is + an integer array, the minimum precision type that is returned is a + 64-bit floating point dtype. + + All input arrays can be safely cast to the returned dtype without loss + of information. + + Parameters + ---------- + array1, array2, ... : ndarrays + Input arrays. + + Returns + ------- + out : data type code + Data type code. 
+
+    See Also
+    --------
+    dtype, mintypecode
+
+    Examples
+    --------
+    >>> np.common_type(np.arange(2, dtype=np.float32))
+    <type 'numpy.float32'>
+    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
+    <type 'numpy.float64'>
+    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+    <type 'numpy.complex128'>
+
+    """
+    is_complex = False
+    precision = 0
+    for a in arrays:
+        t = a.dtype.type
+        if iscomplexobj(a):
+            is_complex = True
+        if issubclass(t, _nx.integer):
+            p = 1
+        else:
+            p = array_precision.get(t, None)
+            if p is None:
+                raise TypeError("can't get common type for non-numeric array")
+        precision = max(precision, p)
+    if is_complex:
+        return array_type[1][precision]
+    else:
+        return array_type[0][precision]
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/ufunclike.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/ufunclike.py
new file mode 100644
index 0000000000000..e91f64d0ef927
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/ufunclike.py
@@ -0,0 +1,177 @@
+"""
+Module of functions that are like ufuncs in acting on arrays and optionally
+storing results in an output array.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['fix', 'isneginf', 'isposinf']
+
+import numpy.core.numeric as nx
+
+def fix(x, y=None):
+    """
+    Round to nearest integer towards zero.
+
+    Round an array of floats element-wise to nearest integer towards zero.
+    The rounded values are returned as floats.
+
+    Parameters
+    ----------
+    x : array_like
+        An array of floats to be rounded
+    y : ndarray, optional
+        Output array
+
+    Returns
+    -------
+    out : ndarray of floats
+        The array of rounded numbers
+
+    See Also
+    --------
+    trunc, floor, ceil
+    around : Round to given number of decimals
+
+    Examples
+    --------
+    >>> np.fix(3.14)
+    3.0
+    >>> np.fix(3)
+    3.0
+    >>> np.fix([2.1, 2.9, -2.1, -2.9])
+    array([ 2.,  2., -2., -2.])
+
+    """
+    x = nx.asanyarray(x)
+    y1 = nx.floor(x)
+    y2 = nx.ceil(x)
+    if y is None:
+        y = nx.asanyarray(y1)
+    y[...] = nx.where(x >= 0, y1, y2)
+    return y
+
+def isposinf(x, y=None):
+    """
+    Test element-wise for positive infinity, return result as bool array.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    y : array_like, optional
+        A boolean array with the same shape as `x` to store the result.
+
+    Returns
+    -------
+    y : ndarray
+        A boolean array with the same dimensions as the input.
+        If second argument is not supplied then a boolean array is returned
+        with values True where the corresponding element of the input is
+        positive infinity and values False where the element of the input is
+        not positive infinity.
+
+        If a second argument is supplied the result is stored there. If the
+        type of that array is a numeric type the result is represented as
+        zeros and ones, if the type is boolean then as False and True.
+        The return value `y` is then a reference to that array.
+
+    See Also
+    --------
+    isinf, isneginf, isfinite, isnan
+
+    Notes
+    -----
+    Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754).
+
+    Errors result if the second argument is also supplied when `x` is a
+    scalar input, or if first and second arguments have different shapes.
+ + Examples + -------- + >>> np.isposinf(np.PINF) + array(True, dtype=bool) + >>> np.isposinf(np.inf) + array(True, dtype=bool) + >>> np.isposinf(np.NINF) + array(False, dtype=bool) + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True], dtype=bool) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + if y is None: + x = nx.asarray(x) + y = nx.empty(x.shape, dtype=nx.bool_) + nx.logical_and(nx.isinf(x), ~nx.signbit(x), y) + return y + +def isneginf(x, y=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + y : array_like, optional + A boolean array with the same shape and type as `x` to store the + result. + + Returns + ------- + y : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `y` is then a reference to that array. + + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, or if first and second arguments have different shapes. + + Examples + -------- + >>> np.isneginf(np.NINF) + array(True, dtype=bool) + >>> np.isneginf(np.inf) + array(False, dtype=bool) + >>> np.isneginf(np.PINF) + array(False, dtype=bool) + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False], dtype=bool) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + if y is None: + x = nx.asarray(x) + y = nx.empty(x.shape, dtype=nx.bool_) + nx.logical_and(nx.isinf(x), nx.signbit(x), y) + return y diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/user_array.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/user_array.py new file mode 100644 index 0000000000000..bb5bec628f122 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/user_array.py @@ -0,0 +1,277 @@ +""" +Standard container-class for easy multiple-inheritance. +Try to inherit from the ndarray instead of using this class as this is not +complete. 
+ +""" +from __future__ import division, absolute_import, print_function + +from numpy.core import ( + array, asarray, absolute, add, subtract, multiply, divide, + remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, + bitwise_xor, invert, less, less_equal, not_equal, equal, greater, + greater_equal, shape, reshape, arange, sin, sqrt, transpose +) +from numpy.compat import long + + +class container(object): + + def __init__(self, data, dtype=None, copy=True): + self.array = array(data, dtype, copy=copy) + + def __repr__(self): + if len(self.shape) > 0: + return self.__class__.__name__ + repr(self.array)[len("array"):] + else: + return self.__class__.__name__ + "(" + repr(self.array) + ")" + + def __array__(self, t=None): + if t: + return self.array.astype(t) + return self.array + + # Array as sequence + def __len__(self): + return len(self.array) + + def __getitem__(self, index): + return self._rc(self.array[index]) + + def __getslice__(self, i, j): + return self._rc(self.array[i:j]) + + def __setitem__(self, index, value): + self.array[index] = asarray(value, self.dtype) + + def __setslice__(self, i, j, value): + self.array[i:j] = asarray(value, self.dtype) + + def __abs__(self): + return self._rc(absolute(self.array)) + + def __neg__(self): + return self._rc(-self.array) + + def __add__(self, other): + return self._rc(self.array + asarray(other)) + + __radd__ = __add__ + + def __iadd__(self, other): + add(self.array, other, self.array) + return self + + def __sub__(self, other): + return self._rc(self.array - asarray(other)) + + def __rsub__(self, other): + return self._rc(asarray(other) - self.array) + + def __isub__(self, other): + subtract(self.array, other, self.array) + return self + + def __mul__(self, other): + return self._rc(multiply(self.array, asarray(other))) + + __rmul__ = __mul__ + + def __imul__(self, other): + multiply(self.array, other, self.array) + return self + + def __div__(self, other): + return self._rc(divide(self.array, asarray(other))) + + def __rdiv__(self, other): + return self._rc(divide(asarray(other), self.array)) + + def __idiv__(self, other): + divide(self.array, other, self.array) + return self + + def __mod__(self, other): + return self._rc(remainder(self.array, other)) + + def __rmod__(self, other): + return self._rc(remainder(other, self.array)) + + def __imod__(self, other): + remainder(self.array, other, self.array) + return self + + def __divmod__(self, other): + return (self._rc(divide(self.array, other)), + self._rc(remainder(self.array, other))) + + def __rdivmod__(self, other): + return (self._rc(divide(other, self.array)), + self._rc(remainder(other, self.array))) + + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + + def __ipow__(self, other): + power(self.array, other, self.array) + return self + + def __lshift__(self, other): + return self._rc(left_shift(self.array, other)) + + def __rshift__(self, other): + return self._rc(right_shift(self.array, other)) + + def __rlshift__(self, other): + return self._rc(left_shift(other, self.array)) + + def __rrshift__(self, other): + return self._rc(right_shift(other, self.array)) + + def __ilshift__(self, other): + left_shift(self.array, other, self.array) + return self + + def __irshift__(self, other): + right_shift(self.array, other, self.array) + return self + + def __and__(self, other): + return self._rc(bitwise_and(self.array, other)) + + def __rand__(self, other): + 
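+        # Reflected operand (other & self): evaluate the same bitwise_and
+        # ufunc with the operands swapped and re-wrap the result.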
return self._rc(bitwise_and(other, self.array)) + + def __iand__(self, other): + bitwise_and(self.array, other, self.array) + return self + + def __xor__(self, other): + return self._rc(bitwise_xor(self.array, other)) + + def __rxor__(self, other): + return self._rc(bitwise_xor(other, self.array)) + + def __ixor__(self, other): + bitwise_xor(self.array, other, self.array) + return self + + def __or__(self, other): + return self._rc(bitwise_or(self.array, other)) + + def __ror__(self, other): + return self._rc(bitwise_or(other, self.array)) + + def __ior__(self, other): + bitwise_or(self.array, other, self.array) + return self + + def __pos__(self): + return self._rc(self.array) + + def __invert__(self): + return self._rc(invert(self.array)) + + def _scalarfunc(self, func): + if len(self.shape) == 0: + return func(self[0]) + else: + raise TypeError( + "only rank-0 arrays can be converted to Python scalars.") + + def __complex__(self): + return self._scalarfunc(complex) + + def __float__(self): + return self._scalarfunc(float) + + def __int__(self): + return self._scalarfunc(int) + + def __long__(self): + return self._scalarfunc(long) + + def __hex__(self): + return self._scalarfunc(hex) + + def __oct__(self): + return self._scalarfunc(oct) + + def __lt__(self, other): + return self._rc(less(self.array, other)) + + def __le__(self, other): + return self._rc(less_equal(self.array, other)) + + def __eq__(self, other): + return self._rc(equal(self.array, other)) + + def __ne__(self, other): + return self._rc(not_equal(self.array, other)) + + def __gt__(self, other): + return self._rc(greater(self.array, other)) + + def __ge__(self, other): + return self._rc(greater_equal(self.array, other)) + + def copy(self): + return self._rc(self.array.copy()) + + def tostring(self): + return self.array.tostring() + + def byteswap(self): + return self._rc(self.array.byteswap()) + + def astype(self, typecode): + return self._rc(self.array.astype(typecode)) + + def _rc(self, a): + if len(shape(a)) == 0: + return a + else: + return self.__class__(a) + + def __array_wrap__(self, *args): + return self.__class__(args[0]) + + def __setattr__(self, attr, value): + if attr == 'array': + object.__setattr__(self, attr, value) + return + try: + self.array.__setattr__(attr, value) + except AttributeError: + object.__setattr__(self, attr, value) + + # Only called after other approaches fail. + def __getattr__(self, attr): + if (attr == 'array'): + return object.__getattribute__(self, attr) + return self.array.__getattribute__(attr) + +############################################################# +# Test of class container +############################################################# +if __name__ == '__main__': + temp = reshape(arange(10000), (100, 100)) + + ua = container(temp) + # new object created begin test + print(dir(ua)) + print(shape(ua), ua.shape) # I have changed Numeric.py + + ua_small = ua[:3, :5] + print(ua_small) + # this did not change ua[0,0], which is not normal behavior + ua_small[0, 0] = 10 + print(ua_small[0, 0], ua[0, 0]) + print(sin(ua_small) / 3. * 6. 
+ sqrt(ua_small ** 2)) + print(less(ua_small, 103), type(less(ua_small, 103))) + print(type(ua_small * reshape(arange(15), shape(ua_small)))) + print(reshape(ua_small, (5, 3))) + print(transpose(ua_small)) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/utils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/utils.py new file mode 100644 index 0000000000000..df0052493da3f --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/utils.py @@ -0,0 +1,1176 @@ +from __future__ import division, absolute_import, print_function + +import os +import sys +import types +import re + +from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype +from numpy.core import ndarray, ufunc, asarray + +__all__ = [ + 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate', + 'deprecate_with_doc', 'get_include', 'info', 'source', 'who', + 'lookfor', 'byte_bounds', 'safe_eval' + ] + +def get_include(): + """ + Return the directory that contains the NumPy \\*.h header files. + + Extension modules that need to compile against NumPy should use this + function to locate the appropriate include directory. + + Notes + ----- + When using ``distutils``, for example in ``setup.py``. + :: + + import numpy as np + ... + Extension('extension_name', ... + include_dirs=[np.get_include()]) + ... + + """ + import numpy + if numpy.show_config is None: + # running from numpy source directory + d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include') + else: + # using installed numpy core headers + import numpy.core as core + d = os.path.join(os.path.dirname(core.__file__), 'include') + return d + + +def _set_function_name(func, name): + func.__name__ = name + return func + + +class _Deprecate(object): + """ + Decorator class to deprecate old functions. + + Refer to `deprecate` for details. + + See Also + -------- + deprecate + + """ + + def __init__(self, old_name=None, new_name=None, message=None): + self.old_name = old_name + self.new_name = new_name + self.message = message + + def __call__(self, func, *args, **kwargs): + """ + Decorator call. Refer to ``decorate``. + + """ + old_name = self.old_name + new_name = self.new_name + message = self.message + + import warnings + if old_name is None: + try: + old_name = func.__name__ + except AttributeError: + old_name = func.__name__ + if new_name is None: + depdoc = "`%s` is deprecated!" % old_name + else: + depdoc = "`%s` is deprecated, use `%s` instead!" % \ + (old_name, new_name) + + if message is not None: + depdoc += "\n" + message + + def newfunc(*args,**kwds): + """`arrayrange` is deprecated, use `arange` instead!""" + warnings.warn(depdoc, DeprecationWarning) + return func(*args, **kwds) + + newfunc = _set_function_name(newfunc, old_name) + doc = func.__doc__ + if doc is None: + doc = depdoc + else: + doc = '\n\n'.join([depdoc, doc]) + newfunc.__doc__ = doc + try: + d = func.__dict__ + except AttributeError: + pass + else: + newfunc.__dict__.update(d) + return newfunc + +def deprecate(*args, **kwargs): + """ + Issues a DeprecationWarning, adds warning to `old_name`'s + docstring, rebinds ``old_name.__name__`` and returns the new + function object. + + This function may also be used as a decorator. + + Parameters + ---------- + func : function + The function to be deprecated. + old_name : str, optional + The name of the function to be deprecated. Default is None, in + which case the name of `func` is used. + new_name : str, optional + The new name for the function. 
Default is None, in which case the + deprecation message is that `old_name` is deprecated. If given, the + deprecation message is that `old_name` is deprecated and `new_name` + should be used instead. + message : str, optional + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + old_func : function + The deprecated function. + + Examples + -------- + Note that ``olduint`` returns a value after printing Deprecation + Warning: + + >>> olduint = np.deprecate(np.uint) + >>> olduint(6) + /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114: + DeprecationWarning: uint32 is deprecated + warnings.warn(str1, DeprecationWarning) + 6 + + """ + # Deprecate may be run as a function or as a decorator + # If run as a function, we initialise the decorator class + # and execute its __call__ method. + + if args: + fn = args[0] + args = args[1:] + + # backward compatibility -- can be removed + # after next release + if 'newname' in kwargs: + kwargs['new_name'] = kwargs.pop('newname') + if 'oldname' in kwargs: + kwargs['old_name'] = kwargs.pop('oldname') + + return _Deprecate(*args, **kwargs)(fn) + else: + return _Deprecate(*args, **kwargs) + +deprecate_with_doc = lambda msg: _Deprecate(message=msg) + + +#-------------------------------------------- +# Determine if two arrays can share memory +#-------------------------------------------- + +def byte_bounds(a): + """ + Returns pointers to the end-points of an array. + + Parameters + ---------- + a : ndarray + Input array. It must conform to the Python-side of the array + interface. + + Returns + ------- + (low, high) : tuple of 2 integers + The first integer is the first byte of the array, the second + integer is just past the last byte of the array. If `a` is not + contiguous it will not use every byte between the (`low`, `high`) + values. + + Examples + -------- + >>> I = np.eye(2, dtype='f'); I.dtype + dtype('float32') + >>> low, high = np.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + >>> I = np.eye(2, dtype='G'); I.dtype + dtype('complex192') + >>> low, high = np.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + + """ + ai = a.__array_interface__ + a_data = ai['data'][0] + astrides = ai['strides'] + ashape = ai['shape'] + bytes_a = asarray(a).dtype.itemsize + + a_low = a_high = a_data + if astrides is None: + # contiguous case + a_high += a.size * bytes_a + else: + for shape, stride in zip(ashape, astrides): + if stride < 0: + a_low += (shape-1)*stride + else: + a_high += (shape-1)*stride + a_high += bytes_a + return a_low, a_high + + +#----------------------------------------------------------------------------- +# Function for output and information on the variables used. +#----------------------------------------------------------------------------- + + +def who(vardict=None): + """ + Print the Numpy arrays in the given dictionary. + + If there is no dictionary passed in or `vardict` is None then returns + Numpy arrays in the globals() dictionary (all Numpy arrays in the + namespace). + + Parameters + ---------- + vardict : dict, optional + A dictionary possibly containing ndarrays. Default is globals(). + + Returns + ------- + out : None + Returns 'None'. + + Notes + ----- + Prints out the name, shape, bytes and type of all of the ndarrays + present in `vardict`. 
+ + Examples + -------- + >>> a = np.arange(10) + >>> b = np.ones(20) + >>> np.who() + Name Shape Bytes Type + =========================================================== + a 10 40 int32 + b 20 160 float64 + Upper bound on total bytes = 200 + + >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', + ... 'idx':5} + >>> np.who(d) + Name Shape Bytes Type + =========================================================== + y 3 24 float64 + x 2 16 float64 + Upper bound on total bytes = 40 + + """ + if vardict is None: + frame = sys._getframe().f_back + vardict = frame.f_globals + sta = [] + cache = {} + for name in vardict.keys(): + if isinstance(vardict[name], ndarray): + var = vardict[name] + idv = id(var) + if idv in cache.keys(): + namestr = name + " (%s)" % cache[idv] + original = 0 + else: + cache[idv] = name + namestr = name + original = 1 + shapestr = " x ".join(map(str, var.shape)) + bytestr = str(var.nbytes) + sta.append([namestr, shapestr, bytestr, var.dtype.name, + original]) + + maxname = 0 + maxshape = 0 + maxbyte = 0 + totalbytes = 0 + for k in range(len(sta)): + val = sta[k] + if maxname < len(val[0]): + maxname = len(val[0]) + if maxshape < len(val[1]): + maxshape = len(val[1]) + if maxbyte < len(val[2]): + maxbyte = len(val[2]) + if val[4]: + totalbytes += int(val[2]) + + if len(sta) > 0: + sp1 = max(10, maxname) + sp2 = max(10, maxshape) + sp3 = max(10, maxbyte) + prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') + print(prval + "\n" + "="*(len(prval)+5) + "\n") + + for k in range(len(sta)): + val = sta[k] + print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4), + val[1], ' '*(sp2-len(val[1])+5), + val[2], ' '*(sp3-len(val[2])+5), + val[3])) + print("\nUpper bound on total bytes = %d" % totalbytes) + return + +#----------------------------------------------------------------------------- + + +# NOTE: pydoc defines a help function which works simliarly to this +# except it uses a pager to take over the screen. + +# combine name and arguments and split to multiple lines of width +# characters. End lines on a comma and begin argument list indented with +# the rest of the arguments. +def _split_line(name, arguments, width): + firstwidth = len(name) + k = firstwidth + newstr = name + sepstr = ", " + arglist = arguments.split(sepstr) + for argument in arglist: + if k == firstwidth: + addstr = "" + else: + addstr = sepstr + k = k + len(argument) + len(addstr) + if k > width: + k = firstwidth + 1 + len(argument) + newstr = newstr + ",\n" + " "*(firstwidth+2) + argument + else: + newstr = newstr + addstr + argument + return newstr + +_namedict = None +_dictlist = None + +# Traverse all module directories underneath globals +# to see if something is defined +def _makenamedict(module='numpy'): + module = __import__(module, globals(), locals(), []) + thedict = {module.__name__:module.__dict__} + dictlist = [module.__name__] + totraverse = [module.__dict__] + while True: + if len(totraverse) == 0: + break + thisdict = totraverse.pop(0) + for x in thisdict.keys(): + if isinstance(thisdict[x], types.ModuleType): + modname = thisdict[x].__name__ + if modname not in dictlist: + moddict = thisdict[x].__dict__ + dictlist.append(modname) + totraverse.append(moddict) + thedict[modname] = moddict + return thedict, dictlist + + +def _info(obj, output=sys.stdout): + """Provide information about ndarray obj. + + Parameters + ---------- + obj: ndarray + Must be ndarray, not checked. + output: + Where printed output goes. 
+ + Notes + ----- + Copied over from the numarray module prior to its removal. + Adapted somewhat as only numpy is an option now. + + Called by info. + + """ + extra = "" + tic = "" + bp = lambda x: x + cls = getattr(obj, '__class__', type(obj)) + nm = getattr(cls, '__name__', cls) + strides = obj.strides + endian = obj.dtype.byteorder + + print("class: ", nm, file=output) + print("shape: ", obj.shape, file=output) + print("strides: ", strides, file=output) + print("itemsize: ", obj.itemsize, file=output) + print("aligned: ", bp(obj.flags.aligned), file=output) + print("contiguous: ", bp(obj.flags.contiguous), file=output) + print("fortran: ", obj.flags.fortran, file=output) + print( + "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), + file=output + ) + print("byteorder: ", end=' ', file=output) + if endian in ['|', '=']: + print("%s%s%s" % (tic, sys.byteorder, tic), file=output) + byteswap = False + elif endian == '>': + print("%sbig%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "big" + else: + print("%slittle%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "little" + print("byteswap: ", bp(byteswap), file=output) + print("type: %s" % obj.dtype, file=output) + + +def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): + """ + Get help information for a function, class, or module. + + Parameters + ---------- + object : object or str, optional + Input object or name to get information about. If `object` is a + numpy object, its docstring is given. If it is a string, available + modules are searched for matching objects. If None, information + about `info` itself is returned. + maxwidth : int, optional + Printing width. + output : file like object, optional + File like object that the output is written to, default is + ``stdout``. The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional + Start search at this level. + + See Also + -------- + source, lookfor + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent + to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython + prompt. + + Examples + -------- + >>> np.info(np.polyval) # doctest: +SKIP + polyval(p, x) + Evaluate the polynomial p at x. + ... + + When using a string for `object` it is possible to get multiple results. + + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** + + """ + global _namedict, _dictlist + # Local import to speed up numpy's import time. + import pydoc + import inspect + + if (hasattr(object, '_ppimport_importer') or + hasattr(object, '_ppimport_module')): + object = object._ppimport_module + elif hasattr(object, '_ppimport_attr'): + object = object._ppimport_attr + + if object is None: + info(info) + elif isinstance(object, ndarray): + _info(object, output=output) + elif isinstance(object, str): + if _namedict is None: + _namedict, _dictlist = _makenamedict(toplevel) + numfound = 0 + objlist = [] + for namestr in _dictlist: + try: + obj = _namedict[namestr][object] + if id(obj) in objlist: + print("\n " + "*** Repeat reference found in %s *** " % namestr, + file=output + ) + else: + objlist.append(id(obj)) + print(" *** Found in %s ***" % namestr, file=output) + info(obj) + print("-"*maxwidth, file=output) + numfound += 1 + except KeyError: + pass + if numfound == 0: + print("Help for %s not found." 
% object, file=output) + else: + print("\n " + "*** Total of %d references found. ***" % numfound, + file=output + ) + + elif inspect.isfunction(object): + name = object.__name__ + arguments = inspect.formatargspec(*inspect.getargspec(object)) + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif inspect.isclass(object): + name = object.__name__ + arguments = "()" + try: + if hasattr(object, '__init__'): + arguments = inspect.formatargspec( + *inspect.getargspec(object.__init__.__func__) + ) + arglist = arguments.split(', ') + if len(arglist) > 1: + arglist[1] = "("+arglist[1] + arguments = ", ".join(arglist[1:]) + except: + pass + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc1 = inspect.getdoc(object) + if doc1 is None: + if hasattr(object, '__init__'): + print(inspect.getdoc(object.__init__), file=output) + else: + print(inspect.getdoc(object), file=output) + + methods = pydoc.allmethods(object) + if methods != []: + print("\n\nMethods:\n", file=output) + for meth in methods: + if meth[0] == '_': + continue + thisobj = getattr(object, meth, None) + if thisobj is not None: + methstr, other = pydoc.splitdoc( + inspect.getdoc(thisobj) or "None" + ) + print(" %s -- %s" % (meth, methstr), file=output) + + elif (sys.version_info[0] < 3 + and isinstance(object, types.InstanceType)): + # check for __call__ method + # types.InstanceType is the type of the instances of oldstyle classes + print("Instance of class: ", object.__class__.__name__, file=output) + print(file=output) + if hasattr(object, '__call__'): + arguments = inspect.formatargspec( + *inspect.getargspec(object.__call__.__func__) + ) + arglist = arguments.split(', ') + if len(arglist) > 1: + arglist[1] = "("+arglist[1] + arguments = ", ".join(arglist[1:]) + else: + arguments = "()" + + if hasattr(object, 'name'): + name = "%s" % object.name + else: + name = "" + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc = inspect.getdoc(object.__call__) + if doc is not None: + print(inspect.getdoc(object.__call__), file=output) + print(inspect.getdoc(object), file=output) + + else: + print(inspect.getdoc(object), file=output) + + elif inspect.ismethod(object): + name = object.__name__ + arguments = inspect.formatargspec( + *inspect.getargspec(object.__func__) + ) + arglist = arguments.split(', ') + if len(arglist) > 1: + arglist[1] = "("+arglist[1] + arguments = ", ".join(arglist[1:]) + else: + arguments = "()" + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif hasattr(object, '__doc__'): + print(inspect.getdoc(object), file=output) + + +def source(object, output=sys.stdout): + """ + Print or write to a file the source code for a Numpy object. + + The source code is only returned for objects written in Python. Many + functions and classes are defined in C and will therefore not return + useful information. + + Parameters + ---------- + object : numpy object + Input object. This can be any object (function, class, module, + ...). 
+    output : file object, optional
+        If `output` not supplied then source code is printed to screen
+        (sys.stdout).  File object must be created with either write 'w' or
+        append 'a' modes.
+
+    See Also
+    --------
+    lookfor, info
+
+    Examples
+    --------
+    >>> np.source(np.interp)                        #doctest: +SKIP
+    In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
+    def interp(x, xp, fp, left=None, right=None):
+        \"\"\".... (full docstring printed)\"\"\"
+        if isinstance(x, (float, int, number)):
+            return compiled_interp([x], xp, fp, left, right).item()
+        else:
+            return compiled_interp(x, xp, fp, left, right)
+
+    The source code is only returned for objects written in Python.
+
+    >>> np.source(np.array)                         #doctest: +SKIP
+    Not available for this object.
+
+    """
+    # Local import to speed up numpy's import time.
+    import inspect
+    try:
+        print("In file: %s\n" % inspect.getsourcefile(object), file=output)
+        print(inspect.getsource(object), file=output)
+    except:
+        print("Not available for this object.", file=output)
+
+
+# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
+# where kind: "func", "class", "module", "object"
+# and index: index in breadth-first namespace traversal
+_lookfor_caches = {}
+
+# regexp whose match indicates that the string may contain a function
+# signature
+_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
+
+def lookfor(what, module=None, import_modules=True, regenerate=False,
+            output=None):
+    """
+    Do a keyword search on docstrings.
+
+    A list of objects that matched the search is displayed,
+    sorted by relevance. All given keywords need to be found in the
+    docstring for it to be returned as a result, but the order does
+    not matter.
+
+    Parameters
+    ----------
+    what : str
+        String containing words to look for.
+    module : str or list, optional
+        Name of module(s) whose docstrings to go through.
+    import_modules : bool, optional
+        Whether to import sub-modules in packages. Default is True.
+    regenerate : bool, optional
+        Whether to re-generate the docstring cache. Default is False.
+    output : file-like, optional
+        File-like object to write the output to. If omitted, use a pager.
+
+    See Also
+    --------
+    source, info
+
+    Notes
+    -----
+    Relevance is determined only roughly, by checking if the keywords occur
+    in the function name, at the start of a docstring, etc.
+
+    Examples
+    --------
+    >>> np.lookfor('binary representation')
+    Search results for 'binary representation'
+    ------------------------------------------
+    numpy.binary_repr
+        Return the binary representation of the input number as a string.
+    numpy.core.setup_common.long_double_representation
+        Given a binary dump as given by GNU od -b, look for long double
+    numpy.base_repr
+        Return a string representation of a number in the given base system.
+    ...
+
+    """
+    import pydoc
+
+    # Cache
+    cache = _lookfor_generate_cache(module, import_modules, regenerate)
+
+    # Search
+    # XXX: maybe using a real stemming search engine would be better?
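+    # Case-insensitive AND-match: `what` is lowercased and split on
+    # whitespace, and an entry survives only if every keyword occurs in
+    # its (lowercased) docstring.  Modules and plain objects are skipped.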
+ found = [] + whats = str(what).lower().split() + if not whats: + return + + for name, (docstring, kind, index) in cache.items(): + if kind in ('module', 'object'): + # don't show modules or objects + continue + ok = True + doc = docstring.lower() + for w in whats: + if w not in doc: + ok = False + break + if ok: + found.append(name) + + # Relevance sort + # XXX: this is full Harrison-Stetson heuristics now, + # XXX: it probably could be improved + + kind_relevance = {'func': 1000, 'class': 1000, + 'module': -1000, 'object': -1000} + + def relevance(name, docstr, kind, index): + r = 0 + # do the keywords occur within the start of the docstring? + first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) + r += sum([200 for w in whats if w in first_doc]) + # do the keywords occur in the function name? + r += sum([30 for w in whats if w in name]) + # is the full name long? + r += -len(name) * 5 + # is the object of bad type? + r += kind_relevance.get(kind, -1000) + # is the object deep in namespace hierarchy? + r += -name.count('.') * 10 + r += max(-index / 100, -100) + return r + + def relevance_value(a): + return relevance(a, *cache[a]) + found.sort(key=relevance_value) + + # Pretty-print + s = "Search results for '%s'" % (' '.join(whats)) + help_text = [s, "-"*len(s)] + for name in found[::-1]: + doc, kind, ix = cache[name] + + doclines = [line.strip() for line in doc.strip().split("\n") + if line.strip()] + + # find a suitable short description + try: + first_doc = doclines[0].strip() + if _function_signature_re.search(first_doc): + first_doc = doclines[1].strip() + except IndexError: + first_doc = "" + help_text.append("%s\n %s" % (name, first_doc)) + + if not found: + help_text.append("Nothing found.") + + # Output + if output is not None: + output.write("\n".join(help_text)) + elif len(help_text) > 10: + pager = pydoc.getpager() + pager("\n".join(help_text)) + else: + print("\n".join(help_text)) + +def _lookfor_generate_cache(module, import_modules, regenerate): + """ + Generate docstring cache for given module. + + Parameters + ---------- + module : str, None, module + Module for which to generate docstring cache + import_modules : bool + Whether to import sub-modules in packages. + regenerate : bool + Re-generate the docstring cache + + Returns + ------- + cache : dict {obj_full_name: (docstring, kind, index), ...} + Docstring cache for the module, either cached one (regenerate=False) + or newly generated. + + """ + global _lookfor_caches + # Local import to speed up numpy's import time. + import inspect + + if sys.version_info[0] >= 3: + # In Python3 stderr, stdout are text files. 
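+        # StringIO is used below to capture and discard anything that
+        # sub-modules write to stdout/stderr while they are imported.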
+ from io import StringIO + else: + from StringIO import StringIO + + if module is None: + module = "numpy" + + if isinstance(module, str): + try: + __import__(module) + except ImportError: + return {} + module = sys.modules[module] + elif isinstance(module, list) or isinstance(module, tuple): + cache = {} + for mod in module: + cache.update(_lookfor_generate_cache(mod, import_modules, + regenerate)) + return cache + + if id(module) in _lookfor_caches and not regenerate: + return _lookfor_caches[id(module)] + + # walk items and collect docstrings + cache = {} + _lookfor_caches[id(module)] = cache + seen = {} + index = 0 + stack = [(module.__name__, module)] + while stack: + name, item = stack.pop(0) + if id(item) in seen: + continue + seen[id(item)] = True + + index += 1 + kind = "object" + + if inspect.ismodule(item): + kind = "module" + try: + _all = item.__all__ + except AttributeError: + _all = None + + # import sub-packages + if import_modules and hasattr(item, '__path__'): + for pth in item.__path__: + for mod_path in os.listdir(pth): + this_py = os.path.join(pth, mod_path) + init_py = os.path.join(pth, mod_path, '__init__.py') + if (os.path.isfile(this_py) and + mod_path.endswith('.py')): + to_import = mod_path[:-3] + elif os.path.isfile(init_py): + to_import = mod_path + else: + continue + if to_import == '__init__': + continue + + try: + # Catch SystemExit, too + base_exc = BaseException + except NameError: + # Python 2.4 doesn't have BaseException + base_exc = Exception + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + try: + sys.stdout = StringIO() + sys.stderr = StringIO() + __import__("%s.%s" % (name, to_import)) + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + except base_exc: + continue + + for n, v in _getmembers(item): + try: + item_name = getattr(v, '__name__', "%s.%s" % (name, n)) + mod_name = getattr(v, '__module__', None) + except NameError: + # ref. SWIG's global cvars + # NameError: Unknown C global variable + item_name = "%s.%s" % (name, n) + mod_name = None + if '.' not in item_name and mod_name: + item_name = "%s.%s" % (mod_name, item_name) + + if not item_name.startswith(name + '.'): + # don't crawl "foreign" objects + if isinstance(v, ufunc): + # ... unless they are ufuncs + pass + else: + continue + elif not (inspect.ismodule(v) or _all is None or n in _all): + continue + stack.append(("%s.%s" % (name, n), v)) + elif inspect.isclass(item): + kind = "class" + for n, v in _getmembers(item): + stack.append(("%s.%s" % (name, n), v)) + elif hasattr(item, "__call__"): + kind = "func" + + try: + doc = inspect.getdoc(item) + except NameError: + # ref SWIG's NameError: Unknown C global variable + doc = None + if doc is not None: + cache[name] = (doc, kind, index) + + return cache + +def _getmembers(item): + import inspect + try: + members = inspect.getmembers(item) + except AttributeError: + members = [(x, getattr(item, x)) for x in dir(item) + if hasattr(item, x)] + return members + +#----------------------------------------------------------------------------- + +# The following SafeEval class and company are adapted from Michael Spencer's +# ASPN Python Cookbook recipe: +# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469 +# Accordingly it is mostly Copyright 2006 by Michael Spencer. +# The recipe, like most of the other ASPN Python Cookbook recipes was made +# available under the Python license. 
+# http://www.python.org/license + +# It has been modified to: +# * handle unary -/+ +# * support True/False/None +# * raise SyntaxError instead of a custom exception. + +class SafeEval(object): + """ + Object to evaluate constant string expressions. + + This includes strings with lists, dicts and tuples using the abstract + syntax tree created by ``compiler.parse``. + + For an example of usage, see `safe_eval`. + + See Also + -------- + safe_eval + + """ + + if sys.version_info[0] < 3: + def visit(self, node, **kw): + cls = node.__class__ + meth = getattr(self, 'visit'+cls.__name__, self.default) + return meth(node, **kw) + + def default(self, node, **kw): + raise SyntaxError("Unsupported source construct: %s" + % node.__class__) + + def visitExpression(self, node, **kw): + for child in node.getChildNodes(): + return self.visit(child, **kw) + + def visitConst(self, node, **kw): + return node.value + + def visitDict(self, node,**kw): + return dict( + [(self.visit(k), self.visit(v)) for k, v in node.items] + ) + + def visitTuple(self, node, **kw): + return tuple([self.visit(i) for i in node.nodes]) + + def visitList(self, node, **kw): + return [self.visit(i) for i in node.nodes] + + def visitUnaryAdd(self, node, **kw): + return +self.visit(node.getChildNodes()[0]) + + def visitUnarySub(self, node, **kw): + return -self.visit(node.getChildNodes()[0]) + + def visitName(self, node, **kw): + if node.name == 'False': + return False + elif node.name == 'True': + return True + elif node.name == 'None': + return None + else: + raise SyntaxError("Unknown name: %s" % node.name) + else: + + def visit(self, node): + cls = node.__class__ + meth = getattr(self, 'visit' + cls.__name__, self.default) + return meth(node) + + def default(self, node): + raise SyntaxError("Unsupported source construct: %s" + % node.__class__) + + def visitExpression(self, node): + return self.visit(node.body) + + def visitNum(self, node): + return node.n + + def visitStr(self, node): + return node.s + + def visitBytes(self, node): + return node.s + + def visitDict(self, node,**kw): + return dict([(self.visit(k), self.visit(v)) + for k, v in zip(node.keys, node.values)]) + + def visitTuple(self, node): + return tuple([self.visit(i) for i in node.elts]) + + def visitList(self, node): + return [self.visit(i) for i in node.elts] + + def visitUnaryOp(self, node): + import ast + if isinstance(node.op, ast.UAdd): + return +self.visit(node.operand) + elif isinstance(node.op, ast.USub): + return -self.visit(node.operand) + else: + raise SyntaxError("Unknown unary op: %r" % node.op) + + def visitName(self, node): + if node.id == 'False': + return False + elif node.id == 'True': + return True + elif node.id == 'None': + return None + else: + raise SyntaxError("Unknown name: %s" % node.id) + + def visitNameConstant(self, node): + return node.value + +def safe_eval(source): + """ + Protected string evaluation. + + Evaluate a string containing a Python literal expression without + allowing the execution of arbitrary non-literal code. + + Parameters + ---------- + source : str + The string to evaluate. + + Returns + ------- + obj : object + The result of evaluating `source`. + + Raises + ------ + SyntaxError + If the code has invalid Python syntax, or if it contains + non-literal code. + + Examples + -------- + >>> np.safe_eval('1') + 1 + >>> np.safe_eval('[1, 2, 3]') + [1, 2, 3] + >>> np.safe_eval('{"foo": ("bar", 10.0)}') + {'foo': ('bar', 10.0)} + + >>> np.safe_eval('import os') + Traceback (most recent call last): + ... 
+ SyntaxError: invalid syntax + + >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') + Traceback (most recent call last): + ... + SyntaxError: Unsupported source construct: compiler.ast.CallFunc + + """ + # Local imports to speed up numpy's import time. + import warnings + + with warnings.catch_warnings(): + # compiler package is deprecated for 3.x, which is already solved + # here + warnings.simplefilter('ignore', DeprecationWarning) + try: + import compiler + except ImportError: + import ast as compiler + + walker = SafeEval() + try: + ast = compiler.parse(source, mode="eval") + except SyntaxError: + raise + try: + return walker.visit(ast) + except SyntaxError: + raise + +#----------------------------------------------------------------------------- diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/__init__.py new file mode 100644 index 0000000000000..bc2a1ff6ce9fb --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/__init__.py @@ -0,0 +1,55 @@ +""" +Core Linear Algebra Tools +========================= + +=============== ========================================================== +Linear algebra basics +========================================================================== +norm Vector or matrix norm +inv Inverse of a square matrix +solve Solve a linear system of equations +det Determinant of a square matrix +slogdet Logarithm of the determinant of a square matrix +lstsq Solve linear least-squares problem +pinv Pseudo-inverse (Moore-Penrose) calculated using a singular + value decomposition +matrix_power Integer power of a square matrix +matrix_rank Calculate matrix rank using an SVD-based method +=============== ========================================================== + +=============== ========================================================== +Eigenvalues and decompositions +========================================================================== +eig Eigenvalues and vectors of a square matrix +eigh Eigenvalues and eigenvectors of a Hermitian matrix +eigvals Eigenvalues of a square matrix +eigvalsh Eigenvalues of a Hermitian matrix +qr QR decomposition of a matrix +svd Singular value decomposition of a matrix +cholesky Cholesky decomposition of a matrix +=============== ========================================================== + +=============== ========================================================== +Tensor operations +========================================================================== +tensorsolve Solve a linear tensor equation +tensorinv Calculate an inverse of a tensor +=============== ========================================================== + +=============== ========================================================== +Exceptions +========================================================================== +LinAlgError Indicates a failed linear algebra operation +=============== ========================================================== + +""" +from __future__ import division, absolute_import, print_function + +# To get sub-modules +from .info import __doc__ + +from .linalg import * + +from numpy.testing import Tester +test = Tester().test +bench = Tester().test diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/_umath_linalg.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/_umath_linalg.py new file mode 100644 index 0000000000000..389a85fc2fa93 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/_umath_linalg.py @@ -0,0 +1,7 @@ +def 
__bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, '_umath_linalg.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/info.py new file mode 100644 index 0000000000000..646ecda04aa95 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/info.py @@ -0,0 +1,37 @@ +"""\ +Core Linear Algebra Tools +------------------------- +Linear algebra basics: + +- norm Vector or matrix norm +- inv Inverse of a square matrix +- solve Solve a linear system of equations +- det Determinant of a square matrix +- lstsq Solve linear least-squares problem +- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular + value decomposition +- matrix_power Integer power of a square matrix + +Eigenvalues and decompositions: + +- eig Eigenvalues and vectors of a square matrix +- eigh Eigenvalues and eigenvectors of a Hermitian matrix +- eigvals Eigenvalues of a square matrix +- eigvalsh Eigenvalues of a Hermitian matrix +- qr QR decomposition of a matrix +- svd Singular value decomposition of a matrix +- cholesky Cholesky decomposition of a matrix + +Tensor operations: + +- tensorsolve Solve a linear tensor equation +- tensorinv Calculate an inverse of a tensor + +Exceptions: + +- LinAlgError Indicates a failed linear algebra operation + +""" +from __future__ import division, absolute_import, print_function + +depends = ['core'] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/lapack_lite.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/lapack_lite.py new file mode 100644 index 0000000000000..3b8026dadd909 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/lapack_lite.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, 'lapack_lite.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/linalg.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/linalg.py new file mode 100644 index 0000000000000..6b2299fe7a6c0 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/linalg.py @@ -0,0 +1,2136 @@ +"""Lite version of scipy.linalg. + +Notes +----- +This module is a lite version of the linalg.py module in SciPy which +contains high-level Python interface to the LAPACK library. The lite +version only accesses the following LAPACK functions: dgesv, zgesv, +dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, +zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. 
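+
+A minimal usage sketch (the public functions here are re-exported as
+``numpy.linalg``):
+
+>>> import numpy as np
+>>> np.linalg.solve(np.eye(2), np.ones(2))
+array([ 1.,  1.])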
+""" +from __future__ import division, absolute_import, print_function + + +__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', + 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', + 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', + 'LinAlgError'] + +import warnings + +from numpy.core import ( + array, asarray, zeros, empty, empty_like, transpose, intc, single, double, + csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot, + add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size, + finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs, + broadcast + ) +from numpy.lib import triu, asfarray +from numpy.linalg import lapack_lite, _umath_linalg +from numpy.matrixlib.defmatrix import matrix_power +from numpy.compat import asbytes + +# For Python2/3 compatibility +_N = asbytes('N') +_V = asbytes('V') +_A = asbytes('A') +_S = asbytes('S') +_L = asbytes('L') + +fortran_int = intc + +# Error object +class LinAlgError(Exception): + """ + Generic Python-exception-derived object raised by linalg functions. + + General purpose exception class, derived from Python's exception.Exception + class, programmatically raised in linalg functions when a Linear + Algebra-related condition would prevent further correct execution of the + function. + + Parameters + ---------- + None + + Examples + -------- + >>> from numpy import linalg as LA + >>> LA.inv(np.zeros((2,2))) + Traceback (most recent call last): + File "", line 1, in + File "...linalg.py", line 350, + in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) + File "...linalg.py", line 249, + in solve + raise LinAlgError('Singular matrix') + numpy.linalg.LinAlgError: Singular matrix + + """ + pass + +# Dealing with errors in _umath_linalg + +_linalg_error_extobj = None + +def _determine_error_states(): + global _linalg_error_extobj + errobj = geterrobj() + bufsize = errobj[0] + + with errstate(invalid='call', over='ignore', + divide='ignore', under='ignore'): + invalid_call_errmask = geterrobj()[1] + + _linalg_error_extobj = [bufsize, invalid_call_errmask, None] + +_determine_error_states() + +def _raise_linalgerror_singular(err, flag): + raise LinAlgError("Singular matrix") + +def _raise_linalgerror_nonposdef(err, flag): + raise LinAlgError("Matrix is not positive definite") + +def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): + raise LinAlgError("Eigenvalues did not converge") + +def _raise_linalgerror_svd_nonconvergence(err, flag): + raise LinAlgError("SVD did not converge") + +def get_linalg_error_extobj(callback): + extobj = list(_linalg_error_extobj) + extobj[2] = callback + return extobj + +def _makearray(a): + new = asarray(a) + wrap = getattr(a, "__array_prepare__", new.__array_wrap__) + return new, wrap + +def isComplexType(t): + return issubclass(t, complexfloating) + +_real_types_map = {single : single, + double : double, + csingle : single, + cdouble : double} + +_complex_types_map = {single : csingle, + double : cdouble, + csingle : csingle, + cdouble : cdouble} + +def _realType(t, default=double): + return _real_types_map.get(t, default) + +def _complexType(t, default=cdouble): + return _complex_types_map.get(t, default) + +def _linalgRealType(t): + """Cast the type t to either double or cdouble.""" + return double + +_complex_types_map = {single : csingle, + double : cdouble, + csingle : csingle, + cdouble : cdouble} + +def _commonType(*arrays): + # in lite version, use higher precision (always double or 
cdouble) + result_type = single + is_complex = False + for a in arrays: + if issubclass(a.dtype.type, inexact): + if isComplexType(a.dtype.type): + is_complex = True + rt = _realType(a.dtype.type, default=None) + if rt is None: + # unsupported inexact scalar + raise TypeError("array type %s is unsupported in linalg" % + (a.dtype.name,)) + else: + rt = double + if rt is double: + result_type = double + if is_complex: + t = cdouble + result_type = _complex_types_map[result_type] + else: + t = double + return t, result_type + + +# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). + +_fastCT = fastCopyAndTranspose + +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + +def _fastCopyAndTranspose(type, *arrays): + cast_arrays = () + for a in arrays: + if a.dtype.type is type: + cast_arrays = cast_arrays + (_fastCT(a),) + else: + cast_arrays = cast_arrays + (_fastCT(a.astype(type)),) + if len(cast_arrays) == 1: + return cast_arrays[0] + else: + return cast_arrays + +def _assertRank2(*arrays): + for a in arrays: + if len(a.shape) != 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'two-dimensional' % len(a.shape)) + +def _assertRankAtLeast2(*arrays): + for a in arrays: + if len(a.shape) < 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % len(a.shape)) + +def _assertSquareness(*arrays): + for a in arrays: + if max(a.shape) != min(a.shape): + raise LinAlgError('Array must be square') + +def _assertNdSquareness(*arrays): + for a in arrays: + if max(a.shape[-2:]) != min(a.shape[-2:]): + raise LinAlgError('Last 2 dimensions of the array must be square') + +def _assertFinite(*arrays): + for a in arrays: + if not (isfinite(a).all()): + raise LinAlgError("Array must not contain infs or NaNs") + +def _assertNoEmpty2d(*arrays): + for a in arrays: + if a.size == 0 and product(a.shape[-2:]) == 0: + raise LinAlgError("Arrays cannot be empty") + + +# Linear equations + +def tensorsolve(a, b, axes=None): + """ + Solve the tensor equation ``a x = b`` for x. + + It is assumed that all indices of `x` are summed over in the product, + together with the rightmost indices of `a`, as is done in, for example, + ``tensordot(a, x, axes=len(b.shape))``. + + Parameters + ---------- + a : array_like + Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals + the shape of that sub-tensor of `a` consisting of the appropriate + number of its rightmost indices, and must be such that + ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be + 'square'). + b : array_like + Right-hand tensor, which can be of any shape. + axes : tuple of ints, optional + Axes in `a` to reorder to the right, before inversion. + If None (default), no reordering is done. + + Returns + ------- + x : ndarray, shape Q + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). 
+ + See Also + -------- + tensordot, tensorinv, einsum + + Examples + -------- + >>> a = np.eye(2*3*4) + >>> a.shape = (2*3, 4, 2, 3, 4) + >>> b = np.random.randn(2*3, 4) + >>> x = np.linalg.tensorsolve(a, b) + >>> x.shape + (2, 3, 4) + >>> np.allclose(np.tensordot(a, x, axes=3), b) + True + + """ + a, wrap = _makearray(a) + b = asarray(b) + an = a.ndim + + if axes is not None: + allaxes = list(range(0, an)) + for k in axes: + allaxes.remove(k) + allaxes.insert(an, k) + a = a.transpose(allaxes) + + oldshape = a.shape[-(an-b.ndim):] + prod = 1 + for k in oldshape: + prod *= k + + a = a.reshape(-1, prod) + b = b.ravel() + res = wrap(solve(a, b)) + res.shape = oldshape + return res + +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + Computes the "exact" solution, `x`, of the well-determined, i.e., full + rank, linear matrix equation `ax = b`. + + Parameters + ---------- + a : (..., M, M) array_like + Coefficient matrix. + b : {(..., M,), (..., M, K)}, array_like + Ordinate or "dependent variable" values. + + Returns + ------- + x : {(..., M,), (..., M, K)} ndarray + Solution to the system a x = b. Returned shape is identical to `b`. + + Raises + ------ + LinAlgError + If `a` is singular or not square. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The solutions are computed using LAPACK routine _gesv + + `a` must be square and of full-rank, i.e., all rows (or, equivalently, + columns) must be linearly independent; if either is not true, use + `lstsq` for the least-squares best "solution" of the + system/equation. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 22. + + Examples + -------- + Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``: + + >>> a = np.array([[3,1], [1,2]]) + >>> b = np.array([9,8]) + >>> x = np.linalg.solve(a, b) + >>> x + array([ 2., 3.]) + + Check that the solution is correct: + + >>> np.allclose(np.dot(a, x), b) + True + + """ + a, _ = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + b, wrap = _makearray(b) + t, result_t = _commonType(a, b) + + # We use the b = (..., M,) logic, only if the number of extra dimensions + # match exactly + if b.ndim == a.ndim - 1: + if a.shape[-1] == 0 and b.shape[-1] == 0: + # Legal, but the ufunc cannot handle the 0-sized inner dims + # let the ufunc handle all wrong cases. + a = a.reshape(a.shape[:-1]) + bc = broadcast(a, b) + return wrap(empty(bc.shape, dtype=result_t)) + + gufunc = _umath_linalg.solve1 + else: + if b.size == 0: + if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0: + a = a[:,:1].reshape(a.shape[:-1] + (1,)) + bc = broadcast(a, b) + return wrap(empty(bc.shape, dtype=result_t)) + + gufunc = _umath_linalg.solve + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_singular) + r = gufunc(a, b, signature=signature, extobj=extobj) + + return wrap(r.astype(result_t)) + + +def tensorinv(a, ind=2): + """ + Compute the 'inverse' of an N-dimensional array. + + The result is an inverse for `a` relative to the tensordot operation + ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, + ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the + tensordot operation. + + Parameters + ---------- + a : array_like + Tensor to 'invert'. Its shape must be 'square', i. 
e., + ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. + ind : int, optional + Number of first indices that are involved in the inverse sum. + Must be a positive integer, default is 2. + + Returns + ------- + b : ndarray + `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + tensordot, tensorsolve + + Examples + -------- + >>> a = np.eye(4*6) + >>> a.shape = (4, 6, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=2) + >>> ainv.shape + (8, 3, 4, 6) + >>> b = np.random.randn(4, 6) + >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) + True + + >>> a = np.eye(4*6) + >>> a.shape = (24, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=1) + >>> ainv.shape + (8, 3, 24) + >>> b = np.random.randn(24) + >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + True + + """ + a = asarray(a) + oldshape = a.shape + prod = 1 + if ind > 0: + invshape = oldshape[ind:] + oldshape[:ind] + for k in oldshape[ind:]: + prod *= k + else: + raise ValueError("Invalid ind argument.") + a = a.reshape(prod, -1) + ia = inv(a) + return ia.reshape(*invshape) + + +# Matrix inversion + +def inv(a): + """ + Compute the (multiplicative) inverse of a matrix. + + Given a square matrix `a`, return the matrix `ainv` satisfying + ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be inverted. + + Returns + ------- + ainv : (..., M, M) ndarray or matrix + (Multiplicative) inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is not square or inversion fails. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + Examples + -------- + >>> from numpy.linalg import inv + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> ainv = inv(a) + >>> np.allclose(np.dot(a, ainv), np.eye(2)) + True + >>> np.allclose(np.dot(ainv, a), np.eye(2)) + True + + If a is a matrix object, then the return value is a matrix as well: + + >>> ainv = inv(np.matrix(a)) + >>> ainv + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + + Inverses of several matrices can be computed at once: + + >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) + >>> inv(a) + array([[[-2. , 1. ], + [ 1.5, -0.5]], + [[-5. , 2. ], + [ 3. , -1. ]]]) + + """ + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + + if a.shape[-1] == 0: + # The inner array is 0x0, the ufunc cannot handle this case + return wrap(empty_like(a, dtype=result_t)) + + signature = 'D->D' if isComplexType(t) else 'd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_singular) + ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) + return wrap(ainv.astype(result_t)) + + +# Cholesky decomposition + +def cholesky(a): + """ + Cholesky decomposition. + + Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, + where `L` is lower-triangular and .H is the conjugate transpose operator + (which is the ordinary transpose if `a` is real-valued). `a` must be + Hermitian (symmetric if real-valued) and positive-definite. Only `L` is + actually returned. + + Parameters + ---------- + a : (..., M, M) array_like + Hermitian (symmetric if all elements are real), positive-definite + input matrix. + + Returns + ------- + L : (..., M, M) array_like + Upper or lower-triangular Cholesky factor of `a`. Returns a + matrix object if `a` is a matrix object. 
+
+    Raises
+    ------
+    LinAlgError
+        If the decomposition fails, for example, if `a` is not
+        positive-definite.
+
+    Notes
+    -----
+    Broadcasting rules apply, see the `numpy.linalg` documentation for
+    details.
+
+    The Cholesky decomposition is often used as a fast way of solving
+
+    .. math:: A \\mathbf{x} = \\mathbf{b}
+
+    (when `A` is both Hermitian/symmetric and positive-definite).
+
+    First, we solve for :math:`\\mathbf{y}` in
+
+    .. math:: L \\mathbf{y} = \\mathbf{b},
+
+    and then for :math:`\\mathbf{x}` in
+
+    .. math:: L.H \\mathbf{x} = \\mathbf{y}.
+
+    Examples
+    --------
+    >>> A = np.array([[1,-2j],[2j,5]])
+    >>> A
+    array([[ 1.+0.j,  0.-2.j],
+           [ 0.+2.j,  5.+0.j]])
+    >>> L = np.linalg.cholesky(A)
+    >>> L
+    array([[ 1.+0.j,  0.+0.j],
+           [ 0.+2.j,  1.+0.j]])
+    >>> np.dot(L, L.T.conj()) # verify that L * L.H = A
+    array([[ 1.+0.j,  0.-2.j],
+           [ 0.+2.j,  5.+0.j]])
+    >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
+    >>> np.linalg.cholesky(A) # an ndarray object is returned
+    array([[ 1.+0.j,  0.+0.j],
+           [ 0.+2.j,  1.+0.j]])
+    >>> # But a matrix object is returned if A is a matrix object
+    >>> LA.cholesky(np.matrix(A))
+    matrix([[ 1.+0.j,  0.+0.j],
+            [ 0.+2.j,  1.+0.j]])
+
+    """
+    extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
+    gufunc = _umath_linalg.cholesky_lo
+    a, wrap = _makearray(a)
+    _assertRankAtLeast2(a)
+    _assertNdSquareness(a)
+    t, result_t = _commonType(a)
+    signature = 'D->D' if isComplexType(t) else 'd->d'
+    return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
+
+# QR decomposition
+
+def qr(a, mode='reduced'):
+    """
+    Compute the qr factorization of a matrix.
+
+    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
+    upper-triangular.
+
+    Parameters
+    ----------
+    a : array_like, shape (M, N)
+        Matrix to be factored.
+    mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
+        If K = min(M, N), then
+
+        'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
+        'complete' : returns q, r with dimensions (M, M), (M, N)
+        'r'        : returns r only with dimensions (K, N)
+        'raw'      : returns h, tau with dimensions (N, M), (K,)
+        'full'     : alias of 'reduced', deprecated
+        'economic' : returns h from 'raw', deprecated.
+
+        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
+        see the notes for more information. The default is 'reduced' and to
+        maintain backward compatibility with earlier versions of numpy both
+        it and the old default 'full' can be omitted. Note that array h
+        returned in 'raw' mode is transposed for calling Fortran. The
+        'economic' mode is deprecated.  The modes 'full' and 'economic' may
+        be passed using only the first letter for backwards compatibility,
+        but all others must be spelled out. See the Notes for more
+        explanation.
+
+
+    Returns
+    -------
+    q : ndarray of float or complex, optional
+        A matrix with orthonormal columns. When mode = 'complete' the
+        result is an orthogonal/unitary matrix depending on whether or not
+        a is real/complex. The determinant may be either +/- 1 in that
+        case.
+    r : ndarray of float or complex, optional
+        The upper-triangular matrix.
+    (h, tau) : ndarrays of np.double or np.cdouble, optional
+        The array h contains the Householder reflectors that generate q
+        along with r. The tau array contains scaling factors for the
+        reflectors. In the deprecated 'economic' mode only h is returned.
+
+    Raises
+    ------
+    LinAlgError
+        If factoring fails.
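+    ValueError
+        If `mode` is not one of the accepted values listed above.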
+ + Notes + ----- + This is an interface to the LAPACK routines dgeqrf, zgeqrf, + dorgqr, and zungqr. + + For more information on the qr factorization, see for example: + http://en.wikipedia.org/wiki/QR_factorization + + Subclasses of `ndarray` are preserved except for the 'raw' mode. So if + `a` is of type `matrix`, all the return values will be matrices too. + + New 'reduced', 'complete', and 'raw' options for mode were added in + Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In + addition the options 'full' and 'economic' were deprecated. Because + 'full' was the previous default and 'reduced' is the new default, + backward compatibility can be maintained by letting `mode` default. + The 'raw' option was added so that LAPACK routines that can multiply + arrays by q using the Householder reflectors can be used. Note that in + this case the returned arrays are of type np.double or np.cdouble and + the h array is transposed to be FORTRAN compatible. No routines using + the 'raw' return are currently exposed by numpy, but some are available + in lapack_lite and just await the necessary work. + + Examples + -------- + >>> a = np.random.randn(9, 6) + >>> q, r = np.linalg.qr(a) + >>> np.allclose(a, np.dot(q, r)) # a does equal qr + True + >>> r2 = np.linalg.qr(a, mode='r') + >>> r3 = np.linalg.qr(a, mode='economic') + >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' + True + >>> # But only triu parts are guaranteed equal when mode='economic' + >>> np.allclose(r, np.triu(r3[:6,:6], k=0)) + True + + Example illustrating a common use of `qr`: solving of least squares + problems + + What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for + the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points + and you'll see that it should be y0 = 0, m = 1.) The answer is provided + by solving the over-determined matrix equation ``Ax = b``, where:: + + A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) + x = array([[y0], [m]]) + b = array([[1], [0], [2], [1]]) + + If A = qr such that q is orthonormal (which is always possible via + Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, + however, we simply use `lstsq`.) 
+
+    >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
+    >>> A
+    array([[0, 1],
+           [1, 1],
+           [1, 1],
+           [2, 1]])
+    >>> b = np.array([1, 0, 2, 1])
+    >>> q, r = LA.qr(A)
+    >>> p = np.dot(q.T, b)
+    >>> np.dot(LA.inv(r), p)
+    array([  1.1e-16,   1.0e+00])
+
+    """
+    if mode not in ('reduced', 'complete', 'r', 'raw'):
+        if mode in ('f', 'full'):
+            msg = "".join((
+                    "The 'full' option is deprecated in favor of 'reduced'.\n",
+                    "For backward compatibility let mode default."))
+            warnings.warn(msg, DeprecationWarning)
+            mode = 'reduced'
+        elif mode in ('e', 'economic'):
+            msg = "The 'economic' option is deprecated."
+            warnings.warn(msg, DeprecationWarning)
+            mode = 'economic'
+        else:
+            raise ValueError("Unrecognized mode '%s'" % mode)
+
+    a, wrap = _makearray(a)
+    _assertRank2(a)
+    _assertNoEmpty2d(a)
+    m, n = a.shape
+    t, result_t = _commonType(a)
+    a = _fastCopyAndTranspose(t, a)
+    a = _to_native_byte_order(a)
+    mn = min(m, n)
+    tau = zeros((mn,), t)
+    if isComplexType(t):
+        lapack_routine = lapack_lite.zgeqrf
+        routine_name = 'zgeqrf'
+    else:
+        lapack_routine = lapack_lite.dgeqrf
+        routine_name = 'dgeqrf'
+
+    # calculate optimal size of work data 'work'
+    lwork = 1
+    work = zeros((lwork,), t)
+    results = lapack_routine(m, n, a, m, tau, work, -1, 0)
+    if results['info'] != 0:
+        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
+
+    # do qr decomposition
+    lwork = int(abs(work[0]))
+    work = zeros((lwork,), t)
+    results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
+    if results['info'] != 0:
+        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
+
+    # handle modes that don't return q
+    if mode == 'r':
+        r = _fastCopyAndTranspose(result_t, a[:, :mn])
+        return wrap(triu(r))
+
+    if mode == 'raw':
+        return a, tau
+
+    if mode == 'economic':
+        if t != result_t:
+            a = a.astype(result_t)
+        return wrap(a.T)
+
+    # generate q from a
+    if mode == 'complete' and m > n:
+        mc = m
+        q = empty((m, m), t)
+    else:
+        mc = mn
+        q = empty((n, m), t)
+    q[:n] = a
+
+    if isComplexType(t):
+        lapack_routine = lapack_lite.zungqr
+        routine_name = 'zungqr'
+    else:
+        lapack_routine = lapack_lite.dorgqr
+        routine_name = 'dorgqr'
+
+    # determine optimal lwork
+    lwork = 1
+    work = zeros((lwork,), t)
+    results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
+    if results['info'] != 0:
+        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
+
+    # compute q
+    lwork = int(abs(work[0]))
+    work = zeros((lwork,), t)
+    results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
+    if results['info'] != 0:
+        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
+
+    q = _fastCopyAndTranspose(result_t, q[:mc])
+    r = _fastCopyAndTranspose(result_t, a[:, :mc])
+
+    return wrap(q), wrap(triu(r))
+
+
+# Eigenvalues
+
+
+def eigvals(a):
+    """
+    Compute the eigenvalues of a general matrix.
+
+    Main difference between `eigvals` and `eig`: the eigenvectors aren't
+    returned.
+
+    Parameters
+    ----------
+    a : (..., M, M) array_like
+        A complex- or real-valued matrix whose eigenvalues will be computed.
+
+    Returns
+    -------
+    w : (..., M,) ndarray
+        The eigenvalues, each repeated according to its multiplicity.
+        They are not necessarily ordered, nor are they necessarily
+        real for real matrices.
+
+    Raises
+    ------
+    LinAlgError
+        If the eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eig : eigenvalues and right eigenvectors of general arrays
+    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
+ eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the _geev LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + Examples + -------- + Illustration, using the fact that the eigenvalues of a diagonal matrix + are its diagonal elements, that multiplying a matrix on the left + by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose + of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, + if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as + ``A``: + + >>> from numpy import linalg as LA + >>> x = np.random.random() + >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) + >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) + (1.0, 1.0, 0.0) + + Now multiply a diagonal matrix by Q on one side and by Q.T on the other: + + >>> D = np.diag((-1,1)) + >>> LA.eigvals(D) + array([-1., 1.]) + >>> A = np.dot(Q, D) + >>> A = np.dot(A, Q.T) + >>> LA.eigvals(A) + array([ 1., -1.]) + + """ + a, wrap = _makearray(a) + _assertNoEmpty2d(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + _assertFinite(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + signature = 'D->D' if isComplexType(t) else 'd->D' + w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) + + if not isComplexType(t): + if all(w.imag == 0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + return w.astype(result_t) + +def eigvalsh(a, UPLO='L'): + """ + Compute the eigenvalues of a Hermitian or real symmetric matrix. + + Main difference from eigh: the eigenvectors are not computed. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues are to be + computed. + UPLO : {'L', 'U'}, optional + Same as `lower`, with 'L' for lower and 'U' for upper triangular. + Deprecated. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues, not necessarily ordered, each repeated according to + its multiplicity. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. + eigvals : eigenvalues of general real or complex arrays. + eig : eigenvalues and right eigenvectors of general real or complex + arrays. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. 
+ + The eigenvalues are computed using LAPACK routines _ssyevd, _heevd + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> LA.eigvalsh(a) + array([ 0.17157288+0.j, 5.82842712+0.j]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + if UPLO == 'L': + gufunc = _umath_linalg.eigvalsh_lo + else: + gufunc = _umath_linalg.eigvalsh_up + + a, wrap = _makearray(a) + _assertNoEmpty2d(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + signature = 'D->d' if isComplexType(t) else 'd->d' + w = gufunc(a, signature=signature, extobj=extobj) + return w.astype(_realType(result_t)) + +def _convertarray(a): + t, result_t = _commonType(a) + a = _fastCT(a.astype(t)) + return a, t, result_t + + +# Eigenvectors + + +def eig(a): + """ + Compute the eigenvalues and right eigenvectors of a square array. + + Parameters + ---------- + a : (..., M, M) array + Matrices for which the eigenvalues and right eigenvectors will + be computed + + Returns + ------- + w : (..., M) array + The eigenvalues, each repeated according to its multiplicity. + The eigenvalues are not necessarily ordered. The resulting + array will be always be of complex type. When `a` is real + the resulting eigenvalues will be real (0 imaginary part) or + occur in conjugate pairs + + v : (..., M, M) array + The normalized (unit "length") eigenvectors, such that the + column ``v[:,i]`` is the eigenvector corresponding to the + eigenvalue ``w[i]``. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric) + array. + + eigvals : eigenvalues of a non-symmetric array. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the _geev LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + The number `w` is an eigenvalue of `a` if there exists a vector + `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and + `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]`` + for :math:`i \\in \\{0,...,M-1\\}`. + + The array `v` of eigenvectors may not be of maximum rank, that is, some + of the columns may be linearly dependent, although round-off error may + obscure that fact. If the eigenvalues are all different, then theoretically + the eigenvectors are linearly independent. Likewise, the (complex-valued) + matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e., + if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate + transpose of `a`. + + Finally, it is emphasized that `v` consists of the *right* (as in + right-hand side) eigenvectors of `a`. A vector `y` satisfying + ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left* + eigenvector of `a`, and, in general, the left and right eigenvectors + of a matrix are not necessarily the (perhaps conjugate) transposes + of each other. + + References + ---------- + G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, + Academic Press, Inc., 1980, Various pp. + + Examples + -------- + >>> from numpy import linalg as LA + + (Almost) trivial example with real e-values and e-vectors. 
+ + >>> w, v = LA.eig(np.diag((1, 2, 3))) + >>> w; v + array([ 1., 2., 3.]) + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Real matrix possessing complex e-values and e-vectors; note that the + e-values are complex conjugates of each other. + + >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) + >>> w; v + array([ 1. + 1.j, 1. - 1.j]) + array([[ 0.70710678+0.j , 0.70710678+0.j ], + [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]) + + Complex-valued matrix with real e-values (but complex-valued e-vectors); + note that a.conj().T = a, i.e., a is Hermitian. + + >>> a = np.array([[1, 1j], [-1j, 1]]) + >>> w, v = LA.eig(a) + >>> w; v + array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0} + array([[ 0.00000000+0.70710678j, 0.70710678+0.j ], + [ 0.70710678+0.j , 0.00000000+0.70710678j]]) + + Be careful about round-off error! + + >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) + >>> # Theor. e-values are 1 +/- 1e-9 + >>> w, v = LA.eig(a) + >>> w; v + array([ 1., 1.]) + array([[ 1., 0.], + [ 0., 1.]]) + + """ + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + _assertFinite(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + signature = 'D->DD' if isComplexType(t) else 'd->DD' + w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) + + if not isComplexType(t) and all(w.imag == 0.0): + w = w.real + vt = vt.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + vt = vt.astype(result_t) + return w.astype(result_t), wrap(vt) + + +def eigh(a, UPLO='L'): + """ + Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix. + + Returns two objects, a 1-D array containing the eigenvalues of `a`, and + a 2-D square array or matrix (depending on the input type) of the + corresponding eigenvectors (in columns). + + Parameters + ---------- + A : (..., M, M) array + Hermitian/Symmetric matrices whose eigenvalues and + eigenvectors are to be computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + + Returns + ------- + w : (..., M) ndarray + The eigenvalues, not necessarily ordered. + v : {(..., M, M) ndarray, (..., M, M) matrix} + The column ``v[:, i]`` is the normalized eigenvector corresponding + to the eigenvalue ``w[i]``. Will return a matrix object if `a` is + a matrix object. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of symmetric or Hermitian arrays. + eig : eigenvalues and right eigenvectors for non-symmetric arrays. + eigvals : eigenvalues of non-symmetric arrays. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd, + _heevd + + The eigenvalues of real symmetric or complex Hermitian matrices are + always real. [1]_ The array `v` of (column) eigenvectors is unitary + and `a`, `w`, and `v` satisfy the equations + ``dot(a, v[:, i]) = w[i] * v[:, i]``. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 222. 
+ + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> a + array([[ 1.+0.j, 0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> w, v = LA.eigh(a) + >>> w; v + array([ 0.17157288, 5.82842712]) + array([[-0.92387953+0.j , -0.38268343+0.j ], + [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) + + >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair + array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j]) + >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair + array([ 0.+0.j, 0.+0.j]) + + >>> A = np.matrix(a) # what happens if input is a matrix object + >>> A + matrix([[ 1.+0.j, 0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> w, v = LA.eigh(A) + >>> w; v + array([ 0.17157288, 5.82842712]) + matrix([[-0.92387953+0.j , -0.38268343+0.j ], + [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + a, wrap = _makearray(a) + _assertRankAtLeast2(a) + _assertNdSquareness(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + if UPLO == 'L': + gufunc = _umath_linalg.eigh_lo + else: + gufunc = _umath_linalg.eigh_up + + signature = 'D->dD' if isComplexType(t) else 'd->dd' + w, vt = gufunc(a, signature=signature, extobj=extobj) + w = w.astype(_realType(result_t)) + vt = vt.astype(result_t) + return w, wrap(vt) + + +# Singular value decomposition + +def svd(a, full_matrices=1, compute_uv=1): + """ + Singular Value Decomposition. + + Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v` + are unitary and `s` is a 1-d array of `a`'s singular values. + + Parameters + ---------- + a : (..., M, N) array_like + A real or complex matrix of shape (`M`, `N`) . + full_matrices : bool, optional + If True (default), `u` and `v` have the shapes (`M`, `M`) and + (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`) + and (`K`, `N`), respectively, where `K` = min(`M`, `N`). + compute_uv : bool, optional + Whether or not to compute `u` and `v` in addition to `s`. True + by default. + + Returns + ------- + u : { (..., M, M), (..., M, K) } array + Unitary matrices. The actual shape depends on the value of + ``full_matrices``. Only returned when ``compute_uv`` is True. + s : (..., K) array + The singular values for every matrix, sorted in descending order. + v : { (..., N, N), (..., K, N) } array + Unitary matrices. The actual shape depends on the value of + ``full_matrices``. Only returned when ``compute_uv`` is True. + + Raises + ------ + LinAlgError + If SVD computation does not converge. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The decomposition is performed using LAPACK routine _gesdd + + The SVD is commonly written as ``a = U S V.H``. The `v` returned + by this function is ``V.H`` and ``u = U``. + + If ``U`` is a unitary matrix, it means that it + satisfies ``U.H = inv(U)``. + + The rows of `v` are the eigenvectors of ``a.H a``. The columns + of `u` are the eigenvectors of ``a a.H``. For row ``i`` in + `v` and column ``i`` in `u`, the corresponding eigenvalue is + ``s[i]**2``. + + If `a` is a `matrix` object (as opposed to an `ndarray`), then so + are all the return values. 
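+
+    Because the singular values are sorted in descending order, they can be
+    used directly for rough conditioning and rank estimates; for example,
+    ``s[0] / s[-1]`` is the 2-norm condition number that `cond` computes.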
+ + Examples + -------- + >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) + + Reconstruction based on full SVD: + + >>> U, s, V = np.linalg.svd(a, full_matrices=True) + >>> U.shape, V.shape, s.shape + ((9, 9), (6, 6), (6,)) + >>> S = np.zeros((9, 6), dtype=complex) + >>> S[:6, :6] = np.diag(s) + >>> np.allclose(a, np.dot(U, np.dot(S, V))) + True + + Reconstruction based on reduced SVD: + + >>> U, s, V = np.linalg.svd(a, full_matrices=False) + >>> U.shape, V.shape, s.shape + ((9, 6), (6, 6), (6,)) + >>> S = np.diag(s) + >>> np.allclose(a, np.dot(U, np.dot(S, V))) + True + + """ + a, wrap = _makearray(a) + _assertNoEmpty2d(a) + _assertRankAtLeast2(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) + + m = a.shape[-2] + n = a.shape[-1] + if compute_uv: + if full_matrices: + if m < n: + gufunc = _umath_linalg.svd_m_f + else: + gufunc = _umath_linalg.svd_n_f + else: + if m < n: + gufunc = _umath_linalg.svd_m_s + else: + gufunc = _umath_linalg.svd_n_s + + signature = 'D->DdD' if isComplexType(t) else 'd->ddd' + u, s, vt = gufunc(a, signature=signature, extobj=extobj) + u = u.astype(result_t) + s = s.astype(_realType(result_t)) + vt = vt.astype(result_t) + return wrap(u), s, wrap(vt) + else: + if m < n: + gufunc = _umath_linalg.svd_m + else: + gufunc = _umath_linalg.svd_n + + signature = 'D->d' if isComplexType(t) else 'd->d' + s = gufunc(a, signature=signature, extobj=extobj) + s = s.astype(_realType(result_t)) + return s + +def cond(x, p=None): + """ + Compute the condition number of a matrix. + + This function is capable of returning the condition number using + one of seven different norms, depending on the value of `p` (see + Parameters below). + + Parameters + ---------- + x : (M, N) array_like + The matrix whose condition number is sought. + p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional + Order of the norm: + + ===== ============================ + p norm for matrices + ===== ============================ + None 2-norm, computed directly using the ``SVD`` + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 2-norm (largest sing. value) + -2 smallest singular value + ===== ============================ + + inf means the numpy.inf object, and the Frobenius norm is + the root-of-sum-of-squares norm. + + Returns + ------- + c : {float, inf} + The condition number of the matrix. May be infinite. + + See Also + -------- + numpy.linalg.norm + + Notes + ----- + The condition number of `x` is defined as the norm of `x` times the + norm of the inverse of `x` [1]_; the norm can be the usual L2-norm + (root-of-sum-of-squares) or one of a number of other matrix norms. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, + Academic Press, Inc., 1980, pg. 285. 
+ + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> a + array([[ 1, 0, -1], + [ 0, 1, 0], + [ 1, 0, 1]]) + >>> LA.cond(a) + 1.4142135623730951 + >>> LA.cond(a, 'fro') + 3.1622776601683795 + >>> LA.cond(a, np.inf) + 2.0 + >>> LA.cond(a, -np.inf) + 1.0 + >>> LA.cond(a, 1) + 2.0 + >>> LA.cond(a, -1) + 1.0 + >>> LA.cond(a, 2) + 1.4142135623730951 + >>> LA.cond(a, -2) + 0.70710678118654746 + >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0)) + 0.70710678118654746 + + """ + x = asarray(x) # in case we have a matrix + if p is None: + s = svd(x, compute_uv=False) + return s[0]/s[-1] + else: + return norm(x, p)*norm(inv(x), p) + + +def matrix_rank(M, tol=None): + """ + Return matrix rank of array using SVD method + + Rank of the array is the number of SVD singular values of the array that are + greater than `tol`. + + Parameters + ---------- + M : {(M,), (M, N)} array_like + array of <=2 dimensions + tol : {None, float}, optional + threshold below which SVD values are considered zero. If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M.shape) * eps``. + + Notes + ----- + The default threshold to detect rank deficiency is a test on the magnitude + of the singular values of `M`. By default, we identify singular values less + than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with + the symbols defined above). This is the algorithm MATLAB uses [1]. It also + appears in *Numerical recipes* in the discussion of SVD solutions for linear + least squares [2]. + + This default threshold is designed to detect rank deficiency accounting for + the numerical errors of the SVD computation. Imagine that there is a column + in `M` that is an exact (in floating point) linear combination of other + columns in `M`. Computing the SVD on `M` will not produce a singular value + exactly equal to 0 in general: any difference of the smallest SVD value from + 0 will be caused by numerical imprecision in the calculation of the SVD. + Our threshold for small SVD values takes this numerical imprecision into + account, and the default threshold will detect such numerical rank + deficiency. The threshold may declare a matrix `M` rank deficient even if + the linear combination of some columns of `M` is not exactly equal to + another column of `M` but only numerically very close to another column of + `M`. + + We chose our default threshold because it is in wide use. Other thresholds + are possible. For example, elsewhere in the 2007 edition of *Numerical + recipes* there is an alternative threshold of ``S.max() * + np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe + this threshold as being based on "expected roundoff error" (p 71). + + The thresholds above deal with floating point roundoff error in the + calculation of the SVD. However, you may have more information about the + sources of error in `M` that would make you consider other tolerance values + to detect *effective* rank deficiency. The most useful measure of the + tolerance depends on the operations you intend to use on your matrix. For + example, if your data come from uncertain measurements with uncertainties + greater than floating point epsilon, choosing a tolerance near that + uncertainty may be preferable. The tolerance may be absolute if the + uncertainties are absolute rather than relative. 
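+
+    For example, if the entries of `M` are only trusted to about ``1e-3``,
+    singular values below that level can be discarded by passing `tol`
+    explicitly:
+
+    >>> M = np.eye(4); M[-1, -1] = 1e-6
+    >>> np.linalg.matrix_rank(M)            # default tol keeps the 1e-6 value
+    4
+    >>> np.linalg.matrix_rank(M, tol=1e-3)  # now it is treated as zero
+    3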
+
+    References
+    ----------
+    .. [1] MATLAB reference documentation, "Rank"
+           http://www.mathworks.com/help/techdoc/ref/rank.html
+    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
+           "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
+           page 795.
+
+    Examples
+    --------
+    >>> from numpy.linalg import matrix_rank
+    >>> matrix_rank(np.eye(4)) # Full rank matrix
+    4
+    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
+    >>> matrix_rank(I)
+    3
+    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
+    1
+    >>> matrix_rank(np.zeros((4,)))
+    0
+    """
+    M = asarray(M)
+    if M.ndim > 2:
+        raise TypeError('array should have 2 or fewer dimensions')
+    if M.ndim < 2:
+        return int(not all(M==0))
+    S = svd(M, compute_uv=False)
+    if tol is None:
+        tol = S.max() * max(M.shape) * finfo(S.dtype).eps
+    return sum(S > tol)
+
+
+# Generalized inverse
+
+def pinv(a, rcond=1e-15):
+    """
+    Compute the (Moore-Penrose) pseudo-inverse of a matrix.
+
+    Calculate the generalized inverse of a matrix using its
+    singular-value decomposition (SVD) and including all
+    *large* singular values.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to be pseudo-inverted.
+    rcond : float
+        Cutoff for small singular values.
+        Singular values smaller (in modulus) than
+        `rcond` * largest_singular_value (again, in modulus)
+        are set to zero.
+
+    Returns
+    -------
+    B : (N, M) ndarray
+        The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
+        is `B`.
+
+    Raises
+    ------
+    LinAlgError
+        If the SVD computation does not converge.
+
+    Notes
+    -----
+    The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
+    defined as: "the matrix that 'solves' [the least-squares problem]
+    :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
+    :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
+
+    It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
+    value decomposition of A, then
+    :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
+    orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
+    of A's so-called singular values, (followed, typically, by
+    zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
+    consisting of the reciprocals of A's singular values
+    (again, followed by zeros). [1]_
+
+    References
+    ----------
+    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
+           FL, Academic Press, Inc., 1980, pp. 139-142.
+
+    Examples
+    --------
+    The following example checks that ``a * a+ * a == a`` and
+    ``a+ * a * a+ == a+``:
+
+    >>> a = np.random.randn(9, 6)
+    >>> B = np.linalg.pinv(a)
+    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
+    True
+    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
+    True
+
+    """
+    a, wrap = _makearray(a)
+    _assertNoEmpty2d(a)
+    a = a.conjugate()
+    u, s, vt = svd(a, 0)
+    m = u.shape[0]
+    n = vt.shape[1]
+    cutoff = rcond*maximum.reduce(s)
+    for i in range(min(n, m)):
+        if s[i] > cutoff:
+            s[i] = 1./s[i]
+        else:
+            s[i] = 0.
+    res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
+    return wrap(res)
+
+# Determinant
+
+def slogdet(a):
+    """
+    Compute the sign and (natural) logarithm of the determinant of an array.
+
+    If an array has a very small or very large determinant, then a call to
+    `det` may overflow or underflow. This routine is more robust against such
+    issues, because it computes the logarithm of the determinant rather than
+    the determinant itself.
+
+    Parameters
+    ----------
+    a : (..., M, M) array_like
+        Input array, has to be a square 2-D array.
+
+    Returns
+    -------
+    sign : (...) array_like
+        A number representing the sign of the determinant. For a real matrix,
+        this is 1, 0, or -1. For a complex matrix, this is a complex number
+        with absolute value 1 (i.e., it is on the unit circle), or else 0.
+    logdet : (...) array_like
+        The natural log of the absolute value of the determinant.
+
+    If the determinant is zero, then `sign` will be 0 and `logdet` will be
+    -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
+
+    See Also
+    --------
+    det
+
+    Notes
+    -----
+    Broadcasting rules apply, see the `numpy.linalg` documentation for
+    details.
+
+    The determinant is computed via LU factorization using the LAPACK
+    routine z/dgetrf.
+
+    .. versionadded:: 1.6.0
+
+    Examples
+    --------
+    The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> (sign, logdet) = np.linalg.slogdet(a)
+    >>> (sign, logdet)
+    (-1, 0.69314718055994529)
+    >>> sign * np.exp(logdet)
+    -2.0
+
+    Computing log-determinants for a stack of matrices:
+
+    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
+    >>> a.shape
+    (3, 2, 2)
+    >>> sign, logdet = np.linalg.slogdet(a)
+    >>> (sign, logdet)
+    (array([-1., -1., -1.]), array([ 0.69314718,  1.09861229,  2.07944154]))
+    >>> sign * np.exp(logdet)
+    array([-2., -3., -8.])
+
+    This routine succeeds where ordinary `det` does not:
+
+    >>> np.linalg.det(np.eye(500) * 0.1)
+    0.0
+    >>> np.linalg.slogdet(np.eye(500) * 0.1)
+    (1, -1151.2925464970228)
+
+    """
+    a = asarray(a)
+    _assertNoEmpty2d(a)
+    _assertRankAtLeast2(a)
+    _assertNdSquareness(a)
+    t, result_t = _commonType(a)
+    real_t = _realType(result_t)
+    signature = 'D->Dd' if isComplexType(t) else 'd->dd'
+    sign, logdet = _umath_linalg.slogdet(a, signature=signature)
+    return sign.astype(result_t), logdet.astype(real_t)
+
+def det(a):
+    """
+    Compute the determinant of an array.
+
+    Parameters
+    ----------
+    a : (..., M, M) array_like
+        Input array to compute determinants for.
+
+    Returns
+    -------
+    det : (...) array_like
+        Determinant of `a`.
+
+    See Also
+    --------
+    slogdet : Another way of representing the determinant, more suitable
+        for large matrices where underflow/overflow may occur.
+
+    Notes
+    -----
+    Broadcasting rules apply, see the `numpy.linalg` documentation for
+    details.
+
+    The determinant is computed via LU factorization using the LAPACK
+    routine z/dgetrf.
+
+    Examples
+    --------
+    The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.linalg.det(a)
+    -2.0
+
+    Computing determinants for a stack of matrices:
+
+    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
+    >>> a.shape
+    (3, 2, 2)
+    >>> np.linalg.det(a)
+    array([-2., -3., -8.])
+
+    """
+    a = asarray(a)
+    _assertNoEmpty2d(a)
+    _assertRankAtLeast2(a)
+    _assertNdSquareness(a)
+    t, result_t = _commonType(a)
+    signature = 'D->D' if isComplexType(t) else 'd->d'
+    return _umath_linalg.det(a, signature=signature).astype(result_t)
+
+# Linear Least Squares
+
+def lstsq(a, b, rcond=-1):
+    """
+    Return the least-squares solution to a linear matrix equation.
+
+    Solves the equation `a x = b` by computing a vector `x` that
+    minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
+    be under-, well-, or over-determined (i.e., the number of
+    linearly independent rows of `a` can be less than, equal to, or
+    greater than its number of linearly independent columns). If `a`
+    is square and of full rank, then `x` (but for round-off error) is
+    the "exact" solution of the equation.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        "Coefficient" matrix.
+    b : {(M,), (M, K)} array_like
+        Ordinate or "dependent variable" values. If `b` is two-dimensional,
+        the least-squares solution is calculated for each of the `K` columns
+        of `b`.
+    rcond : float, optional
+        Cut-off ratio for small singular values of `a`.
+        Singular values are set to zero if they are smaller than `rcond`
+        times the largest singular value of `a`.
+
+    Returns
+    -------
+    x : {(N,), (N, K)} ndarray
+        Least-squares solution. If `b` is two-dimensional,
+        the solutions are in the `K` columns of `x`.
+    residuals : {(), (1,), (K,)} ndarray
+        Sums of residuals; squared Euclidean 2-norm for each column in
+        ``b - a*x``.
+        If the rank of `a` is < N or M <= N, this is an empty array.
+        If `b` is 1-dimensional, this is a (1,) shape array.
+        Otherwise the shape is (K,).
+    rank : int
+        Rank of matrix `a`.
+    s : (min(M, N),) ndarray
+        Singular values of `a`.
+
+    Raises
+    ------
+    LinAlgError
+        If computation does not converge.
+
+    Notes
+    -----
+    If `b` is a matrix, then all array results are returned as matrices.
+
+    Examples
+    --------
+    Fit a line, ``y = mx + c``, through some noisy data-points:
+
+    >>> x = np.array([0, 1, 2, 3])
+    >>> y = np.array([-1, 0.2, 0.9, 2.1])
+
+    By examining the coefficients, we see that the line should have a
+    gradient of roughly 1 and cut the y-axis at, more or less, -1.
+
+    We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
+    and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
+
+    >>> A = np.vstack([x, np.ones(len(x))]).T
+    >>> A
+    array([[ 0.,  1.],
+           [ 1.,  1.],
+           [ 2.,  1.],
+           [ 3.,  1.]])
+
+    >>> m, c = np.linalg.lstsq(A, y)[0]
+    >>> print(m, c)
+    1.0 -0.95
+
+    Plot the data along with the fitted line:
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(x, y, 'o', label='Original data', markersize=10)
+    >>> plt.plot(x, m*x + c, 'r', label='Fitted line')
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+    import math
+    a, _ = _makearray(a)
+    b, wrap = _makearray(b)
+    is_1d = len(b.shape) == 1
+    if is_1d:
+        b = b[:, newaxis]
+    _assertRank2(a, b)
+    m = a.shape[0]
+    n = a.shape[1]
+    n_rhs = b.shape[1]
+    ldb = max(n, m)
+    if m != b.shape[0]:
+        raise LinAlgError('Incompatible dimensions')
+    t, result_t = _commonType(a, b)
+    result_real_t = _realType(result_t)
+    real_t = _linalgRealType(t)
+    bstar = zeros((ldb, n_rhs), t)
+    bstar[:b.shape[0], :n_rhs] = b.copy()
+    a, bstar = _fastCopyAndTranspose(t, a, bstar)
+    a, bstar = _to_native_byte_order(a, bstar)
+    s = zeros((min(m, n),), real_t)
+    nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
+    iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
+    if isComplexType(t):
+        lapack_routine = lapack_lite.zgelsd
+        lwork = 1
+        rwork = zeros((lwork,), real_t)
+        work = zeros((lwork,), t)
+        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
+                                 0, work, -1, rwork, iwork, 0)
+        lwork = int(abs(work[0]))
+        rwork = zeros((lwork,), real_t)
+        a_real = zeros((m, n), real_t)
+        bstar_real = zeros((ldb, n_rhs,), real_t)
+        results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
+                                     bstar_real, ldb, s, rcond,
+                                     0, rwork, -1, iwork, 0)
+        lrwork = int(rwork[0])
+        work = zeros((lwork,), t)
+        rwork = zeros((lrwork,), real_t)
+        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
+                                 0, work, lwork, rwork, iwork, 0)
+    else:
+        lapack_routine = lapack_lite.dgelsd
+        lwork = 1
+        work = zeros((lwork,), t)
+        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
+                                 0, work, -1, iwork, 0)
+        lwork = int(work[0])
+        work = zeros((lwork,), t)
+        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
+                                 0, work, lwork, iwork, 0)
+    if results['info'] > 0:
+        raise LinAlgError('SVD did not converge in Linear Least Squares')
+    resids = array([], result_real_t)
+    if is_1d:
+        x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
+        if results['rank'] == n and m > n:
+            if isComplexType(t):
+                resids = array([sum(abs(ravel(bstar)[n:])**2)],
+                               dtype=result_real_t)
+            else:
+                resids = array([sum((ravel(bstar)[n:])**2)],
+                               dtype=result_real_t)
+    else:
+        x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
+        if results['rank'] == n and m > n:
+            if isComplexType(t):
+                resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
+                    result_real_t)
+            else:
+                resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
+                    result_real_t)
+
+    st = s[:min(n, m)].copy().astype(result_real_t)
+    return wrap(x), wrap(resids), results['rank'], st
+
+
+def _multi_svd_norm(x, row_axis, col_axis, op):
+    """Compute the extreme singular values of the 2-D matrices in `x`.
+
+    This is a private utility function used by numpy.linalg.norm().
+
+    Parameters
+    ----------
+    x : ndarray
+    row_axis, col_axis : int
+        The axes of `x` that hold the 2-D matrices.
+    op : callable
+        This should be either numpy.amin or numpy.amax.
+
+    Returns
+    -------
+    result : float or ndarray
+        If `x` is 2-D, the return value is a float.
+        Otherwise, it is an array with ``x.ndim - 2`` dimensions.
+        The return values are either the minimum or maximum of the
+        singular values of the matrices, depending on whether `op`
+        is `numpy.amin` or `numpy.amax`.
+
+    """
+    if row_axis > col_axis:
+        row_axis -= 1
+    y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
+    result = op(svd(y, compute_uv=0), axis=-1)
+    return result
+
+
+def norm(x, ord=None, axis=None):
+    """
+    Matrix or vector norm.
+
+    This function is able to return one of seven different matrix norms,
+    or one of an infinite number of vector norms (described below), depending
+    on the value of the ``ord`` parameter.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array. If `axis` is None, `x` must be 1-D or 2-D.
+    ord : {non-zero int, inf, -inf, 'fro'}, optional
+        Order of the norm (see table under ``Notes``). inf means numpy's
+        `inf` object.
+    axis : {int, 2-tuple of ints, None}, optional
+        If `axis` is an integer, it specifies the axis of `x` along which to
+        compute the vector norms. If `axis` is a 2-tuple, it specifies the
+        axes that hold 2-D matrices, and the matrix norms of these matrices
+        are computed.
If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + Notes + ----- + For values of ``ord <= 0``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(a) + 7.745966692414834 + >>> LA.norm(b) + 7.745966692414834 + >>> LA.norm(b, 'fro') + 7.745966692414834 + >>> LA.norm(a, np.inf) + 4 + >>> LA.norm(b, np.inf) + 9 + >>> LA.norm(a, -np.inf) + 0 + >>> LA.norm(b, -np.inf) + 2 + + >>> LA.norm(a, 1) + 20 + >>> LA.norm(b, 1) + 7 + >>> LA.norm(a, -1) + -4.6566128774142013e-010 + >>> LA.norm(b, -1) + 6 + >>> LA.norm(a, 2) + 7.745966692414834 + >>> LA.norm(b, 2) + 7.3484692283495345 + + >>> LA.norm(a, -2) + nan + >>> LA.norm(b, -2) + 1.8570331885190563e-016 + >>> LA.norm(a, 3) + 5.8480354764257312 + >>> LA.norm(a, -3) + nan + + Using the `axis` argument to compute vector norms: + + >>> c = np.array([[ 1, 2, 3], + ... [-1, 1, 4]]) + >>> LA.norm(c, axis=0) + array([ 1.41421356, 2.23606798, 5. ]) + >>> LA.norm(c, axis=1) + array([ 3.74165739, 4.24264069]) + >>> LA.norm(c, ord=1, axis=1) + array([6, 6]) + + Using the `axis` argument to compute matrix norms: + + >>> m = np.arange(8).reshape(2,2,2) + >>> LA.norm(m, axis=(1,2)) + array([ 3.74165739, 11.22497216]) + >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) + (3.7416573867739413, 11.224972160321824) + + """ + x = asarray(x) + + # Check the default case first and handle it immediately. + if ord is None and axis is None: + x = x.ravel(order='K') + if isComplexType(x.dtype.type): + sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) + else: + sqnorm = dot(x, x) + return sqrt(sqnorm) + + # Normalize the `axis` argument to a tuple. 
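+    # After this normalization `axis` is always a tuple: one entry selects
+    # the vector-norm branch below, two entries the matrix-norm branch, and
+    # any other length is rejected at the end of the function.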
+ nd = x.ndim + if axis is None: + axis = tuple(range(nd)) + elif not isinstance(axis, tuple): + axis = (axis,) + + if len(axis) == 1: + if ord == Inf: + return abs(x).max(axis=axis) + elif ord == -Inf: + return abs(x).min(axis=axis) + elif ord == 0: + # Zero norm + return (x != 0).sum(axis=axis) + elif ord == 1: + # special case for speedup + return add.reduce(abs(x), axis=axis) + elif ord is None or ord == 2: + # special case for speedup + s = (x.conj() * x).real + return sqrt(add.reduce(s, axis=axis)) + else: + try: + ord + 1 + except TypeError: + raise ValueError("Invalid norm order for vectors.") + if x.dtype.type is longdouble: + # Convert to a float type, so integer arrays give + # float results. Don't apply asfarray to longdouble arrays, + # because it will downcast to float64. + absx = abs(x) + else: + absx = x if isComplexType(x.dtype.type) else asfarray(x) + if absx.dtype is x.dtype: + absx = abs(absx) + else: + # if the type changed, we can safely overwrite absx + abs(absx, out=absx) + absx **= ord + return add.reduce(absx, axis=axis) ** (1.0 / ord) + elif len(axis) == 2: + row_axis, col_axis = axis + if not (-nd <= row_axis < nd and -nd <= col_axis < nd): + raise ValueError('Invalid axis %r for an array with shape %r' % + (axis, x.shape)) + if row_axis % nd == col_axis % nd: + raise ValueError('Duplicate axes given.') + if ord == 2: + return _multi_svd_norm(x, row_axis, col_axis, amax) + elif ord == -2: + return _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + if col_axis > row_axis: + col_axis -= 1 + return add.reduce(abs(x), axis=row_axis).max(axis=col_axis) + elif ord == Inf: + if row_axis > col_axis: + row_axis -= 1 + return add.reduce(abs(x), axis=col_axis).max(axis=row_axis) + elif ord == -1: + if col_axis > row_axis: + col_axis -= 1 + return add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + elif ord == -Inf: + if row_axis > col_axis: + row_axis -= 1 + return add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + elif ord in [None, 'fro', 'f']: + return sqrt(add.reduce((x.conj() * x).real, axis=axis)) + else: + raise ValueError("Invalid norm order for matrices.") + else: + raise ValueError("Improper number of dimensions to norm.") diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/setup.py new file mode 100644 index 0000000000000..282c3423c93c5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/setup.py @@ -0,0 +1,56 @@ +from __future__ import division, print_function + +import os +import sys + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + from numpy.distutils.system_info import get_info + config = Configuration('linalg', parent_package, top_path) + + config.add_data_dir('tests') + + # Configure lapack_lite + + src_dir = 'lapack_lite' + lapack_lite_src = [ + os.path.join(src_dir, 'python_xerbla.c'), + os.path.join(src_dir, 'zlapack_lite.c'), + os.path.join(src_dir, 'dlapack_lite.c'), + os.path.join(src_dir, 'blas_lite.c'), + os.path.join(src_dir, 'dlamch.c'), + os.path.join(src_dir, 'f2c_lite.c'), + os.path.join(src_dir, 'f2c.h'), + ] + + lapack_info = get_info('lapack_opt', 0) # and {} + def get_lapack_lite_sources(ext, build_dir): + if not lapack_info: + print("### Warning: Using unoptimized lapack ###") + return ext.depends[:-1] + else: + if sys.platform=='win32': + print("### Warning: python_xerbla.c is disabled ###") + return ext.depends[:1] + return ext.depends[:2] + + 
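+    # Note: numpy.distutils allows callables in an extension's `sources`
+    # list; each is invoked at build time and returns the actual source
+    # list, which is how the optimized-vs-bundled LAPACK choice above is
+    # deferred until the build runs.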
config.add_extension('lapack_lite', + sources = [get_lapack_lite_sources], + depends = ['lapack_litemodule.c'] + lapack_lite_src, + extra_info = lapack_info + ) + + # umath_linalg module + + config.add_extension('_umath_linalg', + sources = [get_lapack_lite_sources], + depends = ['umath_linalg.c.src'] + lapack_lite_src, + extra_info = lapack_info, + libraries = ['npymath'], + ) + + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_build.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_build.py new file mode 100644 index 0000000000000..0d237c81cb866 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_build.py @@ -0,0 +1,53 @@ +from __future__ import division, absolute_import, print_function + +from subprocess import call, PIPE, Popen +import sys +import re + +import numpy as np +from numpy.linalg import lapack_lite +from numpy.testing import TestCase, dec + +from numpy.compat import asbytes_nested + +class FindDependenciesLdd(object): + def __init__(self): + self.cmd = ['ldd'] + + try: + p = Popen(self.cmd, stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + except OSError: + raise RuntimeError("command %s cannot be run" % self.cmd) + + def get_dependencies(self, lfile): + p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + if not (p.returncode == 0): + raise RuntimeError("failed dependencies check for %s" % lfile) + + return stdout + + def grep_dependencies(self, lfile, deps): + stdout = self.get_dependencies(lfile) + + rdeps = dict([(dep, re.compile(dep)) for dep in deps]) + founds = [] + for l in stdout.splitlines(): + for k, v in rdeps.items(): + if v.search(l): + founds.append(k) + + return founds + +class TestF77Mismatch(TestCase): + @dec.skipif(not(sys.platform[:5] == 'linux'), + "Skipping fortran compiler mismatch on non Linux platform") + def test_lapack(self): + f = FindDependenciesLdd() + deps = f.grep_dependencies(lapack_lite.__file__, + asbytes_nested(['libg2c', 'libgfortran'])) + self.assertFalse(len(deps) > 1, +"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to +cause random crashes and wrong results. See numpy INSTALL.txt for more +information.""") diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_deprecations.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_deprecations.py new file mode 100644 index 0000000000000..13d244199733e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_deprecations.py @@ -0,0 +1,24 @@ +"""Test deprecation and future warnings. + +""" +import numpy as np +from numpy.testing import assert_warns, run_module_suite + + +def test_qr_mode_full_future_warning(): + """Check mode='full' FutureWarning. + + In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were + deprecated. The release date will probably be sometime in the summer + of 2013. 
+ + """ + a = np.eye(2) + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_linalg.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_linalg.py new file mode 100644 index 0000000000000..8edf36aa67e79 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_linalg.py @@ -0,0 +1,1153 @@ +""" Test functions for linalg module + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import itertools +import traceback + +import numpy as np +from numpy import array, single, double, csingle, cdouble, dot, identity +from numpy import multiply, atleast_2d, inf, asarray, matrix +from numpy import linalg +from numpy.linalg import matrix_power, norm, matrix_rank +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, + assert_almost_equal, assert_allclose, run_module_suite, + dec +) + + +def ifthen(a, b): + return not a or b + + +def imply(a, b): + return not a or b + + +old_assert_almost_equal = assert_almost_equal + + +def assert_almost_equal(a, b, **kw): + if asarray(a).dtype.type in (single, csingle): + decimal = 6 + else: + decimal = 12 + old_assert_almost_equal(a, b, decimal=decimal, **kw) + + +def get_real_dtype(dtype): + return {single: single, double: double, + csingle: single, cdouble: double}[dtype] + + +def get_complex_dtype(dtype): + return {single: csingle, double: cdouble, + csingle: csingle, cdouble: cdouble}[dtype] + +def get_rtol(dtype): + # Choose a safe rtol + if dtype in (single, csingle): + return 1e-5 + else: + return 1e-11 + +class LinalgCase(object): + def __init__(self, name, a, b, exception_cls=None): + assert isinstance(name, str) + self.name = name + self.a = a + self.b = b + self.exception_cls = exception_cls + + def check(self, do): + if self.exception_cls is None: + do(self.a, self.b) + else: + assert_raises(self.exception_cls, do, self.a, self.b) + + def __repr__(self): + return "" % (self.name,) + + +# +# Base test cases +# + +np.random.seed(1234) + +SQUARE_CASES = [ + LinalgCase("single", + array([[1., 2.], [3., 4.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("double", + array([[1., 2.], [3., 4.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_2", + array([[1., 2.], [3., 4.]], dtype=double), + array([[2., 1., 4.], [3., 4., 6.]], dtype=double)), + LinalgCase("csingle", + array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=csingle), + array([2.+1j, 1.+2j], dtype=csingle)), + LinalgCase("cdouble", + array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble), + array([2.+1j, 1.+2j], dtype=cdouble)), + LinalgCase("cdouble_2", + array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble), + array([[2.+1j, 1.+2j, 1+3j], [1-2j, 1-3j, 1-6j]], dtype=cdouble)), + LinalgCase("empty", + atleast_2d(array([], dtype = double)), + atleast_2d(array([], dtype = double)), + linalg.LinAlgError), + LinalgCase("8x8", + np.random.rand(8, 8), + np.random.rand(8)), + LinalgCase("1x1", + np.random.rand(1, 1), + np.random.rand(1)), + LinalgCase("nonarray", + [[1, 2], [3, 4]], + [2, 1]), + LinalgCase("matrix_b_only", + array([[1., 2.], [3., 4.]]), + matrix([2., 1.]).T), + LinalgCase("matrix_a_and_b", + matrix([[1., 2.], [3., 4.]]), + 
matrix([2., 1.]).T), +] + +NONSQUARE_CASES = [ + LinalgCase("single_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("single_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=single), + array([2., 1., 3.], dtype=single)), + LinalgCase("double_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=double), + array([2., 1., 3.], dtype=double)), + LinalgCase("csingle_nsq_1", + array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=csingle), + array([2.+1j, 1.+2j], dtype=csingle)), + LinalgCase("csingle_nsq_2", + array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=csingle), + array([2.+1j, 1.+2j, 3.-3j], dtype=csingle)), + LinalgCase("cdouble_nsq_1", + array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble), + array([2.+1j, 1.+2j], dtype=cdouble)), + LinalgCase("cdouble_nsq_2", + array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble), + array([2.+1j, 1.+2j, 3.-3j], dtype=cdouble)), + LinalgCase("cdouble_nsq_1_2", + array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble), + array([[2.+1j, 1.+2j], [1-1j, 2-2j]], dtype=cdouble)), + LinalgCase("cdouble_nsq_2_2", + array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble), + array([[2.+1j, 1.+2j], [1-1j, 2-2j], [1-1j, 2-2j]], dtype=cdouble)), + LinalgCase("8x11", + np.random.rand(8, 11), + np.random.rand(11)), + LinalgCase("1x5", + np.random.rand(1, 5), + np.random.rand(5)), + LinalgCase("5x1", + np.random.rand(5, 1), + np.random.rand(1)), +] + +HERMITIAN_CASES = [ + LinalgCase("hsingle", + array([[1., 2.], [2., 1.]], dtype=single), + None), + LinalgCase("hdouble", + array([[1., 2.], [2., 1.]], dtype=double), + None), + LinalgCase("hcsingle", + array([[1., 2+3j], [2-3j, 1]], dtype=csingle), + None), + LinalgCase("hcdouble", + array([[1., 2+3j], [2-3j, 1]], dtype=cdouble), + None), + LinalgCase("hempty", + atleast_2d(array([], dtype = double)), + None, + linalg.LinAlgError), + LinalgCase("hnonarray", + [[1, 2], [2, 1]], + None), + LinalgCase("matrix_b_only", + array([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_a_and_b", + matrix([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_1x1", + np.random.rand(1, 1), + None), +] + + +# +# Gufunc test cases +# + +GENERALIZED_SQUARE_CASES = [] +GENERALIZED_NONSQUARE_CASES = [] +GENERALIZED_HERMITIAN_CASES = [] + +for tgt, src in ((GENERALIZED_SQUARE_CASES, SQUARE_CASES), + (GENERALIZED_NONSQUARE_CASES, NONSQUARE_CASES), + (GENERALIZED_HERMITIAN_CASES, HERMITIAN_CASES)): + for case in src: + if not isinstance(case.a, np.ndarray): + continue + + a = np.array([case.a, 2*case.a, 3*case.a]) + if case.b is None: + b = None + else: + b = np.array([case.b, 7*case.b, 6*case.b]) + new_case = LinalgCase(case.name + "_tile3", a, b, + case.exception_cls) + tgt.append(new_case) + + a = np.array([case.a]*2*3).reshape((3, 2) + case.a.shape) + if case.b is None: + b = None + else: + b = np.array([case.b]*2*3).reshape((3, 2) + case.b.shape) + new_case = LinalgCase(case.name + "_tile213", a, b, + case.exception_cls) + tgt.append(new_case) + +# +# Generate stride combination variations of the above +# + +def _stride_comb_iter(x): + """ + Generate cartesian product of strides for all axes + """ + + if not isinstance(x, np.ndarray): + yield x, "nop" + return + + stride_set = [(1,)]*x.ndim + stride_set[-1] = (1, 3, -4) + if x.ndim > 1: + stride_set[-2] = (1, 3, -4) + if x.ndim 
> 2:
+        stride_set[-3] = (1, -4)
+
+    for repeats in itertools.product(*tuple(stride_set)):
+        new_shape = [abs(a*b) for a, b in zip(x.shape, repeats)]
+        slices = tuple([slice(None, None, repeat) for repeat in repeats])
+
+        # new array with different strides, but same data
+        xi = np.empty(new_shape, dtype=x.dtype)
+        xi.view(np.uint32).fill(0xdeadbeef)
+        xi = xi[slices]
+        xi[...] = x
+        xi = xi.view(x.__class__)
+        assert np.all(xi == x)
+        yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
+
+    # generate also zero strides if possible
+    if x.ndim >= 1 and x.shape[-1] == 1:
+        s = list(x.strides)
+        s[-1] = 0
+        xi = np.lib.stride_tricks.as_strided(x, strides=s)
+        yield xi, "stride_xxx_0"
+    if x.ndim >= 2 and x.shape[-2] == 1:
+        s = list(x.strides)
+        s[-2] = 0
+        xi = np.lib.stride_tricks.as_strided(x, strides=s)
+        yield xi, "stride_xxx_0_x"
+    if x.ndim >= 2 and x.shape[:-2] == (1, 1):
+        s = list(x.strides)
+        s[-1] = 0
+        s[-2] = 0
+        xi = np.lib.stride_tricks.as_strided(x, strides=s)
+        yield xi, "stride_xxx_0_0"
+
+for src in (SQUARE_CASES,
+            NONSQUARE_CASES,
+            HERMITIAN_CASES,
+            GENERALIZED_SQUARE_CASES,
+            GENERALIZED_NONSQUARE_CASES,
+            GENERALIZED_HERMITIAN_CASES):
+
+    new_cases = []
+    for case in src:
+        for a, a_tag in _stride_comb_iter(case.a):
+            for b, b_tag in _stride_comb_iter(case.b):
+                new_case = LinalgCase(case.name + "_" + a_tag + "_" + b_tag, a, b,
+                                      exception_cls=case.exception_cls)
+                new_cases.append(new_case)
+    src.extend(new_cases)
+
+
+#
+# Test different routines against the above cases
+#
+
+def _check_cases(func, cases):
+    for case in cases:
+        try:
+            case.check(func)
+        except Exception:
+            msg = "In test case: %r\n\n" % case
+            msg += traceback.format_exc()
+            raise AssertionError(msg)
+
+class LinalgTestCase(object):
+    def test_sq_cases(self):
+        _check_cases(self.do, SQUARE_CASES)
+
+
+class LinalgNonsquareTestCase(object):
+    # Named distinctly from LinalgTestCase.test_sq_cases so that classes
+    # inheriting from both actually run the nonsquare cases instead of
+    # silently shadowing this method.
+    def test_nonsq_cases(self):
+        _check_cases(self.do, NONSQUARE_CASES)
+
+
+class LinalgGeneralizedTestCase(object):
+    @dec.slow
+    def test_generalized_sq_cases(self):
+        _check_cases(self.do, GENERALIZED_SQUARE_CASES)
+
+
+class LinalgGeneralizedNonsquareTestCase(object):
+    @dec.slow
+    def test_generalized_nonsq_cases(self):
+        _check_cases(self.do, GENERALIZED_NONSQUARE_CASES)
+
+
+class HermitianTestCase(object):
+    def test_herm_cases(self):
+        _check_cases(self.do, HERMITIAN_CASES)
+
+
+class HermitianGeneralizedTestCase(object):
+    @dec.slow
+    def test_generalized_herm_cases(self):
+        _check_cases(self.do, GENERALIZED_HERMITIAN_CASES)
+
+
+def dot_generalized(a, b):
+    a = asarray(a)
+    if a.ndim >= 3:
+        if a.ndim == b.ndim:
+            # matrix x matrix
+            new_shape = a.shape[:-1] + b.shape[-1:]
+        elif a.ndim == b.ndim + 1:
+            # matrix x vector
+            new_shape = a.shape[:-1]
+        else:
+            raise ValueError("Not implemented...")
+        r = np.empty(new_shape, dtype=np.common_type(a, b))
+        for c in itertools.product(*map(range, a.shape[:-2])):
+            r[c] = dot(a[c], b[c])
+        return r
+    else:
+        return dot(a, b)
+
+
+def identity_like_generalized(a):
+    a = asarray(a)
+    if a.ndim >= 3:
+        r = np.empty(a.shape, dtype=a.dtype)
+        for c in itertools.product(*map(range, a.shape[:-2])):
+            r[c] = identity(a.shape[-2])
+        return r
+    else:
+        return identity(a.shape[0])
+
+
+class TestSolve(LinalgTestCase, LinalgGeneralizedTestCase):
+    def do(self, a, b):
+        x = linalg.solve(a, b)
+        assert_almost_equal(b, dot_generalized(a, x))
+        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
+
+    def test_types(self):
+        def check(dtype):
+            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
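+            # solving a system against itself must preserve the dtype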
assert_equal(linalg.solve(x, x).dtype, dtype) + for dtype in [single, double, csingle, cdouble]: + yield check, dtype + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + # Test system of 0x0 matrices + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, 0:0,:] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0,:]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # Test errors for non-square and only b's dimension being 0 + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0,:]) + + # Test broadcasting error + b = np.arange(6).reshape(1, 3, 2) # broadcasting error + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + + # Test zero "single equations" with 0x0 matrices. + b = np.arange(2).reshape(1, 2).view(ArraySubclass) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + b = np.arange(3).reshape(1, 3) + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) + + def test_0_size_k(self): + # test zero multiple equation (K=0) case. + class ArraySubclass(np.ndarray): + pass + a = np.arange(4).reshape(1, 2, 2) + b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) + + expected = linalg.solve(a, b)[:,:, 0:0] + result = linalg.solve(a, b[:,:, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # test both zero. + expected = linalg.solve(a, b)[:, 0:0, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:,0:0, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + +class TestInv(LinalgTestCase, LinalgGeneralizedTestCase): + def do(self, a, b): + a_inv = linalg.inv(a) + assert_almost_equal(dot_generalized(a, a_inv), + identity_like_generalized(a)) + assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix))) + + def test_types(self): + def check(dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.inv(x).dtype, dtype) + for dtype in [single, double, csingle, cdouble]: + yield check, dtype + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res.shape) + assert_(isinstance(a, ArraySubclass)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res.shape) + + +class TestEigvals(LinalgTestCase, LinalgGeneralizedTestCase): + def do(self, a, b): + ev = linalg.eigvals(a) + evalues, evectors = linalg.eig(a) + assert_almost_equal(ev, evalues) + + def test_types(self): + def check(dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, dtype) + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) + for dtype in [single, double, csingle, cdouble]: + yield check, dtype + + +class TestEig(LinalgTestCase, LinalgGeneralizedTestCase): + def do(self, a, b): + evalues, evectors = linalg.eig(a) + 
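+        # A @ v should equal v * w column-wise; evalues[..., None, :]
+        # scales each eigenvector column by its eigenvalue.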
assert_allclose(dot_generalized(a, evectors), + np.asarray(evectors) * np.asarray(evalues)[...,None,:], + rtol=get_rtol(evalues.dtype)) + assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix))) + + def test_types(self): + def check(dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, dtype) + assert_equal(v.dtype, dtype) + + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) + + for dtype in [single, double, csingle, cdouble]: + yield check, dtype + + +class TestSVD(LinalgTestCase, LinalgGeneralizedTestCase): + def do(self, a, b): + u, s, vt = linalg.svd(a, 0) + assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[...,None,:], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + assert_(imply(isinstance(a, matrix), isinstance(u, matrix))) + assert_(imply(isinstance(a, matrix), isinstance(vt, matrix))) + + def test_types(self): + def check(dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + u, s, vh = linalg.svd(x) + assert_equal(u.dtype, dtype) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(vh.dtype, dtype) + s = linalg.svd(x, compute_uv=False) + assert_equal(s.dtype, get_real_dtype(dtype)) + + for dtype in [single, double, csingle, cdouble]: + yield check, dtype + + +class TestCondSVD(LinalgTestCase, LinalgGeneralizedTestCase): + def do(self, a, b): + c = asarray(a) # a might be a matrix + s = linalg.svd(c, compute_uv=False) + old_assert_almost_equal(s[0]/s[-1], linalg.cond(a), decimal=5) + + +class TestCond2(LinalgTestCase): + def do(self, a, b): + c = asarray(a) # a might be a matrix + s = linalg.svd(c, compute_uv=False) + old_assert_almost_equal(s[0]/s[-1], linalg.cond(a, 2), decimal=5) + + +class TestCondInf(object): + def test(self): + A = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]]) + assert_almost_equal(linalg.cond(A, inf), 3.) 
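+
+
+# Illustrative companion to the cond() tests above (an added sketch, not
+# part of the upstream suite): for an explicit `p`, cond(x, p) is evaluated
+# as norm(x, p) * norm(inv(x), p), so a diagonal matrix gives an exact,
+# hand-checkable value.
+class TestCondDiag(object):
+    def test_diag_1norm(self):
+        A = array([[1., 0, 0], [0, 2., 0], [0, 0, 4.]])
+        # norm(A, 1) == 4 and inv(A) = diag(1, 1/2, 1/4) has 1-norm == 1.
+        assert_almost_equal(linalg.cond(A, 1), 4.)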
+ + +class TestPinv(LinalgTestCase): + def do(self, a, b): + a_ginv = linalg.pinv(a) + assert_almost_equal(dot(a, a_ginv), identity(asarray(a).shape[0])) + assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix))) + + +class TestDet(LinalgTestCase, LinalgGeneralizedTestCase): + def do(self, a, b): + d = linalg.det(a) + (s, ld) = linalg.slogdet(a) + if asarray(a).dtype.type in (single, double): + ad = asarray(a).astype(double) + else: + ad = asarray(a).astype(cdouble) + ev = linalg.eigvals(ad) + assert_almost_equal(d, multiply.reduce(ev, axis=-1)) + assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) + + s = np.atleast_1d(s) + ld = np.atleast_1d(ld) + m = (s != 0) + assert_almost_equal(np.abs(s[m]), 1) + assert_equal(ld[~m], -inf) + + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + + def test_types(self): + def check(dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(np.linalg.det(x).dtype, dtype) + ph, s = np.linalg.slogdet(x) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(ph.dtype, dtype) + for dtype in [single, double, csingle, cdouble]: + yield check, dtype + + +class TestLstsq(LinalgTestCase, LinalgNonsquareTestCase): + def do(self, a, b): + arr = np.asarray(a) + m, n = arr.shape + u, s, vt = linalg.svd(a, 0) + x, residuals, rank, sv = linalg.lstsq(a, b) + if m <= n: + assert_almost_equal(b, dot(a, x)) + assert_equal(rank, m) + else: + assert_equal(rank, n) + assert_almost_equal(sv, sv.__array_wrap__(s)) + if rank == n and m > n: + expect_resids = (np.asarray(abs(np.dot(a, x) - b))**2).sum(axis=0) + expect_resids = np.asarray(expect_resids) + if len(np.asarray(b).shape) == 1: + expect_resids.shape = (1,) + assert_equal(residuals.shape, expect_resids.shape) + else: + expect_resids = np.array([]).view(type(x)) + assert_almost_equal(residuals, expect_resids) + assert_(np.issubdtype(residuals.dtype, np.floating)) + assert_(imply(isinstance(b, matrix), isinstance(x, matrix))) + assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix))) + + +class TestMatrixPower(object): + R90 = array([[0, 1], [-1, 0]]) + Arb22 = array([[4, -7], [-2, 10]]) + noninv = array([[1, 0], [0, 0]]) + arbfloat = array([[0.1, 3.2], [1.2, 0.7]]) + + large = identity(10) + t = large[1,:].copy() + large[1,:] = large[0,:] + large[0,:] = t + + def test_large_power(self): + assert_equal(matrix_power(self.R90, 2**100+2**10+2**5+1), self.R90) + + def test_large_power_trailing_zero(self): + assert_equal(matrix_power(self.R90, 2**100+2**10+2**5), identity(2)) + + def testip_zero(self): + def tz(M): + mz = matrix_power(M, 0) + assert_equal(mz, identity(M.shape[0])) + assert_equal(mz.dtype, M.dtype) + for M in [self.Arb22, self.arbfloat, self.large]: + yield tz, M + + def testip_one(self): + def tz(M): + mz = matrix_power(M, 1) + assert_equal(mz, M) + assert_equal(mz.dtype, M.dtype) + for M in [self.Arb22, self.arbfloat, self.large]: + yield tz, M + + def testip_two(self): + def tz(M): + mz = matrix_power(M, 2) + assert_equal(mz, dot(M, M)) + assert_equal(mz.dtype, 
M.dtype) + for M in [self.Arb22, self.arbfloat, self.large]: + yield tz, M + + def testip_invert(self): + def tz(M): + mz = matrix_power(M, -1) + assert_almost_equal(identity(M.shape[0]), dot(mz, M)) + for M in [self.R90, self.Arb22, self.arbfloat, self.large]: + yield tz, M + + def test_invert_noninvertible(self): + import numpy.linalg + assert_raises(numpy.linalg.linalg.LinAlgError, + lambda: matrix_power(self.noninv, -1)) + + +class TestBoolPower(object): + def test_square(self): + A = array([[True, False], [True, True]]) + assert_equal(matrix_power(A, 2), A) + + +class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase): + def do(self, a, b): + # note that eigenvalue arrays must be sorted since + # their order isn't guaranteed. + ev = linalg.eigvalsh(a, 'L') + evalues, evectors = linalg.eig(a) + ev.sort(axis=-1) + evalues.sort(axis=-1) + assert_allclose(ev, evalues, + rtol=get_rtol(ev.dtype)) + + ev2 = linalg.eigvalsh(a, 'U') + ev2.sort(axis=-1) + assert_allclose(ev2, evalues, + rtol=get_rtol(ev.dtype)) + + def test_types(self): + def check(dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w = np.linalg.eigvalsh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + for dtype in [single, double, csingle, cdouble]: + yield check, dtype + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") + assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0],[1, 0]], dtype=np.double) + Kup = np.array([[0, 1],[0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w = np.linalg.eigvalsh(Klo) + assert_allclose(np.sort(w), tgt, rtol=rtol) + # Check 'L' + w = np.linalg.eigvalsh(Klo, UPLO='L') + assert_allclose(np.sort(w), tgt, rtol=rtol) + # Check 'l' + w = np.linalg.eigvalsh(Klo, UPLO='l') + assert_allclose(np.sort(w), tgt, rtol=rtol) + # Check 'U' + w = np.linalg.eigvalsh(Kup, UPLO='U') + assert_allclose(np.sort(w), tgt, rtol=rtol) + # Check 'u' + w = np.linalg.eigvalsh(Kup, UPLO='u') + assert_allclose(np.sort(w), tgt, rtol=rtol) + + +class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase): + def do(self, a, b): + # note that eigenvalue arrays must be sorted since + # their order isn't guaranteed. 
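+        # (eig() in particular makes no ordering promise, so both results
+        # are sorted along the last axis before comparison.)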
+ ev, evc = linalg.eigh(a) + evalues, evectors = linalg.eig(a) + ev.sort(axis=-1) + evalues.sort(axis=-1) + assert_almost_equal(ev, evalues) + + assert_allclose(dot_generalized(a, evc), + np.asarray(ev)[...,None,:] * np.asarray(evc), + rtol=get_rtol(ev.dtype)) + + ev2, evc2 = linalg.eigh(a, 'U') + ev2.sort(axis=-1) + assert_almost_equal(ev2, evalues) + + assert_allclose(dot_generalized(a, evc2), + np.asarray(ev2)[...,None,:] * np.asarray(evc2), + rtol=get_rtol(ev.dtype), err_msg=repr(a)) + + def test_types(self): + def check(dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eigh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + assert_equal(v.dtype, dtype) + for dtype in [single, double, csingle, cdouble]: + yield check, dtype + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigh, x, "lower") + assert_raises(ValueError, np.linalg.eigh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0],[1, 0]], dtype=np.double) + Kup = np.array([[0, 1],[0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w, v = np.linalg.eigh(Klo) + assert_allclose(np.sort(w), tgt, rtol=rtol) + # Check 'L' + w, v = np.linalg.eigh(Klo, UPLO='L') + assert_allclose(np.sort(w), tgt, rtol=rtol) + # Check 'l' + w, v = np.linalg.eigh(Klo, UPLO='l') + assert_allclose(np.sort(w), tgt, rtol=rtol) + # Check 'U' + w, v = np.linalg.eigh(Kup, UPLO='U') + assert_allclose(np.sort(w), tgt, rtol=rtol) + # Check 'u' + w, v = np.linalg.eigh(Kup, UPLO='u') + assert_allclose(np.sort(w), tgt, rtol=rtol) + + +class _TestNorm(object): + + dt = None + dec = None + + def test_empty(self): + assert_equal(norm([]), 0.0) + assert_equal(norm(array([], dtype=self.dt)), 0.0) + assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + + def test_vector(self): + a = [1, 2, 3, 4] + b = [-1, -2, -3, -4] + c = [-1, 2, -3, 4] + + def _test(v): + np.testing.assert_almost_equal(norm(v), 30**0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, inf), 4.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -inf), 1.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 1), 10.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -1), 12.0/25, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 2), 30**0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -2), ((205./144)**-0.5), + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 0), 4, + decimal=self.dec) + + for v in (a, b, c,): + _test(v) + + for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), + array(c, dtype=self.dt)): + _test(v) + + def test_matrix(self): + A = matrix([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84**0.5) + assert_almost_equal(norm(A, 'fro'), 84**0.5) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_axis(self): + # Vector norms. + # Compare the use of `axis` with computing the norm of each row + # or column separately. 
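+        # (axis=0 reduces over rows, giving one norm per column; axis=1
+        # reduces over columns, giving one norm per row.)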
+ A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] + assert_almost_equal(norm(A, ord=order, axis=0), expected0) + expected1 = [norm(A[k,:], ord=order) for k in range(A.shape[0])] + assert_almost_equal(norm(A, ord=order, axis=1), expected1) + + # Matrix norms. + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']: + assert_almost_equal(norm(A, ord=order), norm(A, ord=order, + axis=(0, 1))) + + n = norm(B, ord=order, axis=(1, 2)) + expected = [norm(B[k], ord=order) for k in range(B.shape[0])] + assert_almost_equal(n, expected) + + n = norm(B, ord=order, axis=(2, 1)) + expected = [norm(B[k].T, ord=order) for k in range(B.shape[0])] + assert_almost_equal(n, expected) + + n = norm(B, ord=order, axis=(0, 2)) + expected = [norm(B[:, k,:], ord=order) for k in range(B.shape[1])] + assert_almost_equal(n, expected) + + n = norm(B, ord=order, axis=(0, 1)) + expected = [norm(B[:,:, k], ord=order) for k in range(B.shape[2])] + assert_almost_equal(n, expected) + + def test_bad_args(self): + # Check that bad arguments raise the appropriate exceptions. + + A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + # Using `axis=` or passing in a 1-D array implies vector + # norms are being computed, so also using `ord='fro'` raises a + # ValueError. + assert_raises(ValueError, norm, A, 'fro', 0) + assert_raises(ValueError, norm, [3, 4], 'fro', None) + + # Similarly, norm should raise an exception when ord is any finite + # number other than 1, 2, -1 or -2 when computing matrix norms. + for order in [0, 3]: + assert_raises(ValueError, norm, A, order, None) + assert_raises(ValueError, norm, A, order, (0, 1)) + assert_raises(ValueError, norm, B, order, (1, 2)) + + # Invalid axis + assert_raises(ValueError, norm, B, None, 3) + assert_raises(ValueError, norm, B, None, (2, 3)) + assert_raises(ValueError, norm, B, None, (0, 1, 2)) + + def test_longdouble_norm(self): + # Non-regression test: p-norm of longdouble would previously raise + # UnboundLocalError. + x = np.arange(10, dtype=np.longdouble) + old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) + + def test_intmin(self): + # Non-regression test: p-norm of signed integer would previously do + # float cast and abs in the wrong order. + x = np.array([-2 ** 31], dtype=np.int32) + old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) + + def test_complex_high_ord(self): + # gh-4156 + d = np.empty((2,), dtype=np.clongdouble) + d[0] = 6+7j + d[1] = -6+7j + res = 11.615898132184 + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) + d = d.astype(np.complex128) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) + d = d.astype(np.complex64) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) + + +class TestNormDouble(_TestNorm): + dt = np.double + dec = 12 + + +class TestNormSingle(_TestNorm): + dt = np.float32 + dec = 6 + + +class TestNormInt64(_TestNorm): + dt = np.int64 + dec = 12 + + +class TestMatrixRank(object): + def test_matrix_rank(self): + # Full rank matrix + yield assert_equal, 4, matrix_rank(np.eye(4)) + # rank deficient matrix + I=np.eye(4); I[-1, -1] = 0. 
+ yield assert_equal, matrix_rank(I), 3 + # All zeros - zero rank + yield assert_equal, matrix_rank(np.zeros((4, 4))), 0 + # 1 dimension - rank 1 unless all 0 + yield assert_equal, matrix_rank([1, 0, 0, 0]), 1 + yield assert_equal, matrix_rank(np.zeros((4,))), 0 + # accepts array-like + yield assert_equal, matrix_rank([1]), 1 + # greater than 2 dimensions raises error + yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2)) + # works on scalar + yield assert_equal, matrix_rank(1), 1 + + +def test_reduced_rank(): + # Test matrices with reduced rank + rng = np.random.RandomState(20120714) + for i in range(100): + # Make a rank deficient matrix + X = rng.normal(size=(40, 10)) + X[:, 0] = X[:, 1] + X[:, 2] + # Assert that matrix_rank detected deficiency + assert_equal(matrix_rank(X), 9) + X[:, 3] = X[:, 4] + X[:, 5] + assert_equal(matrix_rank(X), 8) + + +class TestQR(object): + + def check_qr(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. + a_type = type(a) + a_dtype = a.dtype + m, n = a.shape + k = min(m, n) + + # mode == 'complete' + q, r = linalg.qr(a, mode='complete') + assert_(q.dtype == a_dtype) + assert_(r.dtype == a_dtype) + assert_(isinstance(q, a_type)) + assert_(isinstance(r, a_type)) + assert_(q.shape == (m, m)) + assert_(r.shape == (m, n)) + assert_almost_equal(dot(q, r), a) + assert_almost_equal(dot(q.T.conj(), q), np.eye(m)) + assert_almost_equal(np.triu(r), r) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape == (m, k)) + assert_(r1.shape == (k, n)) + assert_almost_equal(dot(q1, r1), a) + assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) + assert_almost_equal(np.triu(r1), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + def test_qr_empty(self): + a = np.zeros((0, 2)) + assert_raises(linalg.LinAlgError, linalg.qr, a) + + def test_mode_raw(self): + # The factorization is not unique and varies between libraries, + # so it is not possible to check against known values. Functional + # testing is a possibility, but awaits the exposure of more + # of the functions in lapack_lite. Consequently, this test is + # very limited in scope. Note that the results are in FORTRAN + # order, hence the h arrays are transposed. 
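+        # ('raw' returns the Householder factorization straight from
+        # LAPACK's *geqrf: the packed reflectors in `h` and the `tau`
+        # scalars, so only shapes and dtypes are checked here.)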
+ a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + b = a.astype(np.single) + + # Test double + h, tau = linalg.qr(a, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (2, 3)) + assert_(tau.shape == (2,)) + + h, tau = linalg.qr(a.T, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (3, 2)) + assert_(tau.shape == (2,)) + + def test_mode_all_but_economic(self): + a = array([[1, 2], [3, 4]]) + b = array([[1, 2], [3, 4], [5, 6]]) + for dt in "fd": + m1 = a.astype(dt) + m2 = b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + self.check_qr(matrix(m1)) + for dt in "fd": + m1 = 1 + 1j * a.astype(dt) + m2 = 1 + 1j * b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + self.check_qr(matrix(m1)) + + +def test_byteorder_check(): + # Byte order check should pass for native order + if sys.byteorder == 'little': + native = '<' + else: + native = '>' + + for dtt in (np.float32, np.float64): + arr = np.eye(4, dtype=dtt) + n_arr = arr.newbyteorder(native) + sw_arr = arr.newbyteorder('S').byteswap() + assert_equal(arr.dtype.byteorder, '=') + for routine in (linalg.inv, linalg.det, linalg.pinv): + # Normal call + res = routine(arr) + # Native but not '=' + assert_array_equal(res, routine(n_arr)) + # Swapped + assert_array_equal(res, routine(sw_arr)) + + +def test_generalized_raise_multiloop(): + # It should raise an error even if the error doesn't occur in the + # last iteration of the ufunc inner loop + + invertible = np.array([[1, 2], [3, 4]]) + non_invertible = np.array([[1, 1], [1, 1]]) + + x = np.zeros([4, 4, 2, 2])[1::2] + x[...] = invertible + x[0, 0] = non_invertible + + assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + +def test_xerbla_override(): + # Check that our xerbla has been successfully linked in. If it is not, + # the default xerbla routine is called, which prints a message to stdout + # and may, or may not, abort the process depending on the LAPACK package. + from nose import SkipTest + + try: + pid = os.fork() + except (OSError, AttributeError): + # fork failed, or not running on POSIX + raise SkipTest("Not POSIX or fork failed.") + + if pid == 0: + # child; close i/o file handles + os.close(1) + os.close(0) + # Avoid producing core files. + import resource + resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) + # These calls may abort. + try: + np.linalg.lapack_lite.xerbla() + except ValueError: + pass + except: + os._exit(os.EX_CONFIG) + + try: + a = np.array([[1.]]) + np.linalg.lapack_lite.dorgqr( + 1, 1, 1, a, + 0, # <- invalid value + a, a, 0, 0) + except ValueError as e: + if "DORGQR parameter number 5" in str(e): + # success + os._exit(os.EX_OK) + + # Did not abort, but our xerbla was not linked in. 
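+            # (the parent below treats any non-EX_OK child status as
+            # "xerbla not linked in" and skips the test)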
+            os._exit(os.EX_CONFIG)
+    else:
+        # parent
+        pid, status = os.wait()
+        if os.WEXITSTATUS(status) != os.EX_OK or os.WIFSIGNALED(status):
+            raise SkipTest('Numpy xerbla not linked in.')
+
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_regression.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_regression.py
new file mode 100644
index 0000000000000..18d212cdc9d2a
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_regression.py
@@ -0,0 +1,90 @@
+""" Test functions for linalg module
+"""
+from __future__ import division, absolute_import, print_function
+
+
+from numpy.testing import *
+import numpy as np
+from numpy import linalg, arange, float64, array, dot, transpose
+
+rlevel = 1
+
+class TestRegression(TestCase):
+    def test_eig_build(self, level = rlevel):
+        """Ticket #652"""
+        rva = array([1.03221168e+02 +0.j,
+                     -1.91843603e+01 +0.j,
+                     -6.04004526e-01+15.84422474j,
+                     -6.04004526e-01-15.84422474j,
+                     -1.13692929e+01 +0.j,
+                     -6.57612485e-01+10.41755503j,
+                     -6.57612485e-01-10.41755503j,
+                     1.82126812e+01 +0.j,
+                     1.06011014e+01 +0.j,
+                     7.80732773e+00 +0.j,
+                     -7.65390898e-01 +0.j,
+                     1.51971555e-15 +0.j,
+                     -1.51308713e-15 +0.j])
+        a = arange(13*13, dtype = float64)
+        a.shape = (13, 13)
+        a = a%17
+        va, ve = linalg.eig(a)
+        va.sort()
+        rva.sort()
+        assert_array_almost_equal(va, rva)
+
+    def test_eigh_build(self, level = rlevel):
+        """Ticket 662."""
+        rvals = [68.60568999, 89.57756725, 106.67185574]
+
+        cov = array([[ 77.70273908, 3.51489954, 15.64602427],
+                     [3.51489954, 88.97013878, -1.07431931],
+                     [15.64602427, -1.07431931, 98.18223512]])
+
+        vals, vecs = linalg.eigh(cov)
+        assert_array_almost_equal(vals, rvals)
+
+    def test_svd_build(self, level = rlevel):
+        """Ticket 627."""
+        a = array([[ 0., 1.], [ 1., 1.], [ 2., 1.], [ 3., 1.]])
+        m, n = a.shape
+        u, s, vh = linalg.svd(a)
+
+        b = dot(transpose(u[:, n:]), a)
+
+        assert_array_almost_equal(b, np.zeros((2, 2)))
+
+    def test_norm_vector_badarg(self):
+        """Regression for #786: Frobenius norm for vectors raises
+        TypeError."""
+        self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
+
+    def test_lapack_endian(self):
+        # For bug #1482
+        a = array([[5.7998084, -2.1825367 ],
+                   [-2.1825367, 9.85910595]], dtype='>f8')
+        b = array(a, dtype='<f8')
+
+        ap = linalg.cholesky(a)
+        bp = linalg.cholesky(b)
+        assert_array_equal(ap, bp)
+
+    def test_large_svd_32bit(self):
+        # See gh-4442, 64bit would require very large/slow matrices.
+        x = np.eye(1000, 66)
+        np.linalg.svd(x)
+
+    def test_svd_no_uv(self):
+        # gh-4733
+        for shape in (3, 4), (4, 4), (4, 3):
+            for t in float, complex:
+                a = np.ones(shape, dtype=t)
+                w = linalg.svd(a, compute_uv=False)
+                c = np.count_nonzero(np.absolute(w) > 0.5)
+                assert_equal(c, 1)
+                assert_equal(np.linalg.matrix_rank(a), 1)
+                assert_array_less(1, np.linalg.norm(a, ord=2))
+
+
+if __name__ == '__main__':
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/__init__.py
new file mode 100644
index 0000000000000..0cb92f6678a23
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/__init__.py
@@ -0,0 +1,58 @@
+"""
+=============
+Masked Arrays
+=============
+
+Arrays sometimes contain invalid or missing data. When doing operations
+on such arrays, we wish to suppress invalid values, which is the purpose masked
+arrays fulfill (an example of typical use is given below).
+
+For example, examine the following array:
+
+>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
+
+When we try to calculate the mean of the data, the result is undetermined:
+
+>>> np.mean(x)
+nan
+
+The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
+any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
+masked arrays:
+
+>>> m = np.ma.masked_array(x, np.isnan(x))
+>>> m
+masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --],
+             mask = [False False False True False False False True],
+       fill_value=1e+20)
+
+Here, we construct a masked array that suppresses all ``NaN`` values. We
+may now proceed to calculate the mean of the other values:
+
+>>> np.mean(m)
+2.6666666666666665
+
+.. [1] Not-a-Number, a floating point value that is the result of an
+   invalid operation.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
+__version__ = '1.0'
+__revision__ = "$Revision: 3473 $"
+__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
+
+from . import core
+from .core import *
+
+from . import extras
+from .extras import *
+
+__all__ = ['core', 'extras']
+__all__ += core.__all__
+__all__ += extras.__all__
+
+from numpy.testing import Tester
+test = Tester().test
+bench = Tester().bench
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/bench.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/bench.py
new file mode 100644
index 0000000000000..75e6d90c8f5e4
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/bench.py
@@ -0,0 +1,166 @@
+#! python
+# encoding: utf-8
+from __future__ import division, absolute_import, print_function
+
+import timeit
+#import IPython.ipapi
+#ip = IPython.ipapi.get()
+#from IPython import ipmagic
+import numpy
+#from numpy import ma
+#from numpy.ma import filled
+#from numpy.ma.testutils import assert_equal
+
+
+#####---------------------------------------------------------------------------
+#---- --- Global variables ---
+#####---------------------------------------------------------------------------
+
+# Small arrays ..................................
+xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
+ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
+zs = xs + 1j * ys
+m1 = [[True, False, False], [False, False, True]]
+m2 = [[True, False, True], [False, False, True]]
+nmxs = numpy.ma.array(xs, mask=m1)
+nmys = numpy.ma.array(ys, mask=m2)
+nmzs = numpy.ma.array(zs, mask=m1)
+# Big arrays ....................................
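+# (the 0.8/-0.8 thresholds below mask roughly 10% of the uniform(-1, 1)
+# entries of each 100x100 array)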
+xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) +yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) +zl = xl + 1j * yl +maskx = xl > 0.8 +masky = yl < -0.8 +nmxl = numpy.ma.array(xl, mask=maskx) +nmyl = numpy.ma.array(yl, mask=masky) +nmzl = numpy.ma.array(zl, mask=maskx) + +#####--------------------------------------------------------------------------- +#---- --- Functions --- +#####--------------------------------------------------------------------------- + +def timer(s, v='', nloop=500, nrep=3): + units = ["s", "ms", "µs", "ns"] + scaling = [1, 1e3, 1e6, 1e9] + print("%s : %-50s : " % (v, s), end=' ') + varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz'] + setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames) + Timer = timeit.Timer(stmt=s, setup=setup) + best = min(Timer.repeat(nrep, nloop)) / nloop + if best > 0.0: + order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3) + else: + order = 3 + print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep, + 3, + best * scaling[order], + units[order])) +# ip.magic('timeit -n%i %s' % (nloop,s)) + + + +def compare_functions_1v(func, nloop=500, + xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): + funcname = func.__name__ + print("-"*50) + print("%s on small arrays" % funcname) + module, data = "numpy.ma", "nmxs" + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) + # + print("%s on large arrays" % funcname) + module, data = "numpy.ma", "nmxl" + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) + return + +def compare_methods(methodname, args, vars='x', nloop=500, test=True, + xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): + print("-"*50) + print("%s on small arrays" % methodname) + data, ver = "nm%ss" % vars, 'numpy.ma' + timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) + # + print("%s on large arrays" % methodname) + data, ver = "nm%sl" % vars, 'numpy.ma' + timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) + return + +def compare_functions_2v(func, nloop=500, test=True, + xs=xs, nmxs=nmxs, + ys=ys, nmys=nmys, + xl=xl, nmxl=nmxl, + yl=yl, nmyl=nmyl): + funcname = func.__name__ + print("-"*50) + print("%s on small arrays" % funcname) + module, data = "numpy.ma", "nmxs,nmys" + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) + # + print("%s on large arrays" % funcname) + module, data = "numpy.ma", "nmxl,nmyl" + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) + return + + +############################################################################### + + +################################################################################ +if __name__ == '__main__': +# # Small arrays .................................. +# xs = numpy.random.uniform(-1,1,6).reshape(2,3) +# ys = numpy.random.uniform(-1,1,6).reshape(2,3) +# zs = xs + 1j * ys +# m1 = [[True, False, False], [False, False, True]] +# m2 = [[True, False, True], [False, False, True]] +# nmxs = numpy.ma.array(xs, mask=m1) +# nmys = numpy.ma.array(ys, mask=m2) +# nmzs = numpy.ma.array(zs, mask=m1) +# mmxs = maskedarray.array(xs, mask=m1) +# mmys = maskedarray.array(ys, mask=m2) +# mmzs = maskedarray.array(zs, mask=m1) +# # Big arrays .................................... 
+# xl = numpy.random.uniform(-1,1,100*100).reshape(100,100) +# yl = numpy.random.uniform(-1,1,100*100).reshape(100,100) +# zl = xl + 1j * yl +# maskx = xl > 0.8 +# masky = yl < -0.8 +# nmxl = numpy.ma.array(xl, mask=maskx) +# nmyl = numpy.ma.array(yl, mask=masky) +# nmzl = numpy.ma.array(zl, mask=maskx) +# mmxl = maskedarray.array(xl, mask=maskx, shrink=True) +# mmyl = maskedarray.array(yl, mask=masky, shrink=True) +# mmzl = maskedarray.array(zl, mask=maskx, shrink=True) +# + compare_functions_1v(numpy.sin) + compare_functions_1v(numpy.log) + compare_functions_1v(numpy.sqrt) + #.................................................................... + compare_functions_2v(numpy.multiply) + compare_functions_2v(numpy.divide) + compare_functions_2v(numpy.power) + #.................................................................... + compare_methods('ravel', '', nloop=1000) + compare_methods('conjugate', '', 'z', nloop=1000) + compare_methods('transpose', '', nloop=1000) + compare_methods('compressed', '', nloop=1000) + compare_methods('__getitem__', '0', nloop=1000) + compare_methods('__getitem__', '(0,0)', nloop=1000) + compare_methods('__getitem__', '[0,-1]', nloop=1000) + compare_methods('__setitem__', '0, 17', nloop=1000, test=False) + compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False) + #.................................................................... + print("-"*50) + print("__setitem__ on small arrays") + timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) + + print("-"*50) + print("__setitem__ on large arrays") + timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) + + #.................................................................... + print("-"*50) + print("where on small arrays") + timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000) + print("-"*50) + print("where on large arrays") + timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/core.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/core.py new file mode 100644 index 0000000000000..00164b851f95a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/core.py @@ -0,0 +1,7321 @@ +""" +numpy.ma : a package to handle missing or invalid values. + +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. + +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +.. 
moduleauthor:: Pierre Gerard-Marchant + +""" +# pylint: disable-msg=E1002 +from __future__ import division, absolute_import, print_function + +import sys +import warnings +from functools import reduce + +import numpy as np +import numpy.core.umath as umath +import numpy.core.numerictypes as ntypes +from numpy import ndarray, amax, amin, iscomplexobj, bool_ +from numpy import array as narray +from numpy.lib.function_base import angle +from numpy.compat import getargspec, formatargspec, long, basestring +from numpy import expand_dims as n_expand_dims + +if sys.version_info[0] >= 3: + import pickle +else: + import cPickle as pickle + +__author__ = "Pierre GF Gerard-Marchant" +__docformat__ = "restructuredtext en" + +__all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray', + 'bool_', + 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', + 'amax', 'amin', 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', + 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', + 'arctanh', 'argmax', 'argmin', 'argsort', 'around', + 'array', 'asarray', 'asanyarray', + 'bitwise_and', 'bitwise_or', 'bitwise_xor', + 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', + 'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', + 'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump', + 'dumps', + 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', + 'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide', + 'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex', + 'fromfunction', + 'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal', + 'harden_mask', 'hypot', + 'identity', 'ids', 'indices', 'inner', 'innerproduct', + 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', + 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2', + 'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', + 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', + 'masked', 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_invalid', + 'masked_less', 'masked_less_equal', 'masked_not_equal', + 'masked_object', 'masked_outside', 'masked_print_option', + 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', + 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', + 'mod', 'multiply', 'mvoid', + 'negative', 'nomask', 'nonzero', 'not_equal', + 'ones', 'outer', 'outerproduct', + 'power', 'prod', 'product', 'ptp', 'put', 'putmask', + 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', + 'right_shift', 'round_', 'round', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', + 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', + 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', + 'var', 'where', + 'zeros'] + +MaskType = np.bool_ +nomask = MaskType(0) + +def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. 
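+
+    (Returns None if `initialdoc` is None; returns `initialdoc` unchanged
+    if `note` is None.)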
+ """ + if initialdoc is None: + return + if note is None: + return initialdoc + newdoc = """ + %s + + Notes + ----- + %s + """ + return newdoc % (initialdoc, note) + +def get_object_signature(obj): + """ + Get the signature from obj + """ + try: + sig = formatargspec(*getargspec(obj)) + except TypeError as errmsg: + sig = '' +# msg = "Unable to retrieve the signature of %s '%s'\n"\ +# "(Initial error message: %s)" +# warnings.warn(msg % (type(obj), +# getattr(obj, '__name__', '???'), +# errmsg)) + return sig + + +#####-------------------------------------------------------------------------- +#---- --- Exceptions --- +#####-------------------------------------------------------------------------- +class MAError(Exception): + """Class for masked array related errors.""" + pass +class MaskError(MAError): + "Class for mask related errors." + pass + + +#####-------------------------------------------------------------------------- +#---- --- Filling options --- +#####-------------------------------------------------------------------------- +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c' : 1.e20 + 0.0j, + 'f' : 1.e20, + 'i' : 999999, + 'O' : '?', + 'S' : 'N/A', + 'u' : 999999, + 'V' : '???', + 'U' : 'N/A', + 'M8[D]' : np.datetime64('NaT', 'D'), + 'M8[us]' : np.datetime64('NaT', 'us') + } +max_filler = ntypes._minvals +max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]]) +min_filler = ntypes._maxvals +min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]]) +if 'float128' in ntypes.typeDict: + max_filler.update([(np.float128, -np.inf)]) + min_filler.update([(np.float128, +np.inf)]) + + +def default_fill_value(obj): + """ + Return the default fill value for the argument object. + + The default filling value depends on the datatype of the input + array or the type of the input scalar: + + ======== ======== + datatype default + ======== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + ======== ======== + + + Parameters + ---------- + obj : ndarray, dtype or scalar + The array data-type or scalar for which the default fill value + is returned. + + Returns + ------- + fill_value : scalar + The default fill value. 
+
+    Examples
+    --------
+    >>> np.ma.default_fill_value(1)
+    999999
+    >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
+    1e+20
+    >>> np.ma.default_fill_value(np.dtype(complex))
+    (1e+20+0j)
+
+    """
+    if hasattr(obj, 'dtype'):
+        defval = _check_fill_value(None, obj.dtype)
+    elif isinstance(obj, np.dtype):
+        if obj.subdtype:
+            defval = default_filler.get(obj.subdtype[0].kind, '?')
+        elif obj.kind == 'M':
+            defval = default_filler.get(obj.str[1:], '?')
+        else:
+            defval = default_filler.get(obj.kind, '?')
+    elif isinstance(obj, float):
+        defval = default_filler['f']
+    elif isinstance(obj, int) or isinstance(obj, long):
+        defval = default_filler['i']
+    elif isinstance(obj, str):
+        defval = default_filler['S']
+    elif isinstance(obj, unicode):
+        defval = default_filler['U']
+    elif isinstance(obj, complex):
+        defval = default_filler['c']
+    else:
+        defval = default_filler['O']
+    return defval
+
+
+def _recursive_extremum_fill_value(ndtype, extremum):
+    names = ndtype.names
+    if names:
+        deflist = []
+        for name in names:
+            fval = _recursive_extremum_fill_value(ndtype[name], extremum)
+            deflist.append(fval)
+        return tuple(deflist)
+    return extremum[ndtype]
+
+
+def minimum_fill_value(obj):
+    """
+    Return the maximum value that can be represented by the dtype of an object.
+
+    This function is useful for calculating a fill value suitable for
+    taking the minimum of an array with a given dtype.
+
+    Parameters
+    ----------
+    obj : ndarray or dtype
+        An object that can be queried for its numeric type.
+
+    Returns
+    -------
+    val : scalar
+        The maximum representable value.
+
+    Raises
+    ------
+    TypeError
+        If `obj` isn't a suitable numeric type.
+
+    See Also
+    --------
+    maximum_fill_value : The inverse function.
+    set_fill_value : Set the filling value of a masked array.
+    MaskedArray.fill_value : Return current fill value.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = np.int8()
+    >>> ma.minimum_fill_value(a)
+    127
+    >>> a = np.int32()
+    >>> ma.minimum_fill_value(a)
+    2147483647
+
+    An array of numeric data can also be passed.
+
+    >>> a = np.array([1, 2, 3], dtype=np.int8)
+    >>> ma.minimum_fill_value(a)
+    127
+    >>> a = np.array([1, 2, 3], dtype=np.float32)
+    >>> ma.minimum_fill_value(a)
+    inf
+
+    """
+    errmsg = "Unsuitable type for calculating minimum."
+    if hasattr(obj, 'dtype'):
+        return _recursive_extremum_fill_value(obj.dtype, min_filler)
+    elif isinstance(obj, float):
+        return min_filler[ntypes.typeDict['float_']]
+    elif isinstance(obj, int):
+        return min_filler[ntypes.typeDict['int_']]
+    elif isinstance(obj, long):
+        return min_filler[ntypes.typeDict['uint']]
+    elif isinstance(obj, np.dtype):
+        return min_filler[obj]
+    else:
+        raise TypeError(errmsg)
+
+
+def maximum_fill_value(obj):
+    """
+    Return the minimum value that can be represented by the dtype of an object.
+
+    This function is useful for calculating a fill value suitable for
+    taking the maximum of an array with a given dtype.
+
+    Parameters
+    ----------
+    obj : {ndarray, dtype}
+        An object that can be queried for its numeric type.
+
+    Returns
+    -------
+    val : scalar
+        The minimum representable value.
+
+    Raises
+    ------
+    TypeError
+        If `obj` isn't a suitable numeric type.
+
+    See Also
+    --------
+    minimum_fill_value : The inverse function.
+    set_fill_value : Set the filling value of a masked array.
+    MaskedArray.fill_value : Return current fill value.
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf + + """ + errmsg = "Unsuitable type for calculating maximum." + if hasattr(obj, 'dtype'): + return _recursive_extremum_fill_value(obj.dtype, max_filler) + elif isinstance(obj, float): + return max_filler[ntypes.typeDict['float_']] + elif isinstance(obj, int): + return max_filler[ntypes.typeDict['int_']] + elif isinstance(obj, long): + return max_filler[ntypes.typeDict['uint']] + elif isinstance(obj, np.dtype): + return max_filler[obj] + else: + raise TypeError(errmsg) + + +def _recursive_set_default_fill_value(dtypedescr): + deflist = [] + for currentdescr in dtypedescr: + currenttype = currentdescr[1] + if isinstance(currenttype, list): + deflist.append(tuple(_recursive_set_default_fill_value(currenttype))) + else: + deflist.append(default_fill_value(np.dtype(currenttype))) + return tuple(deflist) + +def _recursive_set_fill_value(fillvalue, dtypedescr): + fillvalue = np.resize(fillvalue, len(dtypedescr)) + output_value = [] + for (fval, descr) in zip(fillvalue, dtypedescr): + cdtype = descr[1] + if isinstance(cdtype, list): + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + +def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype + if this latter is standard (no fields). If the datatype is flexible (named + fields), fill_value is set to a tuple whose elements are the default fill + values corresponding to each field. + + If fill_value is not None, its value is forced to the given dtype. + + """ + ndtype = np.dtype(ndtype) + fields = ndtype.fields + if fill_value is None: + if fields: + descr = ndtype.descr + fill_value = np.array(_recursive_set_default_fill_value(descr), + dtype=ndtype,) + else: + fill_value = default_fill_value(ndtype) + elif fields: + fdtype = [(_[0], _[1]) for _ in ndtype.descr] + if isinstance(fill_value, (ndarray, np.void)): + try: + fill_value = np.array(fill_value, copy=False, dtype=fdtype) + except ValueError: + err_msg = "Unable to transform %s to dtype %s" + raise ValueError(err_msg % (fill_value, fdtype)) + else: + descr = ndtype.descr + fill_value = np.asarray(fill_value, dtype=object) + fill_value = np.array(_recursive_set_fill_value(fill_value, descr), + dtype=ndtype) + else: + if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'): + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) + else: + # In case we want to convert 1e20 to int... + try: + fill_value = np.array(fill_value, copy=False, dtype=ndtype) + except OverflowError: + # Raise TypeError instead of OverflowError. OverflowError + # is seldom used, and the real problem here is that the + # passed fill_value is not compatible with the ndtype. + err_msg = "Fill value %s overflows dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) + return np.array(fill_value) + + +def set_fill_value(a, fill_value): + """ + Set the filling value of a, if a is a masked array. 
+ + This function changes the fill value of the masked array `a` in place. + If `a` is not a masked array, the function returns silently, without + doing anything. + + Parameters + ---------- + a : array_like + Input array. + fill_value : dtype + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of `a`. + + Returns + ------- + None + Nothing returned by this function. + + See Also + -------- + maximum_fill_value : Return the default fill value for a dtype. + MaskedArray.fill_value : Return current fill value. + MaskedArray.set_fill_value : Equivalent method. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> a = ma.masked_where(a < 3, a) + >>> a + masked_array(data = [-- -- -- 3 4], + mask = [ True True True False False], + fill_value=999999) + >>> ma.set_fill_value(a, -999) + >>> a + masked_array(data = [-- -- -- 3 4], + mask = [ True True True False False], + fill_value=-999) + + Nothing happens if `a` is not a masked array. + + >>> a = range(5) + >>> a + [0, 1, 2, 3, 4] + >>> ma.set_fill_value(a, 100) + >>> a + [0, 1, 2, 3, 4] + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> ma.set_fill_value(a, 100) + >>> a + array([0, 1, 2, 3, 4]) + + """ + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + return + +def get_fill_value(a): + """ + Return the filling value of a, if any. Otherwise, returns the + default filling value for that type. + + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + +def common_fill_value(a, b): + """ + Return the common filling value of two masked arrays, if any. + + If ``a.fill_value == b.fill_value``, return the fill value, + otherwise return None. + + Parameters + ---------- + a, b : MaskedArray + The masked arrays for which to compare fill values. + + Returns + ------- + fill_value : scalar or None + The common fill value, or None. + + Examples + -------- + >>> x = np.ma.array([0, 1.], fill_value=3) + >>> y = np.ma.array([0, 1.], fill_value=3) + >>> np.ma.common_fill_value(x, y) + 3.0 + + """ + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + + +#####-------------------------------------------------------------------------- +def filled(a, fill_value=None): + """ + Return input as an array with masked data replaced by a fill value. + + If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to + ``a.fill_value``. + + Parameters + ---------- + a : MaskedArray or array_like + An input object. + fill_value : scalar, optional + Filling value. Default is None. + + Returns + ------- + a : ndarray + The filled array. + + See Also + -------- + compressed + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x.filled() + array([[999999, 1, 2], + [999999, 4, 5], + [ 6, 7, 8]]) + + """ + if hasattr(a, 'filled'): + return a.filled(fill_value) + elif isinstance(a, ndarray): + # Should we check for contiguity ? and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return np.array(a, 'O') + else: + return np.array(a) + +#####-------------------------------------------------------------------------- +def get_masked_subclass(*arrays): + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + In case of siblings, the first listed takes over. 
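+    (If the winning subclass is MaskedConstant, plain MaskedArray is
+    returned instead.)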
+ + """ + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + # Don't return MaskedConstant as result: revert to MaskedArray + if rcls.__name__ == 'MaskedConstant': + return MaskedArray + return rcls + +#####-------------------------------------------------------------------------- +def getdata(a, subok=True): + """ + Return the data of a masked array as an ndarray. + + Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, + else return `a` as a ndarray or subclass (depending on `subok`) if not. + + Parameters + ---------- + a : array_like + Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + subok : bool + Whether to force the output to be a `pure` ndarray (False) or to + return a subclass of ndarray if appropriate (True, default). + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array(data = + [[1 --] + [3 4]], + mask = + [[False True] + [False False]], + fill_value=999999) + >>> ma.getdata(a) + array([[1, 2], + [3, 4]]) + + Equivalently use the ``MaskedArray`` `data` attribute. + + >>> a.data + array([[1, 2], + [3, 4]]) + + """ + try: + data = a._data + except AttributeError: + data = np.array(a, copy=False, subok=subok) + if not subok: + return data.view(ndarray) + return data +get_data = getdata + + +def fix_invalid(a, mask=nomask, copy=True, fill_value=None): + """ + Return input with invalid data masked and replaced by a fill value. + + Invalid data means values of `nan`, `inf`, etc. + + Parameters + ---------- + a : array_like + Input array, a (subclass of) ndarray. + copy : bool, optional + Whether to use a copy of `a` (True) or to fix `a` in place (False). + Default is True. + fill_value : scalar, optional + Value used for fixing invalid data. Default is None, in which case + the ``a.fill_value`` is used. + + Returns + ------- + b : MaskedArray + The input array with invalid entries fixed. + + Notes + ----- + A copy is performed by default. 
+
+    Examples
+    --------
+    >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
+    >>> x
+    masked_array(data = [-- -1.0 nan inf],
+                 mask = [ True False False False],
+           fill_value = 1e+20)
+    >>> np.ma.fix_invalid(x)
+    masked_array(data = [-- -1.0 -- --],
+                 mask = [ True False True True],
+           fill_value = 1e+20)
+
+    >>> fixed = np.ma.fix_invalid(x)
+    >>> fixed.data
+    array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20,
+            1.00000000e+20])
+    >>> x.data
+    array([ 1., -1., NaN, Inf])
+
+    """
+    a = masked_array(a, copy=copy, mask=mask, subok=True)
+    #invalid = (numpy.isnan(a._data) | numpy.isinf(a._data))
+    invalid = np.logical_not(np.isfinite(a._data))
+    if not invalid.any():
+        return a
+    a._mask |= invalid
+    if fill_value is None:
+        fill_value = a.fill_value
+    a._data[invalid] = fill_value
+    return a
+
+
+
+#####--------------------------------------------------------------------------
+#---- --- Ufuncs ---
+#####--------------------------------------------------------------------------
+ufunc_domain = {}
+ufunc_fills = {}
+
+class _DomainCheckInterval:
+    """
+    Define a valid interval, so that :
+
+    ``domain_check_interval(a,b)(x) == True`` where
+    ``x < a`` or ``x > b``.
+
+    """
+    def __init__(self, a, b):
+        "domain_check_interval(a,b)(x) = true where x < a or x > b"
+        if (a > b):
+            (a, b) = (b, a)
+        self.a = a
+        self.b = b
+
+    def __call__ (self, x):
+        "Execute the call behavior."
+        return umath.logical_or(umath.greater (x, self.b),
+                                umath.less(x, self.a))
+
+
+
+class _DomainTan:
+    """Define a valid interval for the `tan` function, so that:
+
+    ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
+
+    """
+    def __init__(self, eps):
+        "domain_tan(eps) = true where abs(cos(x)) < eps"
+        self.eps = eps
+
+    def __call__ (self, x):
+        "Executes the call behavior."
+        return umath.less(umath.absolute(umath.cos(x)), self.eps)
+
+
+
+class _DomainSafeDivide:
+    """Define a domain for safe division."""
+    def __init__ (self, tolerance=None):
+        self.tolerance = tolerance
+
+    def __call__ (self, a, b):
+        # Delay the selection of the tolerance to here in order to reduce numpy
+        # import times. The calculation of these parameters is a substantial
+        # component of numpy's import time.
+        if self.tolerance is None:
+            self.tolerance = np.finfo(float).tiny
+        return umath.absolute(a) * self.tolerance >= umath.absolute(b)
+
+
+
+class _DomainGreater:
+    """DomainGreater(v)(x) is True where x <= v."""
+    def __init__(self, critical_value):
+        "DomainGreater(v)(x) = true where x <= v"
+        self.critical_value = critical_value
+
+    def __call__ (self, x):
+        "Executes the call behavior."
+        return umath.less_equal(x, self.critical_value)
+
+
+
+class _DomainGreaterEqual:
+    """DomainGreaterEqual(v)(x) is True where x < v."""
+    def __init__(self, critical_value):
+        "DomainGreaterEqual(v)(x) = true where x < v"
+        self.critical_value = critical_value
+
+    def __call__ (self, x):
+        "Executes the call behavior."
+        return umath.less(x, self.critical_value)
+
+#..............................................................................
+class _MaskedUnaryOperation:
+    """
+    Defines masked version of unary operations, where invalid values are
+    pre-masked.
+
+    Parameters
+    ----------
+    mufunc : callable
+        The function for which to define a masked version. Made available
+        as ``_MaskedUnaryOperation.f``.
+    fill : scalar, optional
+        Filling value, default is 0.
+    domain : class instance
+        Domain for the function. Should be one of the ``_Domain*``
+        classes. Default is None.
+ + """ + def __init__ (self, mufunc, fill=0, domain=None): + """ _MaskedUnaryOperation(aufunc, fill=0, domain=None) + aufunc(fill) must be defined + self(x) returns aufunc(x) + with masked values where domain(x) is true or getmask(x) is true. + """ + self.f = mufunc + self.fill = fill + self.domain = domain + self.__doc__ = getattr(mufunc, "__doc__", str(mufunc)) + self.__name__ = getattr(mufunc, "__name__", str(mufunc)) + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + # + def __call__ (self, a, *args, **kwargs): + "Execute the call behavior." + d = getdata(a) + # Case 1.1. : Domained function + if self.domain is not None: + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + # Make a mask + m = ~umath.isfinite(result) + m |= self.domain(d) + m |= getmask(a) + # Case 1.2. : Function without a domain + else: + # Get the result and the mask + result = self.f(d, *args, **kwargs) + m = getmask(a) + # Case 2.1. : The result is scalarscalar + if not result.ndim: + if m: + return masked + return result + # Case 2.2. The result is an array + # We need to fill the invalid data back w/ the input + # Now, that's plain silly: in C, we would just skip the element and keep + # the original, but we do have to do it that way in Python + if m is not nomask: + # In case result has a lower dtype than the inputs (as in equal) + try: + np.copyto(result, d, where=m) + except TypeError: + pass + # Transform to + if isinstance(a, MaskedArray): + subtype = type(a) + else: + subtype = MaskedArray + result = result.view(subtype) + result._mask = m + result._update_from(a) + return result + # + def __str__ (self): + return "Masked version of %s. [Invalid values are masked]" % str(self.f) + + + +class _MaskedBinaryOperation: + """ + Define masked version of binary operations, where invalid + values are pre-masked. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_MaskedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + def __init__ (self, mbfunc, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + self.f = mbfunc + self.fillx = fillx + self.filly = filly + self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc)) + self.__name__ = getattr(mbfunc, "__name__", str(mbfunc)) + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + + def __call__ (self, a, b, *args, **kwargs): + "Execute the call behavior." + # Get the data, as ndarray + (da, db) = (getdata(a, subok=False), getdata(b, subok=False)) + # Get the mask + (ma, mb) = (getmask(a), getmask(b)) + if ma is nomask: + if mb is nomask: + m = nomask + else: + m = umath.logical_or(getmaskarray(a), mb) + elif mb is nomask: + m = umath.logical_or(ma, getmaskarray(b)) + else: + m = umath.logical_or(ma, mb) + # Get the result + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(da, db, *args, **kwargs) + # check it worked + if result is NotImplemented: + return NotImplemented + # Case 1. : scalar + if not result.ndim: + if m: + return masked + return result + # Case 2. 
: array + # Revert result to da where masked + if m is not nomask: + np.copyto(result, da, casting='unsafe', where=m) + # Transforms to a (subclass of) MaskedArray + result = result.view(get_masked_subclass(a, b)) + result._mask = m + # Update the optional info from the inputs + if isinstance(b, MaskedArray): + if isinstance(a, MaskedArray): + result._update_from(a) + else: + result._update_from(b) + elif isinstance(a, MaskedArray): + result._update_from(a) + return result + + + def reduce(self, target, axis=0, dtype=None): + """Reduce `target` along the given `axis`.""" + if isinstance(target, MaskedArray): + tclass = type(target) + else: + tclass = MaskedArray + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=1) + m.shape = (1,) + if m is nomask: + return self.f.reduce(t, axis).view(tclass) + t = t.view(tclass) + t._mask = m + tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype) + mr = umath.logical_and.reduce(m, axis) + tr = tr.view(tclass) + if mr.ndim > 0: + tr._mask = mr + return tr + elif mr: + return masked + return tr + + def outer (self, a, b): + """Return the function applied to the outer product of a and b. + + """ + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + (da, db) = (getdata(a), getdata(b)) + d = self.f.outer(da, db) + # check it worked + if d is NotImplemented: + return NotImplemented + if m is not nomask: + np.copyto(d, da, where=m) + if d.shape: + d = d.view(get_masked_subclass(a, b)) + d._mask = m + return d + + def accumulate (self, target, axis=0): + """Accumulate `target` along `axis` after filling with y fill + value. + + """ + if isinstance(target, MaskedArray): + tclass = type(target) + else: + tclass = MaskedArray + t = filled(target, self.filly) + return self.f.accumulate(t, axis).view(tclass) + + def __str__ (self): + return "Masked version of " + str(self.f) + + + +class _DomainedBinaryOperation: + """ + Define binary operations that have a domain, like divide. + + They have no reduce, outer or accumulate. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_DomainedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + def __init__ (self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + self.f = dbfunc + self.domain = domain + self.fillx = fillx + self.filly = filly + self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc)) + self.__name__ = getattr(dbfunc, "__name__", str(dbfunc)) + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + "Execute the call behavior." 
+ # Get the data and the mask + (da, db) = (getdata(a, subok=False), getdata(b, subok=False)) + (ma, mb) = (getmask(a), getmask(b)) + # Get the result + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(da, db, *args, **kwargs) + # check it worked + if result is NotImplemented: + return NotImplemented + # Get the mask as a combination of ma, mb and invalid + m = ~umath.isfinite(result) + m |= ma + m |= mb + # Apply the domain + domain = ufunc_domain.get(self.f, None) + if domain is not None: + m |= filled(domain(da, db), True) + # Take care of the scalar case first + if (not m.ndim): + if m: + return masked + else: + return result + # When the mask is True, put back da + np.copyto(result, da, casting='unsafe', where=m) + result = result.view(get_masked_subclass(a, b)) + result._mask = m + if isinstance(b, MaskedArray): + if isinstance(a, MaskedArray): + result._update_from(a) + else: + result._update_from(b) + elif isinstance(a, MaskedArray): + result._update_from(a) + return result + + def __str__ (self): + return "Masked version of " + str(self.f) + +#.............................................................................. +# Unary ufuncs +exp = _MaskedUnaryOperation(umath.exp) +conjugate = _MaskedUnaryOperation(umath.conjugate) +sin = _MaskedUnaryOperation(umath.sin) +cos = _MaskedUnaryOperation(umath.cos) +tan = _MaskedUnaryOperation(umath.tan) +arctan = _MaskedUnaryOperation(umath.arctan) +arcsinh = _MaskedUnaryOperation(umath.arcsinh) +sinh = _MaskedUnaryOperation(umath.sinh) +cosh = _MaskedUnaryOperation(umath.cosh) +tanh = _MaskedUnaryOperation(umath.tanh) +abs = absolute = _MaskedUnaryOperation(umath.absolute) +angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base +fabs = _MaskedUnaryOperation(umath.fabs) +negative = _MaskedUnaryOperation(umath.negative) +floor = _MaskedUnaryOperation(umath.floor) +ceil = _MaskedUnaryOperation(umath.ceil) +around = _MaskedUnaryOperation(np.round_) +logical_not = _MaskedUnaryOperation(umath.logical_not) +# Domained unary ufuncs ....................................................... +sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, + _DomainGreaterEqual(0.0)) +log = _MaskedUnaryOperation(umath.log, 1.0, + _DomainGreater(0.0)) +log2 = _MaskedUnaryOperation(umath.log2, 1.0, + _DomainGreater(0.0)) +log10 = _MaskedUnaryOperation(umath.log10, 1.0, + _DomainGreater(0.0)) +tan = _MaskedUnaryOperation(umath.tan, 0.0, + _DomainTan(1e-35)) +arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccos = _MaskedUnaryOperation(umath.arccos, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, + _DomainGreaterEqual(1.0)) +arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, + _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) +# Binary ufuncs ............................................................... 
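+# (The trailing numeric arguments below are the (fillx, filly) fill values;
+# per the _MaskedBinaryOperation docstring, filly is meant to satisfy
+# abfunc(x, filly) == x, e.g. (1, 1) for multiply, so that reduce works on
+# filled entries.)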
+add = _MaskedBinaryOperation(umath.add)
+subtract = _MaskedBinaryOperation(umath.subtract)
+multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
+arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
+equal = _MaskedBinaryOperation(umath.equal)
+equal.reduce = None
+not_equal = _MaskedBinaryOperation(umath.not_equal)
+not_equal.reduce = None
+less_equal = _MaskedBinaryOperation(umath.less_equal)
+less_equal.reduce = None
+greater_equal = _MaskedBinaryOperation(umath.greater_equal)
+greater_equal.reduce = None
+less = _MaskedBinaryOperation(umath.less)
+less.reduce = None
+greater = _MaskedBinaryOperation(umath.greater)
+greater.reduce = None
+logical_and = _MaskedBinaryOperation(umath.logical_and)
+alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
+logical_or = _MaskedBinaryOperation(umath.logical_or)
+sometrue = logical_or.reduce
+logical_xor = _MaskedBinaryOperation(umath.logical_xor)
+bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
+bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
+bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
+hypot = _MaskedBinaryOperation(umath.hypot)
+# Domained binary ufuncs ......................................................
+divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
+true_divide = _DomainedBinaryOperation(umath.true_divide,
+                                       _DomainSafeDivide(), 0, 1)
+floor_divide = _DomainedBinaryOperation(umath.floor_divide,
+                                        _DomainSafeDivide(), 0, 1)
+remainder = _DomainedBinaryOperation(umath.remainder,
+                                     _DomainSafeDivide(), 0, 1)
+fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
+mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
+
+
+#####--------------------------------------------------------------------------
+#---- --- Mask creation functions ---
+#####--------------------------------------------------------------------------
+
+def _recursive_make_descr(datatype, newtype=bool_):
+    "Private function allowing recursion in make_descr."
+    # Do we have some name fields ?
+    if datatype.names:
+        descr = []
+        for name in datatype.names:
+            field = datatype.fields[name]
+            if len(field) == 3:
+                # Prepend the title to the name
+                name = (field[-1], name)
+            descr.append((name, _recursive_make_descr(field[0], newtype)))
+        return descr
+    # Is this some kind of composite a la (np.float,2)
+    elif datatype.subdtype:
+        mdescr = list(datatype.subdtype)
+        mdescr[0] = newtype
+        return tuple(mdescr)
+    else:
+        return newtype
+
+def make_mask_descr(ndtype):
+    """
+    Construct a dtype description list from a given dtype.
+
+    Returns a new dtype object, with the type of all fields in `ndtype`
+    converted to a boolean type. Field names are not altered.
+
+    Parameters
+    ----------
+    ndtype : dtype
+        The dtype to convert.
+
+    Returns
+    -------
+    result : dtype
+        A dtype that looks like `ndtype`, with the type of all fields set
+        to boolean.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> dtype = np.dtype({'names':['foo', 'bar'],
+                          'formats':[np.float32, np.int]})
+    >>> dtype
+    dtype([('foo', '<f4'), ('bar', '<i4')])
+    >>> ma.make_mask_descr(dtype)
+    dtype([('foo', '|b1'), ('bar', '|b1')])
+    >>> ma.make_mask_descr(np.float32)
+    <type 'numpy.bool_'>
+
+    """
+    # Make sure we do have a dtype
+    if not isinstance(ndtype, np.dtype):
+        ndtype = np.dtype(ndtype)
+    return np.dtype(_recursive_make_descr(ndtype, np.bool))
+
+def getmask(a):
+    """
+    Return the mask of a masked array, or nomask.
+
+    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
+    mask is not `nomask`, else return `nomask`. To guarantee a full array
+    of booleans of the same shape as a, use `getmaskarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input `MaskedArray` for which the mask is required.
+
+    See Also
+    --------
+    getdata : Return the data of a masked array as an ndarray.
+    getmaskarray : Return the mask of a masked array, or full array of False.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
+    >>> a
+    masked_array(data =
+     [[1 --]
+     [3 4]],
+          mask =
+     [[False True]
+     [False False]],
+          fill_value=999999)
+    >>> ma.getmask(a)
+    array([[False, True],
+           [False, False]], dtype=bool)
+
+    Equivalently use the `MaskedArray` `mask` attribute.
+
+    >>> a.mask
+    array([[False, True],
+           [False, False]], dtype=bool)
+
+    Result when mask == `nomask`
+
+    >>> b = ma.masked_array([[1,2],[3,4]])
+    >>> b
+    masked_array(data =
+     [[1 2]
+     [3 4]],
+          mask =
+     False,
+          fill_value=999999)
+    >>> ma.nomask
+    False
+    >>> ma.getmask(b) == ma.nomask
+    True
+    >>> b.mask == ma.nomask
+    True
+
+    """
+    return getattr(a, '_mask', nomask)
+get_mask = getmask
+
+def getmaskarray(arr):
+    """
+    Return the mask of a masked array, or full boolean array of False.
+
+    Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
+    the mask is not `nomask`, else return a full boolean array of False of
+    the same shape as `arr`.
+
+    Parameters
+    ----------
+    arr : array_like
+        Input `MaskedArray` for which the mask is required.
+
+    See Also
+    --------
+    getmask : Return the mask of a masked array, or nomask.
+    getdata : Return the data of a masked array as an ndarray.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
+    >>> a
+    masked_array(data =
+     [[1 --]
+     [3 4]],
+          mask =
+     [[False True]
+     [False False]],
+          fill_value=999999)
+    >>> ma.getmaskarray(a)
+    array([[False, True],
+           [False, False]], dtype=bool)
+
+    Result when mask == ``nomask``
+
+    >>> b = ma.masked_array([[1,2],[3,4]])
+    >>> b
+    masked_array(data =
+     [[1 2]
+     [3 4]],
+          mask =
+     False,
+          fill_value=999999)
+    >>> ma.getmaskarray(b)
+    array([[False, False],
+           [False, False]], dtype=bool)
+
+    """
+    mask = getmask(arr)
+    if mask is nomask:
+        mask = make_mask_none(np.shape(arr), getdata(arr).dtype)
+    return mask
+
+def is_mask(m):
+    """
+    Return True if m is a valid, standard mask.
+
+    This function does not check the contents of the input, only that the
+    type is MaskType. In particular, this function returns False if the
+    mask has a flexible dtype.
+
+    Parameters
+    ----------
+    m : array_like
+        Array to test.
+
+    Returns
+    -------
+    result : bool
+        True if `m.dtype.type` is MaskType, False otherwise.
+
+    See Also
+    --------
+    isMaskedArray : Test whether input is an instance of MaskedArray.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
+    >>> m
+    masked_array(data = [-- 1 -- 2 3],
+          mask = [ True False True False False],
+          fill_value=999999)
+    >>> ma.is_mask(m)
+    False
+    >>> ma.is_mask(m.mask)
+    True
+
+    Input must be an ndarray (or have similar attributes)
+    for it to be considered a valid mask.
+
+    >>> m = [False, True, False]
+    >>> ma.is_mask(m)
+    False
+    >>> m = np.array([False, True, False])
+    >>> m
+    array([False, True, False], dtype=bool)
+    >>> ma.is_mask(m)
+    True
+
+    Arrays with complex dtypes don't return True.
+
+    >>> dtype = np.dtype({'names':['monty', 'pithon'],
+                          'formats':[np.bool, np.bool]})
+    >>> dtype
+    dtype([('monty', '|b1'), ('pithon', '|b1')])
+    >>> m = np.array([(True, False), (False, True), (True, False)],
+                     dtype=dtype)
+    >>> m
+    array([(True, False), (False, True), (True, False)],
+          dtype=[('monty', '|b1'), ('pithon', '|b1')])
+    >>> ma.is_mask(m)
+    False
+
+    """
+    try:
+        return m.dtype.type is MaskType
+    except AttributeError:
+        return False
+
+def make_mask(m, copy=False, shrink=True, dtype=MaskType):
+    """
+    Create a boolean mask from an array.
+
+    Return `m` as a boolean mask, creating a copy if necessary or requested.
+    The function can accept any sequence that is convertible to integers,
+    or ``nomask``. It does not require that the contents be 0s and 1s: values
+    of 0 are interpreted as False, everything else as True.
+
+    Parameters
+    ----------
+    m : array_like
+        Potential mask.
+    copy : bool, optional
+        Whether to return a copy of `m` (True) or `m` itself (False).
+    shrink : bool, optional
+        Whether to shrink `m` to ``nomask`` if all its values are False.
+    dtype : dtype, optional
+        Data-type of the output mask. By default, the output mask has
+        a dtype of MaskType (bool). If the dtype is flexible, each field
+        has a boolean dtype.
+
+    Returns
+    -------
+    result : ndarray
+        A boolean mask derived from `m`.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> m = [True, False, True, True]
+    >>> ma.make_mask(m)
+    array([ True, False, True, True], dtype=bool)
+    >>> m = [1, 0, 1, 1]
+    >>> ma.make_mask(m)
+    array([ True, False, True, True], dtype=bool)
+    >>> m = [1, 0, 2, -3]
+    >>> ma.make_mask(m)
+    array([ True, False, True, True], dtype=bool)
+
+    Effect of the `shrink` parameter.
+
+    >>> m = np.zeros(4)
+    >>> m
+    array([ 0., 0., 0., 0.])
+    >>> ma.make_mask(m)
+    False
+    >>> ma.make_mask(m, shrink=False)
+    array([False, False, False, False], dtype=bool)
+
+    Using a flexible `dtype`.
+
+    >>> m = [1, 0, 1, 1]
+    >>> n = [0, 1, 0, 0]
+    >>> arr = []
+    >>> for man, mouse in zip(m, n):
+    ...     arr.append((man, mouse))
+    >>> arr
+    [(1, 0), (0, 1), (1, 0), (1, 0)]
+    >>> dtype = np.dtype({'names':['man', 'mouse'],
+                          'formats':[np.int, np.int]})
+    >>> arr = np.array(arr, dtype=dtype)
+    >>> arr
+    array([(1, 0), (0, 1), (1, 0), (1, 0)],
+          dtype=[('man', '<i4'), ('mouse', '<i4')])
+    >>> ma.make_mask(arr, dtype=dtype)
+    array([(True, False), (False, True), (True, False), (True, False)],
+          dtype=[('man', '|b1'), ('mouse', '|b1')])
+
+    """
+    if m is nomask:
+        return nomask
+    elif isinstance(m, ndarray):
+        # We won't return after this point to make sure we can shrink the mask
+        # Fill the mask in case there are missing data
+        m = filled(m, True)
+        # Make sure the input dtype is valid
+        dtype = make_mask_descr(dtype)
+        if m.dtype == dtype:
+            if copy:
+                result = m.copy()
+            else:
+                result = m
+        else:
+            result = np.array(m, dtype=dtype, copy=copy)
+    else:
+        result = np.array(filled(m, True), dtype=MaskType)
+    # Masks off!
+    if shrink and (not result.dtype.names) and (not result.any()):
+        return nomask
+    else:
+        return result
+
+
+def make_mask_none(newshape, dtype=None):
+    """
+    Return a boolean mask of the given shape, filled with False.
+
+    This function returns a boolean ndarray with all entries False, that can
+    be used in common mask manipulations. If a complex dtype is specified, the
+    type of each field is converted to a boolean type.
+
+    Parameters
+    ----------
+    newshape : tuple
+        A tuple indicating the shape of the mask.
+    dtype : {None, dtype}, optional
+        If None, use a MaskType instance. Otherwise, use a new datatype with
+        the same fields as `dtype`, converted to boolean types.
+
+    Returns
+    -------
+    result : ndarray
+        An ndarray of appropriate shape and dtype, filled with False.
+
+    See Also
+    --------
+    make_mask : Create a boolean mask from an array.
+    make_mask_descr : Construct a dtype description list from a given dtype.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> ma.make_mask_none((3,))
+    array([False, False, False], dtype=bool)
+
+    Defining a more complex dtype.
+
+    >>> dtype = np.dtype({'names':['foo', 'bar'],
+                          'formats':[np.float32, np.int]})
+    >>> dtype
+    dtype([('foo', '<f4'), ('bar', '<i4')])
+    >>> ma.make_mask_none((3,), dtype=dtype)
+    array([(False, False), (False, False), (False, False)],
+          dtype=[('foo', '|b1'), ('bar', '|b1')])
+
+    """
+    if dtype is None:
+        result = np.zeros(newshape, dtype=MaskType)
+    else:
+        result = np.zeros(newshape, dtype=make_mask_descr(dtype))
+    return result
+
+def mask_or (m1, m2, copy=False, shrink=True):
+    """
+    Combine two masks with the ``logical_or`` operator.
+
+    The result may be a view on `m1` or `m2` if the other is `nomask`
+    (i.e. False).
+
+    Parameters
+    ----------
+    m1, m2 : array_like
+        Input masks.
+    copy : bool, optional
+        If copy is False and one of the inputs is `nomask`, return a view
+        of the other input mask. Defaults to False.
+    shrink : bool, optional
+        Whether to shrink the output to `nomask` if all its values are
+        False. Defaults to True.
+
+    Returns
+    -------
+    mask : output mask
+        The result masks values that are masked in either `m1` or `m2`.
+
+    Raises
+    ------
+    ValueError
+        If `m1` and `m2` have different flexible dtypes.
+
+    Examples
+    --------
+    >>> m1 = np.ma.make_mask([0, 1, 1, 0])
+    >>> m2 = np.ma.make_mask([1, 0, 0, 0])
+    >>> np.ma.mask_or(m1, m2)
+    array([ True, True, True, False], dtype=bool)
+
+    """
+    def _recursive_mask_or(m1, m2, newmask):
+        names = m1.dtype.names
+        for name in names:
+            current1 = m1[name]
+            if current1.dtype.names:
+                _recursive_mask_or(current1, m2[name], newmask[name])
+            else:
+                umath.logical_or(current1, m2[name], newmask[name])
+        return
+    #
+    if (m1 is nomask) or (m1 is False):
+        dtype = getattr(m2, 'dtype', MaskType)
+        return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
+    if (m2 is nomask) or (m2 is False):
+        dtype = getattr(m1, 'dtype', MaskType)
+        return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
+    if m1 is m2 and is_mask(m1):
+        return m1
+    (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
+    if (dtype1 != dtype2):
+        raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
+    if dtype1.names:
+        newmask = np.empty_like(m1)
+        _recursive_mask_or(m1, m2, newmask)
+        return newmask
+    return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
+
+
+def flatten_mask(mask):
+    """
+    Returns a completely flattened version of the mask, where nested fields
+    are collapsed.
+
+    Parameters
+    ----------
+    mask : array_like
+        Input array, which will be interpreted as booleans.
+
+    Returns
+    -------
+    flattened_mask : ndarray of bools
+        The flattened input.
+ + Examples + -------- + >>> mask = np.array([0, 0, 1], dtype=np.bool) + >>> flatten_mask(mask) + array([False, False, True], dtype=bool) + + >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + >>> flatten_mask(mask) + array([False, False, False, True], dtype=bool) + + >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) + >>> flatten_mask(mask) + array([False, False, False, False, False, True], dtype=bool) + + """ + # + def _flatmask(mask): + "Flatten the mask and returns a (maybe nested) sequence of booleans." + mnames = mask.dtype.names + if mnames: + return [flatten_mask(mask[name]) for name in mnames] + else: + return mask + # + def _flatsequence(sequence): + "Generates a flattened version of the sequence." + try: + for element in sequence: + if hasattr(element, '__iter__'): + for f in _flatsequence(element): + yield f + else: + yield element + except TypeError: + yield sequence + # + mask = np.asarray(mask) + flattened = _flatsequence(_flatmask(mask)) + return np.array([_ for _ in flattened], dtype=bool) + + +def _check_mask_axis(mask, axis): + "Check whether there are masked values along the given axis" + if mask is not nomask: + return mask.all(axis=axis) + return nomask + + +#####-------------------------------------------------------------------------- +#--- --- Masking functions --- +#####-------------------------------------------------------------------------- + +def masked_where(condition, a, copy=True): + """ + Mask an array where a condition is met. + + Return `a` as an array masked where `condition` is True. + Any masked values of `a` or `condition` are also masked in the output. + + Parameters + ---------- + condition : array_like + Masking condition. When `condition` tests floating point values for + equality, consider using ``masked_values`` instead. + a : array_like + Array to mask. + copy : bool + If True (default) make a copy of `a` in the result. If False modify + `a` in place and return a view. + + Returns + ------- + result : MaskedArray + The result of masking `a` where `condition` is True. + + See Also + -------- + masked_values : Mask using floating point equality. + masked_equal : Mask where equal to a given value. + masked_not_equal : Mask where `not` equal to a given value. + masked_less_equal : Mask where less than or equal to a given value. + masked_greater_equal : Mask where greater than or equal to a given value. + masked_less : Mask where less than a given value. + masked_greater : Mask where greater than a given value. + masked_inside : Mask inside a given interval. + masked_outside : Mask outside a given interval. + masked_invalid : Mask invalid values (NaNs or infs). + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_where(a <= 2, a) + masked_array(data = [-- -- -- 3], + mask = [ True True True False], + fill_value=999999) + + Mask array `b` conditional on `a`. + + >>> b = ['a', 'b', 'c', 'd'] + >>> ma.masked_where(a == 2, b) + masked_array(data = [a b -- d], + mask = [False False True False], + fill_value=N/A) + + Effect of the `copy` argument. 
+ + >>> c = ma.masked_where(a <= 2, a) + >>> c + masked_array(data = [-- -- -- 3], + mask = [ True True True False], + fill_value=999999) + >>> c[0] = 99 + >>> c + masked_array(data = [99 -- -- 3], + mask = [False True True False], + fill_value=999999) + >>> a + array([0, 1, 2, 3]) + >>> c = ma.masked_where(a <= 2, a, copy=False) + >>> c[0] = 99 + >>> c + masked_array(data = [99 -- -- 3], + mask = [False True True False], + fill_value=999999) + >>> a + array([99, 1, 2, 3]) + + When `condition` or `a` contain masked values. + + >>> a = np.arange(4) + >>> a = ma.masked_where(a == 2, a) + >>> a + masked_array(data = [0 1 -- 3], + mask = [False False True False], + fill_value=999999) + >>> b = np.arange(4) + >>> b = ma.masked_where(b == 0, b) + >>> b + masked_array(data = [-- 1 2 3], + mask = [ True False False False], + fill_value=999999) + >>> ma.masked_where(a == 3, b) + masked_array(data = [-- 1 -- --], + mask = [ True False True True], + fill_value=999999) + + """ + # Make sure that condition is a valid standard-type mask. + cond = make_mask(condition) + a = np.array(a, copy=copy, subok=True) + + (cshape, ashape) = (cond.shape, a.shape) + if cshape and cshape != ashape: + raise IndexError("Inconsistant shape between the condition and the input" + " (got %s and %s)" % (cshape, ashape)) + if hasattr(a, '_mask'): + cond = mask_or(cond, a._mask) + cls = type(a) + else: + cls = MaskedArray + result = a.view(cls) + result._mask = cond + return result + + +def masked_greater(x, value, copy=True): + """ + Mask an array where greater than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x > value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater(a, 2) + masked_array(data = [0 1 2 --], + mask = [False False False True], + fill_value=999999) + + """ + return masked_where(greater(x, value), x, copy=copy) + + +def masked_greater_equal(x, value, copy=True): + """ + Mask an array where greater than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x >= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater_equal(a, 2) + masked_array(data = [0 1 -- --], + mask = [False False True True], + fill_value=999999) + + """ + return masked_where(greater_equal(x, value), x, copy=copy) + + +def masked_less(x, value, copy=True): + """ + Mask an array where less than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x < value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less(a, 2) + masked_array(data = [-- -- 2 3], + mask = [ True True False False], + fill_value=999999) + + """ + return masked_where(less(x, value), x, copy=copy) + + +def masked_less_equal(x, value, copy=True): + """ + Mask an array where less than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x <= value). + + See Also + -------- + masked_where : Mask where a condition is met. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less_equal(a, 2) + masked_array(data = [-- -- -- 3], + mask = [ True True True False], + fill_value=999999) + + """ + return masked_where(less_equal(x, value), x, copy=copy) + + +def masked_not_equal(x, value, copy=True): + """ + Mask an array where `not` equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x != value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_not_equal(a, 2) + masked_array(data = [-- -- 2 --], + mask = [ True True False True], + fill_value=999999) + + """ + return masked_where(not_equal(x, value), x, copy=copy) + + +def masked_equal(x, value, copy=True): + """ + Mask an array where equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x == value). For floating point arrays, + consider using ``masked_values(x, value)``. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_equal(a, 2) + masked_array(data = [0 1 -- 3], + mask = [False False True False], + fill_value=999999) + + """ + # An alternative implementation relies on filling first: probably not needed. + # d = filled(x, 0) + # c = umath.equal(d, value) + # m = mask_or(c, getmask(x)) + # return array(d, mask=m, copy=copy) + output = masked_where(equal(x, value), x, copy=copy) + output.fill_value = value + return output + + +def masked_inside(x, v1, v2, copy=True): + """ + Mask an array inside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` inside + the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` + can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_inside(x, -0.3, 0.3) + masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], + mask = [False False True True False False], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_inside(x, 0.3, -0.3) + masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], + mask = [False False True True False False], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf >= v1) & (xf <= v2) + return masked_where(condition, x, copy=copy) + + +def masked_outside(x, v1, v2, copy=True): + """ + Mask an array outside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` outside + the interval [v1,v2] (x < v1)|(x > v2). + The boundaries `v1` and `v2` can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_outside(x, -0.3, 0.3) + masked_array(data = [-- -- 0.01 0.2 -- --], + mask = [ True True False False True True], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. 
+ + >>> ma.masked_outside(x, 0.3, -0.3) + masked_array(data = [-- -- 0.01 0.2 -- --], + mask = [ True True False False True True], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf < v1) | (xf > v2) + return masked_where(condition, x, copy=copy) + + +def masked_object(x, value, copy=True, shrink=True): + """ + Mask the array `x` where the data are exactly equal to value. + + This function is similar to `masked_values`, but only suitable + for object arrays: for floating point, use `masked_values` instead. + + Parameters + ---------- + x : array_like + Array to mask + value : object + Comparison value + copy : {True, False}, optional + Whether to return a copy of `x`. + shrink : {True, False}, optional + Whether to collapse a mask full of False to nomask + + Returns + ------- + result : MaskedArray + The result of masking `x` where equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> # don't eat spoiled food + >>> eat = ma.masked_object(food, 'green_eggs') + >>> print eat + [-- ham] + >>> # plain ol` ham is boring + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> eat = ma.masked_object(fresh_food, 'green_eggs') + >>> print eat + [cheese ham pineapple] + + Note that `mask` is set to ``nomask`` if possible. + + >>> eat + masked_array(data = [cheese ham pineapple], + mask = False, + fill_value=?) + + """ + if isMaskedArray(x): + condition = umath.equal(x._data, value) + mask = x._mask + else: + condition = umath.equal(np.asarray(x), value) + mask = nomask + mask = mask_or(mask, make_mask(condition, shrink=shrink)) + return masked_array(x, mask=mask, copy=copy, fill_value=value) + + +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): + """ + Mask using floating point equality. + + Return a MaskedArray, masked where the data in array `x` are approximately + equal to `value`, i.e. where the following condition is True + + (abs(x - value) <= atol+rtol*abs(value)) + + The fill_value is set to `value` and the mask is set to ``nomask`` if + possible. For integers, consider using ``masked_equal``. + + Parameters + ---------- + x : array_like + Array to mask. + value : float + Masking value. + rtol : float, optional + Tolerance parameter. + atol : float, optional + Tolerance parameter (1e-8). + copy : bool, optional + Whether to return a copy of `x`. + shrink : bool, optional + Whether to collapse a mask full of False to ``nomask``. + + Returns + ------- + result : MaskedArray + The result of masking `x` where approximately equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + + Examples + -------- + >>> import numpy.ma as ma + >>> x = np.array([1, 1.1, 2, 1.1, 3]) + >>> ma.masked_values(x, 1.1) + masked_array(data = [1.0 -- 2.0 -- 3.0], + mask = [False True False True False], + fill_value=1.1) + + Note that `mask` is set to ``nomask`` if possible. + + >>> ma.masked_values(x, 1.5) + masked_array(data = [ 1. 1.1 2. 1.1 3. ], + mask = False, + fill_value=1.5) + + For integers, the fill value will be different in general to the + result of ``masked_equal``. 
+ + >>> x = np.arange(5) + >>> x + array([0, 1, 2, 3, 4]) + >>> ma.masked_values(x, 2) + masked_array(data = [0 1 -- 3 4], + mask = [False False True False False], + fill_value=2) + >>> ma.masked_equal(x, 2) + masked_array(data = [0 1 -- 3 4], + mask = [False False True False False], + fill_value=999999) + + """ + mabs = umath.absolute + xnew = filled(x, value) + if issubclass(xnew.dtype.type, np.floating): + condition = umath.less_equal(mabs(xnew - value), atol + rtol * mabs(value)) + mask = getattr(x, '_mask', nomask) + else: + condition = umath.equal(xnew, value) + mask = nomask + mask = mask_or(mask, make_mask(condition, shrink=shrink)) + return masked_array(xnew, mask=mask, copy=copy, fill_value=value) + + +def masked_invalid(a, copy=True): + """ + Mask an array where invalid values occur (NaNs or infs). + + This function is a shortcut to ``masked_where``, with + `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. + Only applies to arrays with a dtype where NaNs or infs make sense + (i.e. floating point types), but accepts any array_like object. + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5, dtype=np.float) + >>> a[2] = np.NaN + >>> a[3] = np.PINF + >>> a + array([ 0., 1., NaN, Inf, 4.]) + >>> ma.masked_invalid(a) + masked_array(data = [0.0 1.0 -- -- 4.0], + mask = [False False True True False], + fill_value=1e+20) + + """ + a = np.array(a, copy=copy, subok=True) + mask = getattr(a, '_mask', None) + if mask is not None: + condition = ~(np.isfinite(getdata(a))) + if mask is not nomask: + condition |= mask + cls = type(a) + else: + condition = ~(np.isfinite(a)) + cls = MaskedArray + result = a.view(cls) + result._mask = condition + return result + + +#####-------------------------------------------------------------------------- +#---- --- Printing options --- +#####-------------------------------------------------------------------------- + +class _MaskedPrintOption: + """ + Handle the string used to represent missing data in a masked array. + + """ + def __init__ (self, display): + "Create the masked_print_option object." + self._display = display + self._enabled = True + + def display(self): + "Display the string to print for masked values." + return self._display + + def set_display (self, s): + "Set the string to print for masked values." + self._display = s + + def enabled(self): + "Is the use of the display value enabled?" + return self._enabled + + def enable(self, shrink=1): + "Set the enabling shrink to `shrink`." + self._enabled = shrink + + def __str__ (self): + return str(self._display) + + __repr__ = __str__ + +#if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. 
+ Private function allowing for recursion + """ + names = result.dtype.names + for name in names: + (curdata, curmask) = (result[name], mask[name]) + if curdata.dtype.names: + _recursive_printoption(curdata, curmask, printopt) + else: + np.copyto(curdata, printopt, where=curmask) + return + +_print_templates = dict(long_std="""\ +masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) +""", + short_std="""\ +masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, +%(nlen)s fill_value = %(fill)s) +""", + long_flx="""\ +masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, +%(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) +""", + short_flx="""\ +masked_%(name)s(data = %(data)s, +%(nlen)s mask = %(mask)s, +%(nlen)s fill_value = %(fill)s, +%(nlen)s dtype = %(dtype)s) +""") + +#####-------------------------------------------------------------------------- +#---- --- MaskedArray class --- +#####-------------------------------------------------------------------------- + +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + Private function + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.copyto(current, fill_value[name], where=mask[name]) + + + +def flatten_structured_array(a): + """ + Flatten a structured array. + + The data type of the output is chosen such that it can represent all of the + (nested) fields. + + Parameters + ---------- + a : structured array + + Returns + ------- + output : masked array or ndarray + A flattened masked array if the input is a masked array, otherwise a + standard ndarray. + + Examples + -------- + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + # + def flatten_sequence(iterable): + """Flattens a compound of nested iterables.""" + for elm in iter(iterable): + if hasattr(elm, '__iter__'): + for f in flatten_sequence(elm): + yield f + else: + yield elm + # + a = np.asanyarray(a) + inishape = a.shape + a = a.ravel() + if isinstance(a, MaskedArray): + out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) + out = out.view(MaskedArray) + out._mask = np.array([tuple(flatten_sequence(d.item())) + for d in getmaskarray(a)]) + else: + out = np.array([tuple(flatten_sequence(d.item())) for d in a]) + if len(inishape) > 1: + newshape = list(out.shape) + newshape[0] = inishape + out.shape = tuple(flatten_sequence(newshape)) + return out + + + +class _arraymethod(object): + """ + Define a wrapper for basic array methods. + + Upon call, returns a masked array, where the new ``_data`` array is + the output of the corresponding method called on the original + ``_data``. + + If `onmask` is True, the new mask is the output of the method called + on the initial mask. Otherwise, the new mask is just a reference + to the initial mask. + + Attributes + ---------- + _onmask : bool + Holds the `onmask` parameter. + obj : object + The object calling `_arraymethod`. + + Parameters + ---------- + funcname : str + Name of the function to apply on data. + onmask : bool + Whether the mask must be processed also (True) or left + alone (False). Default is True. Make available as `_onmask` + attribute. 
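+
+    Notes
+    -----
+    This module uses it as, e.g., ``flatten = _arraymethod('flatten')`` and
+    ``repeat = _arraymethod('repeat')``: the wrapped ndarray method is then
+    applied to the data and, when `onmask` is True, to the mask as well.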
+
+    """
+
+    def __init__(self, funcname, onmask=True):
+        self.__name__ = funcname
+        self._onmask = onmask
+        self.obj = None
+        self.__doc__ = self.getdoc()
+
+    def getdoc(self):
+        "Return the doc of the function (from the doc of the method)."
+        methdoc = getattr(ndarray, self.__name__, None) or \
+                  getattr(np, self.__name__, None)
+        if methdoc is not None:
+            return methdoc.__doc__
+
+    def __get__(self, obj, objtype=None):
+        self.obj = obj
+        return self
+
+    def __call__(self, *args, **params):
+        methodname = self.__name__
+        instance = self.obj
+        # Fallback: if the instance has not been initialized, use the
+        # first argument instead.
+        if instance is None:
+            args = list(args)
+            instance = args.pop(0)
+        data = instance._data
+        mask = instance._mask
+        cls = type(instance)
+        result = getattr(data, methodname)(*args, **params).view(cls)
+        result._update_from(instance)
+        if result.ndim:
+            if not self._onmask:
+                result.__setmask__(mask)
+            elif mask is not nomask:
+                result.__setmask__(getattr(mask, methodname)(*args, **params))
+        else:
+            if mask.ndim and (not mask.dtype.names and mask.all()):
+                return masked
+        return result
+
+
+class MaskedIterator(object):
+    """
+    Flat iterator object to iterate over masked arrays.
+
+    A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
+    `x`. It allows iterating over the array as if it were a 1-D array,
+    either in a for-loop or by calling its `next` method.
+
+    Iteration is done in C-contiguous style, with the last index varying the
+    fastest. The iterator can also be indexed using basic slicing or
+    advanced indexing.
+
+    See Also
+    --------
+    MaskedArray.flat : Return a flat iterator over an array.
+    MaskedArray.flatten : Returns a flattened copy of an array.
+
+    Notes
+    -----
+    `MaskedIterator` is not exported by the `ma` module. Instead of
+    instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(6).reshape(2, 3))
+    >>> fl = x.flat
+    >>> type(fl)
+    <class 'numpy.ma.core.MaskedIterator'>
+    >>> for item in fl:
+    ...     print item
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    Extracting more than a single element by indexing the `MaskedIterator`
+    returns a masked array:
+
+    >>> fl[2:4]
+    masked_array(data = [2 3],
+                 mask = False,
+           fill_value = 999999)
+
+    """
+    def __init__(self, ma):
+        self.ma = ma
+        self.dataiter = ma._data.flat
+
+        if ma._mask is nomask:
+            self.maskiter = None
+        else:
+            self.maskiter = ma._mask.flat
+
+    def __iter__(self):
+        return self
+
+    def __getitem__(self, indx):
+        result = self.dataiter.__getitem__(indx).view(type(self.ma))
+        if self.maskiter is not None:
+            _mask = self.maskiter.__getitem__(indx)
+            if isinstance(_mask, ndarray):
+                # Set the shape to match that of the data; this is needed
+                # for matrices.
+                _mask.shape = result.shape
+                result._mask = _mask
+            elif isinstance(_mask, np.void):
+                return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
+            elif _mask:  # Just a scalar, masked
+                return masked
+        return result
+
+    # Note: this will not work if `ravel` makes a copy of the data.
+    def __setitem__(self, index, value):
+        self.dataiter[index] = getdata(value)
+        if self.maskiter is not None:
+            self.maskiter[index] = getmaskarray(value)
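+
+    # A short usage sketch (editor's illustration): iteration below yields
+    # the data values, with `masked` standing in for masked entries, e.g.
+    #     for v in np.ma.array([1, 2, 3], mask=[0, 1, 0]).flat:
+    #         print v                      # prints 1, then --, then 3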
+
+    def __next__(self):
+        """
+        Return the next value, or raise StopIteration.
+
+        Examples
+        --------
+        >>> x = np.ma.array([3, 2], mask=[0, 1])
+        >>> fl = x.flat
+        >>> fl.next()
+        3
+        >>> fl.next()
+        masked_array(data = --,
+                     mask = True,
+               fill_value = 1e+20)
+        >>> fl.next()
+        Traceback (most recent call last):
+          File "<stdin>", line 1, in <module>
+          File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
+            d = self.dataiter.next()
+        StopIteration
+
+        """
+        d = next(self.dataiter)
+        if self.maskiter is not None:
+            m = next(self.maskiter)
+            if isinstance(m, np.void):
+                return mvoid(d, mask=m, hardmask=self.ma._hardmask)
+            elif m:  # Just a scalar, masked
+                return masked
+        return d
+
+    next = __next__
+
+
+class MaskedArray(ndarray):
+    """
+    An array class with possibly masked values.
+
+    Masked values of True exclude the corresponding element from any
+    computation.
+
+    Construction::
+
+      x = MaskedArray(data, mask=nomask, dtype=None,
+                      copy=False, subok=True, ndmin=0, fill_value=None,
+                      keep_mask=True, hard_mask=None, shrink=True)
+
+    Parameters
+    ----------
+    data : array_like
+        Input data.
+    mask : sequence, optional
+        Mask. Must be convertible to an array of booleans with the same
+        shape as `data`. True indicates a masked (i.e. invalid) datum.
+    dtype : dtype, optional
+        Data type of the output.
+        If `dtype` is None, the type of the data argument (``data.dtype``)
+        is used. If `dtype` is not None and different from ``data.dtype``,
+        a copy is performed.
+    copy : bool, optional
+        Whether to copy the input data (True), or to use a reference instead.
+        Default is False.
+    subok : bool, optional
+        Whether to return a subclass of `MaskedArray` if possible (True) or a
+        plain `MaskedArray`. Default is True.
+    ndmin : int, optional
+        Minimum number of dimensions. Default is 0.
+    fill_value : scalar, optional
+        Value used to fill in the masked values when necessary.
+        If None, a default based on the data-type is used.
+    keep_mask : bool, optional
+        Whether to combine `mask` with the mask of the input data, if any
+        (True), or to use only `mask` for the output (False). Default is True.
+    hard_mask : bool, optional
+        Whether to use a hard mask or not. With a hard mask, masked values
+        cannot be unmasked. Default is False.
+    shrink : bool, optional
+        Whether to force compression of an empty mask. Default is True.
+
+    """
+
+    __array_priority__ = 15
+    _defaultmask = nomask
+    _defaulthardmask = False
+    _baseclass = ndarray
+
+    def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
+                subok=True, ndmin=0, fill_value=None,
+                keep_mask=True, hard_mask=None, shrink=True,
+                **options):
+        """
+        Create a new masked array from scratch.
+
+        Notes
+        -----
+        A masked array can also be created by taking a ``.view(MaskedArray)``.
+
+        """
+        # Process the input data.
+        _data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin)
+        _baseclass = getattr(data, '_baseclass', type(_data))
+        # Check that we are not erasing the mask.
+        if isinstance(data, MaskedArray) and (data.shape != _data.shape):
+            copy = True
+        # Careful, cls might not always be MaskedArray.
+        if not isinstance(data, cls) or not subok:
+            _data = ndarray.view(_data, cls)
+        else:
+            _data = ndarray.view(_data, type(data))
+        # Backwards compatibility w/ numpy.core.ma.
+        if hasattr(data, '_mask') and not isinstance(data, ndarray):
+            _data._mask = data._mask
+            _sharedmask = True
+        # Process the mask.
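+        # The branches below distinguish two main cases -- no mask given
+        # vs. an explicit mask -- and, within each, plain vs. structured
+        # dtypes; they set `_data._mask` and `_data._sharedmask`
+        # accordingly.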
+ # Number of named fields (or zero if none) + names_ = _data.dtype.names or () + # Type of the mask + if names_: + mdtype = make_mask_descr(_data.dtype) + else: + mdtype = MaskType + # Case 1. : no mask in input ............ + if mask is nomask: + # Erase the current mask ? + if not keep_mask: + # With a reduced version + if shrink: + _data._mask = nomask + # With full version + else: + _data._mask = np.zeros(_data.shape, dtype=mdtype) + # Check whether we missed something + elif isinstance(data, (tuple, list)): + try: + # If data is a sequence of masked array + mask = np.array([getmaskarray(m) for m in data], + dtype=mdtype) + except ValueError: + # If data is nested + mask = nomask + # Force shrinking of the mask if needed (and possible) + if (mdtype == MaskType) and mask.any(): + _data._mask = mask + _data._sharedmask = False + else: + if copy: + _data._mask = _data._mask.copy() + _data._sharedmask = False + # Reset the shape of the original mask + if getmask(data) is not nomask: + data._mask.shape = data.shape + else: + _data._sharedmask = True + # Case 2. : With a mask in input ........ + else: + # Read the mask with the current mdtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Make sure the mask and the data have the same shape + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = np.resize(mask, _data.shape) + elif nm == nd: + mask = np.reshape(mask, _data.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." + raise MaskError(msg % (nd, nm)) + copy = True + # Set the mask to the new value + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = not copy + else: + if not keep_mask: + _data._mask = mask + _data._sharedmask = not copy + else: + if names_: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names: + _recursive_or(af, bf) + else: + af |= bf + return + _recursive_or(_data._mask, mask) + else: + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False + # Update fill_value....... + if fill_value is None: + fill_value = getattr(data, '_fill_value', None) + # But don't run the check unless we have something to check.... + if fill_value is not None: + _data._fill_value = _check_fill_value(fill_value, _data.dtype) + # Process extra options .. + if hard_mask is None: + _data._hardmask = getattr(data, '_hardmask', False) + else: + _data._hardmask = hard_mask + _data._baseclass = _baseclass + return _data + # + def _update_from(self, obj): + """Copies some attributes of obj to self. 
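+        (Namely `_fill_value`, `_hardmask`, `_sharedmask`, `_isfield`,
+        `_baseclass`, and the `_optinfo`/`_basedict` dictionaries.)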
+ """ + if obj is not None and isinstance(obj, ndarray): + _baseclass = type(obj) + else: + _baseclass = ndarray + # We need to copy the _basedict to avoid backward propagation + _optinfo = {} + _optinfo.update(getattr(obj, '_optinfo', {})) + _optinfo.update(getattr(obj, '_basedict', {})) + if not isinstance(obj, MaskedArray): + _optinfo.update(getattr(obj, '__dict__', {})) + _dict = dict(_fill_value=getattr(obj, '_fill_value', None), + _hardmask=getattr(obj, '_hardmask', False), + _sharedmask=getattr(obj, '_sharedmask', False), + _isfield=getattr(obj, '_isfield', False), + _baseclass=getattr(obj, '_baseclass', _baseclass), + _optinfo=_optinfo, + _basedict=_optinfo) + self.__dict__.update(_dict) + self.__dict__.update(_optinfo) + return + + + def __array_finalize__(self, obj): + """Finalizes the masked array. + """ + # Get main attributes ......... + self._update_from(obj) + if isinstance(obj, ndarray): + odtype = obj.dtype + if odtype.names: + _mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype)) + else: + _mask = getattr(obj, '_mask', nomask) + else: + _mask = nomask + self._mask = _mask + # Finalize the mask ........... + if self._mask is not nomask: + try: + self._mask.shape = self.shape + except ValueError: + self._mask = nomask + except (TypeError, AttributeError): + # When _mask.shape is not writable (because it's a void) + pass + # Finalize the fill_value for structured arrays + if self.dtype.names: + if self._fill_value is None: + self._fill_value = _check_fill_value(None, self.dtype) + return + + + def __array_wrap__(self, obj, context=None): + """ + Special hook for ufuncs. + Wraps the numpy array and sets the mask according to context. + """ + result = obj.view(type(self)) + result._update_from(self) + #.......... + if context is not None: + result._mask = result._mask.copy() + (func, args, _) = context + m = reduce(mask_or, [getmaskarray(arg) for arg in args]) + # Get the domain mask................ + domain = ufunc_domain.get(func, None) + if domain is not None: + # Take the domain, and make sure it's a ndarray + if len(args) > 2: + d = filled(reduce(domain, args), True) + else: + d = filled(domain(*args), True) + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + result = result.copy() + np.copyto(result, fill_value, where=d) + # Update the mask + if m is nomask: + if d is not nomask: + m = d + else: + # Don't modify inplace, we risk back-propagation + m = (m | d) + # Make sure the mask has the proper size + if result.shape == () and m: + return masked + else: + result._mask = m + result._sharedmask = False + #.... + return result + + + def view(self, dtype=None, type=None, fill_value=None): + """ + Return a view of the MaskedArray data + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + The default, None, results in the view having the same data-type + as `a`. As with ``ndarray.view``, dtype can also be specified as + an ndarray sub-class, which then specifies the type of the + returned object (this is equivalent to setting the ``type`` + parameter). + type : Python type, optional + Type of the returned view, e.g., ndarray or matrix. Again, the + default None results in type preservation. 
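+        fill_value : scalar, optional
+            The fill value to use for the returned view. See the Notes
+            below for how the default (None) interacts with `dtype`.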
+ + Notes + ----- + + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + If `fill_value` is not specified, but `dtype` is specified (and is not + an ndarray sub-class), the `fill_value` of the MaskedArray will be + reset. If neither `fill_value` nor `dtype` are specified (or if + `dtype` is an ndarray sub-class), then the fill value is preserved. + Finally, if `fill_value` is specified, but `dtype` is not, the fill + value is set to the specified value. + + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a + regular array to a structured array), then the behavior of the view + cannot be predicted just from the superficial appearance of ``a`` (shown + by ``print(a)``). It also depends on exactly how ``a`` is stored in + memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus + defined as a slice or transpose, etc., the view may give different + results. + """ + + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + except TypeError: + output = ndarray.view(self, dtype) + else: + output = ndarray.view(self, dtype, type) + # Should we update the mask ? + if (getattr(output, '_mask', nomask) is not nomask): + if dtype is None: + dtype = output.dtype + mdtype = make_mask_descr(dtype) + output._mask = self._mask.view(mdtype, ndarray) + # Try to reset the shape of the mask (if we don't have a void) + try: + output._mask.shape = output.shape + except (AttributeError, TypeError): + pass + # Make sure to reset the _fill_value if needed + if getattr(output, '_fill_value', None) is not None: + if fill_value is None: + if dtype is None: + pass # leave _fill_value as is + else: + output._fill_value = None + else: + output.fill_value = fill_value + return output + view.__doc__ = ndarray.view.__doc__ + + + def astype(self, newtype): + """ + Returns a copy of the MaskedArray cast to given newtype. + + Returns + ------- + output : MaskedArray + A copy of self cast to input newtype. + The returned record shape matches self.shape. + + Examples + -------- + >>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print x + [[1.0 -- 3.1] + [-- 5.0 --] + [7.0 -- 9.0]] + >>> print x.astype(int32) + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + + """ + newtype = np.dtype(newtype) + output = self._data.astype(newtype).view(type(self)) + output._update_from(self) + names = output.dtype.names + if names is None: + output._mask = self._mask.astype(bool) + else: + if self._mask is nomask: + output._mask = nomask + else: + output._mask = self._mask.astype([(n, bool) for n in names]) + # Don't check _fill_value if it's None, that'll speed things up + if self._fill_value is not None: + output._fill_value = _check_fill_value(self._fill_value, newtype) + return output + + + def __getitem__(self, indx): + """x.__getitem__(y) <==> x[y] + + Return the item described by i, as a masked array. 
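+
+        Indexing a single masked entry returns the `masked` singleton, and
+        indexing a record returns an `mvoid`, so that masks of individual
+        rows can be edited consistently.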
+ + """ + # This test is useful, but we should keep things light... +# if getmask(indx) is not nomask: +# msg = "Masked arrays must be filled before they can be used as indices!" +# raise IndexError(msg) + _data = ndarray.view(self, ndarray) + dout = ndarray.__getitem__(_data, indx) + # We could directly use ndarray.__getitem__ on self... + # But then we would have to modify __array_finalize__ to prevent the + # mask of being reshaped if it hasn't been set up properly yet... + # So it's easier to stick to the current version + _mask = self._mask + if not getattr(dout, 'ndim', False): + # A record ................ + if isinstance(dout, np.void): + mask = _mask[indx] + # We should always re-cast to mvoid, otherwise users can + # change masks on rows that already have masked values, but not + # on rows that have no masked values, which is inconsistent. + dout = mvoid(dout, mask=mask, hardmask=self._hardmask) + # Just a scalar............ + elif _mask is not nomask and _mask[indx]: + return masked + else: + # Force dout to MA ........ + dout = dout.view(type(self)) + # Inherit attributes from self + dout._update_from(self) + # Check the fill_value .... + if isinstance(indx, basestring): + if self._fill_value is not None: + dout._fill_value = self._fill_value[indx] + dout._isfield = True + # Update the mask if needed + if _mask is not nomask: + dout._mask = _mask[indx] + dout._sharedmask = True +# Note: Don't try to check for m.any(), that'll take too long... + return dout + + def __setitem__(self, indx, value): + """x.__setitem__(i, y) <==> x[i]=y + + Set item described by index. If value is masked, masks those + locations. + + """ + if self is masked: + raise MaskError('Cannot alter the masked element.') + # This test is useful, but we should keep things light... +# if getmask(indx) is not nomask: +# msg = "Masked arrays must be filled before they can be used as indices!" +# raise IndexError(msg) + _data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass')) + _mask = ndarray.__getattribute__(self, '_mask') + if isinstance(indx, basestring): + ndarray.__setitem__(_data, indx, value) + if _mask is nomask: + self._mask = _mask = make_mask_none(self.shape, self.dtype) + _mask[indx] = getmask(value) + return + #........................................ + _dtype = ndarray.__getattribute__(_data, 'dtype') + nbfields = len(_dtype.names or ()) + #........................................ + if value is masked: + # The mask wasn't set: create a full version... + if _mask is nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + # Now, set the mask to its value. + if nbfields: + _mask[indx] = tuple([True] * nbfields) + else: + _mask[indx] = True + if not self._isfield: + self._sharedmask = False + return + #........................................ 
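+        # The general case below splits `value` into its data part and its
+        # mask part, writes the data first, then updates the mask; when the
+        # mask is hard, entries that are already masked are left untouched.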
+ # Get the _data part of the new value + dval = value + # Get the _mask part of the new value + mval = getattr(value, '_mask', nomask) + if nbfields and mval is nomask: + mval = tuple([False] * nbfields) + if _mask is nomask: + # Set the data, then the mask + ndarray.__setitem__(_data, indx, dval) + if mval is not nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + ndarray.__setitem__(_mask, indx, mval) + elif not self._hardmask: + # Unshare the mask if necessary to avoid propagation + if not self._isfield: + self.unshare_mask() + _mask = ndarray.__getattribute__(self, '_mask') + # Set the data, then the mask + ndarray.__setitem__(_data, indx, dval) + ndarray.__setitem__(_mask, indx, mval) + elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): + indx = indx * umath.logical_not(_mask) + ndarray.__setitem__(_data, indx, dval) + else: + if nbfields: + err_msg = "Flexible 'hard' masks are not yet supported..." + raise NotImplementedError(err_msg) + mindx = mask_or(_mask[indx], mval, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + np.copyto(dindx, dval, where=~mindx) + elif mindx is nomask: + dindx = dval + ndarray.__setitem__(_data, indx, dindx) + _mask[indx] = mindx + return + + + def __getslice__(self, i, j): + """x.__getslice__(i, j) <==> x[i:j] + + Return the slice described by (i, j). The use of negative + indices is not supported. + + """ + return self.__getitem__(slice(i, j)) + + def __setslice__(self, i, j, value): + """x.__setslice__(i, j, value) <==> x[i:j]=value + + Set the slice (i,j) of a to value. If value is masked, mask + those locations. + + """ + self.__setitem__(slice(i, j), value) + + + def __setmask__(self, mask, copy=False): + """Set the mask. + + """ + idtype = ndarray.__getattribute__(self, 'dtype') + current_mask = ndarray.__getattribute__(self, '_mask') + if mask is masked: + mask = True + # Make sure the mask is set + if (current_mask is nomask): + # Just don't do anything is there's nothing to do... + if mask is nomask: + return + current_mask = self._mask = make_mask_none(self.shape, idtype) + # No named fields......... + if idtype.names is None: + # Hardmask: don't unmask the data + if self._hardmask: + current_mask |= mask + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method... + elif isinstance(mask, (int, float, np.bool_, np.number)): + current_mask[...] = mask + # ...otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + # Named fields w/ ............ + else: + mdtype = current_mask.dtype + mask = np.array(mask, copy=False) + # Mask is a singleton + if not mask.ndim: + # It's a boolean : make a record + if mask.dtype.kind == 'b': + mask = np.array(tuple([mask.item()]*len(mdtype)), + dtype=mdtype) + # It's a record: make sure the dtype is correct + else: + mask = mask.astype(mdtype) + # Mask is a sequence + else: + # Make sure the new mask is a ndarray with the proper dtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Hardmask: don't unmask the data + if self._hardmask: + for n in idtype.names: + current_mask[n] |= mask[n] + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method... + elif isinstance(mask, (int, float, np.bool_, np.number)): + current_mask[...] = mask + # ...otherwise fall back to the slower, general purpose way. 
+ else: + current_mask.flat = mask + # Reshape if needed + if current_mask.shape: + current_mask.shape = self.shape + return + _set_mask = __setmask__ + #.... + def _get_mask(self): + """Return the current mask. + + """ + # We could try to force a reshape, but that wouldn't work in some cases. +# return self._mask.reshape(self.shape) + return self._mask + mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") + + + def _get_recordmask(self): + """ + Return the mask of the records. + A record is masked when all the fields are masked. + + """ + _mask = ndarray.__getattribute__(self, '_mask').view(ndarray) + if _mask.dtype.names is None: + return _mask + return np.all(flatten_structured_array(_mask), axis= -1) + + + def _set_recordmask(self): + """Return the mask of the records. + A record is masked when all the fields are masked. + + """ + raise NotImplementedError("Coming soon: setting the mask per records!") + recordmask = property(fget=_get_recordmask) + + #............................................ + def harden_mask(self): + """ + Force the mask to hard. + + Whether the mask of a masked array is hard or soft is determined by + its `hardmask` property. `harden_mask` sets `hardmask` to True. + + See Also + -------- + hardmask + + """ + self._hardmask = True + return self + + def soften_mask(self): + """ + Force the mask to soft. + + Whether the mask of a masked array is hard or soft is determined by + its `hardmask` property. `soften_mask` sets `hardmask` to False. + + See Also + -------- + hardmask + + """ + self._hardmask = False + return self + + hardmask = property(fget=lambda self: self._hardmask, + doc="Hardness of the mask") + + + def unshare_mask(self): + """ + Copy the mask and set the sharedmask flag to False. + + Whether the mask is shared between masked arrays can be seen from + the `sharedmask` property. `unshare_mask` ensures the mask is not shared. + A copy of the mask is only made if it was shared. + + See Also + -------- + sharedmask + + """ + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + return self + + sharedmask = property(fget=lambda self: self._sharedmask, + doc="Share status of the mask (read-only).") + + def shrink_mask(self): + """ + Reduce a mask to nomask when possible. + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) + >>> x.mask + array([[False, False], + [False, False]], dtype=bool) + >>> x.shrink_mask() + >>> x.mask + False + + """ + m = self._mask + if m.ndim and not m.any(): + self._mask = nomask + return self + + #............................................ + + baseclass = property(fget=lambda self:self._baseclass, + doc="Class of the underlying data (read-only).") + + def _get_data(self): + """Return the current data, as a view of the original + underlying data. + + """ + return ndarray.view(self, self._baseclass) + _data = property(fget=_get_data) + data = property(fget=_get_data) + + def _get_flat(self): + "Return a flat iterator." + return MaskedIterator(self) + # + def _set_flat (self, value): + "Set a flattened version of self to value." + y = self.ravel() + y[:] = value + # + flat = property(fget=_get_flat, fset=_set_flat, + doc="Flat version of the array.") + + + def get_fill_value(self): + """ + Return the filling value of the masked array. + + Returns + ------- + fill_value : scalar + The filling value. + + Examples + -------- + >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: + ... 
np.ma.array([0, 1], dtype=dt).get_fill_value() + ... + 999999 + 999999 + 1e+20 + (1e+20+0j) + + >>> x = np.ma.array([0, 1.], fill_value=-np.inf) + >>> x.get_fill_value() + -inf + + """ + if self._fill_value is None: + self._fill_value = _check_fill_value(None, self.dtype) + return self._fill_value[()] + + def set_fill_value(self, value=None): + """ + Set the filling value of the masked array. + + Parameters + ---------- + value : scalar, optional + The new filling value. Default is None, in which case a default + based on the data type is used. + + See Also + -------- + ma.set_fill_value : Equivalent function. + + Examples + -------- + >>> x = np.ma.array([0, 1.], fill_value=-np.inf) + >>> x.fill_value + -inf + >>> x.set_fill_value(np.pi) + >>> x.fill_value + 3.1415926535897931 + + Reset to default: + + >>> x.set_fill_value() + >>> x.fill_value + 1e+20 + + """ + target = _check_fill_value(value, self.dtype) + _fill_value = self._fill_value + if _fill_value is None: + # Create the attribute if it was undefined + self._fill_value = target + else: + # Don't overwrite the attribute, just fill it (for propagation) + _fill_value[()] = target + + fill_value = property(fget=get_fill_value, fset=set_fill_value, + doc="Filling value.") + + + def filled(self, fill_value=None): + """ + Return a copy of self, with masked values filled with a given value. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, the `fill_value` attribute of the array is used instead. + + Returns + ------- + filled_array : ndarray + A copy of ``self`` with invalid entries replaced by *fill_value* + (be it the function argument or the attribute of ``self``. + + Notes + ----- + The result is **not** a MaskedArray! + + Examples + -------- + >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) + >>> x.filled() + array([1, 2, -999, 4, -999]) + >>> type(x.filled()) + + + Subclassing is preserved. This means that if the data part of the masked + array is a matrix, `filled` returns a matrix: + + >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.filled() + matrix([[ 1, 999999], + [999999, 4]]) + + """ + m = self._mask + if m is nomask: + return self._data + # + if fill_value is None: + fill_value = self.fill_value + else: + fill_value = _check_fill_value(fill_value, self.dtype) + # + if self is masked_singleton: + return np.asanyarray(fill_value) + # + if m.dtype.names: + result = self._data.copy('K') + _recursive_filled(result, self._mask, fill_value) + elif not m.any(): + return self._data + else: + result = self._data.copy('K') + try: + np.copyto(result, fill_value, where=m) + except (TypeError, AttributeError): + fill_value = narray(fill_value, dtype=object) + d = result.astype(object) + result = np.choose(m, (d, fill_value)) + except IndexError: + #ok, if scalar + if self._data.shape: + raise + elif m: + result = np.array(fill_value, dtype=self.dtype) + else: + result = self._data + return result + + def compressed(self): + """ + Return all the non-masked data as a 1-D array. + + Returns + ------- + data : ndarray + A new `ndarray` holding the non-masked data is returned. + + Notes + ----- + The result is **not** a MaskedArray! 
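+        The entries are returned in the order in which they occur in the
+        raveled data.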
+ + Examples + -------- + >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) + >>> x.compressed() + array([0, 1]) + >>> type(x.compressed()) + + + """ + data = ndarray.ravel(self._data) + if self._mask is not nomask: + data = data.compress(np.logical_not(ndarray.ravel(self._mask))) + return data + + + def compress(self, condition, axis=None, out=None): + """ + Return `a` where condition is ``True``. + + If condition is a `MaskedArray`, missing values are considered + as ``False``. + + Parameters + ---------- + condition : var + Boolean 1-d array selecting which entries to return. If len(condition) + is less than the size of a along the axis, then output is truncated + to length of condition array. + axis : {None, int}, optional + Axis along which the operation must be performed. + out : {None, ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type will be cast if + necessary. + + Returns + ------- + result : MaskedArray + A :class:`MaskedArray` object. + + Notes + ----- + Please note the difference with :meth:`compressed` ! + The output of :meth:`compress` has a mask, the output of + :meth:`compressed` does not. + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print x + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + >>> x.compress([1, 0, 1]) + masked_array(data = [1 3], + mask = [False False], + fill_value=999999) + + >>> x.compress([1, 0, 1], axis=1) + masked_array(data = + [[1 3] + [-- --] + [7 9]], + mask = + [[False False] + [ True True] + [False False]], + fill_value=999999) + + """ + # Get the basic components + (_data, _mask) = (self._data, self._mask) + # Force the condition to a regular ndarray (forget the missing values...) + condition = np.array(condition, copy=False, subok=False) + # + _new = _data.compress(condition, axis=axis, out=out).view(type(self)) + _new._update_from(self) + if _mask is not nomask: + _new._mask = _mask.compress(condition, axis=axis) + return _new + + #............................................ + def __str__(self): + """String representation. + + """ + if masked_print_option.enabled(): + f = masked_print_option + if self is masked: + return str(f) + m = self._mask + if m is nomask: + res = self._data + else: + if m.shape == (): + if m.dtype.names: + m = m.view((bool, len(m.dtype))) + if m.any(): + return str(tuple((f if _m else _d) for _d, _m in + zip(self._data.tolist(), m))) + else: + return str(self._data) + elif m: + return str(f) + else: + return str(self._data) + # convert to object array to make filled work + names = self.dtype.names + if names is None: + res = self._data.astype("O") + res.view(ndarray)[m] = f + else: + rdtype = _recursive_make_descr(self.dtype, "O") + res = self._data.astype(rdtype) + _recursive_printoption(res, m, f) + else: + res = self.filled(self.fill_value) + return str(res) + + def __repr__(self): + """Literal string representation. 
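+
+        The output is built from the `_print_templates` defined above: the
+        short or long variant is chosen from the number of dimensions, and
+        the flexible ("flx") variant when the dtype has named fields.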
+ + """ + n = len(self.shape) + if self._baseclass is np.ndarray: + name = 'array' + else: + name = self._baseclass.__name__ + + parameters = dict(name=name, nlen=" " * len(name), + data=str(self), mask=str(self._mask), + fill=str(self.fill_value), dtype=str(self.dtype)) + if self.dtype.names: + if n <= 1: + return _print_templates['short_flx'] % parameters + return _print_templates['long_flx'] % parameters + elif n <= 1: + return _print_templates['short_std'] % parameters + return _print_templates['long_std'] % parameters + + def __eq__(self, other): + "Check whether other equals self elementwise" + if self is masked: + return masked + omask = getattr(other, '_mask', nomask) + if omask is nomask: + check = ndarray.__eq__(self.filled(0), other) + try: + check = check.view(type(self)) + check._mask = self._mask + except AttributeError: + # Dang, we have a bool instead of an array: return the bool + return check + else: + odata = filled(other, 0) + check = ndarray.__eq__(self.filled(0), odata).view(type(self)) + if self._mask is nomask: + check._mask = omask + else: + mask = mask_or(self._mask, omask) + if mask.dtype.names: + if mask.size > 1: + axis = 1 + else: + axis = None + try: + mask = mask.view((bool_, len(self.dtype))).all(axis) + except ValueError: + mask = np.all([[f[n].all() for n in mask.dtype.names] + for f in mask], axis=axis) + check._mask = mask + return check + # + def __ne__(self, other): + "Check whether other doesn't equal self elementwise" + if self is masked: + return masked + omask = getattr(other, '_mask', nomask) + if omask is nomask: + check = ndarray.__ne__(self.filled(0), other) + try: + check = check.view(type(self)) + check._mask = self._mask + except AttributeError: + # In case check is a boolean (or a numpy.bool) + return check + else: + odata = filled(other, 0) + check = ndarray.__ne__(self.filled(0), odata).view(type(self)) + if self._mask is nomask: + check._mask = omask + else: + mask = mask_or(self._mask, omask) + if mask.dtype.names: + if mask.size > 1: + axis = 1 + else: + axis = None + try: + mask = mask.view((bool_, len(self.dtype))).all(axis) + except ValueError: + mask = np.all([[f[n].all() for n in mask.dtype.names] + for f in mask], axis=axis) + check._mask = mask + return check + # + def __add__(self, other): + "Add other to self, and return a new masked array." + return add(self, other) + # + def __radd__(self, other): + "Add other to self, and return a new masked array." + return add(self, other) + # + def __sub__(self, other): + "Subtract other to self, and return a new masked array." + return subtract(self, other) + # + def __rsub__(self, other): + "Subtract other to self, and return a new masked array." + return subtract(other, self) + # + def __mul__(self, other): + "Multiply other by self, and return a new masked array." + return multiply(self, other) + # + def __rmul__(self, other): + "Multiply other by self, and return a new masked array." + return multiply(self, other) + # + def __div__(self, other): + "Divide other into self, and return a new masked array." + return divide(self, other) + # + def __truediv__(self, other): + "Divide other into self, and return a new masked array." + return true_divide(self, other) + # + def __rtruediv__(self, other): + "Divide other into self, and return a new masked array." + return true_divide(other, self) + # + def __floordiv__(self, other): + "Divide other into self, and return a new masked array." 
+ return floor_divide(self, other) + # + def __rfloordiv__(self, other): + "Divide other into self, and return a new masked array." + return floor_divide(other, self) + # + def __pow__(self, other): + "Raise self to the power other, masking the potential NaNs/Infs" + return power(self, other) + # + def __rpow__(self, other): + "Raise self to the power other, masking the potential NaNs/Infs" + return power(other, self) + #............................................ + def __iadd__(self, other): + "Add other to self in-place." + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + else: + if m is not nomask: + self._mask += m + ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other))) + return self + #.... + def __isub__(self, other): + "Subtract other from self in-place." + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other))) + return self + #.... + def __imul__(self, other): + "Multiply self by other in-place." + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other))) + return self + #.... + def __idiv__(self, other): + "Divide self by other in-place." + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.divide] + other_data = np.where(dom_mask, fval, other_data) +# self._mask = mask_or(self._mask, new_mask) + self._mask |= new_mask + ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data)) + return self + #.... + def __ifloordiv__(self, other): + "Floor divide self by other in-place." + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.floor_divide] + other_data = np.where(dom_mask, fval, other_data) +# self._mask = mask_or(self._mask, new_mask) + self._mask |= new_mask + ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data)) + return self + #.... + def __itruediv__(self, other): + "True divide self by other in-place." + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.true_divide] + other_data = np.where(dom_mask, fval, other_data) +# self._mask = mask_or(self._mask, new_mask) + self._mask |= new_mask + ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data)) + return self + #... + def __ipow__(self, other): + "Raise self to the power other, in place." 
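+        # The power is taken on the unmasked data (an exponent of 1 is used
+        # at masked positions as a harmless placeholder); any NaNs/Infs the
+        # operation produces are then detected and folded, together with
+        # `other`'s mask, into the mask, filling the data at those spots.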
+ other_data = getdata(other) + other_mask = getmask(other) + with np.errstate(divide='ignore', invalid='ignore'): + ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data)) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.copyto(self._data, self.fill_value, where=invalid) + new_mask = mask_or(other_mask, invalid) + self._mask = mask_or(self._mask, new_mask) + return self + #............................................ + def __float__(self): + "Convert to float." + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + warnings.warn("Warning: converting a masked element to nan.") + return np.nan + return float(self.item()) + + def __int__(self): + "Convert to int." + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python int.') + return int(self.item()) + + + def get_imag(self): + """ + Return the imaginary part of the masked array. + + The returned array is a view on the imaginary part of the `MaskedArray` + whose `get_imag` method is called. + + Parameters + ---------- + None + + Returns + ------- + result : MaskedArray + The imaginary part of the masked array. + + See Also + -------- + get_real, real, imag + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.get_imag() + masked_array(data = [1.0 -- 1.6], + mask = [False True False], + fill_value = 1e+20) + + """ + result = self._data.imag.view(type(self)) + result.__setmask__(self._mask) + return result + imag = property(fget=get_imag, doc="Imaginary part.") + + def get_real(self): + """ + Return the real part of the masked array. + + The returned array is a view on the real part of the `MaskedArray` + whose `get_real` method is called. + + Parameters + ---------- + None + + Returns + ------- + result : MaskedArray + The real part of the masked array. + + See Also + -------- + get_imag, real, imag + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.get_real() + masked_array(data = [1.0 -- 3.45], + mask = [False True False], + fill_value = 1e+20) + + """ + result = self._data.real.view(type(self)) + result.__setmask__(self._mask) + return result + real = property(fget=get_real, doc="Real part") + + + #............................................ + def count(self, axis=None): + """ + Count the non-masked elements of the array along the given axis. + + Parameters + ---------- + axis : int, optional + Axis along which to count the non-masked elements. If `axis` is + `None`, all non-masked elements are counted. + + Returns + ------- + result : int or ndarray + If `axis` is `None`, an integer count is returned. When `axis` is + not `None`, an array with shape determined by the lengths of the + remaining axes, is returned. + + See Also + -------- + count_masked : Count masked elements in array or along a given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(6).reshape((2, 3)) + >>> a[1, :] = ma.masked + >>> a + masked_array(data = + [[0 1 2] + [-- -- --]], + mask = + [[False False False] + [ True True True]], + fill_value = 999999) + >>> a.count() + 3 + + When the `axis` keyword is specified an array of appropriate size is + returned. 
+ + >>> a.count(axis=0) + array([1, 1, 1]) + >>> a.count(axis=1) + array([3, 0]) + + """ + m = self._mask + s = self.shape + if m is nomask: + if axis is None: + return self.size + else: + n = s[axis] + t = list(s) + del t[axis] + return np.full(t, n, dtype=np.intp) + n1 = np.size(m, axis) + n2 = np.sum(m, axis=axis, dtype=np.intp) + if axis is None: + return (n1 - n2) + else: + return narray(n1 - n2) + #............................................ + flatten = _arraymethod('flatten') + # + def ravel(self): + """ + Returns a 1D version of self, as a view. + + Returns + ------- + MaskedArray + Output view is of shape ``(self.size,)`` (or + ``(np.ma.product(self.shape),)``). + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print x + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + >>> print x.ravel() + [1 -- 3 -- 5 -- 7 -- 9] + + """ + r = ndarray.ravel(self._data).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask).reshape(r.shape) + else: + r._mask = nomask + return r + # + repeat = _arraymethod('repeat') + # + def reshape (self, *s, **kwargs): + """ + Give a new shape to the array without changing its data. + + Returns a masked array containing the same data, but with a new shape. + The result is a view on the original array; if this is not possible, a + ValueError is raised. + + Parameters + ---------- + shape : int or tuple of ints + The new shape should be compatible with the original shape. If an + integer is supplied, then the result will be a 1-D array of that + length. + order : {'C', 'F'}, optional + Determines whether the array data should be viewed as in C + (row-major) or FORTRAN (column-major) order. + + Returns + ------- + reshaped_array : array + A new view on the array. + + See Also + -------- + reshape : Equivalent function in the masked array module. + numpy.ndarray.reshape : Equivalent method on ndarray object. + numpy.reshape : Equivalent function in the NumPy module. + + Notes + ----- + The reshaping operation cannot guarantee that a copy will not be made, + to modify the shape in place, use ``a.shape = s`` + + Examples + -------- + >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) + >>> print x + [[-- 2] + [3 --]] + >>> x = x.reshape((4,1)) + >>> print x + [[--] + [2] + [3] + [--]] + + """ + kwargs.update(order=kwargs.get('order', 'C')) + result = self._data.reshape(*s, **kwargs).view(type(self)) + result._update_from(self) + mask = self._mask + if mask is not nomask: + result._mask = mask.reshape(*s, **kwargs) + return result + # + def resize(self, newshape, refcheck=True, order=False): + """ + .. warning:: + + This method does nothing, except raise a ValueError exception. A + masked array does not own its data and therefore cannot safely be + resized in place. Use the `numpy.ma.resize` function instead. + + This method is difficult to implement safely and may be deprecated in + future releases of NumPy. + + """ + # Note : the 'order' keyword looks broken, let's just drop it +# try: +# ndarray.resize(self, newshape, refcheck=refcheck) +# if self.mask is not nomask: +# self._mask.resize(newshape, refcheck=refcheck) +# except ValueError: +# raise ValueError("Cannot resize an array that has been referenced " +# "or is referencing another array in this way.\n" +# "Use the numpy.ma.resize function.") +# return None + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." 
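+ # The supported alternative is the module-level np.ma.resize, which
+ # returns a new masked array instead of resizing in place. Sketch
+ # (illustrative values):
+ # >>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
+ # >>> np.ma.resize(x, (2, 3)).shape
+ # (2, 3)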
+ raise ValueError(errmsg) + # + def put(self, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + Sets self._data.flat[n] = values[n] for each n in indices. + If `values` is shorter than `indices` then it will repeat. + If `values` has some masked values, the initial mask is updated + in consequence, else the corresponding values are unmasked. + + Parameters + ---------- + indices : 1-D array_like + Target indices, interpreted as integers. + values : array_like + Values to place in self._data copy at target indices. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + 'raise' : raise an error. + 'wrap' : wrap around. + 'clip' : clip to the range. + + Notes + ----- + `values` can be a scalar or length 1 array. + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print x + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + >>> x.put([0,4,8],[10,20,30]) + >>> print x + [[10 -- 3] + [-- 20 --] + [7 -- 30]] + + >>> x.put(4,999) + >>> print x + [[10 -- 3] + [-- 999 --] + [7 -- 30]] + + """ + m = self._mask + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = narray(indices, copy=False) + values = narray(values, copy=False, subok=True) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + #.... + self._data.put(indices, values, mode=mode) + #.... + if m is nomask: + m = getmask(values) + else: + m = m.copy() + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, shrink=True) + self._mask = m + #............................................ + def ids (self): + """ + Return the addresses of the data and mask areas. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) + >>> x.ids() + (166670640, 166659832) + + If the array has no mask, the address of `nomask` is returned. This address + is typically not close to the data in memory: + + >>> x = np.ma.array([1, 2, 3]) + >>> x.ids() + (166691080, 3083169284L) + + """ + if self._mask is nomask: + return (self.ctypes.data, id(nomask)) + return (self.ctypes.data, self._mask.ctypes.data) + + def iscontiguous(self): + """ + Return a boolean indicating whether the data is contiguous. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3]) + >>> x.iscontiguous() + True + + `iscontiguous` returns one of the flags of the masked array: + + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : True + OWNDATA : False + WRITEABLE : True + ALIGNED : True + UPDATEIFCOPY : False + + """ + return self.flags['CONTIGUOUS'] + + #............................................ + def all(self, axis=None, out=None): + """ + Check if all of the elements of `a` are true. + + Performs a :func:`logical_and` over the given axis and returns the result. + Masked values are considered as True during computation. + For convenience, the output array is masked where ALL the values along the + current axis are masked: if the output would have been a scalar and that + all the values are masked, then the output is `masked`. + + Parameters + ---------- + axis : {None, integer} + Axis to perform the operation over. + If None, perform over flattened array. + out : {None, array}, optional + Array into which the result can be placed. 
Its type is preserved + and it must be of the right shape to hold the output. + + See Also + -------- + all : equivalent function + + Examples + -------- + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True + + """ + mask = _check_mask_axis(self._mask, axis) + if out is None: + d = self.filled(True).all(axis=axis).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + return masked + return d + self.filled(True).all(axis=axis, out=out) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + + def any(self, axis=None, out=None): + """ + Check if any of the elements of `a` are true. + + Performs a logical_or over the given axis and returns the result. + Masked values are considered as False during computation. + + Parameters + ---------- + axis : {None, integer} + Axis to perform the operation over. + If None, perform over flattened array and return a scalar. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + See Also + -------- + any : equivalent function + + """ + mask = _check_mask_axis(self._mask, axis) + if out is None: + d = self.filled(False).any(axis=axis).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + d = masked + return d + self.filled(False).any(axis=axis, out=out) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + + def nonzero(self): + """ + Return the indices of unmasked elements that are not zero. + + Returns a tuple of arrays, one for each dimension, containing the + indices of the non-zero elements in that dimension. The corresponding + non-zero values can be obtained with:: + + a[a.nonzero()] + + To group the indices by element, rather than dimension, use + instead:: + + np.transpose(a.nonzero()) + + The result of this is always a 2d array, with a row for each non-zero + element. + + Parameters + ---------- + None + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + numpy.nonzero : + Function operating on ndarrays. + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.array(np.eye(3)) + >>> x + masked_array(data = + [[ 1. 0. 0.] + [ 0. 1. 0.] + [ 0. 0. 1.]], + mask = + False, + fill_value=1e+20) + >>> x.nonzero() + (array([0, 1, 2]), array([0, 1, 2])) + + Masked elements are ignored. + + >>> x[1, 1] = ma.masked + >>> x + masked_array(data = + [[1.0 0.0 0.0] + [0.0 -- 0.0] + [0.0 0.0 1.0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=1e+20) + >>> x.nonzero() + (array([0, 2]), array([0, 2])) + + Indices can also be grouped by element. + + >>> np.transpose(x.nonzero()) + array([[0, 0], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, ma.nonzero(a > 3) + yields the indices of the `a` where the condition is true. 
+ + >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + masked_array(data = + [[False False False] + [ True True True] + [ True True True]], + mask = + False, + fill_value=999999) + >>> ma.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the condition array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return narray(self.filled(0), copy=False).nonzero() + + + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + (this docstring should be overwritten) + """ + #!!!: implement out + test! + m = self._mask + if m is nomask: + result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, + axis2=axis2, out=out) + return result.astype(dtype) + else: + D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) + return D.astype(dtype).filled(0).sum(axis=None, out=out) + trace.__doc__ = ndarray.trace.__doc__ + + def sum(self, axis=None, dtype=None, out=None): + """ + Return the sum of the array elements over the given axis. + Masked elements are set to 0 internally. + + Parameters + ---------- + axis : {None, -1, int}, optional + Axis along which the sum is computed. The default + (`axis` = None) is to compute over the flattened array. + dtype : {None, dtype}, optional + Determines the type of the returned array and of the accumulator + where the elements are summed. If dtype has the value None and + the type of a is an integer type of precision less than the default + platform integer, then the default platform integer precision is + used. Otherwise, the dtype is the same as that of a. + out : {None, ndarray}, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + + Returns + ------- + sum_along_axis : MaskedArray or scalar + An array with the same shape as self, with the specified + axis removed. If self is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> print x + [[1 -- 3] + [-- 5 --] + [7 -- 9]] + >>> print x.sum() + 25 + >>> print x.sum(axis=1) + [4 5 16] + >>> print x.sum(axis=0) + [8 5 12] + >>> print type(x.sum(axis=0, dtype=np.int64)[0]) + + + """ + _mask = ndarray.__getattribute__(self, '_mask') + newmask = _check_mask_axis(_mask, axis) + # No explicit output + if out is None: + result = self.filled(0).sum(axis, dtype=dtype) + rndim = getattr(result, 'ndim', 0) + if rndim: + result = result.view(type(self)) + result.__setmask__(newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(0).sum(axis, dtype=dtype, out=out) + if isinstance(out, MaskedArray): + outmask = getattr(out, '_mask', nomask) + if (outmask is nomask): + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + return out + + + def cumsum(self, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the elements along the given axis. + The cumulative sum is calculated over the flattened array by + default, otherwise over the specified axis. + + Masked values are set to 0 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. + + Parameters + ---------- + axis : {None, -1, int}, optional + Axis along which the sum is computed. 
The default (`axis` = None) is to + compute over the flattened array. `axis` may be negative, in which case + it counts from the last to the first axis. + dtype : {None, dtype}, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + + Returns + ------- + cumsum : ndarray. + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. + + Notes + ----- + The mask is lost if `out` is not a valid :class:`MaskedArray` ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) + >>> print marr.cumsum() + [0 1 3 -- -- -- 9 16 24 33] + + """ + result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self.mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + Masked elements are set to 1 internally for computation. + + Parameters + ---------- + axis : {None, int}, optional + Axis over which the product is taken. If None is used, then the + product is over all the array elements. + dtype : {None, dtype}, optional + Determines the type of the returned array and of the accumulator + where the elements are multiplied. If ``dtype`` has the value ``None`` + and the type of a is an integer type of precision less than the default + platform integer, then the default platform integer precision is + used. Otherwise, the dtype is the same as that of a. + out : {None, array}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type will be cast if + necessary. + + Returns + ------- + product_along_axis : {array, scalar}, see dtype parameter above. + Returns an array whose shape is the same as a with the specified + axis removed. Returns a 0d array when a is 1d or axis=None. + Returns a reference to the specified output array if specified. + + See Also + -------- + prod : equivalent function + + Notes + ----- + Arithmetic is modular when using integer types, and no error is raised + on overflow. 
+
+ Examples
+ --------
+ >>> np.prod([1.,2.])
+ 2.0
+ >>> np.prod([1.,2.], dtype=np.int32)
+ 2
+ >>> np.prod([[1.,2.],[3.,4.]])
+ 24.0
+ >>> np.prod([[1.,2.],[3.,4.]], axis=1)
+ array([ 2., 12.])
+
+ """
+ _mask = ndarray.__getattribute__(self, '_mask')
+ newmask = _check_mask_axis(_mask, axis)
+ # No explicit output
+ if out is None:
+ result = self.filled(1).prod(axis, dtype=dtype)
+ rndim = getattr(result, 'ndim', 0)
+ if rndim:
+ result = result.view(type(self))
+ result.__setmask__(newmask)
+ elif newmask:
+ result = masked
+ return result
+ # Explicit output
+ result = self.filled(1).prod(axis, dtype=dtype, out=out)
+ if isinstance(out, MaskedArray):
+ outmask = getattr(out, '_mask', nomask)
+ if (outmask is nomask):
+ outmask = out._mask = make_mask_none(out.shape)
+ outmask.flat = newmask
+ return out
+
+ product = prod
+
+ def cumprod(self, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative product of the elements along the given axis.
+ The cumulative product is taken over the flattened array by
+ default, otherwise over the specified axis.
+
+ Masked values are set to 1 internally during the computation.
+ However, their position is saved, and the result will be masked at
+ the same locations.
+
+ Parameters
+ ----------
+ axis : {None, -1, int}, optional
+ Axis along which the product is computed. The default
+ (`axis` = None) is to compute over the flattened array.
+ dtype : {None, dtype}, optional
+ Determines the type of the returned array and of the accumulator
+ where the elements are multiplied. If ``dtype`` has the value ``None``
+ and the type of ``a`` is an integer type of precision less than the
+ default platform integer, then the default platform integer precision
+ is used. Otherwise, the dtype is the same as that of ``a``.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary.
+
+ Returns
+ -------
+ cumprod : ndarray
+ A new array holding the result is returned unless out is specified,
+ in which case a reference to out is returned.
+
+ Notes
+ -----
+ The mask is lost if `out` is not a valid MaskedArray!
+
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ """
+ result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.__setmask__(self._mask)
+ return out
+ result = result.view(type(self))
+ result.__setmask__(self._mask)
+ return result
+
+
+ def mean(self, axis=None, dtype=None, out=None):
+ """
+ Returns the average of the array elements.
+
+ Masked entries are ignored.
+ The average is taken over the flattened array by default, otherwise over
+ the specified axis. Refer to `numpy.mean` for the full documentation.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose mean is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : int, optional
+ Axis along which the means are computed. The default is to compute
+ the mean of the flattened array.
+ dtype : dtype, optional
+ Type to use in computing the mean. For integer inputs, the default
+ is float64; for floating point inputs, it is the same as the input
+ dtype.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type will be cast if
+ necessary.
+ + Returns + ------- + mean : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. + + See Also + -------- + numpy.ma.mean : Equivalent function. + numpy.mean : Equivalent function on non-masked arrays. + numpy.ma.average: Weighted average. + + Examples + -------- + >>> a = np.ma.array([1,2,3], mask=[False, False, True]) + >>> a + masked_array(data = [1 2 --], + mask = [False False True], + fill_value = 999999) + >>> a.mean() + 1.5 + + """ + if self._mask is nomask: + result = super(MaskedArray, self).mean(axis=axis, dtype=dtype) + else: + dsum = self.sum(axis=axis, dtype=dtype) + cnt = self.count(axis=axis) + if cnt.shape == () and (cnt == 0): + result = masked + else: + result = dsum * 1. / cnt + if out is not None: + out.flat = result + if isinstance(out, MaskedArray): + outmask = getattr(out, '_mask', nomask) + if (outmask is nomask): + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = getattr(result, '_mask', nomask) + return out + return result + + def anom(self, axis=None, dtype=None): + """ + Compute the anomalies (deviations from the arithmetic mean) + along the given axis. + + Returns an array of anomalies, with the same shape as the input and + where the arithmetic mean is computed along the given axis. + + Parameters + ---------- + axis : int, optional + Axis over which the anomalies are taken. + The default is to use the mean of the flattened array as reference. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default is float32; for arrays of float types it is the same as + the array type. + + See Also + -------- + mean : Compute the mean of the array. + + Examples + -------- + >>> a = np.ma.array([1,2,3]) + >>> a.anom() + masked_array(data = [-1. 0. 1.], + mask = False, + fill_value = 1e+20) + + """ + m = self.mean(axis, dtype) + if not axis: + return (self - m) + else: + return (self - expand_dims(m, axis)) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + "" + # Easy case: nomask, business as usual + if self._mask is nomask: + return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof) + # Some data are masked, yay! + cnt = self.count(axis=axis) - ddof + danom = self.anom(axis=axis, dtype=dtype) + if iscomplexobj(self): + danom = umath.absolute(danom) ** 2 + else: + danom *= danom + dvar = divide(danom.sum(axis), cnt).view(type(self)) + # Apply the mask if it's not a scalar + if dvar.ndim: + dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0)) + dvar._update_from(self) + elif getattr(dvar, '_mask', False): + # Make sure that masked is returned when the scalar is masked. + dvar = masked + if out is not None: + if isinstance(out, MaskedArray): + out.flat = 0 + out.__setmask__(True) + elif out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or "\ + "more location." 
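+ # For masked input, var() divides the summed squared anomalies by
+ # (count - ddof), so masked entries drop out of both the numerator
+ # and the denominator. Sketch (illustrative values):
+ # >>> a = np.ma.array([1., 3., 100.], mask=[0, 0, 1])
+ # >>> a.var()    # same as np.array([1., 3.]).var()
+ # 1.0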
+ raise MaskError(errmsg)
+ else:
+ out.flat = np.nan
+ return out
+ # In case we have an explicit output
+ if out is not None:
+ # Set the data
+ out.flat = dvar
+ # Set the mask if needed
+ if isinstance(out, MaskedArray):
+ out.__setmask__(dvar.mask)
+ return out
+ return dvar
+ var.__doc__ = np.var.__doc__
+
+
+ def std(self, axis=None, dtype=None, out=None, ddof=0):
+ ""
+ dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
+ if dvar is not masked:
+ if out is not None:
+ np.power(out, 0.5, out=out, casting='unsafe')
+ return out
+ dvar = sqrt(dvar)
+ return dvar
+ std.__doc__ = np.std.__doc__
+
+ #............................................
+ def round(self, decimals=0, out=None):
+ """
+ Return each element rounded to the given number of decimals.
+
+ Refer to `numpy.around` for full documentation.
+
+ See Also
+ --------
+ numpy.around : equivalent function
+
+ """
+ result = self._data.round(decimals=decimals, out=out).view(type(self))
+ result._mask = self._mask
+ result._update_from(self)
+ # No explicit output: we're done
+ if out is None:
+ return result
+ if isinstance(out, MaskedArray):
+ out.__setmask__(self._mask)
+ return out
+ round.__doc__ = ndarray.round.__doc__
+
+ #............................................
+ def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
+ """
+ Return an ndarray of indices that sort the array along the
+ specified axis. Masked values are filled beforehand to
+ `fill_value`.
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis along which to sort. If None (the default), the
+ flattened array is used.
+ fill_value : var, optional
+ Value used to fill the array before sorting.
+ The default is the `fill_value` attribute of the input array.
+ kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+ Sorting algorithm.
+ order : list, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. Not all fields need be
+ specified.
+
+ Returns
+ -------
+ index_array : ndarray, int
+ Array of indices that sort `a` along the specified axis.
+ In other words, ``a[index_array]`` yields a sorted `a`.
+
+ See Also
+ --------
+ sort : Describes sorting algorithms used.
+ lexsort : Indirect stable sort with multiple keys.
+ ndarray.sort : Inplace sort.
+
+ Notes
+ -----
+ See `sort` for notes on the different sorting algorithms.
+
+ Examples
+ --------
+ >>> a = np.ma.array([3,2,1], mask=[False, False, True])
+ >>> a
+ masked_array(data = [3 2 --],
+ mask = [False False True],
+ fill_value = 999999)
+ >>> a.argsort()
+ array([1, 0, 2])
+
+ """
+ if fill_value is None:
+ fill_value = default_fill_value(self)
+ d = self.filled(fill_value).view(ndarray)
+ return d.argsort(axis=axis, kind=kind, order=order)
+
+
+ def argmin(self, axis=None, fill_value=None, out=None):
+ """
+ Return array of indices to the minimum values along the given axis.
+
+ Parameters
+ ----------
+ axis : {None, integer}
+ If None, the index is into the flattened array, otherwise along
+ the specified axis
+ fill_value : {var}, optional
+ Value used to fill in the masked values. If None, the output of
+ minimum_fill_value(self._data) is used instead.
+ out : {None, array}, optional
+ Array into which the result can be placed. Its type is preserved
+ and it must be of the right shape to hold the output.
+
+ Returns
+ -------
+ {ndarray, scalar}
+ For multi-dimensional input, returns a new ndarray of indices to
+ the minimum values along the given axis. Otherwise, returns a
+ scalar index to the minimum value along the given axis.
+
+ Examples
+ --------
+ >>> x = np.ma.array(arange(4), mask=[1,1,0,0])
+ >>> x.shape = (2,2)
+ >>> print x
+ [[-- --]
+ [2 3]]
+ >>> print x.argmin(axis=0, fill_value=-1)
+ [0 0]
+ >>> print x.argmin(axis=0, fill_value=9)
+ [1 1]
+
+ """
+ if fill_value is None:
+ fill_value = minimum_fill_value(self)
+ d = self.filled(fill_value).view(ndarray)
+ return d.argmin(axis, out=out)
+
+
+ def argmax(self, axis=None, fill_value=None, out=None):
+ """
+ Returns array of indices of the maximum values along the given axis.
+ Masked values are treated as if they had the value fill_value.
+
+ Parameters
+ ----------
+ axis : {None, integer}
+ If None, the index is into the flattened array, otherwise along
+ the specified axis
+ fill_value : {var}, optional
+ Value used to fill in the masked values. If None, the output of
+ maximum_fill_value(self._data) is used instead.
+ out : {None, array}, optional
+ Array into which the result can be placed. Its type is preserved
+ and it must be of the right shape to hold the output.
+
+ Returns
+ -------
+ index_array : {integer_array}
+
+ Examples
+ --------
+ >>> a = np.arange(6).reshape(2,3)
+ >>> a.argmax()
+ 5
+ >>> a.argmax(0)
+ array([1, 1, 1])
+ >>> a.argmax(1)
+ array([2, 2])
+
+ """
+ if fill_value is None:
+ fill_value = maximum_fill_value(self._data)
+ d = self.filled(fill_value).view(ndarray)
+ return d.argmax(axis, out=out)
+
+
+ def sort(self, axis=-1, kind='quicksort', order=None,
+ endwith=True, fill_value=None):
+ """
+ Sort the array in place.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ axis : int, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+ Sorting algorithm. Default is 'quicksort'.
+ order : list, optional
+ When `a` is a structured array, this argument specifies which fields
+ to compare first, second, and so on. This list does not need to
+ include all of the fields.
+ endwith : {True, False}, optional
+ Whether missing values (if any) should be forced to the upper
+ indices (at the end of the array, True) or the lower indices (at
+ the beginning, False).
+ When the array contains unmasked values of the largest (or smallest if
+ False) representable value of the datatype, the ordering of these values
+ and the masked values is undefined. To enforce that the masked values
+ are at the end (or beginning) in this case, one must sort the mask.
+ fill_value : {var}, optional
+ Value used internally for the masked values.
+ If ``fill_value`` is not None, it supersedes ``endwith``.
+
+ Returns
+ -------
+ sorted_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ ndarray.sort : Method to sort an array in-place.
+ argsort : Indirect sort.
+ lexsort : Indirect stable sort on multiple keys.
+ searchsorted : Find elements in a sorted array.
+
+ Notes
+ -----
+ See ``sort`` for notes on the different sorting algorithms.
+ + Examples + -------- + >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Default + >>> a.sort() + >>> print a + [1 3 5 -- --] + + >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Put missing values in the front + >>> a.sort(endwith=False) + >>> print a + [-- -- 1 3 5] + + >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # fill_value takes over endwith + >>> a.sort(endwith=False, fill_value=3) + >>> print a + [1 -- -- 3 5] + + """ + if self._mask is nomask: + ndarray.sort(self, axis=axis, kind=kind, order=order) + else: + if self is masked: + return self + if fill_value is None: + if endwith: + filler = minimum_fill_value(self) + else: + filler = maximum_fill_value(self) + else: + filler = fill_value + + sidx = self.filled(filler).argsort(axis=axis, kind=kind, + order=order) + # save meshgrid memory for 1d arrays + if self.ndim == 1: + idx = sidx + else: + idx = np.meshgrid(*[np.arange(x) for x in self.shape], sparse=True, + indexing='ij') + idx[axis] = sidx + tmp_mask = self._mask[idx].flat + tmp_data = self._data[idx].flat + self._data.flat = tmp_data + self._mask.flat = tmp_mask + return + + #............................................ + def min(self, axis=None, out=None, fill_value=None): + """ + Return the minimum along a given axis. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + out : array_like, optional + Alternative output array in which to place the result. Must be of + the same shape and buffer length as the expected output. + fill_value : {var}, optional + Value used to fill in the masked values. + If None, use the output of `minimum_fill_value`. + + Returns + ------- + amin : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + minimum_fill_value + Returns the minimum filling value for a given datatype. + + """ + _mask = ndarray.__getattribute__(self, '_mask') + newmask = _check_mask_axis(_mask, axis) + if fill_value is None: + fill_value = minimum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).min(axis=axis, out=out).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).min(axis=axis, out=out) + if isinstance(out, MaskedArray): + outmask = getattr(out, '_mask', nomask) + if (outmask is nomask): + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def mini(self, axis=None): + """ + Return the array minimum along the specified axis. + + Parameters + ---------- + axis : int, optional + The axis along which to find the minima. Default is None, in which case + the minimum value in the whole array is returned. + + Returns + ------- + min : scalar or MaskedArray + If `axis` is None, the result is a scalar. Otherwise, if `axis` is + given and the array is at least 2-D, the result is a masked array with + dimension one smaller than the array on which `mini` is called. 
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(6), mask=[0, 1, 0, 0, 0, 1]).reshape(3, 2)
+ >>> print x
+ [[0 --]
+ [2 3]
+ [4 --]]
+ >>> x.mini()
+ 0
+ >>> x.mini(axis=0)
+ masked_array(data = [0 3],
+ mask = [False False],
+ fill_value = 999999)
+ >>> print x.mini(axis=1)
+ [0 2 4]
+
+ """
+ if axis is None:
+ return minimum(self)
+ else:
+ return minimum.reduce(self, axis)
+
+ #........................
+ def max(self, axis=None, out=None, fill_value=None):
+ """
+ Return the maximum along a given axis.
+
+ Parameters
+ ----------
+ axis : {None, int}, optional
+ Axis along which to operate. By default, ``axis`` is None and the
+ flattened input is used.
+ out : array_like, optional
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ fill_value : {var}, optional
+ Value used to fill in the masked values.
+ If None, use the output of maximum_fill_value().
+
+ Returns
+ -------
+ amax : array_like
+ New array holding the result.
+ If ``out`` was specified, ``out`` is returned.
+
+ See Also
+ --------
+ maximum_fill_value
+ Returns the maximum filling value for a given datatype.
+
+ """
+ _mask = ndarray.__getattribute__(self, '_mask')
+ newmask = _check_mask_axis(_mask, axis)
+ if fill_value is None:
+ fill_value = maximum_fill_value(self)
+ # No explicit output
+ if out is None:
+ result = self.filled(fill_value).max(axis=axis, out=out).view(type(self))
+ if result.ndim:
+ # Set the mask
+ result.__setmask__(newmask)
+ # Get rid of Infs
+ if newmask.ndim:
+ np.copyto(result, result.fill_value, where=newmask)
+ elif newmask:
+ result = masked
+ return result
+ # Explicit output
+ result = self.filled(fill_value).max(axis=axis, out=out)
+ if isinstance(out, MaskedArray):
+ outmask = getattr(out, '_mask', nomask)
+ if (outmask is nomask):
+ outmask = out._mask = make_mask_none(out.shape)
+ outmask.flat = newmask
+ else:
+ if out.dtype.kind in 'biu':
+ errmsg = "Masked data information would be lost in one or more"\
+ " location."
+ raise MaskError(errmsg)
+ np.copyto(out, np.nan, where=newmask)
+ return out
+
+ def ptp(self, axis=None, out=None, fill_value=None):
+ """
+ Return (maximum - minimum) along the given dimension
+ (i.e. peak-to-peak value).
+
+ Parameters
+ ----------
+ axis : {None, int}, optional
+ Axis along which to find the peaks. If None (default) the
+ flattened array is used.
+ out : {None, array_like}, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary.
+ fill_value : {var}, optional
+ Value used to fill in the masked values.
+
+ Returns
+ -------
+ ptp : ndarray.
+ A new array holding the result, unless ``out`` was
+ specified, in which case a reference to ``out`` is returned.
+ + """ + if out is None: + result = self.max(axis=axis, fill_value=fill_value) + result -= self.min(axis=axis, fill_value=fill_value) + return result + out.flat = self.max(axis=axis, out=out, fill_value=fill_value) + min_value = self.min(axis=axis, fill_value=fill_value) + np.subtract(out, min_value, out=out, casting='unsafe') + return out + + def take(self, indices, axis=None, out=None, mode='raise'): + """ + """ + (_data, _mask) = (self._data, self._mask) + cls = type(self) + # Make sure the indices are not masked + maskindices = getattr(indices, '_mask', nomask) + if maskindices is not nomask: + indices = indices.filled(0) + # Get the data + if out is None: + out = _data.take(indices, axis=axis, mode=mode).view(cls) + else: + np.take(_data, indices, axis=axis, mode=mode, out=out) + # Get the mask + if isinstance(out, MaskedArray): + if _mask is nomask: + outmask = maskindices + else: + outmask = _mask.take(indices, axis=axis, mode=mode) + outmask |= maskindices + out.__setmask__(outmask) + return out + + + # Array methods --------------------------------------- + copy = _arraymethod('copy') + diagonal = _arraymethod('diagonal') + transpose = _arraymethod('transpose') + T = property(fget=lambda self:self.transpose()) + swapaxes = _arraymethod('swapaxes') + clip = _arraymethod('clip', onmask=False) + copy = _arraymethod('copy') + squeeze = _arraymethod('squeeze') + #-------------------------------------------- + def tolist(self, fill_value=None): + """ + Return the data portion of the masked array as a hierarchical Python list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to `fill_value`. If `fill_value` is None, + the corresponding entries in the output list will be ``None``. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries. Default is None. + + Returns + ------- + result : list + The Python list representation of the masked array. + + Examples + -------- + >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) + >>> x.tolist() + [[1, None, 3], [None, 5, None], [7, None, 9]] + >>> x.tolist(-999) + [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] + + """ + _mask = self._mask + # No mask ? Just return .data.tolist ? + if _mask is nomask: + return self._data.tolist() + # Explicit fill_value: fill the array and get the list + if fill_value is not None: + return self.filled(fill_value).tolist() + # Structured array ............. + names = self.dtype.names + if names: + result = self._data.astype([(_, object) for _ in names]) + for n in names: + result[n][_mask[n]] = None + return result.tolist() + # Standard arrays ............... + if _mask is nomask: + return [None] + # Set temps to save time when dealing w/ marrays... + inishape = self.shape + result = np.array(self._data.ravel(), dtype=object) + result[_mask.ravel()] = None + result.shape = inishape + return result.tolist() +# if fill_value is not None: +# return self.filled(fill_value).tolist() +# result = self.filled().tolist() +# # Set temps to save time when dealing w/ mrecarrays... 
+# _mask = self._mask
+# if _mask is nomask:
+# return result
+# nbdims = self.ndim
+# dtypesize = len(self.dtype)
+# if nbdims == 0:
+# return tuple([None] * dtypesize)
+# elif nbdims == 1:
+# maskedidx = _mask.nonzero()[0].tolist()
+# if dtypesize:
+# nodata = tuple([None] * dtypesize)
+# else:
+# nodata = None
+# [operator.setitem(result, i, nodata) for i in maskedidx]
+# else:
+# for idx in zip(*[i.tolist() for i in _mask.nonzero()]):
+# tmp = result
+# for i in idx[:-1]:
+# tmp = tmp[i]
+# tmp[idx[-1]] = None
+# return result
+ #........................
+ def tostring(self, fill_value=None, order='C'):
+ """
+ This function is a compatibility alias for tobytes. Despite its name,
+ it returns bytes, not strings.
+ """
+
+ return self.tobytes(fill_value, order=order)
+ #........................
+ def tobytes(self, fill_value=None, order='C'):
+ """
+ Return the array data as a string containing the raw bytes in the array.
+
+ The array is filled with a fill value before the string conversion.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ fill_value : scalar, optional
+ Value used to fill in the masked values. Default is None, in which
+ case `MaskedArray.fill_value` is used.
+ order : {'C','F','A'}, optional
+ Order of the data item in the copy. Default is 'C'.
+
+ - 'C' -- C order (row major).
+ - 'F' -- Fortran order (column major).
+ - 'A' -- Any, current order of array.
+ - None -- Same as 'A'.
+
+ See Also
+ --------
+ ndarray.tobytes
+ tolist, tofile
+
+ Notes
+ -----
+ As for `ndarray.tobytes`, information about the shape, dtype, etc.,
+ but also about `fill_value`, will be lost.
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
+ >>> x.tobytes()
+ '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
+
+ """
+ return self.filled(fill_value).tobytes(order=order)
+ #........................
+ def tofile(self, fid, sep="", format="%s"):
+ """
+ Save a masked array to a file in binary format.
+
+ .. warning::
+ This function is not implemented yet.
+
+ Raises
+ ------
+ NotImplementedError
+ When `tofile` is called.
+
+ """
+ raise NotImplementedError("Not implemented yet, sorry...")
+
+ def toflex(self):
+ """
+ Transforms a masked array into a flexible-type array.
+
+ The flexible type array that is returned will have two fields:
+
+ * the ``_data`` field stores the ``_data`` part of the array.
+ * the ``_mask`` field stores the ``_mask`` part of the array.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ record : ndarray
+ A new flexible-type `ndarray` with two fields: the first element
+ containing a value, the second element containing the corresponding
+ mask boolean. The returned record shape matches self.shape.
+
+ Notes
+ -----
+ A side-effect of transforming a masked array into a flexible `ndarray` is
+ that meta information (``fill_value``, ...) will be lost.
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+ >>> print x
+ [[1 -- 3]
+ [-- 5 --]
+ [7 -- 9]]
+ >>> print x.toflex()
+ [[(1, False) (2, True) (3, False)]
+ [(4, True) (5, False) (6, True)]
+ [(7, False) (8, True) (9, False)]]
+
+ """
+ # Get the basic dtype ....
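+ # The structured result pairs each value with its mask flag and can be
+ # turned back into a masked array with np.ma.fromflex. Sketch
+ # (illustrative values):
+ # >>> x = np.ma.array([1, 2], mask=[0, 1])
+ # >>> np.ma.fromflex(x.toflex())
+ # masked_array(data = [1 --], mask = [False  True], fill_value = 999999)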
+ ddtype = self.dtype + # Make sure we have a mask + _mask = self._mask + if _mask is None: + _mask = make_mask_none(self.shape, ddtype) + # And get its dtype + mdtype = self._mask.dtype + # + record = np.ndarray(shape=self.shape, + dtype=[('_data', ddtype), ('_mask', mdtype)]) + record['_data'] = self._data + record['_mask'] = self._mask + return record + torecords = toflex + #-------------------------------------------- + # Pickling + def __getstate__(self): + """Return the internal state of the masked array, for pickling + purposes. + + """ + cf = 'CF'[self.flags.fnc] + state = (1, + self.shape, + self.dtype, + self.flags.fnc, + self._data.tobytes(cf), + #self._data.tolist(), + getmaskarray(self).tobytes(cf), + #getmaskarray(self).tolist(), + self._fill_value, + ) + return state + # + def __setstate__(self, state): + """Restore the internal state of the masked array, for + pickling purposes. ``state`` is typically the output of the + ``__getstate__`` output, and is a 5-tuple: + + - class name + - a tuple giving the shape of the data + - a typecode for the data + - a binary string for the data + - a binary string for the mask. + + """ + (_, shp, typ, isf, raw, msk, flv) = state + ndarray.__setstate__(self, (shp, typ, isf, raw)) + self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) + self.fill_value = flv + # + def __reduce__(self): + """Return a 3-tuple for pickling a MaskedArray. + + """ + return (_mareconstruct, + (self.__class__, self._baseclass, (0,), 'b',), + self.__getstate__()) + # + def __deepcopy__(self, memo=None): + from copy import deepcopy + copied = MaskedArray.__new__(type(self), self, copy=True) + if memo is None: + memo = {} + memo[id(self)] = copied + for (k, v) in self.__dict__.items(): + copied.__dict__[k] = deepcopy(v, memo) + return copied + + +def _mareconstruct(subtype, baseclass, baseshape, basetype,): + """Internal function that builds a new MaskedArray from the + information stored in a pickle. + + """ + _data = ndarray.__new__(baseclass, baseshape, basetype) + _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) + return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + + + + + + +class mvoid(MaskedArray): + """ + Fake a 'void' object to use for masked array with structured dtypes. + """ + # + def __new__(self, data, mask=nomask, dtype=None, fill_value=None, + hardmask=False, copy=False, subok=True): + _data = np.array(data, copy=copy, subok=subok, dtype=dtype) + _data = _data.view(self) + _data._hardmask = hardmask + if mask is not nomask: + if isinstance(mask, np.void): + _data._mask = mask + else: + try: + # Mask is already a 0D array + _data._mask = np.void(mask) + except TypeError: + # Transform the mask to a void + mdtype = make_mask_descr(dtype) + _data._mask = np.array(mask, dtype=mdtype)[()] + if fill_value is not None: + _data.fill_value = fill_value + return _data + + def _get_data(self): + # Make sure that the _data part is a np.void + return self.view(ndarray)[()] + _data = property(fget=_get_data) + + def __getitem__(self, indx): + "Get the index..." 
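+ # Indexing an mvoid yields the `masked` singleton for any field whose
+ # mask flag is set. Sketch (illustrative structured dtype):
+ # >>> x = np.ma.array([(1, 2.)], mask=[(0, 1)],
+ # ...                 dtype=[('a', int), ('b', float)])
+ # >>> x[0]['b'] is np.ma.masked
+ # True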
+ m = self._mask + if m is not nomask and m[indx]: + return masked + return self._data[indx] + + def __setitem__(self, indx, value): + self._data[indx] = value + if self._hardmask: + self._mask[indx] |= getattr(value, "_mask", False) + else: + self._mask[indx] = getattr(value, "_mask", False) + + def __str__(self): + m = self._mask + if (m is nomask): + return self._data.__str__() + m = tuple(m) + if (not any(m)): + return self._data.__str__() + r = self._data.tolist() + p = masked_print_option + if not p.enabled(): + p = 'N/A' + else: + p = str(p) + r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)] + return "(%s)" % ", ".join(r) + + def __repr__(self): + m = self._mask + if (m is nomask): + return self._data.__repr__() + m = tuple(m) + if not any(m): + return self._data.__repr__() + p = masked_print_option + if not p.enabled(): + return self.filled(self.fill_value).__repr__() + p = str(p) + r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)] + return "(%s)" % ", ".join(r) + + def __iter__(self): + "Defines an iterator for mvoid" + (_data, _mask) = (self._data, self._mask) + if _mask is nomask: + for d in _data: + yield d + else: + for (d, m) in zip(_data, _mask): + if m: + yield masked + else: + yield d + + def __len__(self): + return self._data.__len__() + + def filled(self, fill_value=None): + """ + Return a copy with masked fields filled with a given value. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, the `fill_value` attribute is used instead. + + Returns + ------- + filled_void + A `np.void` object + + See Also + -------- + MaskedArray.filled + + """ + return asarray(self).filled(fill_value)[()] + + def tolist(self): + """ + Transforms the mvoid object into a tuple. + + Masked fields are replaced by None. + + Returns + ------- + returned_tuple + Tuple of fields + """ + _mask = self._mask + if _mask is nomask: + return self._data.tolist() + result = [] + for (d, m) in zip(self._data, self._mask): + if m: + result.append(None) + else: + # .item() makes sure we return a standard Python object + result.append(d.item()) + return tuple(result) + + + +#####-------------------------------------------------------------------------- +#---- --- Shortcuts --- +#####--------------------------------------------------------------------------- +def isMaskedArray(x): + """ + Test whether input is an instance of MaskedArray. + + This function returns True if `x` is an instance of MaskedArray + and returns False otherwise. Any object is accepted as input. + + Parameters + ---------- + x : object + Object to test. + + Returns + ------- + result : bool + True if `x` is a MaskedArray. + + See Also + -------- + isMA : Alias to isMaskedArray. + isarray : Alias to isMaskedArray. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.eye(3, 3) + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> m = ma.masked_values(a, 0) + >>> m + masked_array(data = + [[1.0 -- --] + [-- 1.0 --] + [-- -- 1.0]], + mask = + [[False True True] + [ True False True] + [ True True False]], + fill_value=0.0) + >>> ma.isMaskedArray(a) + False + >>> ma.isMaskedArray(m) + True + >>> ma.isMaskedArray([0, 1, 2]) + False + + """ + return isinstance(x, MaskedArray) +isarray = isMaskedArray +isMA = isMaskedArray #backward compatibility + +# We define the masked singleton as a float for higher precedence... 
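+# Because the singleton's data is a 0-d float array, mixing it with any
+# numeric value keeps returning `masked` (illustrative):
+# >>> np.ma.masked + 1 is np.ma.masked
+# True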
+# Note that it can be tricky sometimes w/ type comparison
+
+class MaskedConstant(MaskedArray):
+ #
+ _data = data = np.array(0.)
+ _mask = mask = np.array(True)
+ _baseclass = ndarray
+ #
+ def __new__(self):
+ return self._data.view(self)
+ #
+ def __array_finalize__(self, obj):
+ return
+ #
+ def __array_wrap__(self, obj):
+ return self
+ #
+ def __str__(self):
+ return str(masked_print_option._display)
+ #
+ def __repr__(self):
+ return 'masked'
+ #
+ def flatten(self):
+ return masked_array([self._data], dtype=float, mask=[True])
+
+ def __reduce__(self):
+ """Override of MaskedArray's __reduce__.
+ """
+ return (self.__class__, ())
+
+
+masked = masked_singleton = MaskedConstant()
+
+
+
+masked_array = MaskedArray
+
+def array(data, dtype=None, copy=False, order=False,
+ mask=nomask, fill_value=None,
+ keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0,
+ ):
+ """array(data, dtype=None, copy=False, order=False, mask=nomask,
+ fill_value=None, keep_mask=True, hard_mask=False, shrink=True,
+ subok=True, ndmin=0)
+
+ Acts as a shortcut to MaskedArray, with options in a different order
+ for convenience, and for backwards compatibility.
+
+ """
+ #!!!: we should try to put 'order' somewhere
+ return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
+ keep_mask=keep_mask, hard_mask=hard_mask,
+ fill_value=fill_value, ndmin=ndmin, shrink=shrink)
+array.__doc__ = masked_array.__doc__
+
+def is_masked(x):
+ """
+ Determine whether input has masked values.
+
+ Accepts any object as input, but always returns False unless the
+ input is a MaskedArray containing masked values.
+
+ Parameters
+ ----------
+ x : array_like
+ Array to check for masked values.
+
+ Returns
+ -------
+ result : bool
+ True if `x` is a MaskedArray with masked values, False otherwise.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
+ >>> x
+ masked_array(data = [-- 1 -- 2 3],
+ mask = [ True False True False False],
+ fill_value=999999)
+ >>> ma.is_masked(x)
+ True
+ >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
+ >>> x
+ masked_array(data = [0 1 0 2 3],
+ mask = False,
+ fill_value=999999)
+ >>> ma.is_masked(x)
+ False
+
+ Always returns False if `x` isn't a MaskedArray.
+
+ >>> x = [False, True, False]
+ >>> ma.is_masked(x)
+ False
+ >>> x = 'a string'
+ >>> ma.is_masked(x)
+ False
+
+ """
+ m = getmask(x)
+ if m is nomask:
+ return False
+ elif m.any():
+ return True
+ return False
+
+
+#####---------------------------------------------------------------------------
+#---- --- Extrema functions ---
+#####---------------------------------------------------------------------------
+class _extrema_operation(object):
+ """
+ Generic class for maximum/minimum functions.
+
+ .. note::
+ This is the base class for `_maximum_operation` and
+ `_minimum_operation`.
+
+ """
+ def __call__(self, a, b=None):
+ "Executes the call behavior."
+ if b is None:
+ return self.reduce(a)
+ return where(self.compare(a, b), a, b)
+ #.........
+ def reduce(self, target, axis=None):
+ "Reduce target along the given axis."
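+ # Masked entries are first filled with self.fill_value_func(target),
+ # a value that can never win the reduction (for `minimum` this is the
+ # dtype's maximum), then the plain ufunc reduce runs and the reduced
+ # mask is reapplied. Sketch (illustrative values):
+ # >>> np.ma.minimum.reduce(np.ma.array([5, 1, 3], mask=[0, 1, 0]))
+ # 3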
+ target = narray(target, copy=False, subok=True) + m = getmask(target) + if axis is not None: + kargs = { 'axis' : axis } + else: + kargs = {} + target = target.ravel() + if not (m is nomask): + m = m.ravel() + if m is nomask: + t = self.ufunc.reduce(target, **kargs) + else: + target = target.filled(self.fill_value_func(target)).view(type(target)) + t = self.ufunc.reduce(target, **kargs) + m = umath.logical_and.reduce(m, **kargs) + if hasattr(t, '_mask'): + t._mask = m + elif m: + t = masked + return t + #......... + def outer (self, a, b): + "Return the function applied to the outer product of a and b." + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = logical_or.outer(ma, mb) + result = self.ufunc.outer(filled(a), filled(b)) + if not isinstance(result, MaskedArray): + result = result.view(MaskedArray) + result._mask = m + return result + +#............................ +class _minimum_operation(_extrema_operation): + "Object to calculate minima" + def __init__ (self): + """minimum(a, b) or minimum(a) +In one argument case, returns the scalar minimum. + """ + self.ufunc = umath.minimum + self.afunc = amin + self.compare = less + self.fill_value_func = minimum_fill_value + +#............................ +class _maximum_operation(_extrema_operation): + "Object to calculate maxima" + def __init__ (self): + """maximum(a, b) or maximum(a) + In one argument case returns the scalar maximum. + """ + self.ufunc = umath.maximum + self.afunc = amax + self.compare = greater + self.fill_value_func = maximum_fill_value + +#.......................................................... +def min(obj, axis=None, out=None, fill_value=None): + try: + return obj.min(axis=axis, fill_value=fill_value, out=out) + except (AttributeError, TypeError): + # If obj doesn't have a min method, + # ...or if the method doesn't accept a fill_value argument + return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out) +min.__doc__ = MaskedArray.min.__doc__ + +def max(obj, axis=None, out=None, fill_value=None): + try: + return obj.max(axis=axis, fill_value=fill_value, out=out) + except (AttributeError, TypeError): + # If obj doesn't have a max method, + # ...or if the method doesn't accept a fill_value argument + return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out) +max.__doc__ = MaskedArray.max.__doc__ + +def ptp(obj, axis=None, out=None, fill_value=None): + """a.ptp(axis=None) = a.max(axis)-a.min(axis)""" + try: + return obj.ptp(axis, out=out, fill_value=fill_value) + except (AttributeError, TypeError): + # If obj doesn't have a ptp method, + # ...or if the method doesn't accept a fill_value argument + return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out) +ptp.__doc__ = MaskedArray.ptp.__doc__ + + +#####--------------------------------------------------------------------------- +#---- --- Definition of functions from the corresponding methods --- +#####--------------------------------------------------------------------------- +class _frommethod: + """ + Define functions from existing MaskedArray methods. + + Parameters + ---------- + methodname : str + Name of the method to transform. + + """ + def __init__(self, methodname, reversed=False): + self.__name__ = methodname + self.__doc__ = self.getdoc() + self.reversed = reversed + # + def getdoc(self): + "Return the doc of the function (from the doc of the method)." 
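+ # _frommethod instances behave like module-level functions: __call__
+ # below prefers the input's own method and falls back to wrapping the
+ # input in a MaskedArray. Sketch (illustrative):
+ # >>> np.ma.mean([1, 2, 3])
+ # 2.0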
+        meth = getattr(MaskedArray, self.__name__, None) or\
+               getattr(np, self.__name__, None)
+        signature = self.__name__ + get_object_signature(meth)
+        if meth is not None:
+            doc = """    %s\n%s""" % (signature, getattr(meth, '__doc__', None))
+            return doc
+    #
+    def __call__(self, a, *args, **params):
+        if self.reversed:
+            args = list(args)
+            arr = args[0]
+            args[0] = a
+            a = arr
+        # Get the method from the array (if possible)
+        method_name = self.__name__
+        method = getattr(a, method_name, None)
+        if method is not None:
+            return method(*args, **params)
+        # Still here? Then a is not a MaskedArray
+        method = getattr(MaskedArray, method_name, None)
+        if method is not None:
+            return method(MaskedArray(a), *args, **params)
+        # Still here? OK, let's call the corresponding np function
+        method = getattr(np, method_name)
+        return method(a, *args, **params)
+
+all = _frommethod('all')
+anomalies = anom = _frommethod('anom')
+any = _frommethod('any')
+compress = _frommethod('compress', reversed=True)
+cumprod = _frommethod('cumprod')
+cumsum = _frommethod('cumsum')
+copy = _frommethod('copy')
+diagonal = _frommethod('diagonal')
+harden_mask = _frommethod('harden_mask')
+ids = _frommethod('ids')
+maximum = _maximum_operation()
+mean = _frommethod('mean')
+minimum = _minimum_operation()
+nonzero = _frommethod('nonzero')
+prod = _frommethod('prod')
+product = _frommethod('prod')
+ravel = _frommethod('ravel')
+repeat = _frommethod('repeat')
+shrink_mask = _frommethod('shrink_mask')
+soften_mask = _frommethod('soften_mask')
+std = _frommethod('std')
+sum = _frommethod('sum')
+swapaxes = _frommethod('swapaxes')
+#take = _frommethod('take')
+trace = _frommethod('trace')
+var = _frommethod('var')
+
+def take(a, indices, axis=None, out=None, mode='raise'):
+    """
+    Function version of the eponymous method.
+    """
+    a = masked_array(a)
+    return a.take(indices, axis=axis, out=out, mode=mode)
+
+
+#..............................................................................
+def power(a, b, third=None):
+    """
+    Returns the element-wise result of raising the first array to the
+    powers from the second array.
+
+    This is the masked array version of `numpy.power`. For details see
+    `numpy.power`.
+
+    See Also
+    --------
+    numpy.power
+
+    Notes
+    -----
+    The *out* argument to `numpy.power` is not supported, `third` has to be
+    None.
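+
+    Examples
+    --------
+    Masked entries stay masked in the result:
+
+    >>> import numpy.ma as ma
+    >>> x = ma.array([1., 2., 3.], mask=[0, 0, 1])
+    >>> print ma.power(x, 2)
+    [1.0 4.0 --]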
+ + """ + if third is not None: + raise MaskError("3-argument power not supported.") + # Get the masks + ma = getmask(a) + mb = getmask(b) + m = mask_or(ma, mb) + # Get the rawdata + fa = getdata(a) + fb = getdata(b) + # Get the type of the result (so that we preserve subclasses) + if isinstance(a, MaskedArray): + basetype = type(a) + else: + basetype = MaskedArray + # Get the result and view it as a (subclass of) MaskedArray + with np.errstate(divide='ignore', invalid='ignore'): + result = np.where(m, fa, umath.power(fa, fb)).view(basetype) + result._update_from(a) + # Find where we're in trouble w/ NaNs and Infs + invalid = np.logical_not(np.isfinite(result.view(ndarray))) + # Add the initial mask + if m is not nomask: + if not (result.ndim): + return masked + result._mask = np.logical_or(m, invalid) + # Fix the invalid parts + if invalid.any(): + if not result.ndim: + return masked + elif result._mask is nomask: + result._mask = invalid + result._data[invalid] = result.fill_value + return result + +# if fb.dtype.char in typecodes["Integer"]: +# return masked_array(umath.power(fa, fb), m) +# m = mask_or(m, (fa < 0) & (fb != fb.astype(int))) +# if m is nomask: +# return masked_array(umath.power(fa, fb)) +# else: +# fa = fa.copy() +# if m.all(): +# fa.flat = 1 +# else: +# np.copyto(fa, 1, where=m) +# return masked_array(umath.power(fa, fb), m) + +#.............................................................................. +def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None): + "Function version of the eponymous method." + if fill_value is None: + fill_value = default_fill_value(a) + d = filled(a, fill_value) + if axis is None: + return d.argsort(kind=kind, order=order) + return d.argsort(axis, kind=kind, order=order) +argsort.__doc__ = MaskedArray.argsort.__doc__ + +def argmin(a, axis=None, fill_value=None): + "Function version of the eponymous method." + if fill_value is None: + fill_value = default_fill_value(a) + d = filled(a, fill_value) + return d.argmin(axis=axis) +argmin.__doc__ = MaskedArray.argmin.__doc__ + +def argmax(a, axis=None, fill_value=None): + "Function version of the eponymous method." + if fill_value is None: + fill_value = default_fill_value(a) + try: + fill_value = -fill_value + except: + pass + d = filled(a, fill_value) + return d.argmax(axis=axis) +argmax.__doc__ = MaskedArray.argmax.__doc__ + +def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None): + "Function version of the eponymous method." + a = narray(a, copy=True, subok=True) + if axis is None: + a = a.flatten() + axis = 0 + if fill_value is None: + if endwith: + filler = minimum_fill_value(a) + else: + filler = maximum_fill_value(a) + else: + filler = fill_value + + sindx = filled(a, filler).argsort(axis=axis, kind=kind, order=order) + + # save meshgrid memory for 1d arrays + if a.ndim == 1: + indx = sindx + else: + indx = np.meshgrid(*[np.arange(x) for x in a.shape], sparse=True, + indexing='ij') + indx[axis] = sindx + return a[indx] +sort.__doc__ = MaskedArray.sort.__doc__ + + +def compressed(x): + """ + Return all the non-masked data as a 1-D array. + + This function is equivalent to calling the "compressed" method of a + `MaskedArray`, see `MaskedArray.compressed` for details. + + See Also + -------- + MaskedArray.compressed + Equivalent method. + + """ + if not isinstance(x, MaskedArray): + x = asanyarray(x) + return x.compressed() + + +def concatenate(arrays, axis=0): + """ + Concatenate a sequence of arrays along the given axis. 
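+
+    The data and the masks are concatenated; for ordinary (non-structured)
+    dtypes, the mask of the result is shrunk to `nomask` when none of the
+    entries is masked.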
+ + Parameters + ---------- + arrays : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. Default is 0. + + Returns + ------- + result : MaskedArray + The concatenated array with any masked entries preserved. + + See Also + -------- + numpy.concatenate : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(3) + >>> a[1] = ma.masked + >>> b = ma.arange(2, 5) + >>> a + masked_array(data = [0 -- 2], + mask = [False True False], + fill_value = 999999) + >>> b + masked_array(data = [2 3 4], + mask = False, + fill_value = 999999) + >>> ma.concatenate([a, b]) + masked_array(data = [0 -- 2 2 3 4], + mask = [False True False False False False], + fill_value = 999999) + + """ + d = np.concatenate([getdata(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask... + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + # OK, so we have to concatenate the masks + dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + # If we decide to keep a '_shrinkmask' option, we want to check that ... + # ... all of them are True, and then check for dm.any() +# shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays]) +# if shrink and not dm.any(): + if not dm.dtype.fields and not dm.any(): + data._mask = nomask + else: + data._mask = dm.reshape(d.shape) + return data + +def count(a, axis=None): + if isinstance(a, MaskedArray): + return a.count(axis) + return masked_array(a, copy=False).count(axis) +count.__doc__ = MaskedArray.count.__doc__ + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + This function is the equivalent of `numpy.diag` that takes masked + values into account, see `numpy.diag` for details. + + See Also + -------- + numpy.diag : Equivalent function for ndarrays. + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + +def expand_dims(x, axis): + """ + Expand the shape of an array. + + Expands the shape of the array by including a new axis before the one + specified by the `axis` parameter. This function behaves the same as + `numpy.expand_dims` but preserves masked elements. + + See Also + -------- + numpy.expand_dims : Equivalent function in top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.array([1, 2, 4]) + >>> x[1] = ma.masked + >>> x + masked_array(data = [1 -- 4], + mask = [False True False], + fill_value = 999999) + >>> np.expand_dims(x, axis=0) + array([[1, 2, 4]]) + >>> ma.expand_dims(x, axis=0) + masked_array(data = + [[1 -- 4]], + mask = + [[False True False]], + fill_value = 999999) + + The same result can be achieved using slicing syntax with `np.newaxis`. + + >>> x[np.newaxis, :] + masked_array(data = + [[1 -- 4]], + mask = + [[False True False]], + fill_value = 999999) + + """ + result = n_expand_dims(x, axis) + if isinstance(x, MaskedArray): + new_shape = result.shape + result = x.view() + result.shape = new_shape + if result._mask is not nomask: + result._mask.shape = new_shape + return result + +#...................................... +def left_shift (a, n): + """ + Shift the bits of an integer to the left. 
+
+    This is the masked array version of `numpy.left_shift`, for details
+    see that function.
+
+    See Also
+    --------
+    numpy.left_shift
+
+    """
+    m = getmask(a)
+    if m is nomask:
+        d = umath.left_shift(filled(a), n)
+        return masked_array(d)
+    else:
+        d = umath.left_shift(filled(a, 0), n)
+        return masked_array(d, mask=m)
+
+def right_shift (a, n):
+    """
+    Shift the bits of an integer to the right.
+
+    This is the masked array version of `numpy.right_shift`, for details
+    see that function.
+
+    See Also
+    --------
+    numpy.right_shift
+
+    """
+    m = getmask(a)
+    if m is nomask:
+        d = umath.right_shift(filled(a), n)
+        return masked_array(d)
+    else:
+        d = umath.right_shift(filled(a, 0), n)
+        return masked_array(d, mask=m)
+
+#......................................
+def put(a, indices, values, mode='raise'):
+    """
+    Set storage-indexed locations to corresponding values.
+
+    This function is equivalent to `MaskedArray.put`, see that method
+    for details.
+
+    See Also
+    --------
+    MaskedArray.put
+
+    """
+    # We can't use 'frommethod', the order of arguments is different
+    try:
+        return a.put(indices, values, mode=mode)
+    except AttributeError:
+        return narray(a, copy=False).put(indices, values, mode=mode)
+
+def putmask(a, mask, values): #, mode='raise'):
+    """
+    Changes elements of an array based on conditional and input values.
+
+    This is the masked array version of `numpy.putmask`, for details see
+    `numpy.putmask`.
+
+    See Also
+    --------
+    numpy.putmask
+
+    Notes
+    -----
+    Using a masked array as `values` will **not** transform a `ndarray` into
+    a `MaskedArray`.
+
+    """
+    # We can't use 'frommethod', the order of arguments is different
+    if not isinstance(a, MaskedArray):
+        a = a.view(MaskedArray)
+    (valdata, valmask) = (getdata(values), getmask(values))
+    if getmask(a) is nomask:
+        if valmask is not nomask:
+            a._sharedmask = True
+            a._mask = make_mask_none(a.shape, a.dtype)
+            np.copyto(a._mask, valmask, where=mask)
+    elif a._hardmask:
+        if valmask is not nomask:
+            m = a._mask.copy()
+            np.copyto(m, valmask, where=mask)
+            a.mask |= m
+    else:
+        if valmask is nomask:
+            valmask = getmaskarray(values)
+        np.copyto(a._mask, valmask, where=mask)
+    np.copyto(a._data, valdata, where=mask)
+    return
+
+def transpose(a, axes=None):
+    """
+    Permute the dimensions of an array.
+
+    This function is exactly equivalent to `numpy.transpose`.
+
+    See Also
+    --------
+    numpy.transpose : Equivalent function in top-level NumPy module.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> x = ma.arange(4).reshape((2,2))
+    >>> x[1, 1] = ma.masked
+    >>> x
+    masked_array(data =
+     [[0 1]
+     [2 --]],
+                 mask =
+     [[False False]
+     [False  True]],
+           fill_value = 999999)
+    >>> ma.transpose(x)
+    masked_array(data =
+     [[0 2]
+     [1 --]],
+                 mask =
+     [[False False]
+     [False  True]],
+           fill_value = 999999)
+
+    """
+    # We can't use 'frommethod', as 'transpose' doesn't take keywords
+    try:
+        return a.transpose(axes)
+    except AttributeError:
+        return narray(a, copy=False).transpose(axes).view(MaskedArray)
+
+def reshape(a, new_shape, order='C'):
+    """
+    Returns an array containing the same data with a new shape.
+
+    Refer to `MaskedArray.reshape` for full documentation.
+
+    See Also
+    --------
+    MaskedArray.reshape : equivalent function
+
+    """
+    # We can't use 'frommethod', as it whines about some parameters.
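+    # Try the input's own reshape first, which preserves the mask of
+    # MaskedArray inputs; plain sequences are converted to ndarray,
+    # reshaped, and viewed as MaskedArray so the return type stays
+    # consistent.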
+ try: + return a.reshape(new_shape, order=order) + except AttributeError: + _tmp = narray(a, copy=False).reshape(new_shape, order=order) + return _tmp.view(MaskedArray) + +def resize(x, new_shape): + """ + Return a new masked array with the specified size and shape. + + This is the masked equivalent of the `numpy.resize` function. The new + array is filled with repeated copies of `x` (in the order that the + data are stored in memory). If `x` is masked, the new array will be + masked, and the new mask will be a repetition of the old one. + + See Also + -------- + numpy.resize : Equivalent function in the top level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.array([[1, 2] ,[3, 4]]) + >>> a[0, 1] = ma.masked + >>> a + masked_array(data = + [[1 --] + [3 4]], + mask = + [[False True] + [False False]], + fill_value = 999999) + >>> np.resize(a, (3, 3)) + array([[1, 2, 3], + [4, 1, 2], + [3, 4, 1]]) + >>> ma.resize(a, (3, 3)) + masked_array(data = + [[1 -- 3] + [4 1 --] + [3 4 1]], + mask = + [[False True False] + [False False True] + [False False False]], + fill_value = 999999) + + A MaskedArray is always returned, regardless of the input type. + + >>> a = np.array([[1, 2] ,[3, 4]]) + >>> ma.resize(a, (3, 3)) + masked_array(data = + [[1 2 3] + [4 1 2] + [3 4 1]], + mask = + False, + fill_value = 999999) + + """ + # We can't use _frommethods here, as N.resize is notoriously whiny. + m = getmask(x) + if m is not nomask: + m = np.resize(m, new_shape) + result = np.resize(x, new_shape).view(get_masked_subclass(x)) + if result.ndim: + result._mask = m + return result + + +#................................................ +def rank(obj): + "maskedarray version of the numpy function." + return np.rank(getdata(obj)) +rank.__doc__ = np.rank.__doc__ +# +def shape(obj): + "maskedarray version of the numpy function." + return np.shape(getdata(obj)) +shape.__doc__ = np.shape.__doc__ +# +def size(obj, axis=None): + "maskedarray version of the numpy function." + return np.size(getdata(obj), axis) +size.__doc__ = np.size.__doc__ +#................................................ + +#####-------------------------------------------------------------------------- +#---- --- Extra functions --- +#####-------------------------------------------------------------------------- +def where (condition, x=None, y=None): + """ + Return a masked array with elements from x or y, depending on condition. + + Returns a masked array, shaped like condition, where the elements + are from `x` when `condition` is True, and from `y` otherwise. + If neither `x` nor `y` are given, the function returns a tuple of + indices where `condition` is True (the result of + ``condition.nonzero()``). + + Parameters + ---------- + condition : array_like, bool + The condition to meet. For each True element, yield the corresponding + element from `x`, otherwise from `y`. + x, y : array_like, optional + Values from which to choose. `x` and `y` need to have the same shape + as condition, or be broadcast-able to that shape. + + Returns + ------- + out : MaskedArray or tuple of ndarrays + The resulting masked array if `x` and `y` were given, otherwise + the result of ``condition.nonzero()``. + + See Also + -------- + numpy.where : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], + ... [1, 0, 1], + ... 
[0, 1, 0]]) + >>> print x + [[0.0 -- 2.0] + [-- 4.0 --] + [6.0 -- 8.0]] + >>> np.ma.where(x > 5) # return the indices where x > 5 + (array([2, 2]), array([0, 2])) + + >>> print np.ma.where(x > 5, x, -3.1416) + [[-3.1416 -- -3.1416] + [-- -3.1416 --] + [6.0 -- 8.0]] + + """ + if x is None and y is None: + return filled(condition, 0).nonzero() + elif x is None or y is None: + raise ValueError("Either both or neither x and y should be given.") + # Get the condition ............... + fc = filled(condition, 0).astype(MaskType) + notfc = np.logical_not(fc) + # Get the data ...................................... + xv = getdata(x) + yv = getdata(y) + if x is masked: + ndtype = yv.dtype + elif y is masked: + ndtype = xv.dtype + else: + ndtype = np.find_common_type([xv.dtype, yv.dtype], []) + # Construct an empty array and fill it + d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray) + _data = d._data + np.copyto(_data, xv.astype(ndtype), where=fc) + np.copyto(_data, yv.astype(ndtype), where=notfc) + # Create an empty mask and fill it + _mask = d._mask = np.zeros(fc.shape, dtype=MaskType) + np.copyto(_mask, getmask(x), where=fc) + np.copyto(_mask, getmask(y), where=notfc) + _mask |= getmaskarray(condition) + if not _mask.any(): + d._mask = nomask + return d + +def choose (indices, choices, out=None, mode='raise'): + """ + Use an index array to construct a new array from a set of choices. + + Given an array of integers and a set of n choice arrays, this method + will create a new array that merges each of the choice arrays. Where a + value in `a` is i, the new array will have the value that choices[i] + contains in the same place. + + Parameters + ---------- + a : ndarray of ints + This array must contain integers in ``[0, n-1]``, where n is the + number of choices. + choices : sequence of arrays + Choice arrays. The index array and all of the choices should be + broadcastable to the same shape. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and `dtype`. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' : raise an error + * 'wrap' : wrap around + * 'clip' : clip to the range + + Returns + ------- + merged_array : array + + See Also + -------- + choose : equivalent function + + Examples + -------- + >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) + >>> a = np.array([2, 1, 0]) + >>> np.ma.choose(a, choice) + masked_array(data = [3 2 1], + mask = False, + fill_value=999999) + + """ + def fmask (x): + "Returns the filled array, or True if masked." + if x is masked: + return True + return filled(x) + def nmask (x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return True + return getmask(x) + # Get the indices...... + c = filled(indices, 0) + # Get the masks........ + masks = [nmask(x) for x in choices] + data = [fmask(x) for x in choices] + # Construct the mask + outputmask = np.choose(c, masks, mode=mode) + outputmask = make_mask(mask_or(outputmask, getmask(indices)), + copy=0, shrink=True) + # Get the choices...... + d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(outputmask) + return out + d.__setmask__(outputmask) + return d + + +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. 
The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! + + """ + if out is None: + return np.round_(a, decimals, out) + else: + np.round_(getdata(a), decimals, out) + if hasattr(out, '_mask'): + out._mask = getmask(a) + return out +round = round_ + +def inner(a, b): + """ + Returns the inner product of a and b for arrays of floating point types. + + Like the generic NumPy equivalent the product sum is over the last dimension + of a and b. + + Notes + ----- + The first argument is not conjugated. + + """ + fa = filled(a, 0) + fb = filled(b, 0) + if len(fa.shape) == 0: + fa.shape = (1,) + if len(fb.shape) == 0: + fb.shape = (1,) + return np.inner(fa, fb).view(MaskedArray) +inner.__doc__ = doc_note(np.inner.__doc__, + "Masked values are replaced by 0.") +innerproduct = inner + +def outer(a, b): + "maskedarray version of the numpy function." + fa = filled(a, 0).ravel() + fb = filled(b, 0).ravel() + d = np.outer(fa, fb) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + return masked_array(d) + ma = getmaskarray(a) + mb = getmaskarray(b) + m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0) + return masked_array(d, mask=m) +outer.__doc__ = doc_note(np.outer.__doc__, + "Masked values are replaced by 0.") +outerproduct = outer + +def allequal (a, b, fill_value=True): + """ + Return True if all entries of a and b are equal, using + fill_value as a truth value where either or both are masked. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + fill_value : bool, optional + Whether masked values in a or b are considered equal (True) or not + (False). + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, + then False is returned. + + See Also + -------- + all, any + numpy.ma.allclose + + Examples + -------- + >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data = [10000000000.0 1e-07 --], + mask = [False False True], + fill_value=1e+20) + + >>> b = array([1e10, 1e-7, -42.0]) + >>> b + array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) + >>> ma.allequal(a, b, fill_value=False) + False + >>> ma.allequal(a, b) + True + + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + +def allclose (a, b, masked_equal=True, rtol=1e-5, atol=1e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This function is equivalent to `allclose` except that masked values + are treated as equal (default) or unequal, depending on the `masked_equal` + argument. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + masked_equal : bool, optional + Whether masked values in `a` and `b` are considered equal (True) or not + (False). They are considered equal by default. + rtol : float, optional + Relative tolerance. 
The relative difference is equal to ``rtol * b``. + Default is 1e-5. + atol : float, optional + Absolute tolerance. The absolute difference is equal to `atol`. + Default is 1e-8. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any + numpy.allclose : the non-masked `allclose`. + + Notes + ----- + If the following equation is element-wise True, then `allclose` returns + True:: + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of `a` and `b` are equal subject to + given tolerances. + + Examples + -------- + >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data = [10000000000.0 1e-07 --], + mask = [False False True], + fill_value = 1e+20) + >>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) + >>> ma.allclose(a, b) + False + + >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) + >>> ma.allclose(a, b) + True + >>> ma.allclose(a, b, masked_equal=False) + False + + Masked values are not compared directly. + + >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) + >>> ma.allclose(a, b) + True + >>> ma.allclose(a, b, masked_equal=False) + False + + """ + x = masked_array(a, copy=False) + y = masked_array(b, copy=False) + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = np.result_type(y, 1.) + if y.dtype != dtype: + y = masked_array(y, dtype=dtype, copy=False) + + m = mask_or(getmask(x), getmask(y)) + xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) + # If we have some infs, they should fall at the same place. + if not np.all(xinf == filled(np.isinf(y), False)): + return False + # No infs at all + if not np.any(xinf): + d = filled(umath.less_equal(umath.absolute(x - y), + atol + rtol * umath.absolute(y)), + masked_equal) + return np.all(d) + + if not np.all(filled(x[xinf] == y[xinf], masked_equal)): + return False + x = x[~xinf] + y = y[~xinf] + + d = filled(umath.less_equal(umath.absolute(x - y), + atol + rtol * umath.absolute(y)), + masked_equal) + + return np.all(d) + +#.............................................................................. +def asarray(a, dtype=None, order=None): + """ + Convert the input to a masked array of the given data-type. + + No copy is performed if the input is already an `ndarray`. If `a` is + a subclass of `MaskedArray`, a base class `MaskedArray` is returned. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to a masked array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists, ndarrays and masked arrays. + dtype : dtype, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Default is 'C'. + + Returns + ------- + out : MaskedArray + Masked array interpretation of `a`. + + See Also + -------- + asanyarray : Similar to `asarray`, but conserves subclasses. + + Examples + -------- + >>> x = np.arange(10.).reshape(2, 5) + >>> x + array([[ 0., 1., 2., 3., 4.], + [ 5., 6., 7., 8., 9.]]) + >>> np.ma.asarray(x) + masked_array(data = + [[ 0. 1. 2. 3. 4.] + [ 5. 6. 7. 8. 
9.]],
+                 mask =
+      False,
+           fill_value = 1e+20)
+    >>> type(np.ma.asarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False)
+
+def asanyarray(a, dtype=None):
+    """
+    Convert the input to a masked array, conserving subclasses.
+
+    If `a` is a subclass of `MaskedArray`, its class is conserved.
+    No copy is performed if the input is already an `ndarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+
+    Returns
+    -------
+    out : MaskedArray
+        MaskedArray interpretation of `a`.
+
+    See Also
+    --------
+    asarray : Similar to `asanyarray`, but does not conserve subclass.
+
+    Examples
+    --------
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[ 0.,  1.,  2.,  3.,  4.],
+           [ 5.,  6.,  7.,  8.,  9.]])
+    >>> np.ma.asanyarray(x)
+    masked_array(data =
+     [[ 0.  1.  2.  3.  4.]
+     [ 5.  6.  7.  8.  9.]],
+                 mask =
+      False,
+           fill_value = 1e+20)
+    >>> type(np.ma.asanyarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
+
+
+#####--------------------------------------------------------------------------
+#---- --- Pickling ---
+#####--------------------------------------------------------------------------
+def dump(a, F):
+    """
+    Pickle a masked array to a file.
+
+    This is a wrapper around ``cPickle.dump``.
+
+    Parameters
+    ----------
+    a : MaskedArray
+        The array to be pickled.
+    F : str or file-like object
+        The file to pickle `a` to. If a string, the full path to the file.
+
+    """
+    if not hasattr(F, 'readline'):
+        F = open(F, 'w')
+    return pickle.dump(a, F)
+
+def dumps(a):
+    """
+    Return a string corresponding to the pickling of a masked array.
+
+    This is a wrapper around ``cPickle.dumps``.
+
+    Parameters
+    ----------
+    a : MaskedArray
+        The array for which the string representation of the pickle is
+        returned.
+
+    """
+    return pickle.dumps(a)
+
+def load(F):
+    """
+    Wrapper around ``cPickle.load`` which accepts either a file-like object
+    or a filename.
+
+    Parameters
+    ----------
+    F : str or file
+        The file or file name to load.
+
+    See Also
+    --------
+    dump : Pickle an array
+
+    Notes
+    -----
+    This is different from `numpy.load`, which does not use cPickle but loads
+    the NumPy binary .npy format.
+
+    """
+    if not hasattr(F, 'readline'):
+        F = open(F, 'r')
+    return pickle.load(F)
+
+def loads(strg):
+    """
+    Load a pickle from the given string.
+
+    The result of ``cPickle.loads(strg)`` is returned.
+
+    Parameters
+    ----------
+    strg : str
+        The string to load.
+
+    See Also
+    --------
+    dumps : Return a string corresponding to the pickling of a masked array.
+
+    """
+    return pickle.loads(strg)
+
+################################################################################
+def fromfile(file, dtype=float, count= -1, sep=''):
+    raise NotImplementedError("Not yet implemented. Sorry")
+
+
+def fromflex(fxarray):
+    """
+    Build a masked array from a suitable flexible-type array.
+
+    The input array has to have a data-type with ``_data`` and ``_mask``
+    fields. This type of array is output by `MaskedArray.toflex`.
+
+    Parameters
+    ----------
+    fxarray : ndarray
+        The structured input array, containing ``_data`` and ``_mask``
+        fields. If present, other fields are discarded.
+
+    Returns
+    -------
+    result : MaskedArray
+        The constructed masked array.
+
+    See Also
+    --------
+    MaskedArray.toflex : Build a flexible-type array from a masked array.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
+    >>> rec = x.toflex()
+    >>> rec
+    array([[(0, False), (1, True), (2, False)],
+           [(3, True), (4, False), (5, True)],
+           [(6, False), (7, True), (8, False)]],
+          dtype=[('_data', '<i4'), ('_mask', '|b1')])
+    >>> x2 = np.ma.fromflex(rec)
+    >>> x2
+    masked_array(data =
+     [[0 -- 2]
+     [-- 4 --]
+     [6 -- 8]],
+                 mask =
+     [[False  True False]
+     [ True False  True]
+     [False  True False]],
+           fill_value = 999999)
+
+    Extra fields can be present in the structured array but are discarded:
+
+    >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
+    >>> rec2 = np.zeros((2, 2), dtype=dt)
+    >>> rec2
+    array([[(0, False, 0.0), (0, False, 0.0)],
+           [(0, False, 0.0), (0, False, 0.0)]],
+          dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
+    >>> y = np.ma.fromflex(rec2)
+    >>> y
+    masked_array(data =
+     [[0 0]
+     [0 0]],
+                 mask =
+     [[False False]
+     [False False]],
+           fill_value = 999999)
+
+    """
+    return masked_array(fxarray['_data'], mask=fxarray['_mask'])
+
+
+
+class _convert2ma:
+    """
+    Convert functions from numpy to numpy.ma.
+
+    Parameters
+    ----------
+    funcname : str
+        Name of the function to transform.
+
+    """
+    __doc__ = None
+    #
+    def __init__(self, funcname, params=None):
+        self._func = getattr(np, funcname)
+        self.__doc__ = self.getdoc()
+        self._extras = params or {}
+    #
+    def getdoc(self):
+        "Return the doc of the function (from the doc of the method)."
+        doc = getattr(self._func, '__doc__', None)
+        sig = get_object_signature(self._func)
+        if doc:
+            # Add the signature of the function at the beginning of the doc
+            if sig:
+                sig = "%s%s\n" % (self._func.__name__, sig)
+            doc = sig + doc
+        return doc
+    #
+    def __call__(self, a, *args, **params):
+        # Find the common parameters to the call and the definition
+        _extras = self._extras
+        common_params = set(params).intersection(_extras)
+        # Drop the common parameters from the call
+        for p in common_params:
+            _extras[p] = params.pop(p)
+        # Get the result
+        result = self._func.__call__(a, *args, **params).view(MaskedArray)
+        if "fill_value" in common_params:
+            result.fill_value = _extras.get("fill_value", None)
+        if "hardmask" in common_params:
+            result._hardmask = bool(_extras.get("hard_mask", False))
+        return result
+
+arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
+clip = np.clip
+diff = np.diff
+empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
+empty_like = _convert2ma('empty_like')
+frombuffer = _convert2ma('frombuffer')
+fromfunction = _convert2ma('fromfunction')
+identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False))
+indices = np.indices
+ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
+ones_like = np.ones_like
+squeeze = np.squeeze
+zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
+zeros_like = np.zeros_like
+
+###############################################################################
+def append(a, b, axis=None):
+    """Append values to the end of an array.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    arr : array_like
+        Values are appended to a copy of this array.
+    values : array_like
+        These values are appended to a copy of `arr`.  It must be of the
+        correct shape (the same shape as `arr`, excluding `axis`).  If `axis`
+        is not specified, `values` can be any shape and will be flattened
+        before use.
+ axis : int, optional + The axis along which `values` are appended. If `axis` is not given, + both `arr` and `values` are flattened before use. + + Returns + ------- + append : MaskedArray + A copy of `arr` with `values` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. If + `axis` is None, the result is a flattened array. + + See Also + -------- + numpy.append : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_values([1, 2, 3], 2) + >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + >>> print(ma.append(a, b)) + [1 -- 3 4 5 6 -- 8 9] + """ + return concatenate([a, b], axis) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/extras.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/extras.py new file mode 100644 index 0000000000000..82a61a67c3fb2 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/extras.py @@ -0,0 +1,1923 @@ +""" +Masked arrays add-ons. + +A collection of utilities for `numpy.ma`. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +from __future__ import division, absolute_import, print_function + +__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" +__version__ = '1.0' +__revision__ = "$Revision: 3473 $" +__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' + +__all__ = ['apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', + 'clump_masked', 'clump_unmasked', 'column_stack', 'compress_cols', + 'compress_rowcols', 'compress_rows', 'count_masked', 'corrcoef', + 'cov', + 'diagflat', 'dot', 'dstack', + 'ediff1d', + 'flatnotmasked_contiguous', 'flatnotmasked_edges', + 'hsplit', 'hstack', + 'in1d', 'intersect1d', + 'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all', + 'masked_all_like', 'median', 'mr_', + 'notmasked_contiguous', 'notmasked_edges', + 'polyfit', + 'row_stack', + 'setdiff1d', 'setxor1d', + 'unique', 'union1d', + 'vander', 'vstack', + ] + +import itertools +import warnings + +from . import core as ma +from .core import MaskedArray, MAError, add, array, asarray, concatenate, count, \ + filled, getmask, getmaskarray, make_mask_descr, masked, masked_array, \ + mask_or, nomask, ones, sort, zeros +#from core import * + +import numpy as np +from numpy import ndarray, array as nxarray +import numpy.core.umath as umath +from numpy.lib.index_tricks import AxisConcatenator +from numpy.linalg import lstsq + + +#............................................................................... +def issequence(seq): + """Is seq a sequence (ndarray, list or tuple)?""" + if isinstance(seq, (ndarray, tuple, list)): + return True + return False + +def count_masked(arr, axis=None): + """ + Count the number of masked elements along the given axis. + + Parameters + ---------- + arr : array_like + An array with (possibly) masked elements. + axis : int, optional + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + See Also + -------- + MaskedArray.count : Count non-masked elements. 
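+
+    Notes
+    -----
+    Equivalent to ``getmaskarray(arr).sum(axis)``, which is exactly how it
+    is implemented.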
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(9).reshape((3,3)) + >>> a = ma.array(a) + >>> a[1, 0] = ma.masked + >>> a[1, 2] = ma.masked + >>> a[2, 1] = ma.masked + >>> a + masked_array(data = + [[0 1 2] + [-- 4 --] + [6 -- 8]], + mask = + [[False False False] + [ True False True] + [False True False]], + fill_value=999999) + >>> ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. + + >>> ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> ma.count_masked(a, axis=1) + array([0, 2, 1]) + + """ + m = getmaskarray(arr) + return m.sum(axis) + +def masked_all(shape, dtype=float): + """ + Empty masked array with all elements masked. + + Return an empty masked array of the given shape and dtype, where all the + data are masked. + + Parameters + ---------- + shape : tuple + Shape of the required MaskedArray. + dtype : dtype, optional + Data type of the output. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + See Also + -------- + masked_all_like : Empty masked array modelled on an existing array. + + Examples + -------- + >>> import numpy.ma as ma + >>> ma.masked_all((3, 3)) + masked_array(data = + [[-- -- --] + [-- -- --] + [-- -- --]], + mask = + [[ True True True] + [ True True True] + [ True True True]], + fill_value=1e+20) + + The `dtype` parameter defines the underlying data type. + + >>> a = ma.masked_all((3, 3)) + >>> a.dtype + dtype('float64') + >>> a = ma.masked_all((3, 3), dtype=np.int32) + >>> a.dtype + dtype('int32') + + """ + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, make_mask_descr(dtype))) + return a + +def masked_all_like(arr): + """ + Empty masked array with the properties of an existing array. + + Return an empty masked array of the same shape and dtype as + the array `arr`, where all the data are masked. + + Parameters + ---------- + arr : ndarray + An array describing the shape and dtype of the required MaskedArray. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + Raises + ------ + AttributeError + If `arr` doesn't have a shape attribute (i.e. not an ndarray) + + See Also + -------- + masked_all : Empty masked array with all elements masked. + + Examples + -------- + >>> import numpy.ma as ma + >>> arr = np.zeros((2, 3), dtype=np.float32) + >>> arr + array([[ 0., 0., 0.], + [ 0., 0., 0.]], dtype=float32) + >>> ma.masked_all_like(arr) + masked_array(data = + [[-- -- --] + [-- -- --]], + mask = + [[ True True True] + [ True True True]], + fill_value=1e+20) + + The dtype of the masked array matches the dtype of `arr`. + + >>> arr.dtype + dtype('float32') + >>> ma.masked_all_like(arr).dtype + dtype('float32') + + """ + a = np.empty_like(arr).view(MaskedArray) + a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) + return a + + +#####-------------------------------------------------------------------------- +#---- --- Standard functions --- +#####-------------------------------------------------------------------------- +class _fromnxfunction: + """ + Defines a wrapper to adapt NumPy functions to masked arrays. + + + An instance of `_fromnxfunction` can be called with the same parameters + as the wrapped NumPy function. The docstring of `newfunc` is adapted from + the wrapped function as well, see `getdoc`. + + Parameters + ---------- + funcname : str + The name of the function to be adapted. The function should be + in the NumPy namespace (i.e. ``np.funcname``). 
+ + """ + + def __init__(self, funcname): + self.__name__ = funcname + self.__doc__ = self.getdoc() + + def getdoc(self): + """ + Retrieve the docstring and signature from the function. + + The ``__doc__`` attribute of the function is used as the docstring for + the new masked array version of the function. A note on application + of the function to the mask is appended. + + .. warning:: + If the function docstring already contained a Notes section, the + new docstring will have two Notes sections instead of appending a note + to the existing section. + + Parameters + ---------- + None + + """ + npfunc = getattr(np, self.__name__, None) + doc = getattr(npfunc, '__doc__', None) + if doc: + sig = self.__name__ + ma.get_object_signature(npfunc) + locdoc = "Notes\n-----\nThe function is applied to both the _data"\ + " and the _mask, if any." + return '\n'.join((sig, doc, locdoc)) + return + + + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + if len(args) == 1: + x = args[0] + if isinstance(x, ndarray): + _d = func(x.__array__(), **params) + _m = func(getmaskarray(x), **params) + return masked_array(_d, mask=_m) + elif isinstance(x, tuple) or isinstance(x, list): + _d = func(tuple([np.asarray(a) for a in x]), **params) + _m = func(tuple([getmaskarray(a) for a in x]), **params) + return masked_array(_d, mask=_m) + else: + arrays = [] + args = list(args) + while len(args) > 0 and issequence(args[0]): + arrays.append(args.pop(0)) + res = [] + for x in arrays: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + res.append(masked_array(_d, mask=_m)) + return res + +atleast_1d = _fromnxfunction('atleast_1d') +atleast_2d = _fromnxfunction('atleast_2d') +atleast_3d = _fromnxfunction('atleast_3d') +#atleast_1d = np.atleast_1d +#atleast_2d = np.atleast_2d +#atleast_3d = np.atleast_3d + +vstack = row_stack = _fromnxfunction('vstack') +hstack = _fromnxfunction('hstack') +column_stack = _fromnxfunction('column_stack') +dstack = _fromnxfunction('dstack') + +hsplit = _fromnxfunction('hsplit') + +diagflat = _fromnxfunction('diagflat') + + +#####-------------------------------------------------------------------------- +#---- +#####-------------------------------------------------------------------------- +def flatten_inplace(seq): + """Flatten a sequence in place.""" + k = 0 + while (k != len(seq)): + while hasattr(seq[k], '__iter__'): + seq[k:(k + 1)] = seq[k] + k += 1 + return seq + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + (This docstring should be overwritten) + """ + arr = array(arr, copy=False, subok=True) + nd = arr.ndim + if axis < 0: + axis += nd + if (axis >= nd): + raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." + % (axis, nd)) + ind = [0] * (nd - 1) + i = np.zeros(nd, 'O') + indlist = list(range(nd)) + indlist.remove(axis) + i[axis] = slice(None, None) + outshape = np.asarray(arr.shape).take(indlist) + i.put(indlist, ind) + j = i.copy() + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + # if res is a number, then we have a smaller output array + asscalar = np.isscalar(res) + if not asscalar: + try: + len(res) + except TypeError: + asscalar = True + # Note: we shouldn't set the dtype of the output from the first result... 
+ #...so we force the type to object, and build a list of dtypes + #...we'll just take the largest, to avoid some downcasting + dtypes = [] + if asscalar: + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) + outarr[tuple(ind)] = res + Ntot = np.product(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + dtypes.append(asarray(res).dtype) + k += 1 + else: + res = array(res, copy=False, subok=True) + j = i.copy() + j[axis] = ([slice(None, None)] * res.ndim) + j.put(indlist, ind) + Ntot = np.product(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = res.shape + dtypes.append(asarray(res).dtype) + outshape = flatten_inplace(outshape) + outarr = zeros(outshape, object) + outarr[tuple(flatten_inplace(j.tolist()))] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + j.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(flatten_inplace(j.tolist()))] = res + dtypes.append(asarray(res).dtype) + k += 1 + max_dtypes = np.dtype(np.asarray(dtypes).max()) + if not hasattr(arr, '_mask'): + result = np.asarray(outarr, dtype=max_dtypes) + else: + result = asarray(outarr, dtype=max_dtypes) + result.fill_value = ma.default_fill_value(result) + return result +apply_along_axis.__doc__ = np.apply_along_axis.__doc__ + + +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val +apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ + :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ + """ + + Examples + -------- + >>> a = ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = ma.masked + >>> a[:,1,:] = ma.masked + >>> print a + [[[0 -- 2 3] + [-- -- -- --] + [8 9 10 11]] + + [[12 -- 14 15] + [-- -- -- --] + [20 21 22 23]]] + >>> print ma.apply_over_axes(ma.sum, a, [0,2]) + [[[46] + [--] + [124]]] + + Tuple axis arguments to ufuncs are equivalent: + + >>> print ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + [[[46] + [--] + [124]]] +""" + + +def average(a, axis=None, weights=None, returned=False): + """ + Return the weighted average of array over the given axis. + + Parameters + ---------- + a : array_like + Data to be averaged. + Masked entries are not taken into account in the computation. + axis : int, optional + Axis along which the average is computed. The default is to compute + the average of the flattened array. + weights : array_like, optional + The importance that each element has in the computation of the average. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If ``weights=None``, then all data in `a` are assumed to have a + weight equal to one. If `weights` is complex, the imaginary parts + are ignored. 
+ returned : bool, optional + Flag indicating whether a tuple ``(result, sum of weights)`` + should be returned as output (True), or just the result (False). + Default is False. + + Returns + ------- + average, [sum_of_weights] : (tuple of) scalar or MaskedArray + The average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `np.float64` + if `a` is of integer type, otherwise it is of the same type as `a`. + If returned, `sum_of_weights` is of the same type as `average`. + + Examples + -------- + >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) + >>> np.ma.average(a, weights=[3, 1, 0, 0]) + 1.25 + + >>> x = np.ma.arange(6.).reshape(3, 2) + >>> print x + [[ 0. 1.] + [ 2. 3.] + [ 4. 5.]] + >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], + ... returned=True) + >>> print avg + [2.66666666667 3.66666666667] + + """ + a = asarray(a) + mask = a.mask + ash = a.shape + if ash == (): + ash = (1,) + if axis is None: + if mask is nomask: + if weights is None: + n = a.sum(axis=None) + d = float(a.size) + else: + w = filled(weights, 0.0).ravel() + n = umath.add.reduce(a._data.ravel() * w) + d = umath.add.reduce(w) + del w + else: + if weights is None: + n = a.filled(0).sum(axis=None) + d = float(umath.add.reduce((~mask).ravel())) + else: + w = array(filled(weights, 0.0), float, mask=mask).ravel() + n = add.reduce(a.ravel() * w) + d = add.reduce(w) + del w + else: + if mask is nomask: + if weights is None: + d = ash[axis] * 1.0 + n = add.reduce(a._data, axis) + else: + w = filled(weights, 0.0) + wsh = w.shape + if wsh == (): + wsh = (1,) + if wsh == ash: + w = np.array(w, float, copy=0) + n = add.reduce(a * w, axis) + d = add.reduce(w, axis) + del w + elif wsh == (ash[axis],): + ni = ash[axis] + r = [None] * len(ash) + r[axis] = slice(None, None, 1) + w = eval ("w[" + repr(tuple(r)) + "] * ones(ash, float)") + n = add.reduce(a * w, axis) + d = add.reduce(w, axis, dtype=float) + del w, r + else: + raise ValueError('average: weights wrong shape.') + else: + if weights is None: + n = add.reduce(a, axis) + d = umath.add.reduce((~mask), axis=axis, dtype=float) + else: + w = filled(weights, 0.0) + wsh = w.shape + if wsh == (): + wsh = (1,) + if wsh == ash: + w = array(w, dtype=float, mask=mask, copy=0) + n = add.reduce(a * w, axis) + d = add.reduce(w, axis, dtype=float) + elif wsh == (ash[axis],): + ni = ash[axis] + r = [None] * len(ash) + r[axis] = slice(None, None, 1) + w = eval ("w[" + repr(tuple(r)) + \ + "] * masked_array(ones(ash, float), mask)") + n = add.reduce(a * w, axis) + d = add.reduce(w, axis, dtype=float) + else: + raise ValueError('average: weights wrong shape.') + del w + if n is masked or d is masked: + return masked + result = n / d + del n + + if isinstance(result, MaskedArray): + if ((axis is None) or (axis == 0 and a.ndim == 1)) and \ + (result.mask is nomask): + result = result._data + if returned: + if not isinstance(d, MaskedArray): + d = masked_array(d) + if isinstance(d, ndarray) and (not d.shape == result.shape): + d = ones(result.shape, dtype=float) * d + if returned: + return result, d + else: + return result + + +def median(a, axis=None, out=None, overwrite_input=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. 
+ axis : int, optional + Axis along which the medians are computed. The default (None) is + to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True, and the input + is not already an `ndarray`, an error will be raised. + + Returns + ------- + median : ndarray + A new array holding the result is returned unless out is + specified, in which case a reference to out is returned. + Return data-type is `float64` for integers and floats smaller than + `float64`, or the input data-type, otherwise. + + See Also + -------- + mean + + Notes + ----- + Given a vector ``V`` with ``N`` non masked values, the median of ``V`` + is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. + ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` + when ``N`` is even. + + Examples + -------- + >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) + >>> np.ma.extras.median(x) + 1.5 + + >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + >>> np.ma.extras.median(x) + 2.5 + >>> np.ma.extras.median(x, axis=-1, overwrite_input=True) + masked_array(data = [ 2. 5.], + mask = False, + fill_value = 1e+20) + + """ + if not hasattr(a, 'mask') or np.count_nonzero(a.mask) == 0: + return masked_array(np.median(a, axis=axis, out=out, + overwrite_input=overwrite_input), copy=False) + if overwrite_input: + if axis is None: + asorted = a.ravel() + asorted.sort() + else: + a.sort(axis=axis) + asorted = a + else: + asorted = sort(a, axis=axis) + if axis is None: + axis = 0 + elif axis < 0: + axis += a.ndim + + counts = asorted.shape[axis] - (asorted.mask).sum(axis=axis) + h = counts // 2 + # create indexing mesh grid for all but reduced axis + axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape) + if i != axis] + ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij') + # insert indices of low and high median + ind.insert(axis, h - 1) + low = asorted[ind] + ind[axis] = h + high = asorted[ind] + # duplicate high if odd number of elements so mean does nothing + odd = counts % 2 == 1 + if asorted.ndim == 1: + if odd: + low = high + else: + low[odd] = high[odd] + return np.ma.mean([low, high], axis=0, out=out) + + +#.............................................................................. +def compress_rowcols(x, axis=None): + """ + Suppress the rows and/or columns of a 2-D array that contain + masked values. + + The suppression behavior is selected with the `axis` parameter. + + - If axis is None, both rows and columns are suppressed. + - If axis is 0, only rows are suppressed. + - If axis is 1 or -1, only columns are suppressed. + + Parameters + ---------- + axis : int, optional + Axis along which to perform the operation. Default is None. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... 
[0, 0, 0]]) + >>> x + masked_array(data = + [[-- 1 2] + [-- 4 5] + [6 7 8]], + mask = + [[ True False False] + [ True False False] + [False False False]], + fill_value = 999999) + + >>> np.ma.extras.compress_rowcols(x) + array([[7, 8]]) + >>> np.ma.extras.compress_rowcols(x, 0) + array([[6, 7, 8]]) + >>> np.ma.extras.compress_rowcols(x, 1) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + x = asarray(x) + if x.ndim != 2: + raise NotImplementedError("compress2d works for 2D arrays only.") + m = getmask(x) + # Nothing is masked: return x + if m is nomask or not m.any(): + return x._data + # All is masked: return empty + if m.all(): + return nxarray([]) + # Builds a list of rows/columns indices + (idxr, idxc) = (list(range(len(x))), list(range(x.shape[1]))) + masked = m.nonzero() + if not axis: + for i in np.unique(masked[0]): + idxr.remove(i) + if axis in [None, 1, -1]: + for j in np.unique(masked[1]): + idxc.remove(j) + return x._data[idxr][:, idxc] + +def compress_rows(a): + """ + Suppress whole rows of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.extras.compress_rowcols(a, 0)``, see + `extras.compress_rowcols` for details. + + See Also + -------- + extras.compress_rowcols + + """ + return compress_rowcols(a, 0) + +def compress_cols(a): + """ + Suppress whole columns of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.extras.compress_rowcols(a, 1)``, see + `extras.compress_rowcols` for details. + + See Also + -------- + extras.compress_rowcols + + """ + return compress_rowcols(a, 1) + +def mask_rowcols(a, axis=None): + """ + Mask rows and/or columns of a 2D array that contain masked values. + + Mask whole rows and/or columns of a 2D array that contain + masked values. The masking behavior is selected using the + `axis` parameter. + + - If `axis` is None, rows *and* columns are masked. + - If `axis` is 0, only rows are masked. + - If `axis` is 1 or -1, only columns are masked. + + Parameters + ---------- + a : array_like, MaskedArray + The array to mask. If not a MaskedArray instance (or if no array + elements are masked). The result is a MaskedArray with `mask` set + to `nomask` (False). Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. If None, applies to a + flattened version of the array. + + Returns + ------- + a : MaskedArray + A modified version of the input array, masked depending on the value + of the `axis` parameter. + + Raises + ------ + NotImplementedError + If input array `a` is not 2D. + + See Also + -------- + mask_rows : Mask rows of a 2D array that contain masked values. + mask_cols : Mask cols of a 2D array that contain masked values. + masked_where : Mask where a condition is met. + + Notes + ----- + The input array's mask is modified by this function. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=np.int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array(data = + [[0 0 0] + [0 -- 0] + [0 0 0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=999999) + >>> ma.mask_rowcols(a) + masked_array(data = + [[0 -- 0] + [-- -- --] + [0 -- 0]], + mask = + [[False True False] + [ True True True] + [False True False]], + fill_value=999999) + + """ + a = array(a, subok=False) + if a.ndim != 2: + raise NotImplementedError("mask_rowcols works for 2D arrays only.") + m = getmask(a) + # Nothing is masked: return a + if m is nomask or not m.any(): + return a + maskedval = m.nonzero() + a._mask = a._mask.copy() + if not axis: + a[np.unique(maskedval[0])] = masked + if axis in [None, 1, -1]: + a[:, np.unique(maskedval[1])] = masked + return a + +def mask_rows(a, axis=None): + """ + Mask rows of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=np.int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array(data = + [[0 0 0] + [0 -- 0] + [0 0 0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=999999) + >>> ma.mask_rows(a) + masked_array(data = + [[0 0 0] + [-- -- --] + [0 0 0]], + mask = + [[False False False] + [ True True True] + [False False False]], + fill_value=999999) + + """ + return mask_rowcols(a, 0) + +def mask_cols(a, axis=None): + """ + Mask columns of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=np.int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array(data = + [[0 0 0] + [0 -- 0] + [0 0 0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=999999) + >>> ma.mask_cols(a) + masked_array(data = + [[0 -- 0] + [0 -- 0] + [0 -- 0]], + mask = + [[False True False] + [False True False] + [False True False]], + fill_value=999999) + + """ + return mask_rowcols(a, 1) + + +def dot(a, b, strict=False): + """ + Return the dot product of two arrays. + + .. note:: + Works only with 2-D arrays at the moment. + + This function is the equivalent of `numpy.dot` that takes masked values + into account, see `numpy.dot` for details. + + Parameters + ---------- + a, b : ndarray + Inputs arrays. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) for the + computation. Default is False. + Propagating the mask means that if a masked value appears in a row or + column, the whole row or column is considered masked. + + See Also + -------- + numpy.dot : Equivalent function for ndarrays. 
+ + Examples + -------- + >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) + >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) + >>> np.ma.dot(a, b) + masked_array(data = + [[21 26] + [45 64]], + mask = + [[False False] + [False False]], + fill_value = 999999) + >>> np.ma.dot(a, b, strict=True) + masked_array(data = + [[-- --] + [-- 64]], + mask = + [[ True True] + [ True False]], + fill_value = 999999) + + """ + #!!!: Works only with 2D arrays. There should be a way to get it to run with higher dimension + if strict and (a.ndim == 2) and (b.ndim == 2): + a = mask_rows(a) + b = mask_cols(b) + # + d = np.dot(filled(a, 0), filled(b, 0)) + # + am = (~getmaskarray(a)) + bm = (~getmaskarray(b)) + m = ~np.dot(am, bm) + return masked_array(d, mask=m) + +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- + +def ediff1d(arr, to_end=None, to_begin=None): + """ + Compute the differences between consecutive elements of an array. + + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account, see `numpy.ediff1d` for details. + + See Also + -------- + numpy.ediff1d : Equivalent function for ndarrays. + + """ + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] + # + if to_begin is not None: + arrays.insert(0, to_begin) + if to_end is not None: + arrays.append(to_end) + # + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) + # + return ed + + +def unique(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). The output array + is always a masked array. See `numpy.unique` for more details. + + See Also + -------- + numpy.unique : Equivalent function for ndarrays. + + """ + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + + +def intersect1d(ar1, ar2, assume_unique=False): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See `numpy.intersect1d` for more details. + + See Also + -------- + numpy.intersect1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> intersect1d(x, y) + masked_array(data = [1 3 --], + mask = [False False True], + fill_value = 999999) + + """ + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) + aux.sort() + return aux[:-1][aux[1:] == aux[:-1]] + + +def setxor1d(ar1, ar2, assume_unique=False): + """ + Set exclusive-or of 1-D arrays with unique elements. + + The output is always a masked array. See `numpy.setxor1d` for more details. + + See Also + -------- + numpy.setxor1d : Equivalent function for ndarrays. 
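The mask arithmetic in `dot` above is compact: on boolean arrays, `np.dot` behaves as an any-of sum-product, so `~np.dot(am, bm)` masks exactly the output cells to which no fully unmasked pair of operands contributes. A sketch reproducing the non-strict docstring example:

```python
import numpy as np

a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
avalid = ~np.ma.getmaskarray(a)
bvalid = ~np.ma.getmaskarray(b)
out_mask = ~np.dot(avalid, bvalid)       # all False: every cell has a valid pair
data = np.dot(a.filled(0), b.filled(0))  # masked entries contribute 0
print(np.ma.array(data, mask=out_mask))  # [[21 26], [45 64]]
```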
+ + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = ma.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of an array is also present in a second + array. + + The output is always a masked array. See `numpy.in1d` for more details. + + See Also + -------- + numpy.in1d : Equivalent function for ndarrays. + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = ma.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + + +def union1d(ar1, ar2): + """ + Union of two arrays. + + The output is always a masked array. See `numpy.union1d` for more details. + + See also + -------- + numpy.union1d : Equivalent function for ndarrays. + + """ + return unique(ma.concatenate((ar1, ar2))) + + +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Set difference of 1D arrays with unique elements. + + The output is always a masked array. See `numpy.setdiff1d` for more + details. + + See Also + -------- + numpy.setdiff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) + >>> np.ma.extras.setdiff1d(x, [1, 2]) + masked_array(data = [3 --], + mask = [False True], + fill_value = 999999) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + aux = in1d(ar1, ar2, assume_unique=True) + if aux.size == 0: + return aux + else: + return ma.asarray(ar1)[aux == 0] + + +#####-------------------------------------------------------------------------- +#---- --- Covariance --- +#####-------------------------------------------------------------------------- + + + + +def _covhelper(x, y=None, rowvar=True, allow_masked=True): + """ + Private function for the computation of covariance and correlation + coefficients. 
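`setxor1d` and `in1d` above lean on the same idea: concatenate, stable-sort, and compare neighbours, so a value present in both inputs shows up as a repeated neighbour (the mergesort guarantees the copy from the first array comes first). A plain-array sketch of the membership test:

```python
import numpy as np

ar1 = np.unique(np.array([0, 1, 2, 5, 0]))   # [0 1 2 5]
ar2 = np.array([0, 2])
ar = np.concatenate((ar1, ar2))
order = ar.argsort(kind='mergesort')         # stable: ar1 copies come first
sar = ar[order]
flag = np.concatenate((sar[1:] == sar[:-1], [False]))
print(flag[order.argsort(kind='mergesort')][:len(ar1)])
# [ True False  True False] -- same as np.in1d(ar1, ar2)
```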
+ + """ + x = ma.array(x, ndmin=2, copy=True, dtype=float) + xmask = ma.getmaskarray(x) + # Quick exit if we can't process masked data + if not allow_masked and xmask.any(): + raise ValueError("Cannot process masked data...") + # + if x.shape[0] == 1: + rowvar = True + # Make sure that rowvar is either 0 or 1 + rowvar = int(bool(rowvar)) + axis = 1 - rowvar + if rowvar: + tup = (slice(None), None) + else: + tup = (None, slice(None)) + # + if y is None: + xnotmask = np.logical_not(xmask).astype(int) + else: + y = array(y, copy=False, ndmin=2, dtype=float) + ymask = ma.getmaskarray(y) + if not allow_masked and ymask.any(): + raise ValueError("Cannot process masked data...") + if xmask.any() or ymask.any(): + if y.shape == x.shape: + # Define some common mask + common_mask = np.logical_or(xmask, ymask) + if common_mask is not nomask: + x.unshare_mask() + y.unshare_mask() + xmask = x._mask = y._mask = ymask = common_mask + x = ma.concatenate((x, y), axis) + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) + x -= x.mean(axis=rowvar)[tup] + return (x, xnotmask, rowvar) + + +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Estimate the covariance matrix. + + Except for the handling of missing data this function does the same as + `numpy.cov`. For more details and examples, see `numpy.cov`. + + By default, masked values are recognized as such. If `x` and `y` have the + same shape, a common mask is allocated: if ``x[i,j]`` is masked, then + ``y[i,j]`` will also be masked. + Setting `allow_masked` to False will raise an exception if values are + missing in either of the input arrays. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + form as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises a `ValueError` exception when some values are missing. + ddof : {None, int}, optional + .. versionadded:: 1.5 + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + + Raises + ------ + ValueError + Raised if some values are missing and `allow_masked` is False. + + See Also + -------- + numpy.cov + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. 
- ddof + result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof + result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + return result + + +def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Return correlation coefficients of the input array. + + Except for the handling of missing data this function does the same as + `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is 1, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. + ddof : {None, int}, optional + .. versionadded:: 1.5 + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + See Also + -------- + numpy.corrcoef : Equivalent function in top-level NumPy module. + cov : Estimate the covariance matrix. + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + # Get the data + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + # Compute the covariance matrix + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof + c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof + c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + # Check whether we have a scalar + try: + diag = ma.diagonal(c) + except ValueError: + return 1 + # + if xnotmask.all(): + _denom = ma.sqrt(ma.multiply.outer(diag, diag)) + else: + _denom = diagflat(diag) + n = x.shape[1 - rowvar] + if rowvar: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols(vstack((x[i], x[j]))).var(axis=1, ddof=ddof) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + else: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols( + vstack((x[:, i], x[:, j]))).var(axis=1, ddof=ddof) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + return c / _denom + +#####-------------------------------------------------------------------------- +#---- --- Concatenation helpers --- +#####-------------------------------------------------------------------------- + +class MAxisConcatenator(AxisConcatenator): + """ + Translate slice objects to concatenation along an axis. + + For documentation on usage, see `mr_class`. 
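In both `cov` and `corrcoef` above, the normalisation factor is computed per entry: `np.dot(xnotmask, xnotmask.T)` counts, for each pair of variables, the observations where both are unmasked. A sketch with invented data and one missing value:

```python
import numpy as np

x = np.ma.array([[1., 2., 3., 4.],
                 [2., 4., 6., 9.]],
                mask=[[0, 0, 1, 0], [0, 0, 0, 0]])
xnotmask = np.logical_not(np.ma.getmaskarray(x)).astype(int)
print(np.dot(xnotmask, xnotmask.T))
# [[3 3]
#  [3 4]] -> pairwise-complete counts; cov divides by these minus ddof
```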
+
+    See Also
+    --------
+    mr_class
+
+    """
+
+    def __init__(self, axis=0):
+        AxisConcatenator.__init__(self, axis, matrix=False)
+
+    def __getitem__(self, key):
+        if isinstance(key, str):
+            raise MAError("Unavailable for masked array.")
+        if not isinstance(key, tuple):
+            key = (key,)
+        objs = []
+        scalars = []
+        final_dtypedescr = None
+        for k in range(len(key)):
+            scalar = False
+            if isinstance(key[k], slice):
+                step = key[k].step
+                start = key[k].start
+                stop = key[k].stop
+                if start is None:
+                    start = 0
+                if step is None:
+                    step = 1
+                if isinstance(step, complex):
+                    size = int(abs(step))
+                    newobj = np.linspace(start, stop, num=size)
+                else:
+                    newobj = np.arange(start, stop, step)
+            elif isinstance(key[k], str):
+                if (key[k] in 'rc'):
+                    self.matrix = True
+                    self.col = (key[k] == 'c')
+                    continue
+                try:
+                    self.axis = int(key[k])
+                    continue
+                except (ValueError, TypeError):
+                    raise ValueError("Unknown special directive")
+            elif type(key[k]) in np.ScalarType:
+                newobj = asarray([key[k]])
+                scalars.append(k)
+                scalar = True
+            else:
+                newobj = key[k]
+            objs.append(newobj)
+            if isinstance(newobj, ndarray) and not scalar:
+                if final_dtypedescr is None:
+                    final_dtypedescr = newobj.dtype
+                elif newobj.dtype > final_dtypedescr:
+                    final_dtypedescr = newobj.dtype
+        if final_dtypedescr is not None:
+            for k in scalars:
+                objs[k] = objs[k].astype(final_dtypedescr)
+        res = concatenate(tuple(objs), axis=self.axis)
+        return self._retval(res)
+
+class mr_class(MAxisConcatenator):
+    """
+    Translate slice objects to concatenation along the first axis.
+
+    This is the masked array version of `lib.index_tricks.RClass`.
+
+    See Also
+    --------
+    lib.index_tricks.RClass
+
+    Examples
+    --------
+    >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
+    array([1, 2, 3, 0, 0, 4, 5, 6])
+
+    """
+    def __init__(self):
+        MAxisConcatenator.__init__(self, 0)
+
+mr_ = mr_class()
+
+#####--------------------------------------------------------------------------
+#---- Find unmasked data ---
+#####--------------------------------------------------------------------------
+
+def flatnotmasked_edges(a):
+    """
+    Find the indices of the first and last unmasked values.
+
+    Expects a 1-D `MaskedArray`, returns None if all values are masked.
+
+    Parameters
+    ----------
+    a : array_like
+        Input 1-D `MaskedArray`
+
+    Returns
+    -------
+    edges : ndarray or None
+        The indices of first and last non-masked value in the array.
+        Returns None if all values are masked.
+
+    See Also
+    --------
+    flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges,
+    clump_masked, clump_unmasked
+
+    Notes
+    -----
+    Only accepts 1-D arrays.
+
+    Examples
+    --------
+    >>> a = np.ma.arange(10)
+    >>> flatnotmasked_edges(a)
+    array([0, 9])
+
+    >>> mask = (a < 3) | (a > 8) | (a == 5)
+    >>> a[mask] = np.ma.masked
+    >>> np.array(a[~a.mask])
+    array([3, 4, 6, 7, 8])
+
+    >>> flatnotmasked_edges(a)
+    array([3, 8])
+
+    >>> a[:] = np.ma.masked
+    >>> print(flatnotmasked_edges(a))
+    None
+
+    """
+    m = getmask(a)
+    if m is nomask or not np.any(m):
+        return np.array([0, a.size - 1])
+    unmasked = np.flatnonzero(~m)
+    if len(unmasked) > 0:
+        return unmasked[[0, -1]]
+    else:
+        return None
+
+
+def notmasked_edges(a, axis=None):
+    """
+    Find the indices of the first and last unmasked values along an axis.
+
+    If all values are masked, return None. Otherwise, return a list
+    of two tuples, corresponding to the indices of the first and last
+    unmasked values respectively.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
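A quick usage check for the `mr_` concatenator defined above: slices expand to ranges before concatenation and masks survive (a sketch; the expected output assumes the default `axis=0`):

```python
import numpy as np

part = np.ma.array([9, 9], mask=[0, 1])
combined = np.ma.mr_[0:3, part]      # slice 0:3 expands to [0 1 2]
print(combined)                      # [0 1 2 9 --]
print(combined.mask)                 # [False False False False  True]
```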
+    axis : int, optional
+        Axis along which to perform the operation.
+        If None (default), applies to a flattened version of the array.
+
+    Returns
+    -------
+    edges : ndarray or list
+        An array of start and end indexes if there are any masked data in
+        the array. If there are no masked data in the array, `edges` is a
+        list of the first and last index.
+
+    See Also
+    --------
+    flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous,
+    clump_masked, clump_unmasked
+
+    Examples
+    --------
+    >>> a = np.arange(9).reshape((3, 3))
+    >>> m = np.zeros_like(a)
+    >>> m[1:, 1:] = 1
+
+    >>> am = np.ma.array(a, mask=m)
+    >>> np.array(am[~am.mask])
+    array([0, 1, 2, 3, 6])
+
+    >>> np.ma.extras.notmasked_edges(am)
+    array([0, 6])
+
+    """
+    a = asarray(a)
+    if axis is None or a.ndim == 1:
+        return flatnotmasked_edges(a)
+    m = getmaskarray(a)
+    idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
+    return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
+            tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
+
+
+def flatnotmasked_contiguous(a):
+    """
+    Find contiguous unmasked data in a masked array.
+
+    Parameters
+    ----------
+    a : ndarray
+        The input array.
+
+    Returns
+    -------
+    slice_list : list
+        A sorted sequence of slices (start index, end index).
+
+    See Also
+    --------
+    flatnotmasked_edges, notmasked_contiguous, notmasked_edges,
+    clump_masked, clump_unmasked
+
+    Notes
+    -----
+    Only accepts 2-D arrays at most.
+
+    Examples
+    --------
+    >>> a = np.ma.arange(10)
+    >>> np.ma.extras.flatnotmasked_contiguous(a)
+    slice(0, 10, None)
+
+    >>> mask = (a < 3) | (a > 8) | (a == 5)
+    >>> a[mask] = np.ma.masked
+    >>> np.array(a[~a.mask])
+    array([3, 4, 6, 7, 8])
+
+    >>> np.ma.extras.flatnotmasked_contiguous(a)
+    [slice(3, 5, None), slice(6, 9, None)]
+    >>> a[:] = np.ma.masked
+    >>> print(np.ma.extras.flatnotmasked_contiguous(a))
+    None
+
+    """
+    m = getmask(a)
+    if m is nomask:
+        return slice(0, a.size, None)
+    i = 0
+    result = []
+    for (k, g) in itertools.groupby(m.ravel()):
+        n = len(list(g))
+        if not k:
+            result.append(slice(i, i + n))
+        i += n
+    return result or None
+
+def notmasked_contiguous(a, axis=None):
+    """
+    Find contiguous unmasked data in a masked array along the given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
+    axis : int, optional
+        Axis along which to perform the operation.
+        If None (default), applies to a flattened version of the array.
+
+    Returns
+    -------
+    endpoints : list
+        A list of slices (start and end indexes) of unmasked indexes
+        in the array.
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
+    clump_masked, clump_unmasked
+
+    Notes
+    -----
+    Only accepts 2-D arrays at most.
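The run-length idea behind `flatnotmasked_contiguous` above, in isolation: `itertools.groupby` walks the flattened mask, and every run of `False` becomes one slice. A standalone sketch:

```python
import itertools
import numpy as np

mask = np.array([1, 1, 0, 0, 1, 0, 0, 0], dtype=bool)
i, slices = 0, []
for masked_run, group in itertools.groupby(mask):
    n = len(list(group))              # length of the current run
    if not masked_run:                # keep only the unmasked runs
        slices.append(slice(i, i + n))
    i += n
print(slices)   # [slice(2, 4, None), slice(5, 8, None)]
```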
+ + Examples + -------- + >>> a = np.arange(9).reshape((3, 3)) + >>> mask = np.zeros_like(a) + >>> mask[1:, 1:] = 1 + + >>> ma = np.ma.array(a, mask=mask) + >>> np.array(ma[~ma.mask]) + array([0, 1, 2, 3, 6]) + + >>> np.ma.extras.notmasked_contiguous(ma) + [slice(0, 4, None), slice(6, 7, None)] + + """ + a = asarray(a) + nd = a.ndim + if nd > 2: + raise NotImplementedError("Currently limited to atmost 2D array.") + if axis is None or nd == 1: + return flatnotmasked_contiguous(a) + # + result = [] + # + other = (axis + 1) % 2 + idx = [0, 0] + idx[axis] = slice(None, None) + # + for i in range(a.shape[other]): + idx[other] = i + result.append(flatnotmasked_contiguous(a[idx]) or None) + return result + + +def _ezclump(mask): + """ + Finds the clumps (groups of data with the same values) for a 1D bool array. + + Returns a series of slices. + """ + #def clump_masked(a): + if mask.ndim > 1: + mask = mask.ravel() + idx = (mask[1:] ^ mask[:-1]).nonzero() + idx = idx[0] + 1 + slices = [slice(left, right) + for (left, right) in zip(itertools.chain([0], idx), + itertools.chain(idx, [len(mask)]),)] + return slices + + +def clump_unmasked(a): + """ + Return list of slices corresponding to the unmasked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. + + Returns + ------- + slices : list of slice + The list of slices, one for each continuous region of unmasked + elements in `a`. + + Notes + ----- + .. versionadded:: 1.4.0 + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, + notmasked_contiguous, clump_masked + + Examples + -------- + >>> a = np.ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked + >>> np.ma.extras.clump_unmasked(a) + [slice(3, 6, None), slice(7, 8, None)] + + """ + mask = getattr(a, '_mask', nomask) + if mask is nomask: + return [slice(0, a.size)] + slices = _ezclump(mask) + if a[0] is masked: + result = slices[1::2] + else: + result = slices[::2] + return result + + +def clump_masked(a): + """ + Returns a list of slices corresponding to the masked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. + + Returns + ------- + slices : list of slice + The list of slices, one for each continuous region of masked elements + in `a`. + + Notes + ----- + .. versionadded:: 1.4.0 + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, + notmasked_contiguous, clump_unmasked + + Examples + -------- + >>> a = np.ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked + >>> np.ma.extras.clump_masked(a) + [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)] + + """ + mask = ma.getmask(a) + if mask is nomask: + return [] + slices = _ezclump(mask) + if len(slices): + if a[0] is masked: + slices = slices[::2] + else: + slices = slices[1::2] + return slices + + + +#####-------------------------------------------------------------------------- +#---- Polynomial fit --- +#####-------------------------------------------------------------------------- + +def vander(x, n=None): + """ + Masked values in the input array result in rows of zeros. 
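`_ezclump` above finds the clump borders with a single XOR: `mask[1:] ^ mask[:-1]` is `True` exactly where the mask flips. A sketch using the mask from the `clump_masked` docstring example:

```python
import numpy as np

mask = np.zeros(10, dtype=bool)
mask[[0, 1, 2, 6, 8, 9]] = True
borders = (mask[1:] ^ mask[:-1]).nonzero()[0] + 1   # [3 6 7 8]
edges = [0] + list(borders) + [len(mask)]
print([slice(left, right) for left, right in zip(edges[:-1], edges[1:])])
# clumps alternate masked/unmasked starting with mask[0]; the masked
# ones are slices 0:3, 6:7 and 8:10, as in the docstring
```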
+ """ + _vander = np.vander(x, n) + m = getmask(x) + if m is not nomask: + _vander[m] = 0 + return _vander +vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) + + +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Any masked values in x is propagated in y, and vice-versa. + """ + x = asarray(x) + y = asarray(y) + + m = getmask(x) + if y.ndim == 1: + m = mask_or(m, getmask(y)) + elif y.ndim == 2: + my = getmask(mask_rows(y)) + if my is not nomask: + m = mask_or(m, my[:, 0]) + else: + raise TypeError("Expected a 1D or 2D array for y!") + + if w is not None: + w = asarray(w) + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0] : + raise TypeError("expected w and y to have the same length") + m = mask_or(m, getmask(w)) + + if m is not nomask: + if w is not None: + w = ~m*w + else: + w = ~m + + return np.polyfit(x, y, deg, rcond, full, w, cov) + +polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) + +################################################################################ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/mrecords.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/mrecords.py new file mode 100644 index 0000000000000..e66596509f63e --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/mrecords.py @@ -0,0 +1,734 @@ +""":mod:`numpy.ma..mrecords` + +Defines the equivalent of :class:`numpy.recarrays` for masked arrays, +where fields can be accessed as attributes. +Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes +and the masking of individual fields. + +:author: Pierre Gerard-Marchant + +""" +from __future__ import division, absolute_import, print_function + +#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask', +#!!!: or whatever restricted keywords. +#!!!: An idea would be to no bother in the first place, and then rename the +#!!!: invalid fields with a trailing underscore... +#!!!: Maybe we could just overload the parser function ? + + +__author__ = "Pierre GF Gerard-Marchant" + +import sys +import warnings + +import numpy as np +import numpy.core.numerictypes as ntypes +from numpy.compat import basestring +from numpy import ( + bool_, dtype, ndarray, recarray, array as narray + ) +from numpy.core.records import ( + fromarrays as recfromarrays, fromrecords as recfromrecords + ) + +_byteorderconv = np.core.records._byteorderconv +_typestr = ntypes._typestr + +import numpy.ma as ma +from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array, \ + getdata, getmaskarray, filled + +_check_fill_value = ma.core._check_fill_value + + +__all__ = ['MaskedRecords', 'mrecarray', + 'fromarrays', 'fromrecords', 'fromtextfile', 'addfield', + ] + +reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] + +def _getformats(data): + "Returns the formats of each array of arraylist as a comma-separated string." + if hasattr(data, 'dtype'): + return ",".join([desc[1] for desc in data.dtype.descr]) + + formats = '' + for obj in data: + obj = np.asarray(obj) + formats += _typestr[obj.dtype.type] + if issubclass(obj.dtype.type, ntypes.flexible): + formats += repr(obj.itemsize) + formats += ',' + return formats[:-1] + +def _checknames(descr, names=None): + """Checks that the field names of the descriptor ``descr`` are not some +reserved keywords. If this is the case, a default 'f%i' is substituted. +If the argument `names` is not None, updates the field names to valid names. 
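Backtracking to `polyfit` in extras above: masked points are not dropped but neutralised, by turning the combined mask into a 0/1 weight vector for `np.polyfit`. A sketch with invented data lying on y = 2x + 1, where the masked outlier is ignored:

```python
import numpy as np

x = np.ma.array([0., 1., 2., 3., 4.], mask=[0, 0, 1, 0, 0])
y = np.array([1., 3., 99., 7., 9.])         # 99. sits under the mask in x
w = ~np.ma.getmaskarray(x)                  # weight 0 kills the masked point
print(np.polyfit(x.filled(0), y, 1, w=w))   # approx. [ 2.  1.]
```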
+ """ + ndescr = len(descr) + default_names = ['f%i' % i for i in range(ndescr)] + if names is None: + new_names = default_names + else: + if isinstance(names, (tuple, list)): + new_names = names + elif isinstance(names, str): + new_names = names.split(',') + else: + raise NameError("illegal input names %s" % repr(names)) + nnames = len(new_names) + if nnames < ndescr: + new_names += default_names[nnames:] + ndescr = [] + for (n, d, t) in zip(new_names, default_names, descr.descr): + if n in reserved_fields: + if t[0] in reserved_fields: + ndescr.append((d, t[1])) + else: + ndescr.append(t) + else: + ndescr.append((n, t[1])) + return np.dtype(ndescr) + + +def _get_fieldmask(self): + mdescr = [(n, '|b1') for n in self.dtype.names] + fdmask = np.empty(self.shape, dtype=mdescr) + fdmask.flat = tuple([False] * len(mdescr)) + return fdmask + + +class MaskedRecords(MaskedArray, object): + """ + +*IVariables*: + _data : {recarray} + Underlying data, as a record array. + _mask : {boolean array} + Mask of the records. A record is masked when all its fields are masked. + _fieldmask : {boolean recarray} + Record array of booleans, setting the mask of each individual field of each record. + _fill_value : {record} + Filling values for each field. + """ + #............................................ + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False, + mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, + copy=False, + **options): + # + self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) + # + mdtype = ma.make_mask_descr(self.dtype) + if mask is nomask or not np.size(mask): + if not keep_mask: + self._mask = tuple([False] * len(mdtype)) + else: + mask = np.array(mask, copy=copy) + if mask.shape != self.shape: + (nd, nm) = (self.size, mask.size) + if nm == 1: + mask = np.resize(mask, self.shape) + elif nm == nd: + mask = np.reshape(mask, self.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." + raise MAError(msg % (nd, nm)) + copy = True + if not keep_mask: + self.__setmask__(mask) + self._sharedmask = True + else: + if mask.dtype == mdtype: + _mask = mask + else: + _mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + self._mask = _mask + return self + #...................................................... + def __array_finalize__(self, obj): + # Make sure we have a _fieldmask by default .. + _mask = getattr(obj, '_mask', None) + if _mask is None: + objmask = getattr(obj, '_mask', nomask) + _dtype = ndarray.__getattribute__(self, 'dtype') + if objmask is nomask: + _mask = ma.make_mask_none(self.shape, dtype=_dtype) + else: + mdescr = ma.make_mask_descr(_dtype) + _mask = narray([tuple([m] * len(mdescr)) for m in objmask], + dtype=mdescr).view(recarray) + # Update some of the attributes + _dict = self.__dict__ + _dict.update(_mask=_mask) + self._update_from(obj) + if _dict['_baseclass'] == ndarray: + _dict['_baseclass'] = recarray + return + + + def _getdata(self): + "Returns the data as a recarray." 
+ return ndarray.view(self, recarray) + _data = property(fget=_getdata) + + def _getfieldmask(self): + "Alias to mask" + return self._mask + _fieldmask = property(fget=_getfieldmask) + + def __len__(self): + "Returns the length" + # We have more than one record + if self.ndim: + return len(self._data) + # We have only one record: return the nb of fields + return len(self.dtype) + + def __getattribute__(self, attr): + try: + return object.__getattribute__(self, attr) + except AttributeError: # attr must be a fieldname + pass + fielddict = ndarray.__getattribute__(self, 'dtype').fields + try: + res = fielddict[attr][:2] + except (TypeError, KeyError): + raise AttributeError("record array has no attribute %s" % attr) + # So far, so good... + _localdict = ndarray.__getattribute__(self, '__dict__') + _data = ndarray.view(self, _localdict['_baseclass']) + obj = _data.getfield(*res) + if obj.dtype.fields: + raise NotImplementedError("MaskedRecords is currently limited to"\ + "simple records...") + # Get some special attributes + # Reset the object's mask + hasmasked = False + _mask = _localdict.get('_mask', None) + if _mask is not None: + try: + _mask = _mask[attr] + except IndexError: + # Couldn't find a mask: use the default (nomask) + pass + hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any() + if (obj.shape or hasmasked): + obj = obj.view(MaskedArray) + obj._baseclass = ndarray + obj._isfield = True + obj._mask = _mask + # Reset the field values + _fill_value = _localdict.get('_fill_value', None) + if _fill_value is not None: + try: + obj._fill_value = _fill_value[attr] + except ValueError: + obj._fill_value = None + else: + obj = obj.item() + return obj + + + def __setattr__(self, attr, val): + "Sets the attribute attr to the value val." + # Should we call __setmask__ first ? + if attr in ['mask', 'fieldmask']: + self.__setmask__(val) + return + # Create a shortcut (so that we don't have to call getattr all the time) + _localdict = object.__getattribute__(self, '__dict__') + # Check whether we're creating a new field + newattr = attr not in _localdict + try: + # Is attr a generic attribute ? + ret = object.__setattr__(self, attr, val) + except: + # Not a generic attribute: exit if it's not a valid field + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = ndarray.__getattribute__(self, '_optinfo') or {} + if not (attr in fielddict or attr in optinfo): + exctype, value = sys.exc_info()[:2] + raise exctype(value) + else: + # Get the list of names ...... + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + # Check the attribute + if attr not in fielddict: + return ret + if newattr: # We just added this one + try: # or this setattr worked on an internal + # attribute. + object.__delattr__(self, attr) + except: + return ret + # Let's try to set the field + try: + res = fielddict[attr][:2] + except (TypeError, KeyError): + raise AttributeError("record array has no attribute %s" % attr) + # + if val is masked: + _fill_value = _localdict['_fill_value'] + if _fill_value is not None: + dval = _localdict['_fill_value'][attr] + else: + dval = val + mval = True + else: + dval = filled(val) + mval = getmaskarray(val) + obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) + _localdict['_mask'].__setitem__(attr, mval) + return obj + + + def __getitem__(self, indx): + """Returns all the fields sharing the same fieldname base. 
+The fieldname base is either `_data` or `_mask`.""" + _localdict = self.__dict__ + _mask = ndarray.__getattribute__(self, '_mask') + _data = ndarray.view(self, _localdict['_baseclass']) + # We want a field ........ + if isinstance(indx, basestring): + #!!!: Make sure _sharedmask is True to propagate back to _fieldmask + #!!!: Don't use _set_mask, there are some copies being made... + #!!!: ...that break propagation + #!!!: Don't force the mask to nomask, that wrecks easy masking + obj = _data[indx].view(MaskedArray) + obj._mask = _mask[indx] + obj._sharedmask = True + fval = _localdict['_fill_value'] + if fval is not None: + obj._fill_value = fval[indx] + # Force to masked if the mask is True + if not obj.ndim and obj._mask: + return masked + return obj + # We want some elements .. + # First, the data ........ + obj = np.array(_data[indx], copy=False).view(mrecarray) + obj._mask = np.array(_mask[indx], copy=False).view(recarray) + return obj + #.... + def __setitem__(self, indx, value): + "Sets the given record to value." + MaskedArray.__setitem__(self, indx, value) + if isinstance(indx, basestring): + self._mask[indx] = ma.getmaskarray(value) + + + def __str__(self): + "Calculates the string representation." + if self.size > 1: + mstr = ["(%s)" % ",".join([str(i) for i in s]) + for s in zip(*[getattr(self, f) for f in self.dtype.names])] + return "[%s]" % ", ".join(mstr) + else: + mstr = ["%s" % ",".join([str(i) for i in s]) + for s in zip([getattr(self, f) for f in self.dtype.names])] + return "(%s)" % ", ".join(mstr) + # + def __repr__(self): + "Calculates the repr representation." + _names = self.dtype.names + fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) + reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] + reprstr.insert(0, 'masked_records(') + reprstr.extend([fmt % (' fill_value', self.fill_value), + ' )']) + return str("\n".join(reprstr)) +# #...................................................... + def view(self, dtype=None, type=None): + """Returns a view of the mrecarray.""" + # OK, basic copy-paste from MaskedArray.view... + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + # Here again... 
+ elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + # OK, there's the change + except TypeError: + dtype = np.dtype(dtype) + # we need to revert to MaskedArray, but keeping the possibility + # ...of subclasses (eg, TimeSeriesRecords), so we'll force a type + # ...set to the first parent + if dtype.fields is None: + basetype = self.__class__.__bases__[0] + output = self.__array__().view(dtype, basetype) + output._update_from(self) + else: + output = ndarray.view(self, dtype) + output._fill_value = None + else: + output = ndarray.view(self, dtype, type) + # Update the mask, just like in MaskedArray.view + if (getattr(output, '_mask', nomask) is not nomask): + mdtype = ma.make_mask_descr(output.dtype) + output._mask = self._mask.view(mdtype, ndarray) + output._mask.shape = output.shape + return output + + def harden_mask(self): + "Forces the mask to hard" + self._hardmask = True + def soften_mask(self): + "Forces the mask to soft" + self._hardmask = False + + def copy(self): + """Returns a copy of the masked record.""" + _localdict = self.__dict__ + copied = self._data.copy().view(type(self)) + copied._mask = self._mask.copy() + return copied + + def tolist(self, fill_value=None): + """Copy the data portion of the array to a hierarchical python + list and returns that list. + + Data items are converted to the nearest compatible Python + type. Masked values are converted to fill_value. If + fill_value is None, the corresponding entries in the output + list will be ``None``. + + """ + if fill_value is not None: + return self.filled(fill_value).tolist() + result = narray(self.filled().tolist(), dtype=object) + mask = narray(self._mask.tolist()) + result[mask] = None + return result.tolist() + #-------------------------------------------- + # Pickling + def __getstate__(self): + """Return the internal state of the masked array, for pickling purposes. + + """ + state = (1, + self.shape, + self.dtype, + self.flags.fnc, + self._data.tobytes(), + self._mask.tobytes(), + self._fill_value, + ) + return state + # + def __setstate__(self, state): + """Restore the internal state of the masked array, for pickling purposes. + ``state`` is typically the output of the ``__getstate__`` output, and is a + 5-tuple: + + - class name + - a tuple giving the shape of the data + - a typecode for the data + - a binary string for the data + - a binary string for the mask. + + """ + (ver, shp, typ, isf, raw, msk, flv) = state + ndarray.__setstate__(self, (shp, typ, isf, raw)) + mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr]) + self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) + self.fill_value = flv + # + def __reduce__(self): + """Return a 3-tuple for pickling a MaskedArray. + + """ + return (_mrreconstruct, + (self.__class__, self._baseclass, (0,), 'b',), + self.__getstate__()) + +def _mrreconstruct(subtype, baseclass, baseshape, basetype,): + """Internal function that builds a new MaskedArray from the + information stored in a pickle. 
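The `__getstate__`/`__setstate__`/`__reduce__` triplet above mirrors the plain `MaskedArray` protocol: the data bytes, the mask bytes and the fill value travel together. A minimal round-trip sketch on a plain masked array (assumed to pickle the same way):

```python
import pickle
import numpy as np

a = np.ma.array([1, 2, 3], mask=[0, 1, 0], fill_value=999)
b = pickle.loads(pickle.dumps(a))
print(b)             # [1 -- 3]
print(b.fill_value)  # 999
```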
+ + """ + _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) +# _data._mask = ndarray.__new__(ndarray, baseshape, 'b1') +# return _data + _mask = ndarray.__new__(ndarray, baseshape, 'b1') + return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + + +mrecarray = MaskedRecords + +#####--------------------------------------------------------------------------- +#---- --- Constructors --- +#####--------------------------------------------------------------------------- + +def fromarrays(arraylist, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, + fill_value=None): + """Creates a mrecarray from a (flat) list of masked arrays. + + Parameters + ---------- + arraylist : sequence + A list of (masked) arrays. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None, integer}, optional + Number of records. If None, shape is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + """ + datalist = [getdata(x) for x in arraylist] + masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] + _array = recfromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) + _array._mask.flat = list(zip(*masklist)) + if fill_value is not None: + _array.fill_value = fill_value + return _array + + +#.............................................................................. +def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None, + fill_value=None, mask=nomask): + """Creates a MaskedRecords from a list of records. + + Parameters + ---------- + reclist : sequence + A list of records. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None,int}, optional + Number of records. If None, ``shape`` is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + mask : {nomask, sequence}, optional. + External mask to apply on the data. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + """ + # Grab the initial _fieldmask, if needed: + _mask = getattr(reclist, '_mask', None) + # Get the list of records..... 
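A hypothetical usage sketch for `fromarrays` above (field names and data are invented): each input column keeps its own mask in the record's per-field mask.

```python
import numpy as np
import numpy.ma.mrecords as mrecords

age = np.ma.array([25, 45], mask=[0, 1])
score = np.ma.array([1.5, 2.5], mask=[0, 0])
rec = mrecords.fromarrays([age, score], names='age,score')
print(rec.age)     # [25 --]
print(rec._mask)   # roughly [(False, False) (True, False)]
```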
+ try: + nfields = len(reclist[0]) + except TypeError: + nfields = len(reclist[0].dtype) + if isinstance(reclist, ndarray): + # Make sure we don't have some hidden mask + if isinstance(reclist, MaskedArray): + reclist = reclist.filled().view(ndarray) + # Grab the initial dtype, just in case + if dtype is None: + dtype = reclist.dtype + reclist = reclist.tolist() + mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, + aligned=aligned, byteorder=byteorder).view(mrecarray) + # Set the fill_value if needed + if fill_value is not None: + mrec.fill_value = fill_value + # Now, let's deal w/ the mask + if mask is not nomask: + mask = np.array(mask, copy=False) + maskrecordlength = len(mask.dtype) + if maskrecordlength: + mrec._mask.flat = mask + elif len(mask.shape) == 2: + mrec._mask.flat = [tuple(m) for m in mask] + else: + mrec.__setmask__(mask) + if _mask is not None: + mrec._mask[:] = _mask + return mrec + +def _guessvartypes(arr): + """Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise +conversion. Returns a list of dtypes. +The array is first converted to ndarray. If the array is 2D, the test is performed +on the first line. An exception is raised if the file is 3D or more. + """ + vartypes = [] + arr = np.asarray(arr) + if len(arr.shape) == 2 : + arr = arr[0] + elif len(arr.shape) > 2: + raise ValueError("The array should be 2D at most!") + # Start the conversion loop ....... + for f in arr: + try: + int(f) + except ValueError: + try: + float(f) + except ValueError: + try: + val = complex(f) + except ValueError: + vartypes.append(arr.dtype) + else: + vartypes.append(np.dtype(complex)) + else: + vartypes.append(np.dtype(float)) + else: + vartypes.append(np.dtype(int)) + return vartypes + +def openfile(fname): + "Opens the file handle of file `fname`" + # A file handle ................... + if hasattr(fname, 'readline'): + return fname + # Try to open the file and guess its type + try: + f = open(fname) + except IOError: + raise IOError("No such file: '%s'" % fname) + if f.readline()[:2] != "\\x": + f.seek(0, 0) + return f + f.close() + raise NotImplementedError("Wow, binary file") + + +def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', + varnames=None, vartypes=None): + """Creates a mrecarray from data stored in the file `filename`. + + Parameters + ---------- + filename : {file name/handle} + Handle of an opened file. + delimitor : {None, string}, optional + Alphanumeric character used to separate columns in the file. + If None, any (group of) white spacestring(s) will be used. + commentchar : {'#', string}, optional + Alphanumeric character used to mark the start of a comment. + missingchar : {'', string}, optional + String indicating missing data, and used to create the masks. + varnames : {None, sequence}, optional + Sequence of the variable names. If None, a list will be created from + the first non empty line of the file. + vartypes : {None, sequence}, optional + Sequence of the variables dtypes. If None, it will be estimated from + the first non-commented line. + + + Ultra simple: the varnames are in the header, one line""" + # Try to open the file ...................... 
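The dtype guessing in `_guessvartypes` above is a simple chain of attempted conversions, one token at a time; a pure-Python sketch of the fallback order:

```python
def guess_one(token):
    # mirrors the int -> float -> complex -> string fallback above
    for cast in (int, float, complex):
        try:
            cast(token)
            return cast.__name__
        except ValueError:
            continue
    return 'str'

print([guess_one(t) for t in ('42', '4.2', '4+2j', 'spam')])
# -> ['int', 'float', 'complex', 'str']
```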
+    f = openfile(fname)
+
+    # Get the first non-empty line as the varnames
+    while True:
+        line = f.readline()
+        firstline = line[:line.find(commentchar)].strip()
+        _varnames = firstline.split(delimitor)
+        if len(_varnames) > 1:
+            break
+    if varnames is None:
+        varnames = _varnames
+
+    # Get the data ..............................
+    _variables = masked_array([line.strip().split(delimitor) for line in f
+                               if line[0] != commentchar and len(line) > 1])
+    (_, nfields) = _variables.shape
+    f.close()
+
+    # Try to guess the dtype ....................
+    if vartypes is None:
+        vartypes = _guessvartypes(_variables[0])
+    else:
+        vartypes = [np.dtype(v) for v in vartypes]
+        if len(vartypes) != nfields:
+            msg = "Attempting to set %i dtypes for %i fields!"
+            msg += " Reverting to default."
+            warnings.warn(msg % (len(vartypes), nfields))
+            vartypes = _guessvartypes(_variables[0])
+
+    # Construct the descriptor ..................
+    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
+    mfillv = [ma.default_fill_value(f) for f in vartypes]
+
+    # Get the data and the mask .................
+    # We just need a list of masked_arrays. It's easier to create it like that:
+    _mask = (_variables.T == missingchar)
+    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
+                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
+
+    return fromarrays(_datalist, dtype=mdescr)
+
+#....................................................................
+def addfield(mrecord, newfield, newfieldname=None):
+    """Adds a new field to the masked record array, using `newfield` as data
+and `newfieldname` as name. If `newfieldname` is None, the new field name is
+set to 'fi', where `i` is the number of existing fields.
+    """
+    _data = mrecord._data
+    _mask = mrecord._mask
+    if newfieldname is None or newfieldname in reserved_fields:
+        newfieldname = 'f%i' % len(_data.dtype)
+    newfield = ma.array(newfield)
+    # Get the new data ............
+    # Create a new empty recarray
+    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
+    newdata = recarray(_data.shape, newdtype)
+    # Add the existing field
+    [newdata.setfield(_data.getfield(*f), *f)
+     for f in _data.dtype.fields.values()]
+    # Add the new field
+    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
+    newdata = newdata.view(MaskedRecords)
+    # Get the new mask .............
+ # Create a new empty recarray + newmdtype = np.dtype([(n, bool_) for n in newdtype.names]) + newmask = recarray(_data.shape, newmdtype) + # Add the old masks + [newmask.setfield(_mask.getfield(*f), *f) + for f in _mask.dtype.fields.values()] + # Add the mask of the new field + newmask.setfield(getmaskarray(newfield), + *newmask.dtype.fields[newfieldname]) + newdata._mask = newmask + return newdata diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/setup.py new file mode 100644 index 0000000000000..5486ff46a21ab --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/setup.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" +__version__ = '1.0' +__revision__ = "$Revision: 3473 $" +__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' + +import os + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('ma', parent_package, top_path) + config.add_data_dir('tests') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + config = configuration(top_path='').todict() + setup(**config) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_core.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_core.py new file mode 100644 index 0000000000000..34951875d3747 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_core.py @@ -0,0 +1,3684 @@ +# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102 +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +""" +from __future__ import division, absolute_import, print_function + +__author__ = "Pierre GF Gerard-Marchant" + +import warnings +import sys +import pickle +from functools import reduce + +from nose.tools import assert_raises + +import numpy as np +import numpy.ma.core +import numpy.core.fromnumeric as fromnumeric +from numpy import ndarray +from numpy.ma.testutils import * +from numpy.ma.core import * +from numpy.compat import asbytes, asbytes_nested + +pi = np.pi + + +#.............................................................................. +class TestMaskedArray(TestCase): + # Base test class for MaskedArrays. + + def setUp(self): + # Base data definition. + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1e+20, x) + xm.set_fill_value(1e+20) + self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + + def test_basicattributes(self): + # Tests some basic array attributes. 
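Rounding off the mrecords module: a hypothetical sketch of `addfield` above, appending a masked column to a one-field record array (per the docstring, the default name for the new field is 'f1' here):

```python
import numpy as np
import numpy.ma.mrecords as mrecords

base = mrecords.fromarrays([np.ma.array([1, 2], mask=[0, 1])], names='a')
extended = mrecords.addfield(base, np.ma.array([10., 20.], mask=[1, 0]))
print(extended.dtype.names)   # ('a', 'f1')
print(extended.f1)            # [-- 20.0]
```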
+ a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a.ndim, 1) + assert_equal(b.ndim, 1) + assert_equal(a.size, 3) + assert_equal(b.size, 3) + assert_equal(a.shape, (3,)) + assert_equal(b.shape, (3,)) + + def test_basic0d(self): + # Checks masking a scalar + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0, mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + x = array(0, mask=1) + self.assertTrue(x.filled().dtype is x._data.dtype) + + def test_basic1d(self): + # Test of basic array creation and properties in 1 dimension. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + self.assertTrue(not isMaskedArray(x)) + self.assertTrue(isMaskedArray(xm)) + self.assertTrue((xm - ym).filled(0).any()) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) + s = x.shape + assert_equal(np.shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(zm.dtype, z.dtype) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_array_equal(xm, xf) + assert_array_equal(filled(xm, 1.e20), xf) + assert_array_equal(x, xm) + + def test_basic2d(self): + # Test of basic array creation and properties in 2 dimensions. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + # + self.assertTrue(not isMaskedArray(x)) + self.assertTrue(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm, xf) + assert_equal(filled(xm, 1.e20), xf) + assert_equal(x, xm) + + def test_concatenate_basic(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # basic concatenation + assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) + assert_equal(np.concatenate((x, y)), concatenate((x, y))) + assert_equal(np.concatenate((x, y)), concatenate((xm, y))) + assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) + + def test_concatenate_alongaxis(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # Concatenation along an axis + s = (3, 4) + x.shape = y.shape = xm.shape = ym.shape = s + assert_equal(xm.mask, np.reshape(m1, s)) + assert_equal(ym.mask, np.reshape(m2, s)) + xmym = concatenate((xm, ym), 1) + assert_equal(np.concatenate((x, y), 1), xmym) + assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) + # + x = zeros(2) + y = array(ones(2), mask=[False, True]) + z = concatenate((x, y)) + assert_array_equal(z, [0, 0, 1, 1]) + assert_array_equal(z.mask, [False, False, False, True]) + z = concatenate((y, x)) + assert_array_equal(z, [1, 1, 0, 0]) + assert_array_equal(z.mask, [False, True, False, False]) + + def test_concatenate_flexible(self): + # Tests the concatenation on flexible arrays. 
+ data = masked_array(list(zip(np.random.rand(10), + np.arange(10))), + dtype=[('a', float), ('b', int)]) + # + test = concatenate([data[:5], data[5:]]) + assert_equal_records(test, data) + + def test_creation_ndmin(self): + # Check the use of ndmin + x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) + assert_equal(x.shape, (1, 3)) + assert_equal(x._data, [[1, 2, 3]]) + assert_equal(x._mask, [[1, 0, 0]]) + + def test_creation_ndmin_from_maskedarray(self): + # Make sure we're not losing the original mask w/ ndmin + x = array([1, 2, 3]) + x[-1] = masked + xx = array(x, ndmin=2, dtype=float) + assert_equal(x.shape, x._mask.shape) + assert_equal(xx.shape, xx._mask.shape) + + def test_creation_maskcreation(self): + # Tests how masks are initialized at the creation of Maskedarrays. + data = arange(24, dtype=float) + data[[3, 6, 15]] = masked + dma_1 = MaskedArray(data) + assert_equal(dma_1.mask, data.mask) + dma_2 = MaskedArray(dma_1) + assert_equal(dma_2.mask, dma_1.mask) + dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) + fail_if_equal(dma_3.mask, dma_1.mask) + + def test_creation_with_list_of_maskedarrays(self): + # Tests creaating a masked array from alist of masked arrays. + x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) + # + x.mask = nomask + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + self.assertTrue(data.mask is nomask) + + def test_asarray(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm.fill_value = -9999 + xm._hardmask = True + xmm = asarray(xm) + assert_equal(xmm._data, xm._data) + assert_equal(xmm._mask, xm._mask) + assert_equal(xmm.fill_value, xm.fill_value) + assert_equal(xmm._hardmask, xm._hardmask) + + def test_fix_invalid(self): + # Checks fix_invalid. + with np.errstate(invalid='ignore'): + data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) + data_fixed = fix_invalid(data) + assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) + assert_equal(data_fixed._mask, [1., 0., 1.]) + + def test_maskedelement(self): + # Test of masked element + x = arange(6) + x[1] = masked + self.assertTrue(str(masked) == '--') + self.assertTrue(x[1] is masked) + assert_equal(filled(x[1], 0), 0) + # don't know why these should raise an exception... 
+ #self.assertRaises(Exception, lambda x,y: x+y, masked, masked) + #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) + #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) + #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) + + def test_set_element_as_object(self): + # Tests setting elements with object + a = empty(1, dtype=object) + x = (1, 2, 3, 4, 5) + a[0] = x + assert_equal(a[0], x) + self.assertTrue(a[0] is x) + # + import datetime + dt = datetime.datetime.now() + a[0] = dt + self.assertTrue(a[0] is dt) + + def test_indexing(self): + # Tests conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + junk, garbage = str(x2), repr(x2) + assert_equal(np.sort(x1), sort(x2, endwith=False)) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_equal(x1[2], x2[2]) + assert_equal(x1[2:5], x2[2:5]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[2] = 9 + x2[2] = 9 + assert_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + assert_equal(x1, x2) + x2[1] = masked + assert_equal(x1, x2) + x2[1:3] = masked + assert_equal(x1, x2) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_equal(3.0, x2.fill_value) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_copy(self): + # Tests of some subtle points of copying and sizing. 
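+        # array(x, mask=m) reuses the data and mask buffers by default
+        # (copy=False), which is why the checks below compare
+        # __array_interface__ pointers rather than requiring identity.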
+ n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + self.assertTrue(m is m2) + m3 = make_mask(m, copy=1) + self.assertTrue(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + #self.assertTrue( y1._data is x1) + assert_equal(y1._data.__array_interface__, x1.__array_interface__) + self.assertTrue(allequal(x1, y1.data)) + #self.assertTrue( y1.mask is m) + assert_equal(y1._mask.__array_interface__, m.__array_interface__) + + y1a = array(y1) + self.assertTrue(y1a._data.__array_interface__ == + y1._data.__array_interface__) + self.assertTrue(y1a.mask is y1.mask) + + y2 = array(x1, mask=m) + self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__) + #self.assertTrue( y2.mask is m) + self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__) + self.assertTrue(y2[2] is masked) + y2[2] = 9 + self.assertTrue(y2[2] is not masked) + #self.assertTrue( y2.mask is not m) + self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__) + self.assertTrue(allequal(y2.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_equal(concatenate([x4, x4]), y4) + assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = repeat(x4, 2, axis=0) + assert_equal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert_equal(y5, y7) + y8 = x4.repeat(2, 0) + assert_equal(y5, y8) + + y9 = x4.copy() + assert_equal(y9._data, x4._data) + assert_equal(y9._mask, x4._mask) + # + x = masked_array([1, 2, 3], mask=[0, 1, 0]) + # Copy is False by default + y = masked_array(x) + assert_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) + y = masked_array(x, copy=True) + assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + + def test_deepcopy(self): + from copy import deepcopy + a = array([0, 1, 2], mask=[False, True, False]) + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + assert_not_equal(id(a._mask), id(copied._mask)) + # + copied[1] = 1 + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + # + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + copied.mask[1] = False + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + def test_str_repr(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' + ' mask = [False True False],\n' + ' fill_value = 999999)\n') + + def test_pickling(self): + # Tests pickling + a = arange(10) + a[::3] = masked + a.fill_value = 999 + a_pickled = pickle.loads(a.dumps()) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled._data, a._data) + assert_equal(a_pickled.fill_value, 999) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + a_pickled = pickle.loads(a.dumps()) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + self.assertTrue(isinstance(a_pickled._data, np.matrix)) + + def test_pickling_maskedconstant(self): + # Test pickling MaskedConstant + mc = np.ma.masked + mc_pickled = pickle.loads(mc.dumps()) + assert_equal(mc_pickled._baseclass, mc._baseclass) + assert_equal(mc_pickled._mask, mc._mask) + assert_equal(mc_pickled._data, mc._data) + + def 
test_pickling_wstructured(self): + # Tests pickling w/ structured array + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + a_pickled = pickle.loads(a.dumps()) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + + def test_pickling_keepalignment(self): + # Tests pickling w/ F_CONTIGUOUS arrays + a = arange(10) + a.shape = (-1, 2) + b = a.T + test = pickle.loads(pickle.dumps(b)) + assert_equal(test, b) + + def test_single_element_subscript(self): + # Tests single element subscripts of Maskedarrays. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_topython(self): + # Tests some communication issues with Python. + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + self.assertRaises(TypeError, float, array([1, 1])) + # + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + assert_(np.isnan(float(array([1], mask=[1])))) + # + a = array([1, 2, 3], mask=[1, 0, 0]) + self.assertRaises(TypeError, lambda:float(a)) + assert_equal(float(a[-1]), 3.) + self.assertTrue(np.isnan(float(a[0]))) + self.assertRaises(TypeError, int, a) + assert_equal(int(a[-1]), 3) + self.assertRaises(MAError, lambda:int(a[0])) + + def test_oddfeatures_1(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_equal(z.real, x) + assert_equal(z.imag, 10 * x) + assert_equal((z * conjugate(z)).real, 101 * x * x) + z.imag[...] = 0.0 + # + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + # + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_equal(x, z) + + def test_oddfeatures_2(self): + # Tests some more features. 
+ x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + def test_oddfeatures_3(self): + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) + + def test_filled_w_object_dtype(self): + a = np.ma.masked_all(1, dtype='O') + assert_equal(a.filled('x')[0], 'x') + + def test_filled_w_flexible_dtype(self): + # Test filled w/ flexible dtype + flexi = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + flexi[0] = masked + assert_equal(flexi.filled(), + np.array([(default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),)], dtype=flexi.dtype)) + flexi[0] = masked + assert_equal(flexi.filled(1), + np.array([(1, '1', 1.)], dtype=flexi.dtype)) + + def test_filled_w_mvoid(self): + # Test filled w/ mvoid + ndtype = [('a', int), ('b', float)] + a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) + # Filled using default + test = a.filled() + assert_equal(tuple(test), (1, default_fill_value(1.))) + # Explicit fill_value + test = a.filled((-1, -1)) + assert_equal(tuple(test), (1, -1)) + # Using predefined filling values + a.fill_value = (-999, -999) + assert_equal(tuple(a.filled()), (1, -999)) + + def test_filled_w_nested_dtype(self): + # Test filled w/ nested dtype + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + # + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + def test_filled_w_f_order(self): + # Test filled w/ F-contiguous array + a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), + mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), + order='F') # this is currently ignored + self.assertTrue(a.flags['F_CONTIGUOUS']) + self.assertTrue(a.filled(0).flags['F_CONTIGUOUS']) + + def test_optinfo_propagation(self): + # Checks that _optinfo dictionary isn't back-propagated + x = array([1, 2, 3, ], dtype=float) + x._optinfo['info'] = '???' + y = x.copy() + assert_equal(y._optinfo['info'], '???') + y._optinfo['info'] = '!!!' + assert_equal(x._optinfo['info'], '???') + + def test_fancy_printoptions(self): + # Test printing a masked array w/ fancy dtype. 
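+        # Masked slots render as masked_print_option ('--' by default),
+        # even inside a nested field of a structured dtype.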
+ fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + def test_flatten_structured_array(self): + # Test flatten_structured_array on arrays + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + def test_void0d(self): + # Test creating a mvoid object + ndtype = [('a', int), ('b', int)] + a = np.array([(1, 2,)], dtype=ndtype)[0] + f = mvoid(a) + assert_(isinstance(f, mvoid)) + # + a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] + assert_(isinstance(a, mvoid)) + # + a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + f = mvoid(a._data[0], a._mask[0]) + assert_(isinstance(f, mvoid)) + + def test_mvoid_getitem(self): + # Test mvoid.__getitem__ + ndtype = [('a', int), ('b', int)] + a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], + dtype=ndtype) + # w/o mask + f = a[0] + self.assertTrue(isinstance(f, mvoid)) + assert_equal((f[0], f['a']), (1, 1)) + assert_equal(f['b'], 2) + # w/ mask + f = a[1] + self.assertTrue(isinstance(f, mvoid)) + self.assertTrue(f[0] is masked) + self.assertTrue(f['a'] is masked) + assert_equal(f[1], 4) + + def test_mvoid_iter(self): + # Test iteration on __getitem__ + ndtype = [('a', int), ('b', int)] + a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], + dtype=ndtype) + # w/o mask + assert_equal(list(a[0]), [1, 2]) + # w/ mask + assert_equal(list(a[1]), [masked, 4]) + + def test_mvoid_print(self): + # Test printing a mvoid + mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) + assert_equal(str(mx[0]), "(1, 1)") + mx['b'][0] = masked + ini_display = masked_print_option._display + masked_print_option.set_display("-X-") + try: + assert_equal(str(mx[0]), "(1, -X-)") + assert_equal(repr(mx[0]), "(1, -X-)") + finally: + masked_print_option.set_display(ini_display) + + +#------------------------------------------------------------------------------ +class TestMaskedArrayArithmetic(TestCase): + # Base test class for MaskedArrays. + + def setUp(self): + # Base data definition. 
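+        # self.d bundles the fixture: raw data (x, y), a scalar (a10), the
+        # masks (m1, m2), their masked arrays (xm, ym), small data for the
+        # inverse trig tests (z, zm), and xf, which is x with the masked
+        # slots overwritten by the 1e+20 fill value.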
+        x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+        y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+        a10 = 10.
+        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+        xm = masked_array(x, mask=m1)
+        ym = masked_array(y, mask=m2)
+        z = np.array([-.5, 0., .5, .8])
+        zm = masked_array(z, mask=[0, 1, 0, 0])
+        xf = np.where(m1, 1e+20, x)
+        xm.set_fill_value(1e+20)
+        self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
+        self.err_status = np.geterr()
+        np.seterr(divide='ignore', invalid='ignore')
+
+    def tearDown(self):
+        np.seterr(**self.err_status)
+
+    def test_basic_arithmetic(self):
+        # Test of basic arithmetic.
+        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+        a2d = array([[1, 2], [0, 4]])
+        a2dm = masked_array(a2d, [[0, 0], [1, 0]])
+        assert_equal(a2d * a2d, a2d * a2dm)
+        assert_equal(a2d + a2d, a2d + a2dm)
+        assert_equal(a2d - a2d, a2d - a2dm)
+        for s in [(12,), (4, 3), (2, 6)]:
+            x = x.reshape(s)
+            y = y.reshape(s)
+            xm = xm.reshape(s)
+            ym = ym.reshape(s)
+            xf = xf.reshape(s)
+            assert_equal(-x, -xm)
+            assert_equal(x + y, xm + ym)
+            assert_equal(x - y, xm - ym)
+            assert_equal(x * y, xm * ym)
+            assert_equal(x / y, xm / ym)
+            assert_equal(a10 + y, a10 + ym)
+            assert_equal(a10 - y, a10 - ym)
+            assert_equal(a10 * y, a10 * ym)
+            assert_equal(a10 / y, a10 / ym)
+            assert_equal(x + a10, xm + a10)
+            assert_equal(x - a10, xm - a10)
+            assert_equal(x * a10, xm * a10)
+            assert_equal(x / a10, xm / a10)
+            assert_equal(x ** 2, xm ** 2)
+            assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
+            assert_equal(x ** y, xm ** ym)
+            assert_equal(np.add(x, y), add(xm, ym))
+            assert_equal(np.subtract(x, y), subtract(xm, ym))
+            assert_equal(np.multiply(x, y), multiply(xm, ym))
+            assert_equal(np.divide(x, y), divide(xm, ym))
+
+    def test_divide_on_different_shapes(self):
+        x = arange(6, dtype=float)
+        x.shape = (2, 3)
+        y = arange(3, dtype=float)
+        #
+        z = x / y
+        assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
+        assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
+        #
+        z = x / y[None,:]
+        assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
+        assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
+        #
+        y = arange(2, dtype=float)
+        z = x / y[:, None]
+        assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
+        assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
+
+    def test_mixed_arithmetic(self):
+        # Tests mixed arithmetic.
+        na = np.array([1])
+        ma = array([1])
+        self.assertTrue(isinstance(na + ma, MaskedArray))
+        self.assertTrue(isinstance(ma + na, MaskedArray))
+
+    def test_limits_arithmetic(self):
+        tiny = np.finfo(float).tiny
+        a = array([tiny, 1. / tiny, 0.])
+        assert_equal(getmaskarray(a / 2), [0, 0, 0])
+        assert_equal(getmaskarray(2 / a), [1, 0, 1])
+
+    def test_masked_singleton_arithmetic(self):
+        # Tests some scalar arithmetic on MaskedArrays.
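+        # The masked constant propagates through arithmetic much like NaN
+        # does in plain ufuncs: whatever the other operand, the result
+        # stays masked.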
+        # Masked singleton should remain masked no matter what
+        xm = array(0, mask=1)
+        self.assertTrue((1 / array(0)).mask)
+        self.assertTrue((1 + xm).mask)
+        self.assertTrue((-xm).mask)
+        self.assertTrue(maximum(xm, xm).mask)
+        self.assertTrue(minimum(xm, xm).mask)
+
+    def test_masked_singleton_equality(self):
+        # Tests (in)equality on the masked singleton
+        a = array([1, 2, 3], mask=[1, 1, 0])
+        assert_((a[0] == 0) is masked)
+        assert_((a[0] != 0) is masked)
+        assert_equal((a[-1] == 0), False)
+        assert_equal((a[-1] != 0), True)
+
+    def test_arithmetic_with_masked_singleton(self):
+        # Checks that there's no collapsing to masked
+        x = masked_array([1, 2])
+        y = x * masked
+        assert_equal(y.shape, x.shape)
+        assert_equal(y._mask, [True, True])
+        y = x[0] * masked
+        assert_(y is masked)
+        y = x + masked
+        assert_equal(y.shape, x.shape)
+        assert_equal(y._mask, [True, True])
+
+    def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
+        # Check that we're not losing the shape of a singleton
+        x = masked_array([1, ])
+        y = x + masked
+        assert_equal(y.shape, x.shape)
+        assert_equal(y.mask, [True, ])
+
+    def test_scalar_arithmetic(self):
+        x = array(0, mask=0)
+        assert_equal(x.filled().ctypes.data, x.ctypes.data)
+        # Make sure we don't lose the shape in some circumstances
+        xm = array((0, 0)) / 0.
+        assert_equal(xm.shape, (2,))
+        assert_equal(xm.mask, [1, 1])
+
+    def test_basic_ufuncs(self):
+        # Test various functions such as sin, cos.
+        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+        assert_equal(np.cos(x), cos(xm))
+        assert_equal(np.cosh(x), cosh(xm))
+        assert_equal(np.sin(x), sin(xm))
+        assert_equal(np.sinh(x), sinh(xm))
+        assert_equal(np.tan(x), tan(xm))
+        assert_equal(np.tanh(x), tanh(xm))
+        assert_equal(np.sqrt(abs(x)), sqrt(xm))
+        assert_equal(np.log(abs(x)), log(xm))
+        assert_equal(np.log10(abs(x)), log10(xm))
+        assert_equal(np.exp(x), exp(xm))
+        assert_equal(np.arcsin(z), arcsin(zm))
+        assert_equal(np.arccos(z), arccos(zm))
+        assert_equal(np.arctan(z), arctan(zm))
+        assert_equal(np.arctan2(x, y), arctan2(xm, ym))
+        assert_equal(np.absolute(x), absolute(xm))
+        assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
+        assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
+        assert_equal(np.equal(x, y), equal(xm, ym))
+        assert_equal(np.not_equal(x, y), not_equal(xm, ym))
+        assert_equal(np.less(x, y), less(xm, ym))
+        assert_equal(np.greater(x, y), greater(xm, ym))
+        assert_equal(np.less_equal(x, y), less_equal(xm, ym))
+        assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
+        assert_equal(np.conjugate(x), conjugate(xm))
+
+    def test_count_func(self):
+        # Tests count
+        assert_equal(1, count(1))
+        assert_equal(0, array(1, mask=[1]))
+
+        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+        res = count(ott)
+        self.assertTrue(res.dtype.type is np.intp)
+        assert_equal(3, res)
+
+        ott = ott.reshape((2, 2))
+        res = count(ott)
+        assert_(res.dtype.type is np.intp)
+        assert_equal(3, res)
+        res = count(ott, 0)
+        assert_(isinstance(res, ndarray))
+        assert_equal([1, 2], res)
+        assert_(getmask(res) is nomask)
+
+        ott = array([0., 1., 2., 3.])
+        res = count(ott, 0)
+        assert_(isinstance(res, ndarray))
+        assert_(res.dtype.type is np.intp)
+
+        assert_raises(IndexError, ott.count, 1)
+
+    def test_minmax_func(self):
+        # Tests minimum and maximum.
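+        # minimum/maximum simply skip masked entries, e.g.
+        # array([3, 1, 2], mask=[0, 1, 0]).min() gives 2.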
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # max doesn't work if shaped + xr = np.ravel(x) + xmr = ravel(xm) + # following are true because of careful selection of data + assert_equal(max(xr), maximum(xmr)) + assert_equal(min(xr), minimum(xmr)) + # + assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]) + assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]) + x = arange(5) + y = arange(5) - 2 + x[3] = masked + y[0] = masked + assert_equal(minimum(x, y), where(less(x, y), x, y)) + assert_equal(maximum(x, y), where(greater(x, y), x, y)) + assert_(minimum(x) == 0) + assert_(maximum(x) == 4) + # + x = arange(4).reshape(2, 2) + x[-1, -1] = masked + assert_equal(maximum(x), 2) + + def test_minimummaximum_func(self): + a = np.ones((2, 2)) + aminimum = minimum(a, a) + self.assertTrue(isinstance(aminimum, MaskedArray)) + assert_equal(aminimum, np.minimum(a, a)) + # + aminimum = minimum.outer(a, a) + self.assertTrue(isinstance(aminimum, MaskedArray)) + assert_equal(aminimum, np.minimum.outer(a, a)) + # + amaximum = maximum(a, a) + self.assertTrue(isinstance(amaximum, MaskedArray)) + assert_equal(amaximum, np.maximum(a, a)) + # + amaximum = maximum.outer(a, a) + self.assertTrue(isinstance(amaximum, MaskedArray)) + assert_equal(amaximum, np.maximum.outer(a, a)) + + def test_minmax_reduce(self): + # Test np.min/maximum.reduce on array w/ full False mask + a = array([1, 2, 3], mask=[False, False, False]) + b = np.maximum.reduce(a) + assert_equal(b, 3) + + def test_minmax_funcs_with_output(self): + # Tests the min/max functions with explicit outputs + mask = np.random.rand(12).round() + xm = array(np.random.uniform(0, 10, 12), mask=mask) + xm.shape = (3, 4) + for funcname in ('min', 'max'): + # Initialize + npfunc = getattr(np, funcname) + mafunc = getattr(numpy.ma.core, funcname) + # Use the np version + nout = np.empty((4,), dtype=int) + try: + result = npfunc(xm, axis=0, out=nout) + except MaskError: + pass + nout = np.empty((4,), dtype=float) + result = npfunc(xm, axis=0, out=nout) + self.assertTrue(result is nout) + # Use the ma version + nout.fill(-999) + result = mafunc(xm, axis=0, out=nout) + self.assertTrue(result is nout) + + def test_minmax_methods(self): + # Additional tests on max/min + (_, _, _, _, _, xm, _, _, _, _) = self.d + xm.shape = (xm.size,) + assert_equal(xm.max(), 10) + self.assertTrue(xm[0].max() is masked) + self.assertTrue(xm[0].max(0) is masked) + self.assertTrue(xm[0].max(-1) is masked) + assert_equal(xm.min(), -10.) + self.assertTrue(xm[0].min() is masked) + self.assertTrue(xm[0].min(0) is masked) + self.assertTrue(xm[0].min(-1) is masked) + assert_equal(xm.ptp(), 20.) + self.assertTrue(xm[0].ptp() is masked) + self.assertTrue(xm[0].ptp(0) is masked) + self.assertTrue(xm[0].ptp(-1) is masked) + # + x = array([1, 2, 3], mask=True) + self.assertTrue(x.min() is masked) + self.assertTrue(x.max() is masked) + self.assertTrue(x.ptp() is masked) + + def test_addsumprod(self): + # Tests add, sum, product. 
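+        # sum and product treat masked slots as their identity element
+        # (0 and 1 respectively), so the results can be checked against
+        # np.sum/np.product of the filled data below.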
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_equal(np.add.reduce(x), add.reduce(x)) + assert_equal(np.add.accumulate(x), add.accumulate(x)) + assert_equal(4, sum(array(4), axis=0)) + assert_equal(4, sum(array(4), axis=0)) + assert_equal(np.sum(x, axis=0), sum(x, axis=0)) + assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)) + assert_equal(np.sum(x, 0), sum(x, 0)) + assert_equal(np.product(x, axis=0), product(x, axis=0)) + assert_equal(np.product(x, 0), product(x, 0)) + assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0)) + s = (3, 4) + x.shape = y.shape = xm.shape = ym.shape = s + if len(s) > 1: + assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) + assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) + assert_equal(np.sum(x, 1), sum(x, 1)) + assert_equal(np.product(x, 1), product(x, 1)) + + def test_binops_d2D(self): + # Test binary operations on 2D data + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + # + test = a * b + control = array([[2., 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b * a + control = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a * b + control = array([[2, 3], [8, 10], [18, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b * a + control = array([[2, 3], [8, 10], [18, 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_domained_binops_d2D(self): + # Test domained binary operations on 2D data + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + # + test = a / b + control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b / a + control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b / a + control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_noshrinking(self): + # Check that we don't shrink a mask when not wanted + # Binary operations + a = masked_array([1., 2., 3.], mask=[False, False, False], + shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. 
+        assert_equal(b.mask, [0, 0, 0])
+        # In place binary operation
+        a /= 1.
+        assert_equal(a.mask, [0, 0, 0])
+
+    def test_mod(self):
+        # Tests mod
+        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+        assert_equal(mod(x, y), mod(xm, ym))
+        test = mod(ym, xm)
+        assert_equal(test, np.mod(ym, xm))
+        assert_equal(test.mask, mask_or(xm.mask, ym.mask))
+        test = mod(xm, ym)
+        assert_equal(test, np.mod(xm, ym))
+        assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
+
+    def test_TakeTransposeInnerOuter(self):
+        # Test of take, transpose, inner, outer products
+        x = arange(24)
+        y = np.arange(24)
+        x[5:6] = masked
+        x = x.reshape(2, 3, 4)
+        y = y.reshape(2, 3, 4)
+        assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
+        assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
+        assert_equal(np.inner(filled(x, 0), filled(y, 0)),
+                     inner(x, y))
+        assert_equal(np.outer(filled(x, 0), filled(y, 0)),
+                     outer(x, y))
+        y = array(['abc', 1, 'def', 2, 3], object)
+        y[2] = masked
+        t = take(y, [0, 3, 4])
+        assert_(t[0] == 'abc')
+        assert_(t[1] == 2)
+        assert_(t[2] == 3)
+
+    def test_imag_real(self):
+        # Check complex
+        xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
+        assert_equal(xx.imag, [10, 2])
+        assert_equal(xx.imag.filled(), [1e+20, 2])
+        assert_equal(xx.imag.dtype, xx._data.imag.dtype)
+        assert_equal(xx.real, [1, 20])
+        assert_equal(xx.real.filled(), [1e+20, 20])
+        assert_equal(xx.real.dtype, xx._data.real.dtype)
+
+    def test_methods_with_output(self):
+        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+        xm[:, 0] = xm[0] = xm[-1, -1] = masked
+        #
+        funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
+        #
+        for funcname in funclist:
+            npfunc = getattr(np, funcname)
+            xmmeth = getattr(xm, funcname)
+            # An ndarray as explicit input
+            output = np.empty(4, dtype=float)
+            output.fill(-9999)
+            result = npfunc(xm, axis=0, out=output)
+            # ... the result should be the given output
+            assert_(result is output)
+            assert_equal(result, xmmeth(axis=0, out=output))
+            #
+            output = empty(4, dtype=int)
+            result = xmmeth(axis=0, out=output)
+            assert_(result is output)
+            assert_(output[0] is masked)
+
+    def test_eq_on_structured(self):
+        # Test the equality of structured arrays
+        ndtype = [('A', int), ('B', int)]
+        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
+        test = (a == a)
+        assert_equal(test, [True, True])
+        assert_equal(test.mask, [False, False])
+        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
+        test = (a == b)
+        assert_equal(test, [False, True])
+        assert_equal(test.mask, [True, False])
+        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
+        test = (a == b)
+        assert_equal(test, [True, False])
+        assert_equal(test.mask, [False, False])
+
+    def test_ne_on_structured(self):
+        # Test the inequality of structured arrays
+        ndtype = [('A', int), ('B', int)]
+        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
+        test = (a != a)
+        assert_equal(test, [False, False])
+        assert_equal(test.mask, [False, False])
+        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
+        test = (a != b)
+        assert_equal(test, [True, False])
+        assert_equal(test.mask, [True, False])
+        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
+        test = (a != b)
+        assert_equal(test, [False, True])
+        assert_equal(test.mask, [False, False])
+
+    def test_eq_w_None(self):
+        # Really, comparisons with None should not be done, but
+        # check them anyway
+        # With partial mask
+        a = array([1, 2], mask=[0, 1])
+        assert_equal(a == None, False)
+        assert_equal(a.data == None, False)
+        assert_equal(a.mask == None, False)
+        assert_equal(a != None, True)
+        # With nomask
+        a = array([1, 2], mask=False)
+        assert_equal(a == None, False)
+        assert_equal(a != None, True)
+        # With complete mask
+        a = array([1, 2], mask=True)
+        assert_equal(a == None, False)
+        assert_equal(a != None, True)
+        # Fully masked, even comparison to None should return "masked"
+        a = masked
+        assert_equal(a == None, masked)
+
+    def test_eq_w_scalar(self):
+        a = array(1)
+        assert_equal(a == 1, True)
+        assert_equal(a == 0, False)
+        assert_equal(a != 1, False)
+        assert_equal(a != 0, True)
+
+    def test_numpyarithmetics(self):
+        # Check that the mask is not back-propagated when using numpy functions
+        a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
+        control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
+                               mask=[1, 1, 0, 0, 1])
+        #
+        test = log(a)
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        assert_equal(a.mask, [0, 0, 0, 0, 1])
+        #
+        test = np.log(a)
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        assert_equal(a.mask, [0, 0, 0, 0, 1])
+
+
+#------------------------------------------------------------------------------
+class TestMaskedArrayAttributes(TestCase):
+
+    def test_keepmask(self):
+        # Tests the keep_mask flag
+        x = masked_array([1, 2, 3], mask=[1, 0, 0])
+        mx = masked_array(x)
+        assert_equal(mx.mask, x.mask)
+        mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
+        assert_equal(mx.mask, [0, 1, 0])
+        mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
+        assert_equal(mx.mask, [1, 1, 0])
+        # We default to True
+        mx = masked_array(x, mask=[0, 1, 0])
+        assert_equal(mx.mask, [1, 1, 0])
+
+    def test_hardmask(self):
+        # Test hard_mask
+        d = arange(5)
+        n = [0, 0, 0, 1, 1]
+        m = make_mask(n)
+        xh = array(d, mask=m, hard_mask=True)
+        # We need to copy, to avoid updating d in xh!
+ xs = array(d, mask=m, hard_mask=False, copy=True) + xh[[1, 4]] = [10, 40] + xs[[1, 4]] = [10, 40] + assert_equal(xh._data, [0, 10, 2, 3, 4]) + assert_equal(xs._data, [0, 10, 2, 3, 40]) + #assert_equal(xh.mask.ctypes._data, m.ctypes._data) + assert_equal(xs.mask, [0, 0, 0, 1, 0]) + self.assertTrue(xh._hardmask) + self.assertTrue(not xs._hardmask) + xh[1:4] = [10, 20, 30] + xs[1:4] = [10, 20, 30] + assert_equal(xh._data, [0, 10, 20, 3, 4]) + assert_equal(xs._data, [0, 10, 20, 30, 40]) + #assert_equal(xh.mask.ctypes._data, m.ctypes._data) + assert_equal(xs.mask, nomask) + xh[0] = masked + xs[0] = masked + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, [1, 0, 0, 0, 0]) + xh[:] = 1 + xs[:] = 1 + assert_equal(xh._data, [0, 1, 1, 3, 4]) + assert_equal(xs._data, [1, 1, 1, 1, 1]) + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, nomask) + # Switch to soft mask + xh.soften_mask() + xh[:] = arange(5) + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh.mask, nomask) + # Switch back to hard mask + xh.harden_mask() + xh[xh < 3] = masked + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + xh[filled(xh > 1, False)] = 5 + assert_equal(xh._data, [0, 1, 2, 5, 5]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + # + xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) + xh[0] = 0 + assert_equal(xh._data, [[1, 0], [3, 4]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[-1, -1] = 5 + assert_equal(xh._data, [[1, 0], [3, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[filled(xh < 5, False)] = 2 + assert_equal(xh._data, [[1, 2], [2, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + + def test_hardmask_again(self): + # Another test of hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + xh[4:5] = 999 + #assert_equal(xh.mask.ctypes._data, m.ctypes._data) + xh[0:1] = 999 + assert_equal(xh._data, [999, 1, 2, 3, 4]) + + def test_hardmask_oncemore_yay(self): + # OK, yet another test of hardmask + # Make sure that harden_mask/soften_mask//unshare_mask returns self + a = array([1, 2, 3], mask=[1, 0, 0]) + b = a.harden_mask() + assert_equal(a, b) + b[0] = 0 + assert_equal(a, b) + assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) + a = b.soften_mask() + a[0] = 0 + assert_equal(a, b) + assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) + + def test_smallmask(self): + # Checks the behaviour of _smallmask + a = arange(10) + a[1] = masked + a[1] = 1 + assert_equal(a._mask, nomask) + a = arange(10) + a._smallmask = False + a[1] = masked + a[1] = 1 + assert_equal(a._mask, zeros(10)) + + def test_shrink_mask(self): + # Tests .shrink_mask() + a = array([1, 2, 3], mask=[0, 0, 0]) + b = a.shrink_mask() + assert_equal(a, b) + assert_equal(a.mask, nomask) + + def test_flat(self): + # Test that flat can return all types of items [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) + testflat[0] = 9 + 
assert_equal(test[0, 0], 9) + # test 2-D record array + # ... on structured array w/ masked records + x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], + [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x['a'][0, 1] = masked + x['b'][1, 0] = masked + x['c'][0, 2] = masked + x[-1, -1] = masked + xflat = x.flat + assert_equal(xflat[0], x[0, 0]) + assert_equal(xflat[1], x[0, 1]) + assert_equal(xflat[2], x[0, 2]) + assert_equal(xflat[:3], x[0]) + assert_equal(xflat[3], x[1, 0]) + assert_equal(xflat[4], x[1, 1]) + assert_equal(xflat[5], x[1, 2]) + assert_equal(xflat[3:], x[1]) + assert_equal(xflat[-1], x[-1, -1]) + i = 0 + j = 0 + for xf in xflat: + assert_equal(xf, x[j, i]) + i += 1 + if i >= x.shape[-1]: + i = 0 + j += 1 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, array([[1., 0.]])) + assert_equal(b01.mask, array([[False, False]])) + + +#------------------------------------------------------------------------------ +class TestFillingValues(TestCase): + + def test_check_on_scalar(self): + # Test _check_fill_value set to valid and invalid values + _check_fill_value = np.ma.core._check_fill_value + # + fval = _check_fill_value(0, int) + assert_equal(fval, 0) + fval = _check_fill_value(None, int) + assert_equal(fval, default_fill_value(0)) + # + fval = _check_fill_value(0, "|S3") + assert_equal(fval, asbytes("0")) + fval = _check_fill_value(None, "|S3") + assert_equal(fval, default_fill_value("|S3")) + self.assertRaises(TypeError, _check_fill_value, 1e+20, int) + self.assertRaises(TypeError, _check_fill_value, 'stuff', int) + + def test_check_on_fields(self): + # Tests _check_fill_value with records + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('a', int), ('b', float), ('c', "|S3")] + # A check on a list should return a single record + fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) + self.assertTrue(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) + # A check on None should output the defaults + fval = _check_fill_value(None, ndtype) + self.assertTrue(isinstance(fval, ndarray)) + assert_equal(fval.item(), [default_fill_value(0), + default_fill_value(0.), + asbytes(default_fill_value("0"))]) + #.....Using a structured type as fill_value should work + fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) + fval = _check_fill_value(fill_val, ndtype) + self.assertTrue(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) + + #.....Using a flexible type w/ a different type shouldn't matter + # BEHAVIOR in 1.5 and earlier: match structured types by position + #fill_val = np.array((-999, -12345678.9, "???"), + # dtype=[("A", int), ("B", float), ("C", "|S3")]) + # BEHAVIOR in 1.6 and later: match structured types by name + fill_val = np.array(("???", -999, -12345678.9), + dtype=[("c", "|S3"), ("a", int), ("b", float), ]) + fval = _check_fill_value(fill_val, ndtype) + self.assertTrue(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) + + #.....Using an object-array shouldn't matter either + fill_val = np.ndarray(shape=(1,), dtype=object) + fill_val[0] = (-999, -12345678.9, asbytes("???")) + fval = _check_fill_value(fill_val, object) + self.assertTrue(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) + # NOTE: This test was 
never run properly as "fill_value" rather than + # "fill_val" was assigned. Written properly, it fails. + #fill_val = np.array((-999, -12345678.9, "???")) + #fval = _check_fill_value(fill_val, ndtype) + #self.assertTrue(isinstance(fval, ndarray)) + #assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) + #.....One-field-only flexible type should work as well + ndtype = [("a", int)] + fval = _check_fill_value(-999999999, ndtype) + self.assertTrue(isinstance(fval, ndarray)) + assert_equal(fval.item(), (-999999999,)) + + def test_fillvalue_conversion(self): + # Tests the behavior of fill_value during conversion + # We had a tailored comment to make sure special attributes are + # properly dealt with + a = array(asbytes_nested(['3', '4', '5'])) + a._optinfo.update({'comment':"updated!"}) + # + b = array(a, dtype=int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + # + b = array(a, dtype=float) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0.)) + # + b = a.astype(int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + assert_equal(b._optinfo['comment'], "updated!") + # + b = a.astype([('a', '|S3')]) + assert_equal(b['a']._data, a._data) + assert_equal(b['a'].fill_value, a.fill_value) + + def test_fillvalue(self): + # Yet more fun with the fill_value + data = masked_array([1, 2, 3], fill_value=-999) + series = data[[0, 2, 1]] + assert_equal(series._fill_value, data._fill_value) + # + mtype = [('f', float), ('s', '|S3')] + x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) + x.fill_value = 999 + assert_equal(x.fill_value.item(), [999., asbytes('999')]) + assert_equal(x['f'].fill_value, 999) + assert_equal(x['s'].fill_value, asbytes('999')) + # + x.fill_value = (9, '???') + assert_equal(x.fill_value.item(), (9, asbytes('???'))) + assert_equal(x['f'].fill_value, 9) + assert_equal(x['s'].fill_value, asbytes('???')) + # + x = array([1, 2, 3.1]) + x.fill_value = 999 + assert_equal(np.asarray(x.fill_value).dtype, float) + assert_equal(x.fill_value, 999.) + assert_equal(x._fill_value, np.array(999.)) + + def test_fillvalue_exotic_dtype(self): + # Tests yet more exotic flexible dtypes + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('i', int), ('s', '|S8'), ('f', float)] + control = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),), + dtype=ndtype) + assert_equal(_check_fill_value(None, ndtype), control) + # The shape shouldn't matter + ndtype = [('f0', float, (2, 2))] + control = np.array((default_fill_value(0.),), + dtype=[('f0', float)]).astype(ndtype) + assert_equal(_check_fill_value(None, ndtype), control) + control = np.array((0,), dtype=[('f0', float)]).astype(ndtype) + assert_equal(_check_fill_value(0, ndtype), control) + # + ndtype = np.dtype("int, (2,3)float, float") + control = np.array((default_fill_value(0), + default_fill_value(0.), + default_fill_value(0.),), + dtype="int, float, float").astype(ndtype) + test = _check_fill_value(None, ndtype) + assert_equal(test, control) + control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) + assert_equal(_check_fill_value(0, ndtype), control) + + def test_extremum_fill_value(self): + # Tests extremum fill values for flexible type. 
+ a = array([(1, (2, 3)), (4, (5, 6))], + dtype=[('A', int), ('B', [('BA', int), ('BB', int)])]) + test = a.fill_value + assert_equal(test['A'], default_fill_value(a['A'])) + assert_equal(test['B']['BA'], default_fill_value(a['B']['BA'])) + assert_equal(test['B']['BB'], default_fill_value(a['B']['BB'])) + # + test = minimum_fill_value(a) + assert_equal(test[0], minimum_fill_value(a['A'])) + assert_equal(test[1][0], minimum_fill_value(a['B']['BA'])) + assert_equal(test[1][1], minimum_fill_value(a['B']['BB'])) + assert_equal(test[1], minimum_fill_value(a['B'])) + # + test = maximum_fill_value(a) + assert_equal(test[0], maximum_fill_value(a['A'])) + assert_equal(test[1][0], maximum_fill_value(a['B']['BA'])) + assert_equal(test[1][1], maximum_fill_value(a['B']['BB'])) + assert_equal(test[1], maximum_fill_value(a['B'])) + + def test_fillvalue_individual_fields(self): + # Test setting fill_value on individual fields + ndtype = [('a', int), ('b', int)] + # Explicit fill_value + a = array(list(zip([1, 2, 3], [4, 5, 6])), + fill_value=(-999, -999), dtype=ndtype) + aa = a['a'] + aa.set_fill_value(10) + assert_equal(aa._fill_value, np.array(10)) + assert_equal(tuple(a.fill_value), (10, -999)) + a.fill_value['b'] = -10 + assert_equal(tuple(a.fill_value), (10, -10)) + # Implicit fill_value + t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype) + tt = t['a'] + tt.set_fill_value(10) + assert_equal(tt._fill_value, np.array(10)) + assert_equal(tuple(t.fill_value), (10, default_fill_value(0))) + + def test_fillvalue_implicit_structured_array(self): + # Check that fill_value is always defined for structured arrays + ndtype = ('b', float) + adtype = ('a', float) + a = array([(1.,), (2.,)], mask=[(False,), (False,)], + fill_value=(np.nan,), dtype=np.dtype([adtype])) + b = empty(a.shape, dtype=[adtype, ndtype]) + b['a'] = a['a'] + b['a'].set_fill_value(a['a'].fill_value) + f = b._fill_value[()] + assert_(np.isnan(f[0])) + assert_equal(f[-1], default_fill_value(1.)) + + def test_fillvalue_as_arguments(self): + # Test adding a fill_value parameter to empty/ones/zeros + a = empty(3, fill_value=999.) + assert_equal(a.fill_value, 999.) + # + a = ones(3, fill_value=999., dtype=float) + assert_equal(a.fill_value, 999.) + # + a = zeros(3, fill_value=0., dtype=complex) + assert_equal(a.fill_value, 0.) + # + a = identity(3, fill_value=0., dtype=complex) + assert_equal(a.fill_value, 0.) 
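+
+    # Illustrative sketch only, not from the upstream numpy suite: shows the
+    # fill_value round-trip that the tests above rely on -- filled()
+    # substitutes fill_value at masked slots and leaves the rest untouched.
+    def test_fillvalue_roundtrip_sketch(self):
+        a = array([1., 2., 3.], mask=[0, 1, 0], fill_value=-999.)
+        assert_equal(a.filled(), [1., -999., 3.])
+        # An explicit argument takes precedence over the stored fill_value
+        assert_equal(a.filled(0.), [1., 0., 3.])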
+
+    def test_fillvalue_in_view(self):
+        # Test the behavior of fill_value in view
+
+        # Create initial masked array
+        x = array([1, 2, 3], fill_value=1, dtype=np.int64)
+
+        # Check that fill_value is preserved by default
+        y = x.view()
+        assert_(y.fill_value == 1)
+
+        # Check that fill_value is preserved if dtype is specified and the
+        # dtype is an ndarray sub-class and has a _fill_value attribute
+        y = x.view(MaskedArray)
+        assert_(y.fill_value == 1)
+
+        # Check that fill_value is preserved if type is specified and the
+        # dtype is an ndarray sub-class and has a _fill_value attribute (by
+        # default, the first argument is dtype, not type)
+        y = x.view(type=MaskedArray)
+        assert_(y.fill_value == 1)
+
+        # Check that code does not crash if passed an ndarray sub-class that
+        # does not have a _fill_value attribute
+        y = x.view(np.ndarray)
+        y = x.view(type=np.ndarray)
+
+        # Check that fill_value can be overridden with view
+        y = x.view(MaskedArray, fill_value=2)
+        assert_(y.fill_value == 2)
+
+        # Check that fill_value can be overridden with view (using type=)
+        y = x.view(type=MaskedArray, fill_value=2)
+        assert_(y.fill_value == 2)
+
+        # Check that fill_value gets reset if passed a dtype but not a
+        # fill_value. This is because even though in some cases one can safely
+        # cast the fill_value, e.g. if taking an int64 view of an int32 array,
+        # in other cases, this cannot be done (e.g. int32 view of an int64
+        # array with a large fill_value).
+        y = x.view(dtype=np.int32)
+        assert_(y.fill_value == 999999)
+
+
+#------------------------------------------------------------------------------
+class TestUfuncs(TestCase):
+    # Test class for the application of ufuncs on MaskedArrays.
+
+    def setUp(self):
+        # Base data definition.
+        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
+                  array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
+        self.err_status = np.geterr()
+        np.seterr(divide='ignore', invalid='ignore')
+
+    def tearDown(self):
+        np.seterr(**self.err_status)
+
+    def test_testUfuncRegression(self):
+        # Tests new ufuncs on MaskedArrays.
+        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
+                  'sin', 'cos', 'tan',
+                  'arcsin', 'arccos', 'arctan',
+                  'sinh', 'cosh', 'tanh',
+                  'arcsinh',
+                  'arccosh',
+                  'arctanh',
+                  'absolute', 'fabs', 'negative',
+                  # 'nonzero', 'around',
+                  'floor', 'ceil',
+                  # 'sometrue', 'alltrue',
+                  'logical_not',
+                  'add', 'subtract', 'multiply',
+                  'divide', 'true_divide', 'floor_divide',
+                  'remainder', 'fmod', 'hypot', 'arctan2',
+                  'equal', 'not_equal', 'less_equal', 'greater_equal',
+                  'less', 'greater',
+                  'logical_and', 'logical_or', 'logical_xor',
+                  ]:
+            try:
+                uf = getattr(umath, f)
+            except AttributeError:
+                uf = getattr(fromnumeric, f)
+            mf = getattr(numpy.ma.core, f)
+            args = self.d[:uf.nin]
+            ur = uf(*args)
+            mr = mf(*args)
+            assert_equal(ur.filled(0), mr.filled(0), f)
+            assert_mask_equal(ur.mask, mr.mask, err_msg=f)
+
+    def test_reduce(self):
+        # Tests reduce on MaskedArrays.
+        a = self.d[0]
+        self.assertTrue(not alltrue(a, axis=0))
+        self.assertTrue(sometrue(a, axis=0))
+        assert_equal(sum(a[:3], axis=0), 0)
+        assert_equal(product(a, axis=0), 0)
+        assert_equal(add.reduce(a), pi)
+
+    def test_minmax(self):
+        # Tests extrema on MaskedArrays.
+        a = arange(1, 13).reshape(3, 4)
+        amask = masked_where(a < 5, a)
+        assert_equal(amask.max(), a.max())
+        assert_equal(amask.min(), 5)
+        assert_equal(amask.max(0), a.max(0))
+        assert_equal(amask.min(0), [5, 6, 7, 8])
+        self.assertTrue(amask.max(1)[0].mask)
+        self.assertTrue(amask.min(1)[0].mask)
+
+    def test_ndarray_mask(self):
+        # Check that the mask of the result is an ndarray (not a MaskedArray...)
+        a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
+        test = np.sqrt(a)
+        control = masked_array([-1, 0, 1, np.sqrt(2), -1],
+                               mask=[1, 0, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        self.assertTrue(not isinstance(test.mask, MaskedArray))
+
+    def test_treatment_of_NotImplemented(self):
+        # Check that any NotImplemented returned by umath is passed on
+        a = masked_array([1., 2.], mask=[1, 0])
+        # basic tests for _MaskedBinaryOperation
+        assert_(a.__mul__('abc') is NotImplemented)
+        assert_(multiply.outer(a, 'abc') is NotImplemented)
+        # and for _DomainedBinaryOperation
+        assert_(a.__div__('abc') is NotImplemented)
+
+        # also check explicitly that rmul of another class can be accessed
+        class MyClass(str):
+            def __mul__(self, other):
+                return "My mul"
+
+            def __rmul__(self, other):
+                return "My rmul"
+
+        me = MyClass()
+        assert_(me * a == "My mul")
+        assert_(a * me == "My rmul")
+
+
+#------------------------------------------------------------------------------
+class TestMaskedArrayInPlaceArithmetics(TestCase):
+    # Test MaskedArray in-place arithmetic
+
+    def setUp(self):
+        x = arange(10)
+        y = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        self.intdata = (x, y, xm)
+        self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
+
+    def test_inplace_addition_scalar(self):
+        # Test of inplace additions
+        (x, y, xm) = self.intdata
+        xm[2] = masked
+        x += 1
+        assert_equal(x, y + 1)
+        xm += 1
+        assert_equal(xm, y + 1)
+        #
+        (x, _, xm) = self.floatdata
+        id1 = x.data.ctypes._data
+        x += 1.
+        assert_(id1 == x.data.ctypes._data)
+        assert_equal(x, y + 1.)
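+
+    # Illustrative sketch only, not from the upstream numpy suite: an
+    # in-place operation ORs the right-hand operand's mask into the target,
+    # the pattern the array tests below verify for +=, -=, *= and /=.
+    def test_inplace_mask_propagation_sketch(self):
+        x = array([1., 2., 3.])
+        y = array([1., 1., 1.], mask=[0, 1, 0])
+        x += y
+        assert_equal(x.mask, [0, 1, 0])
+        # Unmasked slots are updated as usual
+        assert_equal(x.data[0], 2.)
+        assert_equal(x.data[2], 4.)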
+ + def test_inplace_addition_array(self): + # Test of inplace additions + (x, y, xm) = self.intdata + m = xm.mask + a = arange(10, dtype=np.int16) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar(self): + # Test of inplace subtractions + (x, y, xm) = self.intdata + x -= 1 + assert_equal(x, y - 1) + xm -= 1 + assert_equal(xm, y - 1) + + def test_inplace_subtraction_array(self): + # Test of inplace subtractions + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + x *= 2.0 + assert_equal(x, y * 2) + xm *= 2.0 + assert_equal(xm, y * 2) + + def test_inplace_multiplication_array(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_division_scalar_int(self): + # Test of inplace division + (x, y, xm) = self.intdata + x = arange(10) * 2 + xm = arange(10) * 2 + xm[2] = masked + x //= 2 + assert_equal(x, y) + xm //= 2 + assert_equal(xm, y) + + def test_inplace_division_scalar_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + x /= 2.0 + assert_equal(x, y / 2.0) + xm /= arange(10) + assert_equal(xm, ones((10,))) + + def test_inplace_division_array_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x /= a + xm /= a + assert_equal(x, y / a) + assert_equal(xm, y / a) + assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) + + def test_inplace_division_misc(self): + # + x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] 
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + # + z = xm / ym + assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) + # + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + #assert_equal(xm._data, + # [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) + + def test_datafriendly_add(self): + # Test keeping data w/ (inplace) addition + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_sub(self): + # Test keeping data w/ (inplace) subtraction + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_mul(self): + # Test keeping data w/ (inplace) multiplication + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_div(self): + # Test keeping data w/ (inplace) division + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1 / 2., 2 / 2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1 / 2., 2 / 2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2. 
/ 20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2 / 20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_pow(self): + # Test keeping data w/ (inplace) power + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2. ** 2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2. ** 2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + +#------------------------------------------------------------------------------ +class TestMaskedArrayMethods(TestCase): + # Test class for miscellaneous MaskedArrays methods. + def setUp(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_generic_methods(self): + # Tests some MaskedArray methods. 
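+        # With nothing masked, each of these methods should agree with the
+        # plain ndarray method applied to ._data, which is all that is
+        # checked here.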
+ a = array([1, 3, 2]) + assert_equal(a.any(), a._data.any()) + assert_equal(a.all(), a._data.all()) + assert_equal(a.argmax(), a._data.argmax()) + assert_equal(a.argmin(), a._data.argmin()) + assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) + assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) + assert_equal(a.conj(), a._data.conj()) + assert_equal(a.conjugate(), a._data.conjugate()) + # + m = array([[1, 2], [3, 4]]) + assert_equal(m.diagonal(), m._data.diagonal()) + assert_equal(a.sum(), a._data.sum()) + assert_equal(a.take([1, 2]), a._data.take([1, 2])) + assert_equal(m.transpose(), m._data.transpose()) + + def test_allclose(self): + # Tests allclose on arrays + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + self.assertTrue(allclose(a, b)) + # Test allclose w/ infs + a[0] = np.inf + self.assertTrue(not allclose(a, b)) + b[0] = np.inf + self.assertTrue(allclose(a, b)) + # Test all close w/ masked + a = masked_array(a) + a[-1] = masked + self.assertTrue(allclose(a, b, masked_equal=True)) + self.assertTrue(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + self.assertTrue(allclose(a, 0, masked_equal=True)) + + # Test that the function works for MIN_INT integer typed arrays + a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) + self.assertTrue(allclose(a, a)) + + def test_allany(self): + # Checks the any/all methods/functions. + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mx = masked_array(x, mask=m) + mxbig = (mx > 0.5) + mxsmall = (mx < 0.5) + # + self.assertFalse(mxbig.all()) + self.assertTrue(mxbig.any()) + assert_equal(mxbig.all(0), [False, False, True]) + assert_equal(mxbig.all(1), [False, False, True]) + assert_equal(mxbig.any(0), [False, False, True]) + assert_equal(mxbig.any(1), [True, True, True]) + # + self.assertFalse(mxsmall.all()) + self.assertTrue(mxsmall.any()) + assert_equal(mxsmall.all(0), [True, True, False]) + assert_equal(mxsmall.all(1), [False, False, False]) + assert_equal(mxsmall.any(0), [True, True, False]) + assert_equal(mxsmall.any(1), [True, True, False]) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + # + self.assertFalse(mXbig.all()) + self.assertTrue(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + # + self.assertFalse(mXsmall.all()) + self.assertTrue(mXsmall.any()) + assert_equal(mXsmall.all(0), np.matrix([True, True, False])) + assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) + assert_equal(mXsmall.any(0), np.matrix([True, True, False])) + assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) + + def test_allany_oddities(self): + # Some fun with all and any + store = empty((), dtype=bool) + full = array([1, 2, 3], mask=True) + # + self.assertTrue(full.all() is masked) + full.all(out=store) + self.assertTrue(store) + self.assertTrue(store._mask, True) + self.assertTrue(store is not masked) + # + store = empty((), dtype=bool) 
+ self.assertTrue(full.any() is masked) + full.any(out=store) + self.assertTrue(not store) + self.assertTrue(store._mask, True) + self.assertTrue(store is not masked) + + def test_argmax_argmin(self): + # Tests argmin & argmax on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + # + assert_equal(mx.argmin(), 35) + assert_equal(mX.argmin(), 35) + assert_equal(m2x.argmin(), 4) + assert_equal(m2X.argmin(), 4) + assert_equal(mx.argmax(), 28) + assert_equal(mX.argmax(), 28) + assert_equal(m2x.argmax(), 31) + assert_equal(m2X.argmax(), 31) + # + assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) + assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) + assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) + assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) + # + assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) + assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) + assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) + assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) + + def test_clip(self): + # Tests clip on MaskedArrays. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) + mx = array(x, mask=m) + clipped = mx.clip(2, 8) + assert_equal(clipped.mask, mx.mask) + assert_equal(clipped._data, x.clip(2, 8)) + assert_equal(clipped._data, mx._data.clip(2, 8)) + + def test_compress(self): + # test compress + a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) + condition = (a > 1.5) & (a < 3.5) + assert_equal(a.compress(condition), [2., 3.]) + # + a[[2, 3]] = masked + b = a.compress(condition) + assert_equal(b._data, [2., 3.]) + assert_equal(b._mask, [0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + # + condition = (a < 4.) + b = a.compress(condition) + assert_equal(b._data, [1., 2., 3.]) + assert_equal(b._mask, [0, 0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + # + a = masked_array([[10, 20, 30], [40, 50, 60]], + mask=[[0, 0, 1], [1, 0, 0]]) + b = a.compress(a.ravel() >= 22) + assert_equal(b._data, [30, 40, 50, 60]) + assert_equal(b._mask, [1, 1, 0, 0]) + # + x = np.array([3, 1, 2]) + b = a.compress(x >= 2, axis=1) + assert_equal(b._data, [[10, 30], [40, 60]]) + assert_equal(b._mask, [[0, 1], [1, 0]]) + + def test_compressed(self): + # Tests compressed + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + a[0] = masked + b = a.compressed() + assert_equal(b, [2, 3, 4]) + # + a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + self.assertTrue(isinstance(b, np.matrix)) + a[0, 0] = masked + b = a.compressed() + assert_equal(b, [[2, 3, 4]]) + + def test_empty(self): + # Tests empty/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + # + b = empty_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + # + b = empty(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + def test_put(self): + # Tests put. 
+ d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + self.assertTrue(x[3] is masked) + self.assertTrue(x[4] is masked) + x[[1, 4]] = [10, 40] + #self.assertTrue(x.mask is not m) + self.assertTrue(x[3] is masked) + self.assertTrue(x[4] is not masked) + assert_equal(x, [0, 10, 2, -1, 40]) + # + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + i = [0, 2, 4, 6] + x.put(i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + # + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + put(x, i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + def test_put_hardmask(self): + # Tests put on hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d + 1, mask=m, hard_mask=True, copy=True) + xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) + assert_equal(xh._data, [3, 4, 2, 4, 5]) + + def test_putmask(self): + x = arange(6) + 1 + mx = array(x, mask=[0, 0, 0, 1, 1, 1]) + mask = [0, 0, 1, 0, 0, 1] + # w/o mask, w/o masked values + xx = x.copy() + putmask(xx, mask, 99) + assert_equal(xx, [1, 2, 99, 4, 5, 99]) + # w/ mask, w/o masked values + mxx = mx.copy() + putmask(mxx, mask, 99) + assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) + assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) + # w/o mask, w/ masked values + values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) + xx = x.copy() + putmask(xx, mask, values) + assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) + # w/ mask, w/ masked values + mxx = mx.copy() + putmask(mxx, mask, values) + assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) + # w/ mask, w/ masked values + hardmask + mxx = mx.copy() + mxx.harden_mask() + putmask(mxx, mask, values) + assert_equal(mxx, [1, 2, 30, 4, 5, 60]) + + def test_ravel(self): + # Tests ravel + a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, aravel.shape) + a = array([0, 0], mask=[1, 1]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, a.shape) + a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel.shape, (1, 5)) + assert_equal(aravel._mask.shape, a.shape) + # Checks that small_mask is preserved + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) + assert_equal(a.ravel()._mask, [0, 0, 0, 0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2, 2) + ar = a.ravel() + assert_equal(ar._mask, [0, 0, 0, 0]) + assert_equal(ar._data, [1, 2, 3, 4]) + assert_equal(ar.fill_value, -99) + + def test_reshape(self): + # Tests reshape + x = arange(4) + x[0] = masked + y = x.reshape(2, 2) + assert_equal(y.shape, (2, 2,)) + assert_equal(y._mask.shape, (2, 2,)) + assert_equal(x.shape, (4,)) + assert_equal(x._mask.shape, (4,)) + + def test_sort(self): + # Test sort + x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) + # + sortedx = sort(x) + assert_equal(sortedx._data, [1, 2, 3, 4]) + assert_equal(sortedx._mask, [0, 0, 0, 1]) + # + sortedx = sort(x, 
endwith=False) + assert_equal(sortedx._data, [4, 1, 2, 3]) + assert_equal(sortedx._mask, [1, 0, 0, 0]) + # + x.sort() + assert_equal(x._data, [1, 2, 3, 4]) + assert_equal(x._mask, [0, 0, 0, 1]) + # + x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) + x.sort(endwith=False) + assert_equal(x._data, [4, 1, 2, 3]) + assert_equal(x._mask, [1, 0, 0, 0]) + # + x = [1, 4, 2, 3] + sortedx = sort(x) + self.assertTrue(not isinstance(sortedx, MaskedArray)) + # + x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8) + sortedx = sort(x, endwith=False) + assert_equal(sortedx._data, [-2, -1, 0, 1, 2]) + x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8) + sortedx = sort(x, endwith=False) + assert_equal(sortedx._data, [1, 2, -2, -1, 0]) + assert_equal(sortedx._mask, [1, 1, 0, 0, 0]) + + def test_sort_2d(self): + # Check sort of 2D array. + # 2D array w/o mask + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + # 2D array w/mask + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) + # 3D + a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], + [[1, 2, 3], [7, 8, 9], [4, 5, 6]], + [[7, 8, 9], [1, 2, 3], [4, 5, 6]], + [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) + a[a % 4 == 0] = masked + am = a.copy() + an = a.filled(99) + am.sort(0) + an.sort(0) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(1) + an.sort(1) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(2) + an.sort(2) + assert_equal(am, an) + + def test_sort_flexible(self): + # Test sort on flexible dtype. + a = array( + data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + # + test = sort(a) + b = array( + data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, b) + assert_equal(test.mask, b.mask) + # + test = sort(a, endwith=False) + b = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ], + dtype=[('A', int), ('B', int)]) + assert_equal(test, b) + assert_equal(test.mask, b.mask) + + def test_argsort(self): + # Test argsort + a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) + assert_equal(np.argsort(a), argsort(a)) + + def test_squeeze(self): + # Check squeeze + data = masked_array([[1, 2, 3]]) + assert_equal(data.squeeze(), [1, 2, 3]) + data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) + assert_equal(data.squeeze(), [1, 2, 3]) + assert_equal(data.squeeze()._mask, [1, 1, 1]) + data = masked_array([[1]], mask=True) + self.assertTrue(data.squeeze() is masked) + + def test_swapaxes(self): + # Tests swapaxes on MaskedArrays.
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mX = array(x, mask=m).reshape(6, 6) + mXX = mX.reshape(3, 2, 2, 3) + # + mXswapped = mX.swapaxes(0, 1) + assert_equal(mXswapped[-1], mX[:, -1]) + + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_take(self): + # Tests take + x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) + assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) + assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) + assert_equal(x.take([[0, 1], [0, 1]]), + masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) + # + x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) + assert_equal(x.take([0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + assert_equal(take(x, [0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + + def test_take_masked_indices(self): + # Test take w/ masked indices + a = np.array((40, 18, 37, 9, 22)) + indices = np.arange(3)[None,:] + np.arange(5)[:, None] + mindices = array(indices, mask=(indices >= len(a))) + # No mask + test = take(a, mindices, mode='clip') + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 22], + [22, 22, 22]]) + assert_equal(test, ctrl) + # Masked indices + test = take(a, mindices) + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 40], + [22, 40, 40]]) + ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # Masked input + masked indices + a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) + test = take(a, mindices) + ctrl[0, 1] = ctrl[1, 0] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_tolist(self): + # Tests to list + # ... on 1D + x = array(np.arange(12)) + x[[1, -2]] = masked + xlist = x.tolist() + self.assertTrue(xlist[1] is None) + self.assertTrue(xlist[-2] is None) + # ... on 2D + x.shape = (3, 4) + xlist = x.tolist() + ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] + assert_equal(xlist[0], [0, None, 2, 3]) + assert_equal(xlist[1], [4, 5, 6, 7]) + assert_equal(xlist[2], [8, 9, None, 11]) + assert_equal(xlist, ctrl) + # ... on structured array w/ masked records + x = array(list(zip([1, 2, 3], + [1.1, 2.2, 3.3], + ['one', 'two', 'thr'])), + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x[-1] = masked + assert_equal(x.tolist(), + [(1, 1.1, asbytes('one')), + (2, 2.2, asbytes('two')), + (None, None, None)]) + # ... on structured array w/ masked fields + a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], + dtype=[('a', int), ('b', int)]) + test = a.tolist() + assert_equal(test, [[1, None], [3, 4]]) + # ... 
on mvoid + a = a[0] + test = a.tolist() + assert_equal(test, [1, None]) + + def test_tolist_specialcase(self): + # Test mvoid.tolist: make sure we return a standard Python object + a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) + # w/o mask: each entry is a np.void whose elements are standard Python + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + # w/ mask: each entry is a ma.void whose elements should be + # standard Python + a.mask[0] = (0, 1) + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + + def test_toflex(self): + # Test the conversion to records + data = arange(10) + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + # + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + # + ndtype = [('i', int), ('s', '|S3'), ('f', float)] + data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + # + ndtype = np.dtype("int, (2,3)float, float") + data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + # Test the reconstruction of a masked_array from a record + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + # + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + # + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + + def test_arraymethod(self): + # Test a _arraymethod w/ n argument + marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) + control = masked_array([[1], [2], [3], [4], [5]], + mask=[0, 0, 1, 0, 0]) + assert_equal(marray.T, control) + assert_equal(marray.transpose(), control) + # + assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + + +#------------------------------------------------------------------------------ +class TestMaskedArrayMathMethods(TestCase): + + def setUp(self): + # Base data definition. 
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_cumsumprod(self): + # Tests cumsum & cumprod on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXcp = mX.cumsum(0) + assert_equal(mXcp._data, mX.filled(0).cumsum(0)) + mXcp = mX.cumsum(1) + assert_equal(mXcp._data, mX.filled(0).cumsum(1)) + # + mXcp = mX.cumprod(0) + assert_equal(mXcp._data, mX.filled(1).cumprod(0)) + mXcp = mX.cumprod(1) + assert_equal(mXcp._data, mX.filled(1).cumprod(1)) + + def test_cumsumprod_with_output(self): + # Tests cumsum/cumprod w/ output + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + # + for funcname in ('cumsum', 'cumprod'): + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + self.assertTrue(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + # + output = empty((3, 4), dtype=int) + result = xmmeth(axis=0, out=output) + self.assertTrue(result is output) + + def test_ptp(self): + # Tests ptp on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), mx.compressed().ptp()) + rows = np.zeros(n, np.float) + cols = np.zeros(m, np.float) + for k in range(m): + cols[k] = mX[:, k].compressed().ptp() + for k in range(n): + rows[k] = mX[k].compressed().ptp() + assert_equal(mX.ptp(0), cols) + assert_equal(mX.ptp(1), rows) + + def test_sum_object(self): + # Test sum on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object) + assert_equal(a.sum(), 5) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.sum(axis=0), [5, 7, 9]) + + def test_prod_object(self): + # Test prod on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object) + assert_equal(a.prod(), 2 * 3) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.prod(axis=0), [4, 10, 18]) + + def test_meananom_object(self): + # Test mean/anom on object dtype + a = masked_array([1, 2, 3], dtype=np.object) + assert_equal(a.mean(), 2) + assert_equal(a.anom(), [-1, 0, 1]) + + def test_trace(self): + # Tests trace on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_almost_equal(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0)) + + def test_varstd(self): + # Tests var & std on MaskedArrays. 
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_almost_equal(mX.std(axis=None, ddof=1), + mX.compressed().std(ddof=1)) + assert_almost_equal(mX.var(axis=None, ddof=1), + mX.compressed().var(ddof=1)) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + def test_varstd_specialcases(self): + # Test a special case for var + nout = np.array(-1, dtype=float) + mout = array(-1, dtype=float) + # + x = array(arange(10), mask=True) + for methodname in ('var', 'std'): + method = getattr(x, methodname) + self.assertTrue(method() is masked) + self.assertTrue(method(0) is masked) + self.assertTrue(method(-1) is masked) + # Using a masked array as explicit output + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + _ = method(out=mout) + self.assertTrue(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + _ = method(out=nout) + self.assertTrue(np.isnan(nout)) + # + x = array(arange(10), mask=True) + x[-1] = 9 + for methodname in ('var', 'std'): + method = getattr(x, methodname) + self.assertTrue(method(ddof=1) is masked) + self.assertTrue(method(0, ddof=1) is masked) + self.assertTrue(method(-1, ddof=1) is masked) + # Using a masked array as explicit output + method(out=mout, ddof=1) + self.assertTrue(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout, ddof=1) + self.assertTrue(np.isnan(nout)) + + def test_varstd_ddof(self): + a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) + test = a.std(axis=0, ddof=0) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=1) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=2) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [1, 1, 1]) + + def test_diag(self): + # Test diag + x = arange(9).reshape((3, 3)) + x[1, 1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + def test_axis_methods_nomask(self): + # Test the combination nomask & methods w/ axis + a = array([[1, 2, 3], [4, 5, 6]]) + # + assert_equal(a.sum(0), [5, 7, 9]) + assert_equal(a.sum(-1), [6, 15]) + assert_equal(a.sum(1), [6, 15]) + # + assert_equal(a.prod(0), [4, 10, 18]) + assert_equal(a.prod(-1), [6, 120]) + assert_equal(a.prod(1), [6, 120]) + # + assert_equal(a.min(0), [1, 2, 3]) + assert_equal(a.min(-1), [1, 4]) + assert_equal(a.min(1), [1, 4]) + # + assert_equal(a.max(0), [4, 5, 6]) + assert_equal(a.max(-1), [3, 6]) + assert_equal(a.max(1), [3, 6]) + + +#------------------------------------------------------------------------------ 
+class TestMaskedArrayMathMethodsComplex(TestCase): + # Test class for miscellaneous MaskedArrays methods. + def setUp(self): + # Base data definition. + x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, + 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + +#------------------------------------------------------------------------------ +class TestMaskedArrayFunctions(TestCase): + # Test class for miscellaneous functions. + + def setUp(self): + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + self.info = (xm, ym) + + def test_masked_where_bool(self): + x = [1, 2] + y = masked_where(False, x) + assert_equal(y, [1, 2]) + assert_equal(y[1], 2) + + def test_masked_equal_wlist(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [0, 0, 1]) + mx = masked_not_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [1, 1, 0]) + + def test_masked_equal_fill_value(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx._mask, [0, 0, 1]) + assert_equal(mx.fill_value, 3) + + def test_masked_where_condition(self): + # Tests masking functions. 
+ x = array([1., 2., 3., 4., 5.]) + x[2] = masked + assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) + assert_equal(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2)) + assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) + assert_equal(masked_where(less_equal(x, 2), x), + masked_less_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5]) + + def test_masked_where_oddities(self): + # Tests some generic features. + atest = ones((10, 10, 10), dtype=float) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_equal(atest, ctest) + + def test_masked_where_shape_constraint(self): + a = arange(10) + try: + test = masked_equal(1, a) + except IndexError: + pass + else: + raise AssertionError("Should have failed...") + test = masked_equal(a, 1) + assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) + + def test_masked_otherfunctions(self): + assert_equal(masked_inside(list(range(5)), 1, 3), + [0, 199, 199, 199, 4]) + assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]) + assert_equal(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0]) + assert_equal(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1]) + assert_equal(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0]) + assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1]) + + def test_round(self): + a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890], + mask=[0, 1, 0, 0, 0]) + assert_equal(a.round(), [1., 2., 3., 5., 6.]) + assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7]) + assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679]) + b = empty_like(a) + a.round(out=b) + assert_equal(b, [1., 2., 3., 5., 6.]) + + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + def test_round_with_output(self): + # Testing round with an explicit output + + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = np.round(xm, decimals=2, out=output) + # ... 
the result should be the given output + self.assertTrue(result is output) + assert_equal(result, xm.round(decimals=2, out=output)) + # + output = empty((3, 4), dtype=float) + result = xm.round(decimals=2, out=output) + self.assertTrue(result is output) + + def test_identity(self): + a = identity(5) + self.assertTrue(isinstance(a, MaskedArray)) + assert_equal(a, np.identity(5)) + + def test_power(self): + x = -1.1 + assert_almost_equal(power(x, 2.), 1.21) + self.assertTrue(power(x, masked) is masked) + x = array([-1.1, -1.1, 1.1, 1.1, 0.]) + b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) + y = power(x, b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + b.mask = nomask + y = power(x, b) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + z = x ** b + assert_equal(z._mask, y._mask) + assert_almost_equal(z, y) + assert_almost_equal(z._data, y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x, y) + assert_almost_equal(x._data, y._data) + + def test_power_w_broadcasting(self): + # Test power w/ broadcasting + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + # + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + # + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_where(self): + # Test the where function + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + # + d = where(xm > 2, xm, -9) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm > 2, -9, ym) + assert_equal(d, [5., 0., 3., 2., -1., -9., + -9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) + d = where(xm > 2, xm, masked) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm <= 2).filled(True)] = True + assert_equal(d._mask, tmp) + # + ixm = xm.astype(int) + d = where(ixm > 2, ixm, masked) + assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) + assert_equal(d.dtype, ixm.dtype) + + def test_where_with_masked_choice(self): + x = arange(10) + x[3] = masked + c = x >= 8 + # Set False to masked + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_equal(x, z) + # Set True to masked + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is 
not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + + def test_where_with_masked_condition(self): + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + # + x = arange(1, 6) + x[-1] = masked + y = arange(1, 6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_equal(z, zm) + assert_(getmask(zm) is nomask) + assert_equal(zm, [1, 2, 3, 40, 50]) + z = where(c, masked, 1) + assert_equal(z, [99, 99, 99, 1, 1]) + z = where(c, 1, masked) + assert_equal(z, [99, 1, 1, 99, 99]) + + def test_where_type(self): + # Test the type conservation with where + x = np.arange(4, dtype=np.int32) + y = np.arange(4, dtype=np.float32) * 2.2 + test = where(x > 1.5, y, x).dtype + control = np.find_common_type([np.int32, np.float32], []) + assert_equal(test, control) + + def test_choose(self): + # Test choose + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + chosen = choose([2, 3, 1, 0], choices) + assert_equal(chosen, array([20, 31, 12, 3])) + chosen = choose([2, 4, 1, 0], choices, mode='clip') + assert_equal(chosen, array([20, 31, 12, 3])) + chosen = choose([2, 4, 1, 0], choices, mode='wrap') + assert_equal(chosen, array([20, 1, 12, 3])) + # Check with some masked indices + indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) + chosen = choose(indices_, choices, mode='wrap') + assert_equal(chosen, array([99, 1, 12, 99])) + assert_equal(chosen.mask, [1, 0, 0, 1]) + # Check with some masked choices + choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + chosen = choose(indices_, choices, mode='wrap') + assert_equal(chosen, array([20, 31, 12, 3])) + assert_equal(chosen.mask, [1, 0, 0, 1]) + + def test_choose_with_out(self): + # Test choose with an explicit out keyword + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + store = empty(4, dtype=int) + chosen = choose([2, 3, 1, 0], choices, out=store) + assert_equal(store, array([20, 31, 12, 3])) + self.assertTrue(store is chosen) + # Check with some masked indices + out + store = empty(4, dtype=int) + indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([99, 31, 12, 99])) + assert_equal(store.mask, [1, 0, 0, 1]) + # Check with some masked choices + out in an ndarray!
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + store = empty(4, dtype=int).view(ndarray) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([999999, 31, 12, 999999])) + + def test_reshape(self): + a = arange(10) + a[0] = masked + # Try the default + b = a.reshape((5, 2)) + assert_equal(b.shape, (5, 2)) + self.assertTrue(b.flags['C']) + # Try w/ arguments as list instead of tuple + b = a.reshape(5, 2) + assert_equal(b.shape, (5, 2)) + self.assertTrue(b.flags['C']) + # Try w/ order + b = a.reshape((5, 2), order='F') + assert_equal(b.shape, (5, 2)) + self.assertTrue(b.flags['F']) + # Try w/ order + b = a.reshape(5, 2, order='F') + assert_equal(b.shape, (5, 2)) + self.assertTrue(b.flags['F']) + # + c = np.reshape(a, (2, 5)) + self.assertTrue(isinstance(c, MaskedArray)) + assert_equal(c.shape, (2, 5)) + self.assertTrue(c[0, 0] is masked) + self.assertTrue(c.flags['C']) + + def test_make_mask_descr(self): + # Test make_mask_descr + # Flexible + ntype = [('a', np.float), ('b', np.float)] + test = make_mask_descr(ntype) + assert_equal(test, [('a', np.bool), ('b', np.bool)]) + # Standard w/ shape + ntype = (np.float, 2) + test = make_mask_descr(ntype) + assert_equal(test, (np.bool, 2)) + # Standard standard + ntype = np.float + test = make_mask_descr(ntype) + assert_equal(test, np.dtype(np.bool)) + # Nested + ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])] + test = make_mask_descr(ntype) + control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) + assert_equal(test, control) + # Named+ shape + ntype = [('a', (np.float, 2))] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([('a', (np.bool, 2))])) + # 2 names + ntype = [(('A', 'a'), float)] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([(('A', 'a'), bool)])) + + def test_make_mask(self): + # Test make_mask + # w/ a list as an input + mask = [0, 1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a ndarray as an input + mask = np.array([0, 1], dtype=np.bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a flexible-type ndarray as an input - use default + mdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1, 1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', np.float), ('b', np.float)] + bdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + + def test_mask_or(self): + # Initialize + mtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using True as input. 
Won't work, but keep it for the kicks + # test = mask_or(mask, True) + # control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype) + # assert_equal(test, control) + # Using another array w/ the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w/ a different dtype + othertype = [('A', np.bool), ('B', np.bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + # Using nested arrays + dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) + + def test_flatten_mask(self): + # Tests flatten_mask + # Standard dtype + mask = np.array([0, 0, 1], dtype=np.bool) + assert_equal(flatten_mask(mask), mask) + # Flexible dtype + mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + data = [(0, (0, 0)), (0, (0, 1))] + mask = np.array(data, dtype=mdtype) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + def test_on_ndarray(self): + # Test functions on ndarrays + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + + def test_compress(self): + # Test compress function on ndarray and masked array + # Address Github #2495. + arr = np.arange(8) + arr.shape = 4, 2 + cond = np.array([True, False, True, True]) + control = arr[[0, 2, 3]] + test = np.ma.compress(cond, arr, axis=0) + assert_equal(test, control) + marr = np.ma.array(arr) + test = np.ma.compress(cond, marr, axis=0) + assert_equal(test, control) + + def test_compressed(self): + # Test ma.compressed function.
+ # Address gh-4026 + a = np.ma.array([1, 2]) + test = np.ma.compressed(a) + assert_(type(test) is np.ndarray) + # Test case when input data is ndarray subclass + class A(np.ndarray): + pass + a = np.ma.array(A(shape=0)) + test = np.ma.compressed(a) + assert_(type(test) is A) + # Test that compressed flattens + test = np.ma.compressed([[1],[2]]) + assert_equal(test.ndim, 1) + test = np.ma.compressed([[[[[1]]]]]) + assert_equal(test.ndim, 1) + # Test case when input is MaskedArray subclass + class M(MaskedArray): + pass + test = np.ma.compressed(M(shape=(0,1,2))) + assert_equal(test.ndim, 1) + # with .compressed() overridden + class M(MaskedArray): + def compressed(self): + return 42 + test = np.ma.compressed(M(shape=(0,1,2))) + assert_equal(test, 42) + +#------------------------------------------------------------------------------ +class TestMaskedFields(TestCase): + # + def setUp(self): + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = ['one', 'two', 'three', 'four', 'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mdtype = [('a', bool), ('b', bool), ('c', bool)] + mask = [0, 1, 0, 0, 1] + base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) + + def test_set_records_masks(self): + base = self.data['base'] + mdtype = self.data['mdtype'] + # Set w/ nomask or masked + base.mask = nomask + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = masked + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ simple boolean + base.mask = False + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = True + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ list + base.mask = [0, 0, 0, 1, 1] + assert_equal_records(base._mask, + np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], + dtype=mdtype)) + + def test_set_record_element(self): + # Check setting an element of a record + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 2, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + asbytes_nested(['pi', 'two', 'three', 'four', 'five'])) + + def test_set_record_slice(self): + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[:3] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 3, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + asbytes_nested(['pi', 'pi', 'pi', 'four', 'five'])) + + def test_mask_element(self): + "Check record access" + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = masked + # + for n in ('a', 'b', 'c'): + assert_equal(base[n].mask, [1, 1, 0, 0, 1]) + assert_equal(base[n]._data, base._data[n]) + + def test_getmaskarray(self): + # Test getmaskarray on flexible dtype + ndtype = [('a', int), ('b', float)] + test = empty(3, dtype=ndtype) + assert_equal(getmaskarray(test), + np.array([(0, 0), (0, 0), (0, 0)], + dtype=[('a', '|b1'), ('b', '|b1')])) + test[:] = masked + assert_equal(getmaskarray(test), + np.array([(1, 1), (1, 1), (1, 1)], + dtype=[('a', '|b1'), ('b',
'|b1')])) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + # Transform globally to simple dtype + test = a.view(float) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + # Transform globally to a (float, 2) subdtype + test = a.view((float, 2)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + # + test = a.view((float, 2), np.matrix) + assert_equal(test, data) + self.assertTrue(isinstance(test, np.matrix)) + + def test_getitem(self): + ndtype = [('a', float), ('b', float)] + a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) + a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), + dtype=[('a', bool), ('b', bool)]) + # No mask + self.assertTrue(isinstance(a[1], MaskedArray)) + # One element masked + self.assertTrue(isinstance(a[0], MaskedArray)) + assert_equal_records(a[0]._data, a._data[0]) + assert_equal_records(a[0]._mask, a._mask[0]) + # All elements masked + self.assertTrue(isinstance(a[-2], MaskedArray)) + assert_equal_records(a[-2]._data, a._data[-2]) + assert_equal_records(a[-2]._mask, a._mask[-2]) + + def test_setitem(self): + # Issue 4866: check that one can set individual items in [record][col] + # and [col][record] order + ndtype = np.dtype([('a', float), ('b', int)]) + ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) + ma['a'][1] = 3.0 + assert_equal(ma['a'], np.array([1.0, 3.0])) + ma[1]['a'] = 4.0 + assert_equal(ma['a'], np.array([1.0, 4.0])) + # Issue 2403 + mdtype = np.dtype([('a', bool), ('b', bool)]) + # soft mask + control = np.array([(False, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a[0]['a'] = 2 + assert_equal(a.mask, control) + # hard mask + control = np.array([(True, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a[0]['a'] = 2 + assert_equal(a.mask, control) + + def test_element_len(self): + # check that len() works for mvoid (Github issue #576) + for rec in self.data['base']: + assert_equal(len(rec), len(self.data['ddtype'])) + + +#------------------------------------------------------------------------------ +class TestMaskedView(TestCase): + # + def setUp(self): + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + self.data = (data, a, controlmask) + + def test_view_to_nothing(self): + (data, a, controlmask) = self.data + test = a.view() + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test._data, a._data) + assert_equal(test._mask, a._mask) + + def test_view_to_type(self): + (data, a, controlmask) = self.data + test = a.view(np.ndarray) + self.assertTrue(not isinstance(test, MaskedArray)) + assert_equal(test, a._data) + assert_equal_records(test, data.view(a.dtype).squeeze()) + + def test_view_to_simple_dtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view(float) + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test,
data.ravel()) + assert_equal(test.mask, controlmask) + + def test_view_to_flexible_dtype(self): + (data, a, controlmask) = self.data + # + test = a.view([('A', float), ('B', float)]) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a']) + assert_equal(test['B'], a['b']) + # + test = a[0].view([('A', float), ('B', float)]) + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][0]) + assert_equal(test['B'], a['b'][0]) + # + test = a[-1].view([('A', float), ('B', float)]) + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][-1]) + assert_equal(test['B'], a['b'][-1]) + + def test_view_to_subdtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view((float, 2)) + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + # View on 1 masked element + test = a[0].view((float, 2)) + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test, data[0]) + assert_equal(test.mask, (1, 0)) + # View on 1 unmasked element + test = a[-1].view((float, 2)) + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test, data[-1]) + + def test_view_to_dtype_and_type(self): + (data, a, controlmask) = self.data + # + test = a.view((float, 2), np.matrix) + assert_equal(test, data) + self.assertTrue(isinstance(test, np.matrix)) + self.assertTrue(not isinstance(test, MaskedArray)) + + +def test_masked_array(): + a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) + assert_equal(np.argwhere(a), [[1], [3]]) + +def test_append_masked_array(): + a = np.ma.masked_equal([1,2,3], value=2) + b = np.ma.masked_equal([4,3,2], value=2) + + result = np.ma.append(a, b) + expected_data = [1, 2, 3, 4, 3, 2] + expected_mask = [False, True, False, False, False, True] + assert_array_equal(result.data, expected_data) + assert_array_equal(result.mask, expected_mask) + + a = np.ma.masked_all((2,2)) + b = np.ma.ones((3,1)) + + result = np.ma.append(a, b) + expected_data = [1] * 3 + expected_mask = [True] * 4 + [False] * 3 + assert_array_equal(result.data[-3:], expected_data) + assert_array_equal(result.mask, expected_mask) + + result = np.ma.append(a, b, axis=None) + assert_array_equal(result.data[-3:], expected_data) + assert_array_equal(result.mask, expected_mask) + + +def test_append_masked_array_along_axis(): + a = np.ma.masked_equal([1,2,3], value=2) + b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + + # When `axis` is specified, `values` must have the correct shape. + assert_raises(ValueError, np.ma.append, a, b, axis=0) + + result = np.ma.append(a[np.newaxis,:], b, axis=0) + expected = np.ma.arange(1, 10) + expected[[1, 6]] = np.ma.masked + expected = expected.reshape((3,3)) + assert_array_equal(result.data, expected.data) + assert_array_equal(result.mask, expected.mask) + + +############################################################################### +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_extras.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_extras.py new file mode 100644 index 0000000000000..6ce1dc346a1d1 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_extras.py @@ -0,0 +1,947 @@ +# pylint: disable-msg=W0611, W0612, W0511 +"""Test suite for MaskedArray.
+Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +from __future__ import division, absolute_import, print_function + +__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" +__version__ = '1.0' +__revision__ = "$Revision: 3473 $" +__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' + +import numpy as np +from numpy.testing import TestCase, run_module_suite +from numpy.ma.testutils import (rand, assert_, assert_array_equal, + assert_equal, assert_almost_equal) +from numpy.ma.core import (array, arange, masked, MaskedArray, masked_array, + getmaskarray, shape, nomask, ones, zeros, count) +from numpy.ma.extras import ( + atleast_2d, mr_, dot, polyfit, + cov, corrcoef, median, average, + unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, ediff1d, + apply_over_axes, apply_along_axis, + compress_rowcols, mask_rowcols, + clump_masked, clump_unmasked, + flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, + masked_all, masked_all_like) + + +class TestGeneric(TestCase): + # + def test_masked_all(self): + # Tests masked_all + # Standard dtype + test = masked_all((2,), dtype=float) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + test = masked_all((2,), dtype=dt) + control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + test = masked_all((2, 2), dtype=dt) + control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], + mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], + dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((1, 1), dtype=dt) + control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) + assert_equal(test, control) + + def test_masked_all_like(self): + # Tests masked_all + # Standard dtype + base = array([1, 2], dtype=float) + test = masked_all_like(base) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + test = masked_all_like(base) + control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + test = masked_all_like(control) + assert_equal(test, control) + + def test_clump_masked(self): + # Test clump_masked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + # + test = clump_masked(a) + control = [slice(0, 3), slice(6, 7), slice(8, 10)] + assert_equal(test, control) + + def test_clump_unmasked(self): + # Test clump_unmasked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + test = clump_unmasked(a) + control = [slice(3, 6), slice(7, 8), ] + assert_equal(test, control) + + def test_flatnotmasked_contiguous(self): 
+ # Test flatnotmasked_contiguous + a = arange(10) + # No mask + test = flatnotmasked_contiguous(a) + assert_equal(test, slice(0, a.size)) + # Some mask + a[(a < 3) | (a > 8) | (a == 5)] = masked + test = flatnotmasked_contiguous(a) + assert_equal(test, [slice(3, 5), slice(6, 9)]) + # + a[:] = masked + test = flatnotmasked_contiguous(a) + assert_equal(test, None) + + +class TestAverage(TestCase): + # Several tests of average. Why so many ? Good point... + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + assert_equal(2.0, average(ott, axis=0)) + assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) + assert_equal(2.0, result) + self.assertTrue(wts == 4.0) + ott[:] = masked + assert_equal(average(ott, axis=0).mask, [True]) + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_equal(average(ott, axis=0), [2.0, 0.0]) + assert_equal(average(ott, axis=1).mask[0], [True]) + assert_equal([2., 0.], average(ott, axis=0)) + result, wts = average(ott, axis=0, returned=1) + assert_equal(wts, [1., 0.]) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6, dtype=np.float_) + assert_equal(average(x, axis=0), 2.5) + assert_equal(average(x, axis=0, weights=w1), 2.5) + y = array([arange(6, dtype=np.float_), 2.0 * arange(6)]) + assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) + assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + assert_equal(average(y, None, weights=w2), 20. / 6.) + assert_equal(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.]) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_equal(average(masked_array(x, m1), axis=0), 2.5) + assert_equal(average(masked_array(x, m2), axis=0), 2.5) + assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_equal(average(z, None), 20. / 6.) + assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + assert_equal(average(z, axis=1), [2.5, 5.0]) + assert_equal(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0]) + + def test_testAverage3(self): + # Yet more tests of average! + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=1) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) + assert_equal(shape(w2), shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False], [True, False]]) + a2da = average(a2d, axis=0) + assert_equal(a2da, [0.5, 3.0]) + a2dma = average(a2dm, axis=0) + assert_equal(a2dma, [1.0, 3.0]) + a2dma = average(a2dm, axis=None) + assert_equal(a2dma, 7. / 3.) 
+ a2dma = average(a2dm, axis=1) + assert_equal(a2dma, [1.5, 4.0]) + + def test_onintegers_with_mask(self): + # Test average on integers with mask + a = average(array([1, 2])) + assert_equal(a, 1.5) + a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) + assert_equal(a, 1.5) + + def test_complex(self): + # Test with complex data. + # (Regression test for https://github.com/numpy/numpy/issues/2684) + mask = np.array([[0, 0, 0, 1, 0], + [0, 1, 0, 0, 0]], dtype=bool) + a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], + [9j, 0+1j, 2+3j, 4+5j, 7+7j]], + mask=mask) + + av = average(a) + expected = np.average(a.compressed()) + assert_almost_equal(av.real, expected.real) + assert_almost_equal(av.imag, expected.imag) + + av0 = average(a, axis=0) + expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j + assert_almost_equal(av0.real, expected0.real) + assert_almost_equal(av0.imag, expected0.imag) + + av1 = average(a, axis=1) + expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j + assert_almost_equal(av1.real, expected1.real) + assert_almost_equal(av1.imag, expected1.imag) + + # Test with the 'weights' argument. + wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], + [1.0, 1.0, 1.0, 1.0, 1.0]]) + wav = average(a, weights=wts) + expected = np.average(a.compressed(), weights=wts[~mask]) + assert_almost_equal(wav.real, expected.real) + assert_almost_equal(wav.imag, expected.imag) + + wav0 = average(a, weights=wts, axis=0) + expected0 = (average(a.real, weights=wts, axis=0) + + average(a.imag, weights=wts, axis=0)*1j) + assert_almost_equal(wav0.real, expected0.real) + assert_almost_equal(wav0.imag, expected0.imag) + + wav1 = average(a, weights=wts, axis=1) + expected1 = (average(a.real, weights=wts, axis=1) + + average(a.imag, weights=wts, axis=1)*1j) + assert_almost_equal(wav1.real, expected1.real) + assert_almost_equal(wav1.imag, expected1.imag) + + +class TestConcatenator(TestCase): + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_1d(self): + # Tests mr_ on 1D arrays. + assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) + b = ones(5) + m = [1, 0, 0, 0, 0] + d = masked_array(b, mask=m) + c = mr_[d, 0, 0, d] + self.assertTrue(isinstance(c, MaskedArray)) + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + assert_array_equal(c.mask, mr_[m, 0, 0, m]) + + def test_2d(self): + # Tests mr_ on 2D arrays. + a_1 = rand(5, 5) + a_2 = rand(5, 5) + m_1 = np.round_(rand(5, 5), 0) + m_2 = np.round_(rand(5, 5), 0) + b_1 = masked_array(a_1, mask=m_1) + b_2 = masked_array(a_2, mask=m_2) + # append columns + d = mr_['1', b_1, b_2] + self.assertTrue(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b_1) + assert_array_equal(d[:, 5:], b_2) + assert_array_equal(d.mask, np.r_['1', m_1, m_2]) + d = mr_[b_1, b_2] + self.assertTrue(d.shape == (10, 5)) + assert_array_equal(d[:5,:], b_1) + assert_array_equal(d[5:,:], b_2) + assert_array_equal(d.mask, np.r_[m_1, m_2]) + + +class TestNotMasked(TestCase): + # Tests notmasked_edges and notmasked_contiguous. 
+
+    def test_edges(self):
+        # Tests notmasked_edges
+        data = masked_array(np.arange(25).reshape(5, 5),
+                            mask=[[0, 0, 1, 0, 0],
+                                  [0, 0, 0, 1, 1],
+                                  [1, 1, 0, 0, 0],
+                                  [0, 0, 0, 0, 0],
+                                  [1, 1, 1, 0, 0]],)
+        test = notmasked_edges(data, None)
+        assert_equal(test, [0, 24])
+        test = notmasked_edges(data, 0)
+        assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
+        assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)])
+        test = notmasked_edges(data, 1)
+        assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)])
+        assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)])
+        #
+        test = notmasked_edges(data.data, None)
+        assert_equal(test, [0, 24])
+        test = notmasked_edges(data.data, 0)
+        assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)])
+        assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)])
+        test = notmasked_edges(data.data, -1)
+        assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)])
+        assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)])
+        #
+        data[-2] = masked
+        test = notmasked_edges(data, 0)
+        assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
+        assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)])
+        test = notmasked_edges(data, -1)
+        assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)])
+        assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)])
+
+    def test_contiguous(self):
+        # Tests notmasked_contiguous
+        a = masked_array(np.arange(24).reshape(3, 8),
+                         mask=[[0, 0, 0, 0, 1, 1, 1, 1],
+                               [1, 1, 1, 1, 1, 1, 1, 1],
+                               [0, 0, 0, 0, 0, 0, 1, 0], ])
+        tmp = notmasked_contiguous(a, None)
+        assert_equal(tmp[-1], slice(23, 24, None))
+        assert_equal(tmp[-2], slice(16, 22, None))
+        assert_equal(tmp[-3], slice(0, 4, None))
+        #
+        tmp = notmasked_contiguous(a, 0)
+        self.assertTrue(len(tmp[-1]) == 1)
+        self.assertTrue(tmp[-2] is None)
+        assert_equal(tmp[-3], tmp[-1])
+        self.assertTrue(len(tmp[0]) == 2)
+        #
+        tmp = notmasked_contiguous(a, 1)
+        assert_equal(tmp[0][-1], slice(0, 4, None))
+        self.assertTrue(tmp[1] is None)
+        assert_equal(tmp[2][-1], slice(7, 8, None))
+        assert_equal(tmp[2][-2], slice(0, 6, None))
+
+
+class Test2DFunctions(TestCase):
+    # Tests 2D functions
+    def test_compress2d(self):
+        # Tests compress2d
+        x = array(np.arange(9).reshape(3, 3),
+                  mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+        assert_equal(compress_rowcols(x), [[4, 5], [7, 8]])
+        assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]])
+        assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]])
+        x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+        assert_equal(compress_rowcols(x), [[0, 2], [6, 8]])
+        assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]])
+        assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]])
+        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
+        assert_equal(compress_rowcols(x), [[8]])
+        assert_equal(compress_rowcols(x, 0), [[6, 7, 8]])
+        assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]])
+        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+        assert_equal(compress_rowcols(x).size, 0)
+        assert_equal(compress_rowcols(x, 0).size, 0)
+        assert_equal(compress_rowcols(x, 1).size, 0)
+
+    def test_mask_rowcols(self):
+        # Tests mask_rowcols.
+ x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, + [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + self.assertTrue(mask_rowcols(x).all() is masked) + self.assertTrue(mask_rowcols(x, 0).all() is masked) + self.assertTrue(mask_rowcols(x, 1).all() is masked) + self.assertTrue(mask_rowcols(x).mask.all()) + self.assertTrue(mask_rowcols(x, 0).mask.all()) + self.assertTrue(mask_rowcols(x, 1).mask.all()) + + def test_dot(self): + # Tests dot product + n = np.arange(1, 7) + # + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + assert_equal(c, dot(a, b)) + c = dot(b, a, False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) + # + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + c = dot(b, a, False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[1, 0], [1, 1]]) + c = dot(a, b, False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, True) + assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 
1]]) + c = dot(b, a, False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + + +class TestApplyAlongAxis(TestCase): + # Tests 2D functions + def test_3d(self): + a = arange(12.).reshape(2, 2, 3) + + def myfunc(b): + return b[1] + + xa = apply_along_axis(myfunc, 2, a) + assert_equal(xa, [[1, 4], [7, 10]]) + + # Tests kwargs functions + def test_3d_kwargs(self): + a = arange(12).reshape(2, 2, 3) + + def myfunc(b, offset=0): + return b[1+offset] + + xa = apply_along_axis(myfunc, 2, a, offset=1) + assert_equal(xa, [[2, 5], [8, 11]]) + + +class TestApplyOverAxes(TestCase): + # Tests apply_over_axes + def test_basic(self): + a = arange(24).reshape(2, 3, 4) + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[60], [92], [124]]]) + assert_equal(test, ctrl) + a[(a % 2).astype(np.bool)] = masked + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[28], [44], [60]]]) + assert_equal(test, ctrl) + + +class TestMedian(TestCase): + + def test_2d(self): + # Tests median w/ 2D + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) + x[:10] = x[-10:] = masked + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] + idx = np.arange(len(x)) + for i in range(1, p): + np.random.shuffle(idx) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) + assert_equal(median(z), 0) + assert_equal(median(z, axis=0), np.zeros(p)) + assert_equal(median(z.T, axis=1), np.zeros(p)) + + def test_2d_waxis(self): + # Tests median w/ 2D arrays and different axis. + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x), 14.5) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) + + def test_3d(self): + # Tests median w/ 3D + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x.shape = (4, 3, 2) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) + + def test_neg_axis(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x, axis=-1), median(x, axis=1)) + + def test_out(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(10)) + r = median(x, axis=1, out=out) + assert_equal(r, out) + assert_(type(r) == MaskedArray) + + +class TestCov(TestCase): + + def setUp(self): + self.data = array(np.random.rand(12)) + + def test_1d_wo_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_2d_wo_missing(self): + # Test cov on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_1d_w_missing(self): + # Test cov 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.cov(nx), cov(x)) + assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) + 
assert_almost_equal(np.cov(nx, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + # + try: + cov(x, allow_masked=False) + except ValueError: + pass + # + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), + cov(x, x[::-1], rowvar=False)) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), + cov(x, x[::-1], rowvar=False, bias=True)) + + def test_2d_w_missing(self): + # Test cov on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + valid = np.logical_not(getmaskarray(x)).astype(int) + frac = np.dot(valid, valid.T) + xf = (x - x.mean(1)[:, None]).filled(0) + assert_almost_equal(cov(x), + np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) + assert_almost_equal(cov(x, bias=True), + np.cov(xf, bias=True) * x.shape[1] / frac) + frac = np.dot(valid.T, valid) + xf = (x - x.mean(0)).filled(0) + assert_almost_equal(cov(x, rowvar=False), + (np.cov(xf, rowvar=False) * + (x.shape[0] - 1) / (frac - 1.))) + assert_almost_equal(cov(x, rowvar=False, bias=True), + (np.cov(xf, rowvar=False, bias=True) * + x.shape[0] / frac)) + + +class TestCorrcoef(TestCase): + + def setUp(self): + self.data = array(np.random.rand(12)) + + def test_ddof(self): + # Test ddof keyword + x = self.data + assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) + + def test_1d_wo_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_2d_wo_missing(self): + # Test corrcoef on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_1d_w_missing(self): + # Test corrcoef 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.corrcoef(nx), corrcoef(x)) + assert_almost_equal(np.corrcoef(nx, rowvar=False), + corrcoef(x, rowvar=False)) + assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + # + try: + corrcoef(x, allow_masked=False) + except ValueError: + pass + # + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), + corrcoef(x, x[::-1], rowvar=False)) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False, bias=True), + corrcoef(x, x[::-1], rowvar=False, bias=True)) + + def test_2d_w_missing(self): + # Test corrcoef on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + + test = corrcoef(x) + control = np.corrcoef(x) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) + + +class TestPolynomial(TestCase): + # + def test_polyfit(self): + # Tests polyfit + # On ndarrays + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0, 0] = y[-1, -1] = masked + # + (C, R, K, S, D) = polyfit(x, 
y[:, 0], 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3,
+                                     full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+        #
+        (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+        #
+        (C, R, K, S, D) = polyfit(x, y, 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+        #
+        w = np.random.rand(10) + 1
+        wo = w.copy()
+        xs = x[1:-1]
+        ys = y[1:-1]
+        ws = w[1:-1]
+        (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w)
+        (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws)
+        assert_equal(w, wo)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+
+
+class TestArraySetOps(TestCase):
+
+    def test_unique_onlist(self):
+        # Test unique on list
+        data = [1, 1, 1, 2, 2, 3]
+        test = unique(data, return_index=True, return_inverse=True)
+        self.assertTrue(isinstance(test[0], MaskedArray))
+        assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
+        assert_equal(test[1], [0, 3, 5])
+        assert_equal(test[2], [0, 0, 0, 1, 1, 2])
+
+    def test_unique_onmaskedarray(self):
+        # Test unique on masked data w/use_mask=True
+        data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+        #
+        data.fill_value = 3
+        data = masked_array(data=[1, 1, 1, 2, 2, 3],
+                            mask=[0, 0, 1, 0, 1, 0], fill_value=3)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+
+    def test_unique_allmasked(self):
+        # Test all masked
+        data = masked_array([1, 1, 1], mask=True)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, ], mask=[True]))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0, 0, 0])
+        #
+        # Test masked
+        data = masked
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array(masked))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0])
+
+    def test_ediff1d(self):
+        # Tests ediff1d
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
+        test = ediff1d(x)
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_tobegin(self):
+        # Test ediff1d w/ to_begin
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_begin=masked)
+        control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_begin=[1, 2, 3])
+        control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_toend(self):
+        # Test ediff1d w/ to_end
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_end=masked)
+        control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
+        assert_equal(test, control)
+
assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3]) + control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin_toend(self): + # Test ediff1d w/ to_begin and to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) + control = array([0, 1, 1, 1, 4, 1, 2, 3], + mask=[1, 1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_ediff1d_ndarray(self): + # Test ediff1d w/ a ndarray + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + self.assertTrue(isinstance(test, MaskedArray)) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_intersect1d(self): + # Test intersect1d + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + def test_setxor1d(self): + # Test setxor1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 2, 3]) + b = array([6, 5, 4]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([], [])) + + def test_in1d(self): + # Test in1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, False, True, True]) + # + assert_array_equal([], in1d([], [])) + + def test_in1d_invert(self): + # Test in1d's invert parameter + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + assert_array_equal([], in1d([], [], invert=True)) + + def test_union1d(self): + # Test union1d + a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + 
test = union1d(a, b)
+        control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
+        assert_equal(test, control)
+        #
+        assert_array_equal([], union1d([], []))
+
+    def test_setdiff1d(self):
+        # Test setdiff1d
+        a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
+        b = array([2, 4, 3, 3, 2, 1, 5])
+        test = setdiff1d(a, b)
+        assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
+        #
+        a = arange(10)
+        b = arange(8)
+        assert_equal(setdiff1d(a, b), array([8, 9]))
+
+    def test_setdiff1d_char_array(self):
+        # Test setdiff1d_charray
+        a = np.array(['a', 'b', 'c'])
+        b = np.array(['a', 'b', 's'])
+        assert_array_equal(setdiff1d(a, b), np.array(['c']))
+
+
+class TestShapeBase(TestCase):
+    #
+    def test_atleast2d(self):
+        # Test atleast_2d
+        a = masked_array([0, 1, 2], mask=[0, 1, 0])
+        b = atleast_2d(a)
+        assert_equal(b.shape, (1, 3))
+        assert_equal(b.mask.shape, b.data.shape)
+        assert_equal(a.shape, (3,))
+        assert_equal(a.mask.shape, a.data.shape)
+
+
+###############################################################################
+#------------------------------------------------------------------------------
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_mrecords.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_mrecords.py
new file mode 100644
index 0000000000000..54945e8f007f8
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_mrecords.py
@@ -0,0 +1,521 @@
+# pylint: disable-msg=W0611, W0612, W0511,R0201
+"""Test suite for mrecords.
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+import pickle
+
+import numpy as np
+import numpy.ma as ma
+from numpy import recarray
+from numpy.core.records import (fromrecords as recfromrecords,
+                                fromarrays as recfromarrays)
+
+from numpy.compat import asbytes, asbytes_nested
+from numpy.ma.testutils import *
+from numpy.ma import masked, nomask
+from numpy.ma.mrecords import (MaskedRecords, mrecarray, fromarrays,
+                               fromtextfile, fromrecords, addfield)
+
+
+__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
+__revision__ = "$Revision: 3473 $"
+__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
+
+
+#..............................................................................
+class TestMRecords(TestCase):
+    # Base test class for MaskedRecords.
+    def __init__(self, *args, **kwds):
+        TestCase.__init__(self, *args, **kwds)
+        self.setup()
+
+    def setup(self):
+        # Generic setup
+        ilist = [1, 2, 3, 4, 5]
+        flist = [1.1, 2.2, 3.3, 4.4, 5.5]
+        slist = asbytes_nested(['one', 'two', 'three', 'four', 'five'])
+        ddtype = [('a', int), ('b', float), ('c', '|S8')]
+        mask = [0, 1, 0, 0, 1]
+        self.base = ma.array(list(zip(ilist, flist, slist)),
+                             mask=mask, dtype=ddtype)
+
+    def test_byview(self):
+        # Test creation by view
+        base = self.base
+        mbase = base.view(mrecarray)
+        assert_equal(mbase.recordmask, base.recordmask)
+        assert_equal_records(mbase._mask, base._mask)
+        assert_(isinstance(mbase._data, recarray))
+        assert_equal_records(mbase._data, base._data.view(recarray))
+        for field in ('a', 'b', 'c'):
+            assert_equal(base[field], mbase[field])
+        assert_equal_records(mbase.view(mrecarray), mbase)
+
+    def test_get(self):
+        # Tests field retrieval
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        # As fields..........
+        for field in ('a', 'b', 'c'):
+            assert_equal(getattr(mbase, field), mbase[field])
+            assert_equal(base[field], mbase[field])
+        # as elements .......
+        mbase_first = mbase[0]
+        assert_(isinstance(mbase_first, mrecarray))
+        assert_equal(mbase_first.dtype, mbase.dtype)
+        assert_equal(mbase_first.tolist(), (1, 1.1, asbytes('one')))
+        # Used to be mask, now it's recordmask
+        assert_equal(mbase_first.recordmask, nomask)
+        assert_equal(mbase_first._mask.item(), (False, False, False))
+        assert_equal(mbase_first['a'], mbase['a'][0])
+        mbase_last = mbase[-1]
+        assert_(isinstance(mbase_last, mrecarray))
+        assert_equal(mbase_last.dtype, mbase.dtype)
+        assert_equal(mbase_last.tolist(), (None, None, None))
+        # Used to be mask, now it's recordmask
+        assert_equal(mbase_last.recordmask, True)
+        assert_equal(mbase_last._mask.item(), (True, True, True))
+        assert_equal(mbase_last['a'], mbase['a'][-1])
+        assert_((mbase_last['a'] is masked))
+        # as slice ..........
+        mbase_sl = mbase[:2]
+        assert_(isinstance(mbase_sl, mrecarray))
+        assert_equal(mbase_sl.dtype, mbase.dtype)
+        # Used to be mask, now it's recordmask
+        assert_equal(mbase_sl.recordmask, [0, 1])
+        assert_equal_records(mbase_sl.mask,
+                             np.array([(False, False, False),
+                                       (True, True, True)],
+                                      dtype=mbase._mask.dtype))
+        assert_equal_records(mbase_sl, base[:2].view(mrecarray))
+        for field in ('a', 'b', 'c'):
+            assert_equal(getattr(mbase_sl, field), base[:2][field])
+
+    def test_set_fields(self):
+        # Tests setting fields.
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        mbase = mbase.copy()
+        mbase.fill_value = (999999, 1e20, 'N/A')
+        # Change the data, the mask should be conserved
+        mbase.a._data[:] = 5
+        assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
+        assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
+        # Change the elements, and the mask will follow
+        mbase.a = 1
+        assert_equal(mbase['a']._data, [1]*5)
+        assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
+        # Used to be _mask, now it's recordmask
+        assert_equal(mbase.recordmask, [False]*5)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(0, 0, 0),
+                               (0, 1, 1),
+                               (0, 0, 0),
+                               (0, 0, 0),
+                               (0, 1, 1)],
+                              dtype=bool))
+        # Set a field to mask ........................
+        mbase.c = masked
+        # Used to be mask, and now it's still mask!
+        assert_equal(mbase.c.mask, [1]*5)
+        assert_equal(mbase.c.recordmask, [1]*5)
+        assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
+        assert_equal(ma.getdata(mbase['c']), [asbytes('N/A')]*5)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(0, 0, 1),
+                               (0, 1, 1),
+                               (0, 0, 1),
+                               (0, 0, 1),
+                               (0, 1, 1)],
+                              dtype=bool))
+        # Set fields by slices .......................
+        mbase = base.view(mrecarray).copy()
+        mbase.a[3:] = 5
+        assert_equal(mbase.a, [1, 2, 3, 5, 5])
+        assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
+        mbase.b[3:] = masked
+        assert_equal(mbase.b, base['b'])
+        assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
+        # Set fields globally..........................
+        ndtype = [('alpha', '|S1'), ('num', int)]
+        data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
+        rdata = data.view(MaskedRecords)
+        val = ma.array([10, 20, 30], mask=[1, 0, 0])
+        #
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            rdata['num'] = val
+        assert_equal(rdata.num, val)
+        assert_equal(rdata.num.mask, [1, 0, 0])
+
+    def test_set_fields_mask(self):
+        # Tests setting the mask of a field.
+        base = self.base.copy()
+        # This one already has a mask....
+        mbase = base.view(mrecarray)
+        mbase['a'][-2] = masked
+        assert_equal(mbase.a, [1, 2, 3, 4, 5])
+        assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
+        # This one has no mask yet
+        mbase = fromarrays([np.arange(5), np.random.rand(5)],
+                           dtype=[('a', int), ('b', float)])
+        mbase['a'][-2] = masked
+        assert_equal(mbase.a, [0, 1, 2, 3, 4])
+        assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])
+
+    def test_set_mask(self):
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        # Set the mask to True .......................
+        mbase.mask = masked
+        assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
+        assert_equal(mbase['a']._mask, mbase['b']._mask)
+        assert_equal(mbase['a']._mask, mbase['c']._mask)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(1, 1, 1)]*5, dtype=bool))
+        # Delete the mask ............................
+        mbase.mask = nomask
+        assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(0, 0, 0)]*5, dtype=bool))
+
+    def test_set_mask_fromarray(self):
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        # Sets the mask w/ an array
+        mbase.mask = [1, 0, 0, 0, 1]
+        assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])
+        assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])
+        assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])
+        # Yay, once more!
+        mbase.mask = [0, 0, 0, 0, 1]
+        assert_equal(mbase.a.mask, [0, 0, 0, 0, 1])
+        assert_equal(mbase.b.mask, [0, 0, 0, 0, 1])
+        assert_equal(mbase.c.mask, [0, 0, 0, 0, 1])
+
+    def test_set_mask_fromfields(self):
+        mbase = self.base.copy().view(mrecarray)
+        #
+        nmask = np.array(
+            [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)],
+            dtype=[('a', bool), ('b', bool), ('c', bool)])
+        mbase.mask = nmask
+        assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
+        assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
+        assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
+        # Reinitialize and redo
+        mbase.mask = False
+        mbase.fieldmask = nmask
+        assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
+        assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
+        assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
+
+    def test_set_elements(self):
+        base = self.base.copy()
+        # Set an element to mask .....................
+        mbase = base.view(mrecarray).copy()
+        mbase[-2] = masked
+        assert_equal(
+            mbase._mask.tolist(),
+            np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)],
+                     dtype=bool))
+        # Used to be mask, now it's recordmask!
+        assert_equal(mbase.recordmask, [0, 1, 0, 1, 1])
+        # Set slices .................................
+        mbase = base.view(mrecarray).copy()
+        mbase[:2] = (5, 5, 5)
+        assert_equal(mbase.a._data, [5, 5, 3, 4, 5])
+        assert_equal(mbase.a._mask, [0, 0, 0, 0, 1])
+        assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5])
+        assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
+        assert_equal(mbase.c._data,
+                     asbytes_nested(['5', '5', 'three', 'four', 'five']))
+        assert_equal(mbase.c._mask, [0, 0, 0, 0, 1])
+        #
+        mbase = base.view(mrecarray).copy()
+        mbase[:2] = masked
+        assert_equal(mbase.a._data, [1, 2, 3, 4, 5])
+        assert_equal(mbase.a._mask, [1, 1, 0, 0, 1])
+        assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5])
+        assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
+        assert_equal(mbase.c._data,
+                     asbytes_nested(['one', 'two', 'three', 'four', 'five']))
+        assert_equal(mbase.c._mask, [1, 1, 0, 0, 1])
+
+    def test_setslices_hardmask(self):
+        # Tests setting slices w/ hardmask.
+ base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + try: + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) + assert_equal(mbase.c._data, + asbytes_nested(['one', 'two', 'three', '5', 'five'])) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) + assert_equal(mbase.b._mask, mbase.a._mask) + assert_equal(mbase.b._mask, mbase.c._mask) + except NotImplementedError: + # OK, not implemented yet... + pass + except AssertionError: + raise + else: + raise Exception("Flexible hard masks should be supported !") + # Not using a tuple should crash + try: + mbase[-2:] = 3 + except (NotImplementedError, TypeError): + pass + else: + raise TypeError("Should have expected a readable buffer object!") + + def test_hardmask(self): + # Test hardmask + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + self.assertTrue(mbase._hardmask) + mbase.mask = nomask + assert_equal_records(mbase._mask, base._mask) + mbase.soften_mask() + self.assertTrue(not mbase._hardmask) + mbase.mask = nomask + # So, the mask of a field is no longer set to nomask... + assert_equal_records(mbase._mask, + ma.make_mask_none(base.shape, base.dtype)) + self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask) + assert_equal(mbase['a']._mask, mbase['b']._mask) + + def test_pickling(self): + # Test pickling + base = self.base.copy() + mrec = base.view(mrecarray) + _ = pickle.dumps(mrec) + mrec_ = pickle.loads(_) + assert_equal(mrec_.dtype, mrec.dtype) + assert_equal_records(mrec_._data, mrec._data) + assert_equal(mrec_._mask, mrec._mask) + assert_equal_records(mrec_._mask, mrec._mask) + + def test_filled(self): + # Test filling the array + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + mrecfilled = mrec.filled() + assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) + assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), + dtype=float)) + assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), + dtype='|S8')) + + def test_tolist(self): + # Test tolist. 
+        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+        _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
+        ddtype = [('a', int), ('b', float), ('c', '|S8')]
+        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+                          fill_value=(99999, 99999., 'N/A'))
+        #
+        assert_equal(mrec.tolist(),
+                     [(1, 1.1, None), (2, 2.2, asbytes('two')),
+                      (None, None, asbytes('three'))])
+
+    def test_withnames(self):
+        # Test the creation w/ format and names
+        x = mrecarray(1, formats=float, names='base')
+        x[0]['base'] = 10
+        assert_equal(x['base'][0], 10)
+
+    def test_exotic_formats(self):
+        # Test that 'exotic' formats are processed properly
+        easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
+        easy[0] = masked
+        assert_equal(easy.filled(1).item(), (1, asbytes('1'), 1.))
+        #
+        solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
+        if len(s) > 1:
+            self.assertTrue(eq(np.concatenate((x, y), 1),
+                               concatenate((xm, ym), 1)))
+            self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
+            self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
+            self.assertTrue(eq(np.product(x, 1), product(x, 1)))
+
+    def test_testCI(self):
+        # Test of conversions and indexing
+        x1 = np.array([1, 2, 4, 3])
+        x2 = array(x1, mask=[1, 0, 0, 0])
+        x3 = array(x1, mask=[0, 1, 0, 1])
+        x4 = array(x1)
+        # test conversion to strings
+        junk, garbage = str(x2), repr(x2)
+        assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
+        # tests of indexing
+        assert_(type(x2[1]) is type(x1[1]))
+        assert_(x1[1] == x2[1])
+        assert_(x2[0] is masked)
+        assert_(eq(x1[2], x2[2]))
+        assert_(eq(x1[2:5], x2[2:5]))
+        assert_(eq(x1[:], x2[:]))
+        assert_(eq(x1[1:], x3[1:]))
+        x1[2] = 9
+        x2[2] = 9
+        assert_(eq(x1, x2))
+        x1[1:3] = 99
+        x2[1:3] = 99
+        assert_(eq(x1, x2))
+        x2[1] = masked
+        assert_(eq(x1, x2))
+        x2[1:3] = masked
+        assert_(eq(x1, x2))
+        x2[:] = x1
+        x2[1] = masked
+        assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
+        x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+        assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
+        x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+        assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
+        assert_(allequal(x4, array([1, 2, 3, 4])))
+        x1 = np.arange(5) * 1.0
+        x2 = masked_values(x1, 3.0)
+        assert_(eq(x1, x2))
+        assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
+        assert_(eq(3.0, x2.fill_value))
+        x1 = array([1, 'hello', 2, 3], object)
+        x2 = np.array([1, 'hello', 2, 3], object)
+        s1 = x1[1]
+        s2 = x2[1]
+        self.assertEqual(type(s2), str)
+        self.assertEqual(type(s1), str)
+        self.assertEqual(s1, s2)
+        assert_(x1[1:1].shape == (0,))
+
+    def test_testCopySize(self):
+        # Tests of some subtle points of copying and sizing.
+ n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + self.assertTrue(m is m2) + m3 = make_mask(m, copy=1) + self.assertTrue(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + self.assertTrue(y1._data is not x1) + self.assertTrue(allequal(x1, y1._data)) + self.assertTrue(y1.mask is m) + + y1a = array(y1, copy=0) + self.assertTrue(y1a.mask is y1.mask) + + y2 = array(x1, mask=m, copy=0) + self.assertTrue(y2.mask is m) + self.assertTrue(y2[2] is masked) + y2[2] = 9 + self.assertTrue(y2[2] is not masked) + self.assertTrue(y2.mask is not m) + self.assertTrue(allequal(y2.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + self.assertTrue(eq(concatenate([x4, x4]), y4)) + self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) + y6 = repeat(x4, 2, axis=0) + self.assertTrue(eq(y5, y6)) + + def test_testPut(self): + # Test of put + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + self.assertTrue(x[3] is masked) + self.assertTrue(x[4] is masked) + x[[1, 4]] = [10, 40] + self.assertTrue(x.mask is not m) + self.assertTrue(x[3] is masked) + self.assertTrue(x[4] is not masked) + self.assertTrue(eq(x, [0, 10, 2, -1, 40])) + + x = array(d, mask=m) + x.put([0, 1, 2], [-1, 100, 200]) + self.assertTrue(eq(x, [-1, 100, 200, 0, 0])) + self.assertTrue(x[3] is masked) + self.assertTrue(x[4] is masked) + + def test_testMaPut(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + i = np.nonzero(m)[0] + put(ym, i, zm) + assert_(all(take(ym, i, axis=0) == zm)) + + def test_testOddFeatures(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_(eq(z.real, x)) + assert_(eq(z.imag, 10 * x)) + assert_(eq((z * conjugate(z)).real, 101 * x * x)) + z.imag[...] 
= 0.0
+
+        x = arange(10)
+        x[3] = masked
+        assert_(str(x[3]) == str(masked))
+        c = x >= 8
+        assert_(count(where(c, masked, masked)) == 0)
+        assert_(shape(where(c, masked, masked)) == c.shape)
+        z = where(c, x, masked)
+        assert_(z.dtype is x.dtype)
+        assert_(z[3] is masked)
+        assert_(z[4] is masked)
+        assert_(z[7] is masked)
+        assert_(z[8] is not masked)
+        assert_(z[9] is not masked)
+        assert_(eq(x, z))
+        z = where(c, masked, x)
+        assert_(z.dtype is x.dtype)
+        assert_(z[3] is masked)
+        assert_(z[4] is not masked)
+        assert_(z[7] is not masked)
+        assert_(z[8] is masked)
+        assert_(z[9] is masked)
+        z = masked_where(c, x)
+        assert_(z.dtype is x.dtype)
+        assert_(z[3] is masked)
+        assert_(z[4] is not masked)
+        assert_(z[7] is not masked)
+        assert_(z[8] is masked)
+        assert_(z[9] is masked)
+        assert_(eq(x, z))
+        x = array([1., 2., 3., 4., 5.])
+        c = array([1, 1, 1, 0, 0])
+        x[2] = masked
+        z = where(c, x, -x)
+        assert_(eq(z, [1., 2., 0., -4., -5]))
+        c[0] = masked
+        z = where(c, x, -x)
+        assert_(eq(z, [1., 2., 0., -4., -5]))
+        assert_(z[0] is masked)
+        assert_(z[1] is not masked)
+        assert_(z[2] is masked)
+        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
+        assert_(eq(masked_where(greater_equal(x, 2), x),
+                   masked_greater_equal(x, 2)))
+        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
+        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
+        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
+        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
+        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
+        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
+        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
+        assert_(eq(masked_inside(array(list(range(5)),
+                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+                   [1, 1, 1, 1, 0]))
+        assert_(eq(masked_outside(array(list(range(5)),
+                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+                   [1, 1, 0, 0, 1]))
+        assert_(eq(masked_equal(array(list(range(5)),
+                                      mask=[1, 0, 0, 0, 0]), 2).mask,
+                   [1, 0, 1, 0, 0]))
+        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
+                                          mask=[1, 0, 0, 0, 0]), 2).mask,
+                   [1, 0, 1, 0, 1]))
+        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
+                   [99, 99, 3, 4, 5]))
+        atest = ones((10, 10, 10), dtype=float32)
+        btest = zeros(atest.shape, MaskType)
+        ctest = masked_where(btest, atest)
+        assert_(eq(atest, ctest))
+        z = choose(c, (-x, x))
+        assert_(eq(z, [1., 2., 0., -4., -5]))
+        assert_(z[0] is masked)
+        assert_(z[1] is not masked)
+        assert_(z[2] is masked)
+        x = arange(6)
+        x[5] = masked
+        y = arange(6) * 10
+        y[2] = masked
+        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
+        cm = c.filled(1)
+        z = where(c, x, y)
+        zm = where(cm, x, y)
+        assert_(eq(z, zm))
+        assert_(getmask(zm) is nomask)
+        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
+        z = where(c, masked, 1)
+        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
+        z = where(c, 1, masked)
+        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
+
+    def test_testMinMax2(self):
+        # Test of minimum, maximum.
+        assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
+        assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
+        x = arange(5)
+        y = arange(5) - 2
+        x[3] = masked
+        y[0] = masked
+        assert_(eq(minimum(x, y), where(less(x, y), x, y)))
+        assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
+        assert_(minimum(x) == 0)
+        assert_(maximum(x) == 4)
+
+    def test_testTakeTransposeInnerOuter(self):
+        # Test of take, transpose, inner, outer products
+        x = arange(24)
+        y = np.arange(24)
+        x[5:6] = masked
+        x = x.reshape(2, 3, 4)
+        y = y.reshape(2, 3, 4)
+        assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
+        assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
+        assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
+                   inner(x, y)))
+        assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
+                   outer(x, y)))
+        y = array(['abc', 1, 'def', 2, 3], object)
+        y[2] = masked
+        t = take(y, [0, 3, 4])
+        assert_(t[0] == 'abc')
+        assert_(t[1] == 2)
+        assert_(t[2] == 3)
+
+    def test_testInplace(self):
+        # Test of inplace operations and rich comparisons
+        y = arange(10)
+
+        x = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        x += 1
+        assert_(eq(x, y + 1))
+        xm += 1
+        assert_(eq(xm, y + 1))
+
+        x = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        x -= 1
+        assert_(eq(x, y - 1))
+        xm -= 1
+        assert_(eq(xm, y - 1))
+
+        x = arange(10) * 1.0
+        xm = arange(10) * 1.0
+        xm[2] = masked
+        x *= 2.0
+        assert_(eq(x, y * 2))
+        xm *= 2.0
+        assert_(eq(xm, y * 2))
+
+        x = arange(10) * 2
+        xm = arange(10) * 2
+        xm[2] = masked
+        x //= 2
+        assert_(eq(x, y))
+        xm //= 2
+        assert_(eq(xm, y))
+
+        x = arange(10) * 1.0
+        xm = arange(10) * 1.0
+        xm[2] = masked
+        x /= 2.0
+        assert_(eq(x, y / 2.0))
+        xm /= arange(10)
+        assert_(eq(xm, ones((10,))))
+
+        x = arange(10).astype(float32)
+        xm = arange(10)
+        xm[2] = masked
+        x += 1.
+        assert_(eq(x, y + 1.))
+
+    def test_testPickle(self):
+        # Test of pickling
+        import pickle
+        x = arange(12)
+        x[4:10:2] = masked
+        x = x.reshape(4, 3)
+        s = pickle.dumps(x)
+        y = pickle.loads(s)
+        assert_(eq(x, y))
+
+    def test_testMasked(self):
+        # Test of masked element
+        xx = arange(6)
+        xx[1] = masked
+        self.assertTrue(str(masked) == '--')
+        self.assertTrue(xx[1] is masked)
+        self.assertEqual(filled(xx[1], 0), 0)
+        # don't know why these should raise an exception...
+        #self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
+        #self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
+        #self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
+        #self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
+
+    def test_testAverage1(self):
+        # Test of average.
+        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+        self.assertTrue(eq(2.0, average(ott, axis=0)))
+        self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
+        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
+        self.assertTrue(eq(2.0, result))
+        self.assertTrue(wts == 4.0)
+        ott[:] = masked
+        self.assertTrue(average(ott, axis=0) is masked)
+        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+        ott = ott.reshape(2, 2)
+        ott[:, 1] = masked
+        self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0]))
+        self.assertTrue(average(ott, axis=1)[0] is masked)
+        self.assertTrue(eq([2., 0.], average(ott, axis=0)))
+        result, wts = average(ott, axis=0, returned=1)
+        self.assertTrue(eq(wts, [1., 0.]))
+
+    def test_testAverage2(self):
+        # More tests of average.
+ w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6) + self.assertTrue(allclose(average(x, axis=0), 2.5)) + self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5)) + y = array([arange(6), 2.0 * arange(6)]) + self.assertTrue(allclose(average(y, None), + np.add.reduce(np.arange(6)) * 3. / 12.)) + self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + self.assertTrue(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.)) + self.assertTrue(allclose(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.])) + self.assertTrue(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5)) + self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5)) + self.assertTrue(average(masked_array(x, m4), axis=0) is masked) + self.assertEqual(average(masked_array(x, m5), axis=0), 0.0) + self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + self.assertTrue(allclose(average(z, None), 20. / 6.)) + self.assertTrue(allclose(average(z, axis=0), + [0., 1., 99., 99., 4.0, 7.5])) + self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0])) + self.assertTrue(allclose(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0])) + + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) + self.assertEqual(shape(r1), shape(w1)) + self.assertEqual(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) + self.assertEqual(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=1) + self.assertEqual(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) + self.assertTrue(shape(w2) == shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[0, 0], [1, 0]]) + a2da = average(a2d, axis=0) + self.assertTrue(eq(a2da, [0.5, 3.0])) + a2dma = average(a2dm, axis=0) + self.assertTrue(eq(a2dma, [1.0, 3.0])) + a2dma = average(a2dm, axis=None) + self.assertTrue(eq(a2dma, 7. 
/ 3.))
+        a2dma = average(a2dm, axis=1)
+        self.assertTrue(eq(a2dma, [1.5, 4.0]))
+
+    def test_testToPython(self):
+        self.assertEqual(1, int(array(1)))
+        self.assertEqual(1.0, float(array(1)))
+        self.assertEqual(1, int(array([[[1]]])))
+        self.assertEqual(1.0, float(array([[1]])))
+        self.assertRaises(TypeError, float, array([1, 1]))
+        self.assertRaises(ValueError, bool, array([0, 1]))
+        self.assertRaises(ValueError, bool, array([0, 0], mask=[0, 1]))
+
+    def test_testScalarArithmetic(self):
+        xm = array(0, mask=1)
+        # TODO FIXME: Find out why the following raises a warning in r8247
+        with np.errstate(divide='ignore'):
+            self.assertTrue((1 / array(0)).mask)
+        self.assertTrue((1 + xm).mask)
+        self.assertTrue((-xm).mask)
+        self.assertTrue((-xm).mask)
+        self.assertTrue(maximum(xm, xm).mask)
+        self.assertTrue(minimum(xm, xm).mask)
+        self.assertTrue(xm.filled().dtype is xm._data.dtype)
+        x = array(0, mask=0)
+        self.assertTrue(x.filled() == x._data)
+        self.assertEqual(str(xm), str(masked_print_option))
+
+    def test_testArrayMethods(self):
+        a = array([1, 3, 2])
+        self.assertTrue(eq(a.any(), a._data.any()))
+        self.assertTrue(eq(a.all(), a._data.all()))
+        self.assertTrue(eq(a.argmax(), a._data.argmax()))
+        self.assertTrue(eq(a.argmin(), a._data.argmin()))
+        self.assertTrue(eq(a.choose(0, 1, 2, 3, 4),
+                           a._data.choose(0, 1, 2, 3, 4)))
+        self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
+        self.assertTrue(eq(a.conj(), a._data.conj()))
+        self.assertTrue(eq(a.conjugate(), a._data.conjugate()))
+        m = array([[1, 2], [3, 4]])
+        self.assertTrue(eq(m.diagonal(), m._data.diagonal()))
+        self.assertTrue(eq(a.sum(), a._data.sum()))
+        self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2])))
+        self.assertTrue(eq(m.transpose(), m._data.transpose()))
+
+    def test_testArrayAttributes(self):
+        a = array([1, 3, 2])
+        self.assertEqual(a.ndim, 1)
+
+    def test_testAPI(self):
+        self.assertFalse([m for m in dir(np.ndarray)
+                          if m not in dir(MaskedArray) and
+                          not m.startswith('_')])
+
+    def test_testSingleElementSubscript(self):
+        a = array([1, 3, 2])
+        b = array([1, 3, 2], mask=[1, 0, 1])
+        self.assertEqual(a[0].shape, ())
+        self.assertEqual(b[0].shape, ())
+        self.assertEqual(b[1].shape, ())
+
+
+class TestUfuncs(TestCase):
+    def setUp(self):
+        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
+                  array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
+
+    def test_testUfuncRegression(self):
+        f_invalid_ignore = [
+            'sqrt', 'arctanh', 'arcsin', 'arccos',
+            'arccosh', 'arctanh', 'log', 'log10', 'divide',
+            'true_divide', 'floor_divide', 'remainder', 'fmod']
+        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
+                  'sin', 'cos', 'tan',
+                  'arcsin', 'arccos', 'arctan',
+                  'sinh', 'cosh', 'tanh',
+                  'arcsinh',
+                  'arccosh',
+                  'arctanh',
+                  'absolute', 'fabs', 'negative',
+                  # 'nonzero', 'around',
+                  'floor', 'ceil',
+                  # 'sometrue', 'alltrue',
+                  'logical_not',
+                  'add', 'subtract', 'multiply',
+                  'divide', 'true_divide', 'floor_divide',
+                  'remainder', 'fmod', 'hypot', 'arctan2',
+                  'equal', 'not_equal', 'less_equal', 'greater_equal',
+                  'less', 'greater',
+                  'logical_and', 'logical_or', 'logical_xor']:
+            try:
+                uf = getattr(umath, f)
+            except AttributeError:
+                uf = getattr(fromnumeric, f)
+            mf = getattr(np.ma, f)
+            args = self.d[:uf.nin]
+            with np.errstate():
+                if f in f_invalid_ignore:
+                    np.seterr(invalid='ignore')
+                if f in ['arctanh', 'log', 'log10']:
+                    np.seterr(divide='ignore')
+                ur = uf(*args)
+                mr = mf(*args)
+            self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
+            self.assertTrue(eqmask(ur.mask,
mr.mask)) + + def test_reduce(self): + a = self.d[0] + self.assertFalse(alltrue(a, axis=0)) + self.assertTrue(sometrue(a, axis=0)) + self.assertEqual(sum(a[:3], axis=0), 0) + self.assertEqual(product(a, axis=0), 0) + + def test_minmax(self): + a = arange(1, 13).reshape(3, 4) + amask = masked_where(a < 5, a) + self.assertEqual(amask.max(), a.max()) + self.assertEqual(amask.min(), 5) + self.assertTrue((amask.max(0) == a.max(0)).all()) + self.assertTrue((amask.min(0) == [5, 6, 7, 8]).all()) + self.assertTrue(amask.max(1)[0].mask) + self.assertTrue(amask.min(1)[0].mask) + + def test_nonzero(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) + self.assertTrue(eq(nonzero(x), [0])) + + +class TestArrayMethods(TestCase): + + def setUp(self): + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + self.d = (x, X, XX, m, mx, mX, mXX) + + #------------------------------------------------------ + def test_trace(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXdiag = mX.diagonal() + self.assertEqual(mX.trace(), mX.diagonal().compressed().sum()) + self.assertTrue(eq(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0))) + + def test_clip(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + clipped = mx.clip(2, 8) + self.assertTrue(eq(clipped.mask, mx.mask)) + self.assertTrue(eq(clipped._data, x.clip(2, 8))) + self.assertTrue(eq(clipped._data, mx._data.clip(2, 8))) + + def test_ptp(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + (n, m) = X.shape + self.assertEqual(mx.ptp(), mx.compressed().ptp()) + rows = np.zeros(n, np.float_) + cols = np.zeros(m, np.float_) + for k in range(m): + cols[k] = mX[:, k].compressed().ptp() + for k in range(n): + rows[k] = mX[k].compressed().ptp() + self.assertTrue(eq(mX.ptp(0), cols)) + self.assertTrue(eq(mX.ptp(1), rows)) + + def test_swapaxes(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXswapped = mX.swapaxes(0, 1) + self.assertTrue(eq(mXswapped[-1], mX[:, -1])) + mXXswapped = mXX.swapaxes(0, 2) + self.assertEqual(mXXswapped.shape, (2, 2, 3, 3)) + + def test_cumprod(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumprod(0) + self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(0))) + mXcp = mX.cumprod(1) + self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(1))) + + def test_cumsum(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumsum(0) + self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(0))) + mXcp = mX.cumsum(1) + self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(1))) + + def test_varstd(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + self.assertTrue(eq(mX.var(axis=None), mX.compressed().var())) + self.assertTrue(eq(mX.std(axis=None), mX.compressed().std())) + self.assertTrue(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) + self.assertTrue(eq(mX.var().shape, X.var().shape)) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + for k in range(6): + self.assertTrue(eq(mXvar1[k], mX[k].compressed().var())) + self.assertTrue(eq(mXvar0[k], mX[:, k].compressed().var())) + 
self.assertTrue(eq(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std())) + + +def eqmask(m1, m2): + if m1 is nomask: + return m2 is nomask + if m2 is nomask: + return m1 is nomask + return (m1 == m2).all() + +#def timingTest(): +# for f in [testf, testinplace]: +# for n in [1000,10000,50000]: +# t = testta(n, f) +# t1 = testtb(n, f) +# t2 = testtc(n, f) +# print f.test_name +# print """\ +#n = %7d +#numpy time (ms) %6.1f +#MA maskless ratio %6.1f +#MA masked ratio %6.1f +#""" % (n, t*1000.0, t1/t, t2/t) + +#def testta(n, f): +# x=np.arange(n) + 1.0 +# tn0 = time.time() +# z = f(x) +# return time.time() - tn0 + +#def testtb(n, f): +# x=arange(n) + 1.0 +# tn0 = time.time() +# z = f(x) +# return time.time() - tn0 + +#def testtc(n, f): +# x=arange(n) + 1.0 +# x[0] = masked +# tn0 = time.time() +# z = f(x) +# return time.time() - tn0 + +#def testf(x): +# for i in range(25): +# y = x **2 + 2.0 * x - 1.0 +# w = x **2 + 1.0 +# z = (y / w) ** 2 +# return z +#testf.test_name = 'Simple arithmetic' + +#def testinplace(x): +# for i in range(25): +# y = x**2 +# y += 2.0*x +# y -= 1.0 +# y /= x +# return y +#testinplace.test_name = 'Inplace operations' + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_regression.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_regression.py new file mode 100644 index 0000000000000..7b32199ea6064 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_regression.py @@ -0,0 +1,75 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import * +from numpy.compat import sixu + +rlevel = 1 + + +class TestRegression(TestCase): + def test_masked_array_create(self,level=rlevel): + # Ticket #17 + x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], + mask=[0, 0, 0, 1, 1, 1, 0, 0]) + assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) + + def test_masked_array(self,level=rlevel): + # Ticket #61 + np.ma.array(1, mask=[1]) + + def test_mem_masked_where(self,level=rlevel): + # Ticket #62 + from numpy.ma import masked_where, MaskType + a = np.zeros((1, 1)) + b = np.zeros(a.shape, MaskType) + c = masked_where(b, a) + a-c + + def test_masked_array_multiply(self,level=rlevel): + # Ticket #254 + a = np.ma.zeros((4, 1)) + a[2, 0] = np.ma.masked + b = np.zeros((4, 2)) + a*b + b*a + + def test_masked_array_repeat(self, level=rlevel): + # Ticket #271 + np.ma.array([1], mask=False).repeat(10) + + def test_masked_array_repr_unicode(self): + # Ticket #1256 + repr(np.ma.array(sixu("Unicode"))) + + def test_atleast_2d(self): + # Ticket #1559 + a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) + b = np.atleast_2d(a) + assert_(a.mask.ndim == 1) + assert_(b.mask.ndim == 2) + + def test_set_fill_value_unicode_py3(self): + # Ticket #2733 + a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) + a.fill_value = 'X' + assert_(a.fill_value == 'X') + + def test_var_sets_maskedarray_scalar(self): + # Issue gh-2757 + a = np.ma.array(np.arange(5), mask=True) + mout = np.ma.array(-1, dtype=float) + a.var(out=mout) + assert_(mout._data == 0) + + def test_ddof_corrcoef(self): + # See gh-3336 + x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) + y = np.array([2, 2.5, 3.1, 3, 5]) + r0 = np.ma.corrcoef(x, y, ddof=0) + r1 = np.ma.corrcoef(x, y, ddof=1) + # ddof should not have an effect (it gets cancelled out) + assert_allclose(r0.data, r1.data) + +if __name__ == "__main__": + run_module_suite() diff --git 
a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_subclassing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_subclassing.py new file mode 100644 index 0000000000000..ade5c59daebfc --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_subclassing.py @@ -0,0 +1,236 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Test suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +from __future__ import division, absolute_import, print_function + +__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" +__version__ = '1.0' +__revision__ = "$Revision: 3473 $" +__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' + +import numpy as np +from numpy.testing import * +from numpy.ma.testutils import * +from numpy.ma.core import * + + +class SubArray(np.ndarray): + # Defines a generic np.ndarray subclass, that stores some metadata + # in the dictionary `info`. + def __new__(cls,arr,info={}): + x = np.asanyarray(arr).view(cls) + x.info = info + return x + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', {}) + return + + def __add__(self, other): + result = np.ndarray.__add__(self, other) + result.info.update({'added':result.info.pop('added', 0)+1}) + return result + +subarray = SubArray + + +class MSubArray(SubArray, MaskedArray): + + def __new__(cls, data, info={}, mask=nomask): + subarr = SubArray(data, info) + _data = MaskedArray.__new__(cls, data=subarr, mask=mask) + _data.info = subarr.info + return _data + + def __array_finalize__(self, obj): + MaskedArray.__array_finalize__(self, obj) + SubArray.__array_finalize__(self, obj) + return + + def _get_series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + _series = property(fget=_get_series) + +msubarray = MSubArray + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + return + + def _get_series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + _series = property(fget=_get_series) + +mmatrix = MMatrix + + +# also a subclass that overrides __str__, __repr__ and __setitem__, disallowing +# setting to non-class values (and thus np.ma.core.masked_print_option) +class ComplicatedSubArray(SubArray): + def __str__(self): + return 'myprefix {0} mypostfix'.format( + super(ComplicatedSubArray, self).__str__()) + + def __repr__(self): + # Return a repr that does not start with 'name(' + return '<{0} {1}>'.format(self.__class__.__name__, self) + + def __setitem__(self, item, value): + # this ensures direct assignment to masked_print_option will fail + if not isinstance(value, ComplicatedSubArray): + raise ValueError("Can only set to ComplicatedSubArray values") + super(ComplicatedSubArray, self).__setitem__(item, value) + + +class TestSubclassing(TestCase): + # Test suite for masked subclasses of ndarray. + + def setUp(self): + x = np.arange(5) + mx = mmatrix(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_data_subclassing(self): + # Tests whether the subclass is kept.
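+        # That is: masked_array(xsub) should report as a MaskedArray while its + # ._data attribute still carries the original SubArray type.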
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xsub = SubArray(x) + xmsub = masked_array(xsub, mask=m) + self.assertTrue(isinstance(xmsub, MaskedArray)) + assert_equal(xmsub._data, xsub) + self.assertTrue(isinstance(xmsub._data, SubArray)) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + self.assertTrue(isinstance(mx._data, np.matrix)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + self.assertTrue(isinstance(log(mx), mmatrix)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a mmatrix + self.assertTrue(isinstance(add(mx, mx), mmatrix)) + self.assertTrue(isinstance(add(mx, x), mmatrix)) + # The functional form should match the operator form + assert_equal(add(mx, x), mx+x) + self.assertTrue(isinstance(add(mx, mx)._data, np.matrix)) + self.assertTrue(isinstance(add.outer(mx, mx), mmatrix)) + self.assertTrue(isinstance(hypot(mx, mx), mmatrix)) + self.assertTrue(isinstance(hypot(mx, x), mmatrix)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + self.assertTrue(isinstance(divide(mx, mx), mmatrix)) + self.assertTrue(isinstance(divide(mx, x), mmatrix)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + def test_attributepropagation(self): + x = array(arange(5), mask=[0]+[1]*4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my+1) + self.assertTrue(isinstance(z, MaskedArray)) + self.assertTrue(not isinstance(z, MSubArray)) + self.assertTrue(isinstance(z._data, SubArray)) + assert_equal(z._data.info, {}) + # + z = (ym+1) + self.assertTrue(isinstance(z, MaskedArray)) + self.assertTrue(isinstance(z, MSubArray)) + self.assertTrue(isinstance(z._data, SubArray)) + self.assertTrue(z._data.info['added'] > 0) + # + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) + # + xsub = subarray(x, info={'name':'x'}) + mxsub = masked_array(xsub) + self.assertTrue(hasattr(mxsub, 'info')) + assert_equal(mxsub.info, xsub.info) + + def test_subclasspreservation(self): + # Checks that masked_array(...,subok=True) preserves the class.
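+        # Four cases are exercised below: masked_array(..., subok=False) and + # asarray() should drop down to a plain MaskedArray, while + # masked_array(..., subok=True) and asanyarray() should keep the + # MSubArray type along with its `info` metadata.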
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xinfo = [(i, j) for (i, j) in zip(x, m)] + xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) + # + mxsub = masked_array(xsub, subok=False) + self.assertTrue(not isinstance(mxsub, MSubArray)) + self.assertTrue(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = asarray(xsub) + self.assertTrue(not isinstance(mxsub, MSubArray)) + self.assertTrue(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = masked_array(xsub, subok=True) + self.assertTrue(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + self.assertTrue(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) + + def test_subclass_repr(self): + """test that repr uses the name of the subclass + and 'array' for np.ndarray""" + x = np.arange(5) + mx = masked_array(x, mask=[True, False, True, False, False]) + self.assertTrue(repr(mx).startswith('masked_array')) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + self.assertTrue(repr(mxsub).startswith( + 'masked_{0}(data = [-- 1 -- 3 4]'.format(SubArray.__name__))) + + def test_subclass_str(self): + """test str with subclass that has overridden str, setitem""" + # first without override + x = np.arange(5) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + self.assertTrue(str(mxsub) == '[-- 1 -- 3 4]') + + xcsub = ComplicatedSubArray(x) + assert_raises(ValueError, xcsub.__setitem__, 0, + np.ma.core.masked_print_option) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + self.assertTrue(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix') + + +############################################################################### +if __name__ == '__main__': + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/testutils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/testutils.py new file mode 100644 index 0000000000000..feff3e8793d61 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/testutils.py @@ -0,0 +1,240 @@ +"""Miscellaneous functions for testing masked arrays and subclasses + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ + +""" +from __future__ import division, absolute_import, print_function + +__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" +__version__ = "1.0" +__revision__ = "$Revision: 3529 $" +__date__ = "$Date: 2007-11-13 10:01:14 +0200 (Tue, 13 Nov 2007) $" + + +import operator + +import numpy as np +from numpy import ndarray, float_ +import numpy.core.umath as umath +from numpy.testing import * +import numpy.testing.utils as utils + +from .core import mask_or, getmask, masked_array, nomask, masked, filled, \ + equal, less + +#------------------------------------------------------------------------------ +def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8): + """Returns True if all components of a and b are equal subject to given tolerances. + +If fill_value is True, masked values are considered equal. Otherwise, masked values +are considered unequal. +The relative error rtol should be positive and << 1.0. +The absolute error atol comes into play for those elements of b that are very +small or zero; it says how small a must be also.
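+Concretely, the test passes elementwise where ``abs(x - y) <= atol + rtol * abs(y)`` holds on the filled data; for example, with the default tolerances, ``approx(masked_array([1.0, 2.0], mask=[0, 1]), [1.0000001, 99.0])`` is all True, since the first pair is within tolerance and the masked pair is filled identically on both sides.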
+ """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) + d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)) + return d.ravel() + + +def almost(a, b, decimal=6, fill_value=True): + """Returns True if a and b are equal up to decimal places. +If fill_value is True, masked values considered equal. Otherwise, masked values +are considered unequal. + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) + d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) + return d.ravel() + + +#................................................ +def _assert_equal_on_sequences(actual, desired, err_msg=''): + "Asserts the equality of two non-array sequences." + assert_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) + return + +def assert_equal_records(a, b): + """Asserts that two records are equal. Pretty crude for now.""" + assert_equal(a.dtype, b.dtype) + for f in a.dtype.names: + (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) + if not (af is masked) and not (bf is masked): + assert_equal(operator.getitem(a, f), operator.getitem(b, f)) + return + + +def assert_equal(actual, desired, err_msg=''): + "Asserts that two items are equal." + # Case #1: dictionary ..... + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if not k in actual: + raise AssertionError("%s not in %s" % (k, actual)) + assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) + return + # Case #2: lists ..... + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + return _assert_equal_on_sequences(actual, desired, err_msg='') + if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): + msg = build_err_msg([actual, desired], err_msg,) + if not desired == actual: + raise AssertionError(msg) + return + # Case #4. 
arrays or equivalent + if ((actual is masked) and not (desired is masked)) or \ + ((desired is masked) and not (actual is masked)): + msg = build_err_msg([actual, desired], + err_msg, header='', names=('x', 'y')) + raise ValueError(msg) + actual = np.array(actual, copy=False, subok=True) + desired = np.array(desired, copy=False, subok=True) + (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) + if actual_dtype.char == "S" and desired_dtype.char == "S": + return _assert_equal_on_sequences(actual.tolist(), + desired.tolist(), + err_msg='') +# elif actual_dtype.char in "OV" and desired_dtype.char in "OV": +# if (actual_dtype != desired_dtype) and actual_dtype: +# msg = build_err_msg([actual_dtype, desired_dtype], +# err_msg, header='', names=('actual', 'desired')) +# raise ValueError(msg) +# return _assert_equal_on_sequences(actual.tolist(), +# desired.tolist(), +# err_msg='') + return assert_array_equal(actual, desired, err_msg) + + +def fail_if_equal(actual, desired, err_msg='',): + """Raises an assertion error if two items are equal. + """ + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + fail_if_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if not k in actual: + raise AssertionError(repr(k)) + fail_if_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + fail_if_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + fail_if_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) + return + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return fail_if_array_equal(actual, desired, err_msg) + msg = build_err_msg([actual, desired], err_msg) + if not desired != actual: + raise AssertionError(msg) + +assert_not_equal = fail_if_equal + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """Asserts that two items are almost equal. 
+ The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal) + """ + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return assert_array_almost_equal(actual, desired, decimal=decimal, + err_msg=err_msg, verbose=verbose) + msg = build_err_msg([actual, desired], + err_msg=err_msg, verbose=verbose) + if not round(abs(desired - actual), decimal) == 0: + raise AssertionError(msg) + + +assert_close = assert_almost_equal + + +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', + fill_value=True): + """Asserts that a comparison relation between two masked arrays is satisfied + elementwise.""" + # Fill the data first +# xf = filled(x) +# yf = filled(y) + # Allocate a common mask and refill + m = mask_or(getmask(x), getmask(y)) + x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) + y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) + if ((x is masked) and not (y is masked)) or \ + ((y is masked) and not (x is masked)): + msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, + header=header, names=('x', 'y')) + raise ValueError(msg) + # OK, now run the basic tests on filled versions + return utils.assert_array_compare(comparison, + x.filled(fill_value), + y.filled(fill_value), + err_msg=err_msg, + verbose=verbose, header=header) + + +def assert_array_equal(x, y, err_msg='', verbose=True): + """Checks the elementwise equality of two masked arrays.""" + assert_array_compare(operator.__eq__, x, y, + err_msg=err_msg, verbose=verbose, + header='Arrays are not equal') + + +def fail_if_array_equal(x, y, err_msg='', verbose=True): + "Raises an assertion error if two masked arrays are equal (elementwise)." + def compare(x, y): + return (not np.alltrue(approx(x, y))) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not equal') + + +def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True): + """Checks the elementwise equality of two masked arrays, up to a given + number of decimals.""" + def compare(x, y): + "Returns the result of the loose comparison between x and y." + return approx(x, y, rtol=10. ** -decimal) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not almost equal') + + +def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): + """Checks the elementwise equality of two masked arrays, up to a given + number of decimals.""" + def compare(x, y): + "Returns the result of the loose comparison between x and y." + return almost(x, y, decimal) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not almost equal') + + +def assert_array_less(x, y, err_msg='', verbose=True): + "Checks that x is smaller than y elementwise."
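+    # Note: masked entries are filled with `fill_value` (True, i.e. 1) on + # both sides before comparing, so a pair masked in either array compares + # as 1 < 1 and will not satisfy the strict inequality.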
+ assert_array_compare(operator.__lt__, x, y, + err_msg=err_msg, verbose=verbose, + header='Arrays are not less-ordered') + + +def assert_mask_equal(m1, m2, err_msg=''): + """Asserts the equality of two masks.""" + if m1 is nomask: + assert_(m2 is nomask) + if m2 is nomask: + assert_(m1 is nomask) + assert_array_equal(m1, m2, err_msg=err_msg) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/timer_comparison.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/timer_comparison.py new file mode 100644 index 0000000000000..b1c056cfc56cf --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/timer_comparison.py @@ -0,0 +1,459 @@ +from __future__ import division, absolute_import, print_function + +import timeit +from functools import reduce + +import numpy as np +from numpy import float_ +import numpy.core.fromnumeric as fromnumeric + +from numpy.testing.utils import build_err_msg + +# Fixme: this does not look right. +np.seterr(all='ignore') + +pi = np.pi + + +class moduletester(object): + def __init__(self, module): + self.module = module + self.allequal = module.allequal + self.arange = module.arange + self.array = module.array + self.average = module.average + self.concatenate = module.concatenate + self.count = module.count + self.equal = module.equal + self.filled = module.filled + self.getmask = module.getmask + self.getmaskarray = module.getmaskarray + self.id = id + self.inner = module.inner + self.make_mask = module.make_mask + self.masked = module.masked + self.masked_array = module.masked_array + self.masked_values = module.masked_values + self.mask_or = module.mask_or + self.nomask = module.nomask + self.ones = module.ones + self.outer = module.outer + self.repeat = module.repeat + self.resize = module.resize + self.sort = module.sort + self.take = module.take + self.transpose = module.transpose + self.zeros = module.zeros + self.MaskType = module.MaskType + try: + self.umath = module.umath + except AttributeError: + self.umath = module.core.umath + self.testnames = [] + + def assert_array_compare(self, comparison, x, y, err_msg='', header='', + fill_value=True): + """Asserts that a comparison relation between two masked arrays is satisfied + elementwise.""" + xf = self.filled(x) + yf = self.filled(y) + m = self.mask_or(self.getmask(x), self.getmask(y)) + + x = self.filled(self.masked_array(xf, mask=m), fill_value) + y = self.filled(self.masked_array(yf, mask=m), fill_value) + if (x.dtype.char != "O"): + x = x.astype(float_) + if isinstance(x, np.ndarray) and x.size > 1: + x[np.isnan(x)] = 0 + elif np.isnan(x): + x = 0 + if (y.dtype.char != "O"): + y = y.astype(float_) + if isinstance(y, np.ndarray) and y.size > 1: + y[np.isnan(y)] = 0 + elif np.isnan(y): + y = 0 + try: + cond = (x.shape==() or y.shape==()) or x.shape == y.shape + if not cond: + msg = build_err_msg([x, y], + err_msg + + '\n(shapes %s, %s mismatch)' % (x.shape, + y.shape), + header=header, + names=('x', 'y')) + assert cond, msg + val = comparison(x, y) + if m is not self.nomask and fill_value: + val = self.masked_array(val, mask=m) + if isinstance(val, bool): + cond = val + reduced = [0] + else: + reduced = val.ravel() + cond = reduced.all() + reduced = reduced.tolist() + if not cond: + match = 100-100.0*reduced.count(1)/len(reduced) + msg = build_err_msg([x, y], + err_msg + + '\n(mismatch %s%%)' % (match,), + header=header, + names=('x', 'y')) + assert cond, msg + except ValueError: + msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) + raise ValueError(msg) + + def
assert_array_equal(self, x, y, err_msg=''): + """Checks the elementwise equality of two masked arrays.""" + self.assert_array_compare(self.equal, x, y, err_msg=err_msg, + header='Arrays are not equal') + + def test_0(self): + "Tests creation" + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + xm = self.masked_array(x, mask=m) + xm[0] + + def test_1(self): + "Tests creation" + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = self.masked_array(x, mask=m1) + ym = self.masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = self.masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1.e+20, x) + xm.set_fill_value(1.e+20) + + assert((xm-ym).filled(0).any()) + #fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_)) + s = x.shape + assert(xm.size == reduce(lambda x, y:x*y, s)) + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + def test_2(self): + "Tests conversions and indexing" + x1 = np.array([1, 2, 4, 3]) + x2 = self.array(x1, mask=[1, 0, 0, 0]) + x3 = self.array(x1, mask=[0, 1, 0, 1]) + x4 = self.array(x1) + # test conversion to strings + junk, garbage = str(x2), repr(x2) +# assert_equal(np.sort(x1), self.sort(x2, fill_value=0)) + # tests of indexing + assert type(x2[1]) is type(x1[1]) + assert x1[1] == x2[1] +# assert self.allequal(x1[2],x2[2]) +# assert self.allequal(x1[2:5],x2[2:5]) +# assert self.allequal(x1[:],x2[:]) +# assert self.allequal(x1[1:], x3[1:]) + x1[2] = 9 + x2[2] = 9 + self.assert_array_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 +# assert self.allequal(x1,x2) + x2[1] = self.masked +# assert self.allequal(x1,x2) + x2[1:3] = self.masked +# assert self.allequal(x1,x2) + x2[:] = x1 + x2[1] = self.masked +# assert self.allequal(self.getmask(x2),self.array([0,1,0,0])) + x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) +# assert self.allequal(self.getmask(x3), self.array([0,1,1,0])) + x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) +# assert self.allequal(self.getmask(x4), self.array([0,1,1,0])) +# assert self.allequal(x4, self.array([1,2,3,4])) + x1 = np.arange(5)*1.0 + x2 = self.masked_values(x1, 3.0) +# assert self.allequal(x1,x2) +# assert self.allequal(self.array([0,0,0,1,0], self.MaskType), x2.mask) + x1 = self.array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert x1[1:1].shape == (0,) + # Tests copy-size + n = [0, 0, 1, 0, 0] + m = self.make_mask(n) + m2 = self.make_mask(m) + assert(m is m2) + m3 = self.make_mask(m, copy=1) + assert(m is not m3) + + + def test_3(self): + "Tests resize/repeat" + x4 = self.arange(4) + x4[2] = self.masked + y4 = self.resize(x4, (8,)) + assert self.allequal(self.concatenate([x4, x4]), y4) + assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) + self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = self.repeat(x4, 2, axis=0) + assert self.allequal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert self.allequal(y5, y7) + y8 = x4.repeat(2, 0) + assert self.allequal(y5, y8) + + #---------------------------------- + def test_4(self): + "Test of take, 
transpose, inner, outer products" + x = self.arange(24) + y = np.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) + assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) + assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), + self.inner(x, y)) + assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), + self.outer(x, y)) + y = self.array(['abc', 1, 'def', 2, 3], object) + y[2] = self.masked + t = self.take(y, [0, 3, 4]) + assert t[0] == 'abc' + assert t[1] == 2 + assert t[2] == 3 + #---------------------------------- + def test_5(self): + "Tests inplace w/ scalar" + + x = self.arange(10) + y = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x += 1 + assert self.allequal(x, y+1) + xm += 1 + assert self.allequal(xm, y+1) + + x = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x -= 1 + assert self.allequal(x, y-1) + xm -= 1 + assert self.allequal(xm, y-1) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x *= 2.0 + assert self.allequal(x, y*2) + xm *= 2.0 + assert self.allequal(xm, y*2) + + x = self.arange(10)*2 + xm = self.arange(10)*2 + xm[2] = self.masked + x /= 2 + assert self.allequal(x, y) + xm /= 2 + assert self.allequal(xm, y) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x /= 2.0 + assert self.allequal(x, y/2.0) + xm /= self.arange(10) + self.assert_array_equal(xm, self.ones((10,))) + + x = self.arange(10).astype(float_) + xm = self.arange(10) + xm[2] = self.masked + id1 = self.id(x.raw_data()) + x += 1. + #assert id1 == self.id(x.raw_data()) + assert self.allequal(x, y+1.) + + + def test_6(self): + "Tests inplace w/ array" + + x = self.arange(10, dtype=float_) + y = self.arange(10) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x += a + xm += a + assert self.allequal(x, y+a) + assert self.allequal(xm, y+a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x -= a + xm -= a + assert self.allequal(x, y-a) + assert self.allequal(xm, y-a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x *= a + xm *= a + assert self.allequal(x, y*a) + assert self.allequal(xm, y*a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x /= a + xm /= a + + #---------------------------------- + def test_7(self): + "Tests ufunc" + d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), + self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', +# 'sin', 'cos', 'tan', +# 'arcsin', 'arccos', 'arctan', +# 'sinh', 'cosh', 'tanh', +# 'arcsinh', +# 'arccosh', +# 'arctanh', +# 'absolute', 'fabs', 'negative', +# # 'nonzero', 'around', +# 'floor', 'ceil', +# # 'sometrue', 'alltrue', +# 'logical_not', +# 'add', 'subtract', 'multiply', +# 'divide', 'true_divide', 'floor_divide', +# 'remainder', 'fmod', 
'hypot', 'arctan2', +# 'equal', 'not_equal', 'less_equal', 'greater_equal', +# 'less', 'greater', +# 'logical_and', 'logical_or', 'logical_xor', + ]: + #print f + try: + uf = getattr(self.umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(self.module, f) + args = d[:uf.nin] + ur = uf(*args) + mr = mf(*args) + self.assert_array_equal(ur.filled(0), mr.filled(0), f) + self.assert_array_equal(ur._mask, mr._mask) + + #---------------------------------- + def test_99(self): + # test average + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + self.assert_array_equal(2.0, self.average(ott, axis=0)) + self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) + result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) + self.assert_array_equal(2.0, result) + assert(wts == 4.0) + ott[:] = self.masked + assert(self.average(ott, axis=0) is self.masked) + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = self.masked + self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) + assert(self.average(ott, axis=1)[0] is self.masked) + self.assert_array_equal([2., 0.], self.average(ott, axis=0)) + result, wts = self.average(ott, axis=0, returned=1) + self.assert_array_equal(wts, [1., 0.]) + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = self.arange(6) + self.assert_array_equal(self.average(x, axis=0), 2.5) + self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) + y = self.array([self.arange(6), 2.0*self.arange(6)]) + self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) + self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) + self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + m1 = self.zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = self.ones(6) + m5 = [0, 1, 1, 1, 1, 1] + self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) + # assert(self.average(masked_array(x, m4),axis=0) is masked) + self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) + self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) + z = self.masked_array(y, m3) + self.assert_array_equal(self.average(z, None), 20./6.) 
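+        # The 20./6. above is easy to check by hand: the unmasked entries of z are + # 0, 1, 4, 5 in the first row and 0, 10 in the second, i.e. a sum of 20 + # over 6 values.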
+ self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) + self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) + #------------------------ + def test_A(self): + x = self.arange(24) + y = np.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + + +################################################################################ +if __name__ == '__main__': + + setup_base = "from __main__ import moduletester \n"\ + "import numpy\n" \ + "tester = moduletester(module)\n" +# setup_new = "import numpy.ma.core_ini as module\n"+setup_base + setup_cur = "import numpy.ma.core as module\n"+setup_base +# setup_alt = "import numpy.ma.core_alt as module\n"+setup_base +# setup_tmp = "import numpy.ma.core_tmp as module\n"+setup_base + + (nrepeat, nloop) = (10, 10) + + if 1: + for i in range(1, 8): + func = 'tester.test_%i()' % i +# new = timeit.Timer(func, setup_new).repeat(nrepeat, nloop*10) + cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) +# alt = timeit.Timer(func, setup_alt).repeat(nrepeat, nloop*10) +# tmp = timeit.Timer(func, setup_tmp).repeat(nrepeat, nloop*10) +# new = np.sort(new) + cur = np.sort(cur) +# alt = np.sort(alt) +# tmp = np.sort(tmp) + print("#%i" % i + 50*'.') + print(eval("moduletester.test_%i.__doc__" % i)) +# print "core_ini : %.3f - %.3f" % (new[0], new[1]) + print("core_current : %.3f - %.3f" % (cur[0], cur[1])) +# print "core_alt : %.3f - %.3f" % (alt[0], alt[1]) +# print "core_tmp : %.3f - %.3f" % (tmp[0], tmp[1]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/version.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/version.py new file mode 100644 index 0000000000000..a2c5c42a806ac --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/version.py @@ -0,0 +1,14 @@ +"""Version number + +""" +from __future__ import division, absolute_import, print_function + +version = '1.00' +release = False + +if not release: + from . import core + from . import extras + revision = [core.__revision__.split(':')[-1][:-1].strip(), + extras.__revision__.split(':')[-1][:-1].strip(),] + version += '.dev%04i' % max([int(rev) for rev in revision]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matlib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matlib.py new file mode 100644 index 0000000000000..677400367b00b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matlib.py @@ -0,0 +1,358 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.matrixlib.defmatrix import matrix, asmatrix +# need * as we're copying the numpy namespace +from numpy import * + +__version__ = np.__version__ + +__all__ = np.__all__[:] # copy numpy namespace +__all__ += ['rand', 'randn', 'repmat'] + +def empty(shape, dtype=None, order='C'): + """ + Return a new matrix of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty matrix. + dtype : data-type, optional + Desired output data-type. + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in C (row-major) or + Fortran (column-major) order in memory. + + See Also + -------- + empty_like, zeros + + Notes + ----- + `empty`, unlike `zeros`, does not set the matrix values to zero, + and may therefore be marginally faster. On the other hand, it requires + the user to manually set all the values in the array, and should be + used with caution.
+ + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], + [ 7.39337286e-309, 3.22135945e-309]]) #random + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], + [ 6586976, 22740995]]) #random + + """ + return ndarray.__new__(matrix, shape, dtype, order=order) + +def ones(shape, dtype=None, order='C'): + """ + Matrix of ones. + + Return a matrix of given shape and type, filled with ones. + + Parameters + ---------- + shape : {sequence of ints, int} + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is np.float64. + order : {'C', 'F'}, optional + Whether to store matrix in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Matrix of ones of given shape, dtype, and order. + + See Also + -------- + ones : Array of ones. + matlib.zeros : Zero matrix. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> np.matlib.ones((2,3)) + matrix([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> np.matlib.ones(2) + matrix([[ 1., 1.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(1) + return a + +def zeros(shape, dtype=None, order='C'): + """ + Return a matrix of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is float. + order : {'C', 'F'}, optional + Whether to store the result in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Zero matrix of given shape, dtype, and order. + + See Also + -------- + numpy.zeros : Equivalent array function. + matlib.ones : Return a matrix of ones. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.zeros((2, 3)) + matrix([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> np.matlib.zeros(2) + matrix([[ 0., 0.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(0) + return a + +def identity(n,dtype=None): + """ + Returns the square identity matrix of given size. + + Parameters + ---------- + n : int + Size of the returned identity matrix. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + + Returns + ------- + out : matrix + `n` x `n` matrix with its main diagonal set to one, + and all other elements zero. + + See Also + -------- + numpy.identity : Equivalent array function. + matlib.eye : More general matrix identity function. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.identity(3, dtype=int) + matrix([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + + """ + a = array([1]+n*[0], dtype=dtype) + b = empty((n, n), dtype=dtype) + b.flat = a + return b + +def eye(n,M=None, k=0, dtype=float): + """ + Return a matrix with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + n : int + Number of rows in the output. + M : int, optional + Number of columns in the output, defaults to `n`. + k : int, optional + Index of the diagonal: 0 refers to the main diagonal, + a positive value refers to an upper diagonal, + and a negative value to a lower diagonal. + dtype : dtype, optional + Data-type of the returned matrix. 
+ + Returns + ------- + I : matrix + A `n` x `M` matrix where all elements are equal to zero, + except for the `k`-th diagonal, whose values are equal to one. + + See Also + -------- + numpy.eye : Equivalent array function. + identity : Square identity matrix. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.eye(3, k=1, dtype=float) + matrix([[ 0., 1., 0.], + [ 0., 0., 1.], + [ 0., 0., 0.]]) + + """ + return asmatrix(np.eye(n, M, k, dtype)) + +def rand(*args): + """ + Return a matrix of random values with given shape. + + Create a matrix of the given shape and populate it with + random samples from a uniform distribution over ``[0, 1)``. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. + If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + out : ndarray + The matrix of random values with shape given by `\\*args`. + + See Also + -------- + randn, numpy.random.rand + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.rand(2, 3) + matrix([[ 0.68340382, 0.67926887, 0.83271405], + [ 0.00793551, 0.20468222, 0.95253525]]) #random + >>> np.matlib.rand((2, 3)) + matrix([[ 0.84682055, 0.73626594, 0.11308016], + [ 0.85429008, 0.3294825 , 0.89139555]]) #random + + If the first argument is a tuple, other arguments are ignored: + + >>> np.matlib.rand((2, 3), 4) + matrix([[ 0.46898646, 0.15163588, 0.95188261], + [ 0.59208621, 0.09561818, 0.00583606]]) #random + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.rand(*args)) + +def randn(*args): + """ + Return a random matrix with data from the "standard normal" distribution. + + `randn` generates a matrix filled with random floats sampled from a + univariate "normal" (Gaussian) distribution of mean 0 and variance 1. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + Z : matrix of floats + A matrix of floating-point samples drawn from the standard normal + distribution. + + See Also + -------- + rand, random.randn + + Notes + ----- + For random samples from :math:`N(\\mu, \\sigma^2)`, use: + + ``sigma * np.matlib.randn(...) + mu`` + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.randn(1) + matrix([[-0.09542833]]) #random + >>> np.matlib.randn(1, 2, 3) + matrix([[ 0.16198284, 0.0194571 , 0.18312985], + [-0.7509172 , 1.61055 , 0.45298599]]) #random + + Two-by-four matrix of samples from :math:`N(3, 6.25)`: + + >>> 2.5 * np.matlib.randn((2, 4)) + 3 + matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922], + [ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.randn(*args)) + +def repmat(a, m, n): + """ + Repeat a 0-D to 2-D array or matrix MxN times. + + Parameters + ---------- + a : array_like + The array or matrix to be repeated. + m, n : int + The number of times `a` is repeated along the first and second axes. + + Returns + ------- + out : ndarray + The result of repeating `a`.
+ + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/__init__.py new file mode 100644 index 0000000000000..d20696154ab25 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/__init__.py @@ -0,0 +1,12 @@ +"""Sub-package containing the matrix class and related functions. + +""" +from __future__ import division, absolute_import, print_function + +from .defmatrix import * + +__all__ = defmatrix.__all__ + +from numpy.testing import Tester +test = Tester().test +bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/defmatrix.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/defmatrix.py new file mode 100644 index 0000000000000..0fd5db66a21ea --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/defmatrix.py @@ -0,0 +1,1094 @@ +from __future__ import division, absolute_import, print_function + +__all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] + +import sys +import numpy.core.numeric as N +from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray +from numpy.core.numerictypes import issubdtype + +# make translation table +_numchars = '0123456789.-+jeEL' + +if sys.version_info[0] >= 3: + class _NumCharTable: + def __getitem__(self, i): + if chr(i) in _numchars: + return chr(i) + else: + return None + _table = _NumCharTable() + def _eval(astr): + str_ = astr.translate(_table) + if not str_: + raise TypeError("Invalid data string supplied: " + astr) + else: + return eval(str_) + +else: + _table = [None]*256 + for k in range(256): + _table[k] = chr(k) + _table = ''.join(_table) + + _todelete = [] + for k in _table: + if k not in _numchars: + _todelete.append(k) + _todelete = ''.join(_todelete) + del k + + def _eval(astr): + str_ = astr.translate(_table, _todelete) + if not str_: + raise TypeError("Invalid data string supplied: " + astr) + else: + return eval(str_) + +def _convert_from_string(data): + rows = data.split(';') + newdata = [] + count = 0 + for row in rows: + trow = row.split(',') + newrow = [] + for col in trow: + temp = col.split() + newrow.extend(map(_eval, temp)) + if count == 0: + Ncols = len(newrow) + elif len(newrow) != Ncols: + raise ValueError("Rows not the same size.") + count += 1 + newdata.append(newrow) + return newdata + +def asmatrix(data, dtype=None): + """ + Interpret the input as a matrix. + + Unlike `matrix`, `asmatrix` does not make a copy if the input is already + a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. + + Parameters + ---------- + data : array_like + Input data. + + Returns + ------- + mat : matrix + `data` interpreted as a matrix. 
+ + Examples + -------- + >>> x = np.array([[1, 2], [3, 4]]) + + >>> m = np.asmatrix(x) + + >>> x[0,0] = 5 + + >>> m + matrix([[5, 2], + [3, 4]]) + + """ + return matrix(data, dtype=dtype, copy=False) + +def matrix_power(M, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the power ``abs(n)``. + + Parameters + ---------- + M : ndarray or matrix object + Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``, + with `m` a positive integer. + n : int + The exponent can be any integer or long integer, positive, + negative, or zero. + + Returns + ------- + M**n : ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + If the matrix is not numerically invertible. + + See Also + -------- + matrix + Provides an equivalent function as the exponentiation operator + (``**``, not ``^``). + + Examples + -------- + >>> from numpy import linalg as LA + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> LA.matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix + matrix([[ 0, -1], + [ 1, 0]]) + >>> LA.matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> LA.matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + M = asanyarray(M) + if len(M.shape) != 2 or M.shape[0] != M.shape[1]: + raise ValueError("input must be a square array") + if not issubdtype(type(n), int): + raise TypeError("exponent must be an integer") + + from numpy.linalg import inv + + if n==0: + M = M.copy() + M[:] = identity(M.shape[0]) + return M + elif n<0: + M = inv(M) + n *= -1 + + result = M + if n <= 3: + for _ in range(n-1): + result=N.dot(result, M) + return result + + # binary decomposition to reduce the number of matrix + # multiplications for n > 3. + beta = binary_repr(n) + Z, q, t = M, 0, len(beta) + while beta[t-q-1] == '0': + Z = N.dot(Z, Z) + q += 1 + result = Z + for k in range(q+1, t): + Z = N.dot(Z, Z) + if beta[t-k-1] == '1': + result = N.dot(result, Z) + return result + + +class matrix(N.ndarray): + """ + matrix(data, dtype=None, copy=True) + + Returns a matrix from an array-like object, or from a string of data. + A matrix is a specialized 2-D array that retains its 2-D nature + through operations. It has certain special operators, such as ``*`` + (matrix multiplication) and ``**`` (matrix power). + + Parameters + ---------- + data : array_like or string + If `data` is a string, it is interpreted as a matrix with commas + or spaces separating columns, and semicolons separating rows. + dtype : data-type + Data-type of the output matrix.
+ copy : bool + If `data` is already an `ndarray`, then this flag determines + whether the data is copied (the default), or whether a view is + constructed. + + See Also + -------- + array + + Examples + -------- + >>> a = np.matrix('1 2; 3 4') + >>> print(a) + [[1 2] + [3 4]] + + >>> np.matrix([[1, 2], [3, 4]]) + matrix([[1, 2], + [3, 4]]) + + """ + __array_priority__ = 10.0 + def __new__(subtype, data, dtype=None, copy=True): + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: return new.copy() + else: return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array + arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim + shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = False + if (ndim == 2) and arr.flags.fortran: + order = True + + if not (order or arr.flags.contiguous): + arr = arr.copy() + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=order) + return ret + + def __array_finalize__(self, obj): + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple([x for x in self.shape if x > 1]) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + + def __getitem__(self, index): + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False + + if not isinstance(out, N.ndarray): + return out + + if out.ndim == 0: + return out[()] + if out.ndim == 1: + sh = out.shape[0] + # Determine when we should have a column array + try: + n = len(index) + except: + n = 0 + if n > 1 and isscalar(index[1]): + out.shape = (sh, 1) + else: + out.shape = (1, sh) + return out + + def __mul__(self, other): + if isinstance(other, (N.ndarray, list, tuple)) : + # This promotes 1-D vectors to row vectors + return N.dot(self, asmatrix(other)) + if isscalar(other) or not hasattr(other, '__rmul__') : + return N.dot(self, other) + return NotImplemented + + def __rmul__(self, other): + return N.dot(other, self) + + def __imul__(self, other): + self[:] = self * other + return self + + def __pow__(self, other): + return matrix_power(self, other) + + def __ipow__(self, other): + self[:] = self ** other + return self + + def __rpow__(self, other): + return NotImplemented + + def __repr__(self): + s = repr(self.__array__()).replace('array', 'matrix') + # now, 'matrix' has 6 letters, and 'array' 5, so the columns don't + # line up anymore. We need to add a space. + l = s.splitlines() + for i in range(1, len(l)): + if l[i]: + l[i] = ' ' + l[i] + return '\n'.join(l) + + def __str__(self): + return str(self.__array__()) + + def _align(self, axis): + """A convenience function for operations that need to preserve axis + orientation.
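+        Here axis=None collapses to the scalar self[0, 0], axis=0 returns + self unchanged, and axis=1 returns the transpose, so a row result + reads back as a column.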
+ """ + if axis is None: + return self[0, 0] + elif axis==0: + return self + elif axis==1: + return self.transpose() + else: + raise ValueError("unsupported axis") + + def _collapse(self, axis): + """A convenience function for operations that want to collapse + to a scalar like _align, but are using keepdims=True + """ + if axis is None: + return self[0, 0] + else: + return self + + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + """ + Return the matrix as a (possibly nested) list. + + See `ndarray.tolist` for full documentation. + + See Also + -------- + ndarray.tolist + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.tolist() + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + """ + return self.__array__().tolist() + + # To preserve orientation of result... + def sum(self, axis=None, dtype=None, out=None): + """ + Returns the sum of the matrix elements, along the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum + + Notes + ----- + This is the same as `ndarray.sum`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix([[1, 2], [4, 3]]) + >>> x.sum() + 10 + >>> x.sum(axis=1) + matrix([[3], + [7]]) + >>> x.sum(axis=1, dtype='float') + matrix([[ 3.], + [ 7.]]) + >>> out = np.zeros((1, 2), dtype='float') + >>> x.sum(axis=1, dtype='float', out=out) + matrix([[ 3.], + [ 7.]]) + + """ + return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def mean(self, axis=None, dtype=None, out=None): + """ + Returns the average of the matrix elements along the given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean + + Notes + ----- + Same as `ndarray.mean` except that, where that returns an `ndarray`, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.mean() + 5.5 + >>> x.mean(0) + matrix([[ 4., 5., 6., 7.]]) + >>> x.mean(1) + matrix([[ 1.5], + [ 5.5], + [ 9.5]]) + + """ + return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def std(self, axis=None, dtype=None, out=None, ddof=0): + """ + Return the standard deviation of the array elements along the given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std + + Notes + ----- + This is the same as `ndarray.std`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.std() + 3.4520525295346629 + >>> x.std(0) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) + >>> x.std(1) + matrix([[ 1.11803399], + [ 1.11803399], + [ 1.11803399]]) + + """ + return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + """ + Returns the variance of the matrix elements, along the given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var + + Notes + ----- + This is the same as `ndarray.var`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. 
+ + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.var() + 11.916666666666666 + >>> x.var(0) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) + >>> x.var(1) + matrix([[ 1.25], + [ 1.25], + [ 1.25]]) + + """ + return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + + Refer to `prod` for full documentation. + + See Also + -------- + prod, ndarray.prod + + Notes + ----- + Same as `ndarray.prod`, except, where that returns an `ndarray`, this + returns a `matrix` object instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.prod() + 0 + >>> x.prod(0) + matrix([[ 0, 45, 120, 231]]) + >>> x.prod(1) + matrix([[ 0], + [ 840], + [7920]]) + + """ + return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def any(self, axis=None, out=None): + """ + Test whether any array element along a given axis evaluates to True. + + Refer to `numpy.any` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which logical OR is performed + out : ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ + return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) + + def all(self, axis=None, out=None): + """ + Test whether all matrix elements along a given axis evaluate to True. + + Parameters + ---------- + See `numpy.all` for complete descriptions + + See Also + -------- + numpy.all + + Notes + ----- + This is the same as `ndarray.all`, but it returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> y = x[0]; y + matrix([[0, 1, 2, 3]]) + >>> (x == y) + matrix([[ True, True, True, True], + [False, False, False, False], + [False, False, False, False]], dtype=bool) + >>> (x == y).all() + False + >>> (x == y).all(0) + matrix([[False, False, False, False]], dtype=bool) + >>> (x == y).all(1) + matrix([[ True], + [False], + [False]], dtype=bool) + + """ + return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) + + def max(self, axis=None, out=None): + """ + Return the maximum value along an axis. + + Parameters + ---------- + See `amax` for complete descriptions + + See Also + -------- + amax, ndarray.max + + Notes + ----- + This is the same as `ndarray.max`, but returns a `matrix` object + where `ndarray.max` would return an ndarray. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.max() + 11 + >>> x.max(0) + matrix([[ 8, 9, 10, 11]]) + >>> x.max(1) + matrix([[ 3], + [ 7], + [11]]) + + """ + return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) + + def argmax(self, axis=None, out=None): + """ + Indices of the maximum values along an axis. 
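+
+        Note that, unlike the value reductions above, the index
+        reductions are re-oriented with ``_align`` rather than with
+        ``keepdims``/``_collapse``.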
+ + Parameters + ---------- + See `numpy.argmax` for complete descriptions + + See Also + -------- + numpy.argmax + + Notes + ----- + This is the same as `ndarray.argmax`, but returns a `matrix` object + where `ndarray.argmax` would return an `ndarray`. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.argmax() + 11 + >>> x.argmax(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmax(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmax(self, axis, out)._align(axis) + + def min(self, axis=None, out=None): + """ + Return the minimum value along an axis. + + Parameters + ---------- + See `amin` for complete descriptions. + + See Also + -------- + amin, ndarray.min + + Notes + ----- + This is the same as `ndarray.min`, but returns a `matrix` object + where `ndarray.min` would return an ndarray. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.min() + -11 + >>> x.min(0) + matrix([[ -8, -9, -10, -11]]) + >>> x.min(1) + matrix([[ -3], + [ -7], + [-11]]) + + """ + return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) + + def argmin(self, axis=None, out=None): + """ + Return the indices of the minimum values along an axis. + + Parameters + ---------- + See `numpy.argmin` for complete descriptions. + + See Also + -------- + numpy.argmin + + Notes + ----- + This is the same as `ndarray.argmin`, but returns a `matrix` object + where `ndarray.argmin` would return an `ndarray`. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.argmin() + 11 + >>> x.argmin(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmin(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmin(self, axis, out)._align(axis) + + def ptp(self, axis=None, out=None): + """ + Peak-to-peak (maximum - minimum) value along the given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp + + Notes + ----- + Same as `ndarray.ptp`, except, where that would return an `ndarray` object, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.ptp() + 11 + >>> x.ptp(0) + matrix([[8, 8, 8, 8]]) + >>> x.ptp(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.ptp(self, axis, out)._align(axis) + + def getI(self): + """ + Returns the (multiplicative) inverse of invertible `self`. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + If `self` is non-singular, `ret` is such that ``ret * self`` == + ``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return + ``True``. + + Raises + ------ + numpy.linalg.LinAlgError: Singular matrix + If `self` is singular. + + See Also + -------- + linalg.inv + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]'); m + matrix([[1, 2], + [3, 4]]) + >>> m.getI() + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + >>> m.getI() * m + matrix([[ 1., 0.], + [ 0., 1.]]) + + """ + M, N = self.shape + if M == N: + from numpy.dual import inv as func + else: + from numpy.dual import pinv as func + return asmatrix(func(self)) + + def getA(self): + """ + Return `self` as an `ndarray` object. + + Equivalent to ``np.asarray(self)``. 
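+        It is also exposed as the ``A`` property defined near the end of
+        the class body.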
+ + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self` as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA() + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + """ + return self.__array__() + + def getA1(self): + """ + Return `self` as a flattened `ndarray`. + + Equivalent to ``np.asarray(x).ravel()`` + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self`, 1-D, as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA1() + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + return self.__array__().ravel() + + def getT(self): + """ + Returns the transpose of the matrix. + + Does *not* conjugate! For the complex conjugate transpose, use ``.H``. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + The (non-conjugated) transpose of the matrix. + + See Also + -------- + transpose, getH + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]') + >>> m + matrix([[1, 2], + [3, 4]]) + >>> m.getT() + matrix([[1, 3], + [2, 4]]) + + """ + return self.transpose() + + def getH(self): + """ + Returns the (complex) conjugate transpose of `self`. + + Equivalent to ``np.transpose(self)`` if `self` is real-valued. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + complex conjugate transpose of `self` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))) + >>> z = x - 1j*x; z + matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], + [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], + [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) + >>> z.getH() + matrix([[ 0. +0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. +7.j, 11.+11.j]]) + + """ + if issubclass(self.dtype.type, N.complexfloating): + return self.transpose().conjugate() + else: + return self.transpose() + + T = property(getT, None) + A = property(getA, None) + A1 = property(getA1, None) + H = property(getH, None) + I = property(getI, None) + +def _from_string(str, gdict, ldict): + rows = str.split(';') + rowtup = [] + for row in rows: + trow = row.split(',') + newrow = [] + for x in trow: + newrow.extend(x.split()) + trow = newrow + coltup = [] + for col in trow: + col = col.strip() + try: + thismat = ldict[col] + except KeyError: + try: + thismat = gdict[col] + except KeyError: + raise KeyError("%s not found" % (col,)) + + coltup.append(thismat) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) + + +def bmat(obj, ldict=None, gdict=None): + """ + Build a matrix object from a string, nested sequence, or array. + + Parameters + ---------- + obj : str or array_like + Input data. Names of variables in the current scope may be + referenced, even if `obj` is a string. + + Returns + ------- + out : matrix + Returns a matrix object, which is a specialized 2-D array. 
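+        The blocks are joined with `concatenate`, so their shapes must be
+        conformable: blocks within a row need matching heights, and the
+        assembled rows need matching widths.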
+ + See Also + -------- + matrix + + Examples + -------- + >>> A = np.mat('1 1; 1 1') + >>> B = np.mat('2 2; 2 2') + >>> C = np.mat('3 4; 5 6') + >>> D = np.mat('7 8; 9 0') + + All the following expressions construct the same block matrix: + + >>> np.bmat([[A, B], [C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat('A,B; C,D') + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + + """ + if isinstance(obj, str): + if gdict is None: + # get previous frame + frame = sys._getframe().f_back + glob_dict = frame.f_globals + loc_dict = frame.f_locals + else: + glob_dict = gdict + loc_dict = ldict + + return matrix(_from_string(obj, glob_dict, loc_dict)) + + if isinstance(obj, (tuple, list)): + # [[A,B],[C,D]] + arr_rows = [] + for row in obj: + if isinstance(row, N.ndarray): # not 2-d + return matrix(concatenate(obj, axis=-1)) + else: + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) + if isinstance(obj, N.ndarray): + return matrix(obj) + +mat = asmatrix diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/setup.py new file mode 100644 index 0000000000000..8c383cecec7b8 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/setup.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +from __future__ import division, print_function + +import os + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('matrixlib', parent_package, top_path) + config.add_data_dir('tests') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + config = configuration(top_path='').todict() + setup(**config) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_defmatrix.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_defmatrix.py new file mode 100644 index 0000000000000..a06a564aa8bae --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_defmatrix.py @@ -0,0 +1,400 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import * +from numpy.core import * +from numpy import matrix, asmatrix, bmat +from numpy.matrixlib.defmatrix import matrix_power +from numpy.matrixlib import mat +import numpy as np +import collections + +class TestCtor(TestCase): + def test_basic(self): + A = array([[1, 2], [3, 4]]) + mA = matrix(A) + assert_(all(mA.A == A)) + + B = bmat("A,A;A,A") + C = bmat([[A, A], [A, A]]) + D = array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + assert_(all(B.A == D)) + assert_(all(C.A == D)) + + E = array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(all(bmat([A, E]) == AEresult)) + + vec = arange(5) + mvec = matrix(vec) + assert_(mvec.shape == (1, 5)) + + def test_exceptions(self): + # Check for TypeError when called with invalid string data. 
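+        # ("invalid" is not a parseable matrix literal; construction is
+        # expected to fail with TypeError rather than succeed silently.)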
+ assert_raises(TypeError, matrix, "invalid") + + def test_bmat_nondefault_str(self): + A = array([[1, 2], [3, 4]]) + B = array([[5, 6], [7, 8]]) + Aresult = array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + Bresult = array([[5, 6, 5, 6], + [7, 8, 7, 8], + [5, 6, 5, 6], + [7, 8, 7, 8]]) + mixresult = array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) + assert_(all(bmat("A,A;A,A") == Aresult)) + assert_(all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) + assert_(all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) + assert_(all(b2 == mixresult)) + + +class TestProperties(TestCase): + def test_sum(self): + """Test whether matrix.sum(axis=1) preserves orientation. + Fails in NumPy <= 0.9.6.2127. + """ + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T + sumall = 30 + assert_array_equal(sum0, M.sum(axis=0)) + assert_array_equal(sum1, M.sum(axis=1)) + assert_equal(sumall, M.sum()) + + assert_array_equal(sum0, np.sum(M, axis=0)) + assert_array_equal(sum1, np.sum(M, axis=1)) + assert_equal(sumall, np.sum(M)) + + + def test_prod(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.prod(), 720) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) + + assert_equal(np.prod(x), 720) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) + + y = matrix([0, 1, 3]) + assert_(y.prod() == 0) + + def test_max(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.max(), 6) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) + + assert_equal(np.max(x), 6) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) + + def test_min(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.min(), 1) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) + + assert_equal(np.min(x), 1) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) + + def test_ptp(self): + x = np.arange(4).reshape((2, 2)) + assert_(x.ptp() == 3) + assert_(all(x.ptp(0) == array([2, 2]))) + assert_(all(x.ptp(1) == array([1, 1]))) + + def test_var(self): + x = np.arange(9).reshape((3, 3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + + def test_basic(self): + import numpy.linalg as linalg + + A = array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + assert_(allclose(linalg.inv(A), mA.I)) + assert_(all(array(transpose(A) == mA.T))) + assert_(all(array(transpose(A) == mA.H))) + assert_(all(A == mA.A)) + + B = A + 2j*A + mB = matrix(B) + assert_(allclose(linalg.inv(B), mB.I)) + assert_(all(array(transpose(B) == mB.T))) + assert_(all(array(conjugate(transpose(B)) == mB.H))) + + def test_pinv(self): + x = matrix(arange(6).reshape(2, 3)) + xpinv = matrix([[-0.77777778, 0.27777778], + [-0.11111111, 0.11111111], + [ 0.55555556, -0.05555556]]) + assert_almost_equal(x.I, xpinv) + + def test_comparisons(self): + A = arange(100).reshape(10, 10) + mA = matrix(A) + mB = matrix(A) + 0.1 + assert_(all(mB == A+0.1)) + assert_(all(mB == matrix(A+0.1))) + assert_(not any(mB == matrix(A-0.1))) + assert_(all(mA < mB)) + assert_(all(mA <= mB)) 
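+        # Added note (not in the original suite): each comparison above
+        # yields a boolean matrix, which `all`/`any` then reduce.
+        assert_(isinstance(mA < mB, matrix))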
+ assert_(all(mA <= mA)) + assert_(not any(mA < mA)) + + assert_(not any(mB < mA)) + assert_(all(mB >= mA)) + assert_(all(mB >= mB)) + assert_(not any(mB > mB)) + + assert_(all(mA == mA)) + assert_(not any(mA == mB)) + assert_(all(mB != mA)) + + assert_(not all(abs(mA) > 0)) + assert_(all(abs(mB > 0))) + + def test_asmatrix(self): + A = arange(100).reshape(10, 10) + mA = asmatrix(A) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) + + def test_noaxis(self): + A = matrix([[1, 0], [0, 1]]) + assert_(A.sum() == matrix(2)) + assert_(A.mean() == matrix(0.5)) + + def test_repr(self): + A = matrix([[1, 0], [0, 1]]) + assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") + +class TestCasting(TestCase): + def test_basic(self): + A = arange(100).reshape(10, 10) + mA = matrix(A) + + mB = mA.copy() + O = ones((10, 10), float64) * 0.1 + mB = mB + O + assert_(mB.dtype.type == float64) + assert_(all(mA != mB)) + assert_(all(mB == mA+0.1)) + + mC = mA.copy() + O = ones((10, 10), complex128) + mC = mC * O + assert_(mC.dtype.type == complex128) + assert_(all(mA != mB)) + + +class TestAlgebra(TestCase): + def test_basic(self): + import numpy.linalg as linalg + + A = array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + + B = identity(2) + for i in range(6): + assert_(allclose((mA ** i).A, B)) + B = dot(B, A) + + Ainv = linalg.inv(A) + B = identity(2) + for i in range(6): + assert_(allclose((mA ** -i).A, B)) + B = dot(B, Ainv) + + assert_(allclose((mA * mA).A, dot(A, A))) + assert_(allclose((mA + mA).A, (A + A))) + assert_(allclose((3*mA).A, (3*A))) + + mA2 = matrix(A) + mA2 *= 3 + assert_(allclose(mA2.A, 3*A)) + + def test_pow(self): + """Test raising a matrix to an integer power works as expected.""" + m = matrix("1. 2.; 3. 4.") + m2 = m.copy() + m2 **= 2 + mi = m.copy() + mi **= -1 + m4 = m2.copy() + m4 **= 2 + assert_array_almost_equal(m2, m**2) + assert_array_almost_equal(m4, np.dot(m2, m2)) + assert_array_almost_equal(np.dot(mi, m), np.eye(2)) + + def test_notimplemented(self): + '''Check that 'not implemented' operations produce a failure.''' + A = matrix([[1., 2.], + [3., 4.]]) + + # __rpow__ + try: + 1.0**A + except TypeError: + pass + else: + self.fail("matrix.__rpow__ doesn't raise a TypeError") + + # __mul__ with something not a list, ndarray, tuple, or scalar + try: + A*object() + except TypeError: + pass + else: + self.fail("matrix.__mul__ with non-numeric object doesn't raise" + "a TypeError") + +class TestMatrixReturn(TestCase): + def test_instance_methods(self): + a = matrix([1.0], dtype='f8') + methodargs = { + 'astype': ('intc',), + 'clip': (0.0, 1.0), + 'compress': ([1],), + 'repeat': (1,), + 'reshape': (1,), + 'swapaxes': (0, 0), + 'dot': np.array([1.0]), + } + excluded_methods = [ + 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', + 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', + 'searchsorted', 'setflags', 'setfield', 'sort', + 'partition', 'argpartition', + 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', + 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', + 'prod', 'std', 'ctypes', 'itemset', 'setasflat' + ] + for attrib in dir(a): + if attrib.startswith('_') or attrib in excluded_methods: + continue + f = getattr(a, attrib) + if isinstance(f, collections.Callable): + # reset contents of a + a.astype('f8') + a.fill(1.0) + if attrib in methodargs: + args = methodargs[attrib] + else: + args = () + b = f(*args) + assert_(type(b) is matrix, "%s" % attrib) + assert_(type(a.real) is matrix) + assert_(type(a.imag) is matrix) + c, d = 
matrix([0.0]).nonzero() + assert_(type(c) is matrix) + assert_(type(d) is matrix) + + +class TestIndexing(TestCase): + def test_basic(self): + x = asmatrix(zeros((3, 2), float)) + y = zeros((3, 1), float) + y[:, 0] = [0.8, 0.2, 0.3] + x[:, 1] = y>0.5 + assert_equal(x, [[0, 1], [0, 0], [0, 0]]) + + +class TestNewScalarIndexing(TestCase): + def setUp(self): + self.a = matrix([[1, 2], [3, 4]]) + + def test_dimesions(self): + a = self.a + x = a[0] + assert_equal(x.ndim, 2) + + def test_array_from_matrix_list(self): + a = self.a + x = array([a, a]) + assert_equal(x.shape, [2, 2, 2]) + + def test_array_to_list(self): + a = self.a + assert_equal(a.tolist(), [[1, 2], [3, 4]]) + + def test_fancy_indexing(self): + a = self.a + x = a[1, [0, 1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1], [0]], [[1, 0], [0, 1]]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[4, 3], [1, 2]])) + + def test_matrix_element(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) + + x = matrix(0) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) + + def test_scalar_indexing(self): + x = asmatrix(zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) + + def test_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0,:], [[1, 0]]) + assert_array_equal(x[1,:], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) + + def test_boolean_indexing(self): + A = arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, array([True, False])], x[:, 0]) + assert_array_equal(x[array([True, False, False]),:], x[0,:]) + + def test_list_indexing(self): + A = arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) + +class TestPower(TestCase): + def test_returntype(self): + a = array([[0, 1], [0, 0]]) + assert_(type(matrix_power(a, 2)) is ndarray) + a = mat(a) + assert_(type(matrix_power(a, 2)) is matrix) + + def test_list(self): + assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_multiarray.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_multiarray.py new file mode 100644 index 0000000000000..fc5b1df17d7e1 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_multiarray.py @@ -0,0 +1,18 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import * + +class TestView(TestCase): + def test_type(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x.view(np.matrix), np.matrix)) + + def test_keywords(self): + x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype='= 2.6. + +""" +from __future__ import division, absolute_import, print_function + +from abc import ABCMeta, abstractmethod, abstractproperty +from numbers import Number + +import numpy as np +from . import polyutils as pu + +__all__ = ['ABCPolyBase'] + +class ABCPolyBase(object): + """An abstract base class for series classes. 
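+
+    Concrete subclasses supply the low-level arithmetic primitives that
+    are declared abstract below (``_add``, ``_mul``, ``_val``, ...);
+    this base class wires them into the standard operator protocol.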
+ + ABCPolyBase provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the + methods listed below. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + coef : array_like + Series coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where + ``P_i`` is the basis polynomials of degree ``i``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is the derived class domain. + window : (2,) array_like, optional + Window, see domain for its use. The default value is the + derived class window. + + Attributes + ---------- + coef : (N,) ndarray + Series coefficients in order of increasing degree. + domain : (2,) ndarray + Domain that is mapped to window. + window : (2,) ndarray + Window that domain is mapped to. + + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + """ + __metaclass__ = ABCMeta + + # Not hashable + __hash__ = None + + # Don't let participate in array operations. Value doesn't matter. + __array_priority__ = 1000 + + # Limit runaway size. T_n^m has degree n*m + maxpower = 100 + + @abstractproperty + def domain(self): + pass + + @abstractproperty + def window(self): + pass + + @abstractproperty + def nickname(self): + pass + + @abstractmethod + def _add(self): + pass + + @abstractmethod + def _sub(self): + pass + + @abstractmethod + def _mul(self): + pass + + @abstractmethod + def _div(self): + pass + + @abstractmethod + def _pow(self): + pass + + @abstractmethod + def _val(self): + pass + + @abstractmethod + def _int(self): + pass + + @abstractmethod + def _der(self): + pass + + @abstractmethod + def _fit(self): + pass + + @abstractmethod + def _line(self): + pass + + @abstractmethod + def _roots(self): + pass + + @abstractmethod + def _fromroots(self): + pass + + def has_samecoef(self, other): + """Check if coefficients match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + """ + if len(self.coef) != len(other.coef): + return False + elif not np.all(self.coef == other.coef): + return False + else: + return True + + def has_samedomain(self, other): + """Check if domains match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. + + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + other : object + Class instance. 
+ + Returns + ------- + bool : boolean + True if other is same class as self + + """ + return isinstance(other, self.__class__) + + def _get_coefficients(self, other): + """Interpret other as polynomial coefficients. + + The `other` argument is checked to see if it is of the same + class as self with identical domain and window. If so, + return its coefficients, otherwise return `other`. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + other : anything + Object to be checked. + + Returns + ------- + coef: + The coefficients of`other` if it is a compatible instance, + of ABCPolyBase, otherwise `other`. + + Raises + ------ + TypeError: + When `other` is an incompatible instance of ABCPolyBase. + + """ + if isinstance(other, ABCPolyBase): + if not isinstance(other, self.__class__): + raise TypeError("Polynomial types differ") + elif not np.all(self.domain == other.domain): + raise TypeError("Domains differ") + elif not np.all(self.window == other.window): + raise TypeError("Windows differ") + return other.coef + return other + + def __init__(self, coef, domain=None, window=None): + [coef] = pu.as_series([coef], trim=False) + self.coef = coef + + if domain is not None: + [domain] = pu.as_series([domain], trim=False) + if len(domain) != 2: + raise ValueError("Domain has wrong number of elements.") + self.domain = domain + + if window is not None: + [window] = pu.as_series([window], trim=False) + if len(window) != 2: + raise ValueError("Window has wrong number of elements.") + self.window = window + + def __repr__(self): + format = "%s(%s, %s, %s)" + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + name = self.__class__.__name__ + return format % (name, coef, domain, window) + + def __str__(self): + format = "%s(%s)" + coef = str(self.coef) + name = self.nickname + return format % (name, coef) + + # Pickle and copy + + def __getstate__(self): + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + return ret + + def __setstate__(self, dict): + self.__dict__ = dict + + # Call + + def __call__(self, arg): + off, scl = pu.mapparms(self.domain, self.window) + arg = off + scl*arg + return self._val(arg, self.coef) + + def __iter__(self): + return iter(self.coef) + + def __len__(self): + return len(self.coef) + + # Numeric properties. + + def __neg__(self): + return self.__class__(-self.coef, self.domain, self.window) + + def __pos__(self): + return self + + def __add__(self, other): + try: + othercoef = self._get_coefficients(other) + coef = self._add(self.coef, othercoef) + except TypeError as e: + raise e + except: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __sub__(self, other): + try: + othercoef = self._get_coefficients(other) + coef = self._sub(self.coef, othercoef) + except TypeError as e: + raise e + except: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __mul__(self, other): + try: + othercoef = self._get_coefficients(other) + coef = self._mul(self.coef, othercoef) + except TypeError as e: + raise e + except: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __div__(self, other): + # set to __floordiv__, /, for now. + return self.__floordiv__(other) + + def __truediv__(self, other): + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. 
+ # It is hard to see where n would come from, though. + if not isinstance(other, Number) or isinstance(other, bool): + form = "unsupported types for true division: '%s', '%s'" + raise TypeError(form % (type(self), type(other))) + return self.__floordiv__(other) + + def __floordiv__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __mod__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __divmod__(self, other): + try: + othercoef = self._get_coefficients(other) + quo, rem = self._div(self.coef, othercoef) + except (TypeError, ZeroDivisionError) as e: + raise e + except: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window) + rem = self.__class__(rem, self.domain, self.window) + return quo, rem + + def __pow__(self, other): + coef = self._pow(self.coef, other, maxpower=self.maxpower) + res = self.__class__(coef, self.domain, self.window) + return res + + def __radd__(self, other): + try: + coef = self._add(other, self.coef) + except: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rsub__(self, other): + try: + coef = self._sub(other, self.coef) + except: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rmul__(self, other): + try: + coef = self._mul(other, self.coef) + except: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rdiv__(self, other): + # set to __floordiv__ /. + return self.__rfloordiv__(other) + + def __rtruediv__(self, other): + # An instance of ABCPolyBase is not considered a + # Number. + return NotImplemented + + def __rfloordiv__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __rmod__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __rdivmod__(self, other): + try: + quo, rem = self._div(other, self.coef) + except ZeroDivisionError as e: + raise e + except: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window) + rem = self.__class__(rem, self.domain, self.window) + return quo, rem + + # Enhance me + # some augmented arithmetic operations could be added here + + def __eq__(self, other): + res = (isinstance(other, self.__class__) and + np.all(self.domain == other.domain) and + np.all(self.window == other.window) and + (self.coef.shape == other.coef.shape) and + np.all(self.coef == other.coef)) + return res + + def __ne__(self, other): + return not self.__eq__(other) + + # + # Extra methods. + # + + def copy(self): + """Return a copy. + + Returns + ------- + new_series : series + Copy of self. + + """ + return self.__class__(self.coef, self.domain, self.window) + + def degree(self): + """The degree of the series. + + .. versionadded:: 1.5.0 + + Returns + ------- + degree : int + Degree of the series, one less than the number of coefficients. + + """ + return len(self) - 1 + + def cutdeg(self, deg): + """Truncate series to the given degree. + + Reduce the degree of the series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + .. 
versionadded:: 1.5.0 + + Parameters + ---------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. + + Returns + ------- + new_series : series + New instance of series with reduced degree. + + """ + return self.truncate(deg + 1) + + def trim(self, tol=0): + """Remove trailing coefficients + + Remove trailing coefficients until a coefficient is reached whose + absolute value greater than `tol` or the beginning of the series is + reached. If all the coefficients would be removed the series is set + to ``[0]``. A new series instance is returned with the new + coefficients. The current instance remains unchanged. + + Parameters + ---------- + tol : non-negative number. + All trailing coefficients less than `tol` will be removed. + + Returns + ------- + new_series : series + Contains the new set of coefficients. + + """ + coef = pu.trimcoef(self.coef, tol) + return self.__class__(coef, self.domain, self.window) + + def truncate(self, size): + """Truncate series to length `size`. + + Reduce the series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small. + + Parameters + ---------- + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. + + Returns + ------- + new_series : series + New instance of series with truncated coefficients. + + """ + isize = int(size) + if isize != size or isize < 1: + raise ValueError("size must be a positive integer") + if isize >= len(self.coef): + coef = self.coef + else: + coef = self.coef[:isize] + return self.__class__(coef, self.domain, self.window) + + def convert(self, domain=None, kind=None, window=None): + """Convert series to a different kind and/or domain and/or window. + + Parameters + ---------- + domain : array_like, optional + The domain of the converted series. If the value is None, + the default domain of `kind` is used. + kind : class, optional + The polynomial series type class to which the current instance + should be converted. If kind is None, then the class of the + current instance is used. + window : array_like, optional + The window of the converted series. If the value is None, + the default window of `kind` is used. + + Returns + ------- + new_series : series + The returned class can be of different type than the current + instance and/or have a different domain and/or different + window. + + Notes + ----- + Conversion between domains and class types can result in + numerically ill defined series. + + Examples + -------- + + """ + if kind is None: + kind = self.__class__ + if domain is None: + domain = kind.domain + if window is None: + window = kind.window + return self(kind.identity(domain, window=window)) + + def mapparms(self): + """Return the mapping parameters. + + The returned values define a linear map ``off + scl*x`` that is + applied to the input arguments before the series is evaluated. The + map depends on the ``domain`` and ``window``; if the current + ``domain`` is equal to the ``window`` the resulting map is the + identity. If the coefficients of the series instance are to be + used by themselves outside this class, then the linear function + must be substituted for the ``x`` in the standard representation of + the base polynomials. 
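+
+        Concretely, if ``off, scl = p.mapparms()``, then evaluating the
+        series instance at ``x`` is the same as evaluating the raw
+        coefficient series at ``off + scl*x``.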
+ + Returns + ------- + off, scl : float or complex + The mapping function is defined by ``off + scl*x``. + + Notes + ----- + If the current domain is the interval ``[l1, r1]`` and the window + is ``[l2, r2]``, then the linear mapping function ``L`` is + defined by the equations:: + + L(l1) = l2 + L(r1) = r2 + + """ + return pu.mapparms(self.domain, self.window) + + def integ(self, m=1, k=[], lbnd=None): + """Integrate. + + Return a series instance that is the definite integral of the + current series. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + k : array_like + Integration constants. The first constant is applied to the + first integration, the second to the second, and so on. The + list of values must less than or equal to `m` in length and any + missing values are set to zero. + lbnd : Scalar + The lower bound of the definite integral. + + Returns + ------- + new_series : series + A new series representing the integral. The domain is the same + as the domain of the integrated series. + + """ + off, scl = self.mapparms() + if lbnd is None: + lbnd = 0 + else: + lbnd = off + scl*lbnd + coef = self._int(self.coef, m, k, lbnd, 1./scl) + return self.__class__(coef, self.domain, self.window) + + def deriv(self, m=1): + """Differentiate. + + Return a series instance of that is the derivative of the current + series. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + + Returns + ------- + new_series : series + A new series representing the derivative. The domain is the same + as the domain of the differentiated series. + + """ + off, scl = self.mapparms() + coef = self._der(self.coef, m, scl) + return self.__class__(coef, self.domain, self.window) + + def roots(self): + """Return the roots of the series polynomial. + + Compute the roots for the series. Note that the accuracy of the + roots decrease the further outside the domain they lie. + + Returns + ------- + roots : ndarray + Array containing the roots of the series. + + """ + roots = self._roots(self.coef) + return pu.mapdomain(roots, self.window, self.domain) + + def linspace(self, n=100, domain=None): + """Return x, y values at equally spaced points in domain. + + Returns the x, y values at `n` linearly spaced points across the + domain. Here y is the value of the polynomial at the points x. By + default the domain is the same as that of the series instance. + This method is intended mostly as a plotting aid. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + n : int, optional + Number of point pairs to return. The default value is 100. + domain : {None, array_like}, optional + If not None, the specified domain is used instead of that of + the calling instance. It should be of the form ``[beg,end]``. + The default is None which case the class domain is used. + + Returns + ------- + x, y : ndarray + x is equal to linspace(self.domain[0], self.domain[1], n) and + y is the series evaluated at element of x. + + """ + if domain is None: + domain = self.domain + x = np.linspace(domain[0], domain[1], n) + y = self(x) + return x, y + + @classmethod + def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, + window=None): + """Least squares fit to data. + + Return a series instance that is the least squares fit to the data + `y` sampled at `x`. The domain of the returned instance can be + specified and this will often result in a superior fit with less + chance of ill conditioning. 
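+
+        As a sketch of the mechanics, the sample points are first mapped
+        from `domain` onto the class `window` and the least squares
+        problem is then solved in the mapped variable.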
+ + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial. + domain : {None, [beg, end], []}, optional + Domain to use for the returned series. If ``None``, + then a minimal domain that covers the points `x` is chosen. If + ``[]`` the class domain is used. The default value was the + class domain in NumPy 1.4 and ``None`` in later versions. + The ``[]`` option was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is len(x)*eps, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining nature of return value. When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned. + w : array_like, shape (M,), optional + Weights. If not None the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products + ``w[i]*y[i]`` all have the same variance. The default value is + None. + + .. versionadded:: 1.5.0 + window : {[beg, end]}, optional + Window to use for the returned series. The default + value is the default class domain + + .. versionadded:: 1.6.0 + + Returns + ------- + new_series : series + A series that represents the least squares fit to the data and + has the domain specified in the call. + + [resid, rank, sv, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + """ + if domain is None: + domain = pu.getdomain(x) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + xnew = pu.mapdomain(x, domain, window) + res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) + if full: + [coef, status] = res + return cls(coef, domain=domain, window=window), status + else: + coef = res + return cls(coef, domain=domain, window=window) + + @classmethod + def fromroots(cls, roots, domain=[], window=None): + """Return series instance that has the specified roots. + + Returns a series representing the product + ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a + list of roots. + + Parameters + ---------- + roots : array_like + List of roots. + domain : {[], None, array_like}, optional + Domain for the resulting series. If None the domain is the + interval from the smallest root to the largest. If [] the + domain is the class domain. The default is []. + window : {None, array_like}, optional + Window for the returned series. If None the class window is + used. The default is None. + + Returns + ------- + new_series : series + Series with the specified roots. 
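+
+        Examples
+        --------
+        A sketch using the concrete `Polynomial` subclass; the monic
+        product ``(x - 1)*(x + 1)`` is ``x**2 - 1``:
+
+        >>> from numpy.polynomial import Polynomial
+        >>> Polynomial.fromroots([1, -1]).coef
+        array([-1.,  0.,  1.])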
+ + """ + [roots] = pu.as_series([roots], trim=False) + if domain is None: + domain = pu.getdomain(roots) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + deg = len(roots) + off, scl = pu.mapparms(domain, window) + rnew = off + scl*roots + coef = cls._fromroots(rnew) / scl**deg + return cls(coef, domain=domain, window=window) + + @classmethod + def identity(cls, domain=None, window=None): + """Identity function. + + If ``p`` is the returned series, then ``p(x) == x`` for all + values of x. + + Parameters + ---------- + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + Series of representing the identity. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + off, scl = pu.mapparms(window, domain) + coef = cls._line(off, scl) + return cls(coef, domain, window) + + @classmethod + def basis(cls, deg, domain=None, window=None): + """Series basis polynomial of degree `deg`. + + Returns the series representing the basis polynomial of degree `deg`. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + deg : int + Degree of the basis polynomial for the series. Must be >= 0. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series with the coefficient of the `deg` term set to one and + all others zero. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + ideg = int(deg) + + if ideg != deg or ideg < 0: + raise ValueError("deg must be non-negative integer") + return cls([0]*ideg + [1], domain, window) + + @classmethod + def cast(cls, series, domain=None, window=None): + """Convert series to series of this class. + + The `series` is expected to be an instance of some polynomial + series of one of the types supported by by the numpy.polynomial + module, but could be some other class that supports the convert + method. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + series : series + The series instance to be converted. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series of the same kind as the calling class and equal to + `series` when evaluated. 
+ + See Also + -------- + convert : similar instance method + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + return series.convert(domain, cls, window) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/chebyshev.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/chebyshev.py new file mode 100644 index 0000000000000..f213ab3fd0497 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/chebyshev.py @@ -0,0 +1,2056 @@ +""" +Objects for dealing with Chebyshev series. + +This module provides a number of objects (mostly functions) useful for +dealing with Chebyshev series, including a `Chebyshev` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `chebdomain` -- Chebyshev series default domain, [-1,1]. +- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates + identically to 0. +- `chebone` -- (Coefficients of the) Chebyshev series that evaluates + identically to 1. +- `chebx` -- (Coefficients of the) Chebyshev series for the identity map, + ``f(x) = x``. + +Arithmetic +---------- +- `chebadd` -- add two Chebyshev series. +- `chebsub` -- subtract one Chebyshev series from another. +- `chebmul` -- multiply two Chebyshev series. +- `chebdiv` -- divide one Chebyshev series by another. +- `chebpow` -- raise a Chebyshev series to an positive integer power +- `chebval` -- evaluate a Chebyshev series at given points. +- `chebval2d` -- evaluate a 2D Chebyshev series at given points. +- `chebval3d` -- evaluate a 3D Chebyshev series at given points. +- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product. +- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product. + +Calculus +-------- +- `chebder` -- differentiate a Chebyshev series. +- `chebint` -- integrate a Chebyshev series. + +Misc Functions +-------------- +- `chebfromroots` -- create a Chebyshev series with specified roots. +- `chebroots` -- find the roots of a Chebyshev series. +- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials. +- `chebvander2d` -- Vandermonde-like matrix for 2D power series. +- `chebvander3d` -- Vandermonde-like matrix for 3D power series. +- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights. +- `chebweight` -- Chebyshev weight function. +- `chebcompanion` -- symmetrized companion matrix in Chebyshev form. +- `chebfit` -- least-squares fit returning a Chebyshev series. +- `chebpts1` -- Chebyshev points of the first kind. +- `chebpts2` -- Chebyshev points of the second kind. +- `chebtrim` -- trim leading coefficients from a Chebyshev series. +- `chebline` -- Chebyshev series representing given straight line. +- `cheb2poly` -- convert a Chebyshev series to a polynomial. +- `poly2cheb` -- convert a polynomial to a Chebyshev series. + +Classes +------- +- `Chebyshev` -- A Chebyshev series class. + +See also +-------- +`numpy.polynomial` + +Notes +----- +The implementations of multiplication, division, integration, and +differentiation use the algebraic identities [1]_: + +.. math :: + T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ + z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. + +where + +.. math :: x = \\frac{z + z^{-1}}{2}. + +These identities allow a Chebyshev series to be expressed as a finite, +symmetric Laurent series. In this module, this sort of Laurent series +is referred to as a "z-series." 
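+
+For example, with :math:`x = (z + z^{-1})/2`,
+
+.. math :: T_2(x) = 2x^2 - 1 = \\frac{(z + z^{-1})^2}{2} - 1
+                  = \\frac{z^2 + z^{-2}}{2},
+
+so a Chebyshev series ``[c0, c1, c2]`` corresponds to the symmetric
+z-series ``[c2/2, c1/2, c0, c1/2, c2/2]``.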
+ +References +---------- +.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev + Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 + (preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', + 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', + 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', + 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', + 'chebgauss', 'chebweight'] + +chebtrim = pu.trimcoef + +# +# A collection of functions for manipulating z-series. These are private +# functions and do minimal error checking. +# + +def _cseries_to_zseries(c): + """Covert Chebyshev series to z-series. + + Covert a Chebyshev series to the equivalent z-series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high + + Returns + ------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + """ + n = c.size + zs = np.zeros(2*n-1, dtype=c.dtype) + zs[n-1:] = c/2 + return zs + zs[::-1] + + +def _zseries_to_cseries(zs): + """Covert z-series to a Chebyshev series. + + Covert a z series to the equivalent Chebyshev series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + Returns + ------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high. + + """ + n = (zs.size + 1)//2 + c = zs[n-1:].copy() + c[1:n] *= 2 + return c + + +def _zseries_mul(z1, z2): + """Multiply two z-series. + + Multiply two z-series to produce a z-series. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D but this is not checked. + + Returns + ------- + product : 1-D ndarray + The product z-series. + + Notes + ----- + This is simply convolution. If symmetric/anti-symmetric z-series are + denoted by S/A then the following rules apply: + + S*S, A*A -> S + S*A, A*S -> A + + """ + return np.convolve(z1, z2) + + +def _zseries_div(z1, z2): + """Divide the first z-series by the second. + + Divide `z1` by `z2` and return the quotient and remainder as z-series. + Warning: this implementation only applies when both z1 and z2 have the + same symmetry, which is sufficient for present purposes. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D and have the same symmetry, but this is not + checked. + + Returns + ------- + + (quotient, remainder) : 1-D ndarrays + Quotient and remainder as z-series. + + Notes + ----- + This is not the same as polynomial division on account of the desired form + of the remainder. 
If symmetric/anti-symmetric z-series are denoted by S/A + then the following rules apply: + + S/S -> S,S + A/A -> S,A + + The restriction to types of the same symmetry could be fixed but seems like + unneeded generality. There is no natural form for the remainder in the case + where there is no symmetry. + + """ + z1 = z1.copy() + z2 = z2.copy() + len1 = len(z1) + len2 = len(z2) + if len2 == 1: + z1 /= z2 + return z1, z1[:1]*0 + elif len1 < len2: + return z1[:1]*0, z1 + else: + dlen = len1 - len2 + scl = z2[0] + z2 /= scl + quo = np.empty(dlen + 1, dtype=z1.dtype) + i = 0 + j = dlen + while i < j: + r = z1[i] + quo[i] = z1[i] + quo[dlen - i] = r + tmp = r*z2 + z1[i:i+len2] -= tmp + z1[j:j+len2] -= tmp + i += 1 + j -= 1 + r = z1[i] + quo[i] = r + tmp = r*z2 + z1[i:i+len2] -= tmp + quo /= scl + rem = z1[i+1:i-1+len2].copy() + return quo, rem + + +def _zseries_der(zs): + """Differentiate a z-series. + + The derivative is with respect to x, not z. This is achieved using the + chain rule and the value of dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to differentiate. + + Returns + ------- + derivative : z-series + The derivative + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + multiplying the value of zs by two also so that the two cancels in the + division. + + """ + n = len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs *= np.arange(-n, n+1)*2 + d, r = _zseries_div(zs, ns) + return d + + +def _zseries_int(zs): + """Integrate a z-series. + + The integral is with respect to x, not z. This is achieved by a change + of variable using dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to integrate + + Returns + ------- + integral : z-series + The indefinite integral + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + dividing the resulting zs by two. + + """ + n = 1 + len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs = _zseries_mul(zs, ns) + div = np.arange(-n, n+1)*2 + zs[:n] /= div[:n] + zs[n+1:] /= div[n+1:] + zs[n] = 0 + return zs + +# +# Chebyshev series functions +# + + +def poly2cheb(pol): + """ + Convert a polynomial to a Chebyshev series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Chebyshev series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Chebyshev + series. + + See Also + -------- + cheb2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(range(4)) + >>> p + Polynomial([ 0., 1., 2., 3.], [-1., 1.]) + >>> c = p.convert(kind=P.Chebyshev) + >>> c + Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.]) + >>> P.poly2cheb(range(4)) + array([ 1. , 3.25, 1. 
, 0.75]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = chebadd(chebmulx(res), pol[i]) + return res + + +def cheb2poly(c): + """ + Convert a Chebyshev series to a polynomial. + + Convert an array representing the coefficients of a Chebyshev series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Chebyshev series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2cheb + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Chebyshev(range(4)) + >>> c + Chebyshev([ 0., 1., 2., 3.], [-1., 1.]) + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([ -2., -8., 4., 12.], [-1., 1.]) + >>> P.cheb2poly(range(4)) + array([ -2., -8., 4., 12.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Chebyshev default domain. +chebdomain = np.array([-1, 1]) + +# Chebyshev coefficients representing zero. +chebzero = np.array([0]) + +# Chebyshev coefficients representing one. +chebone = np.array([1]) + +# Chebyshev coefficients representing the identity x. +chebx = np.array([0, 1]) + + +def chebline(off, scl): + """ + Chebyshev series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Chebyshev series for + ``off + scl*x``. + + See Also + -------- + polyline + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebline(3,2) + array([3, 2]) + >>> C.chebval(-3, C.chebline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def chebfromroots(roots): + """ + Generate a Chebyshev series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Chebyshev form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Chebyshev form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. 
If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + polyfromroots, legfromroots, lagfromroots, hermfromroots, + hermefromroots. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.25, 0. , 0.25]) + >>> j = complex(0,1) + >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.5+0.j, 0.0+0.j, 0.5+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [chebline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [chebmul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = chebmul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def chebadd(c1, c2): + """ + Add one Chebyshev series to another. + + Returns the sum of two Chebyshev series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Chebyshev series of their sum. + + See Also + -------- + chebsub, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Chebyshev series + is a Chebyshev series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebadd(c1,c2) + array([ 4., 4., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def chebsub(c1, c2): + """ + Subtract one Chebyshev series from another. + + Returns the difference of two Chebyshev series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their difference. + + See Also + -------- + chebadd, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Chebyshev + series is a Chebyshev series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebsub(c1,c2) + array([-2., 0., 2.]) + >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) + array([ 2., 0., -2.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def chebmulx(c): + """Multiply a Chebyshev series by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. 
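+
+    The multiplication uses the recursion relationship for Chebyshev
+    polynomials in the form
+
+    .. math::
+
+        xT_i(x) = (T_{i + 1}(x) + T_{i - 1}(x))/2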
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Chebyshev series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the result of the multiplication.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.5.0
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    # The zero series needs special treatment
+    if len(c) == 1 and c[0] == 0:
+        return c
+
+    prd = np.empty(len(c) + 1, dtype=c.dtype)
+    prd[0] = c[0]*0
+    prd[1] = c[0]
+    if len(c) > 1:
+        tmp = c[1:]/2
+        prd[2:] = tmp
+        prd[0:-2] += tmp
+    return prd
+
+
+def chebmul(c1, c2):
+    """
+    Multiply one Chebyshev series by another.
+
+    Returns the product of two Chebyshev series `c1` * `c2`. The arguments
+    are sequences of coefficients, from lowest order "term" to highest,
+    e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Chebyshev series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Chebyshev series coefficients representing their product.
+
+    See Also
+    --------
+    chebadd, chebsub, chebdiv, chebpow
+
+    Notes
+    -----
+    In general, the (polynomial) product of two C-series results in terms
+    that are not in the Chebyshev polynomial basis set. Thus, to express
+    the product as a C-series, it is typically necessary to "reproject"
+    the product onto said basis set, which typically produces
+    "unintuitive" (but correct) results; see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2,1)
+    >>> C.chebmul(c1,c2) # multiplication requires "reprojection"
+    array([  6.5,  12. ,  12. ,   4. ,   1.5])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    z1 = _cseries_to_zseries(c1)
+    z2 = _cseries_to_zseries(c2)
+    prd = _zseries_mul(z1, z2)
+    ret = _zseries_to_cseries(prd)
+    return pu.trimseq(ret)
+
+
+def chebdiv(c1, c2):
+    """
+    Divide one Chebyshev series by another.
+
+    Returns the quotient-with-remainder of two Chebyshev series
+    `c1` / `c2`. The arguments are sequences of coefficients from lowest
+    order "term" to highest, e.g., [1,2,3] represents the series
+    ``T_0 + 2*T_1 + 3*T_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Chebyshev series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    [quo, rem] : ndarrays
+        Of Chebyshev series coefficients representing the quotient and
+        remainder.
+
+    See Also
+    --------
+    chebadd, chebsub, chebmul, chebpow
+
+    Notes
+    -----
+    In general, the (polynomial) division of one C-series by another
+    results in quotient and remainder terms that are not in the Chebyshev
+    polynomial basis set. Thus, to express these results as C-series, it
+    is typically necessary to "reproject" the results onto said basis
+    set, which typically produces "unintuitive" (but correct) results;
+    see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2,1)
+    >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
+    (array([ 3.]), array([-8., -4.]))
+    >>> c2 = (0,1,2,3)
+    >>> C.chebdiv(c2,c1) # neither "intuitive"
+    (array([ 0.,  2.]), array([-2., -4.]))
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    if c2[-1] == 0:
+        raise ZeroDivisionError()
+
+    lc1 = len(c1)
+    lc2 = len(c2)
+    if lc1 < lc2:
+        return c1[:1]*0, c1
+    elif lc2 == 1:
+        return c1/c2[-1], c1[:1]*0
+    else:
+        z1 = _cseries_to_zseries(c1)
+        z2 = _cseries_to_zseries(c2)
+        quo, rem = _zseries_div(z1, z2)
+        quo = pu.trimseq(_zseries_to_cseries(quo))
+        rem = pu.trimseq(_zseries_to_cseries(rem))
+        return quo, rem
+
+
+def chebpow(c, pow, maxpower=16):
+    """Raise a Chebyshev series to a power.
+
+    Returns the Chebyshev series `c` raised to the power `pow`. The
+    argument `c` is a sequence of coefficients ordered from low to high,
+    i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2``.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Chebyshev series coefficients ordered from low to
+        high.
+    pow : integer
+        Power to which the series will be raised.
+    maxpower : integer, optional
+        Maximum power allowed. This is mainly to limit growth of the
+        series to unmanageable size. Default is 16.
+
+    Returns
+    -------
+    coef : ndarray
+        Chebyshev series of `c` raised to the power `pow`.
+
+    See Also
+    --------
+    chebadd, chebsub, chebmul, chebdiv
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> C.chebpow([1, 2, 3], 2) # same as C.chebmul([1,2,3], [1,2,3])
+    array([  7.5,  10. ,   8. ,   6. ,   4.5])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    power = int(pow)
+    if power != pow or power < 0:
+        raise ValueError("Power must be a non-negative integer.")
+    elif maxpower is not None and power > maxpower:
+        raise ValueError("Power is too large")
+    elif power == 0:
+        return np.array([1], dtype=c.dtype)
+    elif power == 1:
+        return c
+    else:
+        # This can be made more efficient by using powers of two
+        # in the usual way.
+        zs = _cseries_to_zseries(c)
+        prd = zs
+        for i in range(2, power + 1):
+            prd = np.convolve(prd, zs)
+        return _zseries_to_cseries(prd)
+
+
+def chebder(c, m=1, scl=1, axis=0):
+    """
+    Differentiate a Chebyshev series.
+
+    Returns the Chebyshev series coefficients `c` differentiated `m` times
+    along `axis`. At each iteration the result is multiplied by `scl` (the
+    scaling factor is for use in a linear change of variable). The argument
+    `c` is an array of coefficients from low to high degree along each
+    axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
+    while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
+    2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
+    ``y``.
+
+    Parameters
+    ----------
+    c : array_like
+        Array of Chebyshev series coefficients. If c is multidimensional
+        the different axes correspond to different variables with the
+        degree in each axis given by the corresponding index.
+    m : int, optional
+        Number of derivatives taken, must be non-negative. (Default: 1)
+    scl : scalar, optional
+        Each differentiation is multiplied by `scl`. The end result is
+        multiplication by ``scl**m``. This is for use in a linear change of
+        variable. (Default: 1)
+    axis : int, optional
+        Axis over which the derivative is taken. (Default: 0).
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    der : ndarray
+        Chebyshev series of the derivative.
+
+    See Also
+    --------
+    chebint
+
+    Notes
+    -----
+    In general, the result of differentiating a C-series needs to be
+    "reprojected" onto the C-series basis set.
Thus, typically, the + result of this function is "unintuitive," albeit correct; see Examples + section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3,4) + >>> C.chebder(c) + array([ 14., 12., 24.]) + >>> C.chebder(c,3) + array([ 96.]) + >>> C.chebder(c,scl=-1) + array([-14., -12., -24.]) + >>> C.chebder(c,2,-1) + array([ 12., 96.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j)*c[j] + c[j - 2] += (j*c[j])/(j - 2) + if n > 1: + der[1] = 4*c[2] + der[0] = c[1] + c = der + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] + represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + C-series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or + ``np.isscalar(scl) == False``. + + See Also + -------- + chebder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. 
Then + .. math::`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a`- perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3) + >>> C.chebint(c) + array([ 0.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,3) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, + 0.00625 ]) + >>> C.chebint(c, k=3) + array([ 3.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,lbnd=-2) + array([ 8.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,scl=-2) + array([-1., 1., -1., -1.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/4 + for j in range(2, n): + t = c[j]/(2*j + 1) + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[j - 1] -= c[j]/(2*(j - 1)) + tmp[0] += k[i] - chebval(lbnd, tmp) + c = tmp + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def chebval(x, c, tensor=True): + """ + Evaluate a Chebyshev series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. 
+    tensor : boolean, optional
+        If True, the shape of the coefficient array is extended with ones
+        on the right, one for each dimension of `x`. Scalars have dimension 0
+        for this action. The result is that every column of coefficients in
+        `c` is evaluated for every element of `x`. If False, `x` is broadcast
+        over the columns of `c` for the evaluation. This keyword is useful
+        when `c` is multidimensional. The default value is True.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    values : ndarray, algebra_like
+        The shape of the return value is described above.
+
+    See Also
+    --------
+    chebval2d, chebgrid2d, chebval3d, chebgrid3d
+
+    Notes
+    -----
+    The evaluation uses Clenshaw recursion, aka synthetic division.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> C.chebval(2.5, [1, 2, 3]) # T_0 + 2*T_1 + 3*T_2 at x = 2.5
+    40.5
+    >>> C.chebval([1, 2], [1, 2, 3])
+    array([  6.,  26.])
+
+    """
+    c = np.array(c, ndmin=1, copy=1)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        c = c.astype(np.double)
+    if isinstance(x, (tuple, list)):
+        x = np.asarray(x)
+    if isinstance(x, np.ndarray) and tensor:
+        c = c.reshape(c.shape + (1,)*x.ndim)
+
+    if len(c) == 1:
+        c0 = c[0]
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0]
+        c1 = c[1]
+    else:
+        x2 = 2*x
+        c0 = c[-2]
+        c1 = c[-1]
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            c0 = c[-i] - c1
+            c1 = tmp + c1*x2
+    return c0 + c1*x
+
+
+def chebval2d(x, y, c):
+    """
+    Evaluate a 2-D Chebyshev series at points (x, y).
+
+    This function returns the values:
+
+    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars and they
+    must have the same shape after conversion. In either case, either `x`
+    and `y` or their elements must support multiplication and addition both
+    with themselves and with the elements of `c`.
+
+    If `c` is a 1-D array a one is implicitly appended to its shape to make
+    it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points `(x, y)`,
+        where `x` and `y` must have the same shape. If `x` or `y` is a list
+        or tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and if it isn't an ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term
+        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+        dimension greater than 2 the remaining indices enumerate multiple
+        sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Chebyshev series at points formed
+        from pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    chebval, chebgrid2d, chebval3d, chebgrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    try:
+        x, y = np.array((x, y), copy=0)
+    except:
+        raise ValueError('x, y are incompatible')
+
+    c = chebval(x, c)
+    c = chebval(y, c, tensor=False)
+    return c
+
+
+def chebgrid2d(x, y, c):
+    """
+    Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
+
+    where the points `(a, b)` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars.
In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in `c[i,j]`. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebval3d, chebgrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + c = chebval(x, c) + c = chebval(y, c) + return c + + +def chebval3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebgrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except: + raise ValueError('x, y, z are incompatible') + + c = chebval(x, c) + c = chebval(y, c, tensor=False) + c = chebval(z, c, tensor=False) + return c + + +def chebgrid3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. 
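+    For example, if `x`, `y`, and `z` each contain two points and `c` is
+    3-D, the result has shape (2, 2, 2).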
+ + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebval3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + c = chebval(x, c) + c = chebval(y, c) + c = chebval(z, c) + return c + + +def chebvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = T_i(x), + + where `0 <= i <= deg`. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Chebyshev polynomial. + + If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the + matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and + ``chebval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Chebyshev series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Chebyshev polynomial. The dtype will be the same as + the converted `x`. + + """ + ideg = int(deg) + if ideg != deg: + raise ValueError("deg must be integer") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=0, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. + v[0] = x*0 + 1 + if ideg > 0: + x2 = 2*x + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i-1]*x2 - v[i-2] + return np.rollaxis(v, 0, v.ndim) + + +def chebvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., deg[1]*i + j] = T_i(x) * T_j(y), + + where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. 
The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Chebyshev polynomials.
+
+    If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Chebyshev
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    chebvander, chebvander3d, chebval2d, chebval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy = ideg
+    x, y = np.array((x, y), copy=0) + 0.0
+
+    vx = chebvander(x, degx)
+    vy = chebvander(y, degy)
+    v = vx[..., None]*vy[..., None,:]
+    return v.reshape(v.shape[:-2] + (-1,))
+
+
+def chebvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Chebyshev polynomials.
+
+    If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Chebyshev
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    chebvander, chebvander2d, chebval2d, chebval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy, degz = ideg
+    x, y, z = np.array((x, y, z), copy=0) + 0.0
+
+    vx = chebvander(x, degx)
+    vy = chebvander(y, degy)
+    vz = chebvander(z, degz)
+    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
+    return v.reshape(v.shape[:-3] + (-1,))
+
+
+def chebfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Chebyshev series to data.
+
+    Return the coefficients of a Chebyshev series of degree `deg` that is
+    the least squares fit to the data values `y` given at points `x`. If
+    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
+    multiple fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int
+        Degree of the fitting series.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller than
+        this relative to the largest singular value will be ignored. The
+        default value is len(x)*eps, where eps is the relative precision of
+        the float type, about 2e-16 in most cases.
+    full : bool, optional
+        Switch determining nature of return value. When it is False (the
+        default) just the coefficients are returned, when True diagnostic
+        information from the singular value decomposition is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the contribution of each point
+        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
+        weights are chosen so that the errors of the products ``w[i]*y[i]``
+        all have the same variance. The default value is None.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
+        Chebyshev coefficients ordered from low to high. If `y` was 2-D,
+        the coefficients for the data in column k of `y` are in column
+        `k`.
+
+    [residuals, rank, singular_values, rcond] : list
+        These values are only returned if `full` = True.
+
+        resid -- sum of squared residuals of the least squares fit
+        rank -- the numerical rank of the scaled Vandermonde matrix
+        sv -- singular values of the scaled Vandermonde matrix
+        rcond -- value of `rcond`.
+
+        For more details, see `linalg.lstsq`.
+
+    Warns
+    -----
+    RankWarning
+        The rank of the coefficient matrix in the least-squares fit is
+        deficient. The warning is only raised if `full` = False. The
+        warnings can be turned off by
+
+        >>> import warnings
+        >>> warnings.simplefilter('ignore', RankWarning)
+
+    See Also
+    --------
+    polyfit, legfit, lagfit, hermfit, hermefit
+    chebval : Evaluates a Chebyshev series.
+    chebvander : Vandermonde matrix of Chebyshev series.
+    chebweight : Chebyshev weight function.
+    linalg.lstsq : Computes a least-squares fit from the matrix.
+    scipy.interpolate.UnivariateSpline : Computes spline fits.
+ + Notes + ----- + The solution is the coefficients of the Chebyshev series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Chebyshev series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + http://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + order = int(deg) + 1 + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + # set up the least squares matrices in transposed form + lhs = chebvander(x, deg).T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def chebcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is aa Chebyshev basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. 
versionadded::1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[0] = np.sqrt(.5) + top[1:] = 1/2 + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + return mat + + +def chebroots(c): + """ + Compute the roots of a Chebyshev series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * T_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, legroots, lagroots, hermroots, hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Chebyshev series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as cheb + >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots + array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + m = chebcompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def chebgauss(deg): + """ + Gauss-Chebyshev quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1/\sqrt{1 - x^2}`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. For Gauss-Chebyshev there are closed form solutions for + the sample points and weights. If n = `deg`, then + + .. math:: x_i = \cos(\pi (2 i - 1) / (2 n)) + + .. math:: w_i = \pi / n + + """ + ideg = int(deg) + if ideg != deg or ideg < 1: + raise ValueError("deg must be a non-negative integer") + + x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg)) + w = np.ones(ideg)*(np.pi/ideg) + + return x, w + + +def chebweight(x): + """ + The weight function of the Chebyshev polynomials. + + The weight function is :math:`1/\sqrt{1 - x^2}` and the interval of + integration is :math:`[-1, 1]`. The Chebyshev polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. 
+ + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) + return w + + +def chebpts1(npts): + """ + Chebyshev points of the first kind. + + The Chebyshev points of the first kind are the points ``cos(x)``, + where ``x = [pi*(k + .5)/npts for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the first kind. + + See Also + -------- + chebpts2 + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 1: + raise ValueError("npts must be >= 1") + + x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts) + return np.cos(x) + + +def chebpts2(npts): + """ + Chebyshev points of the second kind. + + The Chebyshev points of the second kind are the points ``cos(x)``, + where ``x = [pi*k/(npts - 1) for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the second kind. + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 2: + raise ValueError("npts must be >= 2") + + x = np.linspace(-np.pi, 0, _npts) + return np.cos(x) + + +# +# Chebyshev series class +# + +class Chebyshev(ABCPolyBase): + """A Chebyshev series class. + + The Chebyshev class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + methods listed below. + + Parameters + ---------- + coef : array_like + Chebyshev coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(chebadd) + _sub = staticmethod(chebsub) + _mul = staticmethod(chebmul) + _div = staticmethod(chebdiv) + _pow = staticmethod(chebpow) + _val = staticmethod(chebval) + _int = staticmethod(chebint) + _der = staticmethod(chebder) + _fit = staticmethod(chebfit) + _line = staticmethod(chebline) + _roots = staticmethod(chebroots) + _fromroots = staticmethod(chebfromroots) + + # Virtual properties + nickname = 'cheb' + domain = np.array(chebdomain) + window = np.array(chebdomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite.py new file mode 100644 index 0000000000000..1fd49d7745fac --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite.py @@ -0,0 +1,1789 @@ +""" +Objects for dealing with Hermite series. + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite series, including a `Hermite` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `hermdomain` -- Hermite series default domain, [-1,1]. 
+- `hermzero` -- Hermite series that evaluates identically to 0. +- `hermone` -- Hermite series that evaluates identically to 1. +- `hermx` -- Hermite series for the identity map, ``f(x) = x``. + +Arithmetic +---------- +- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. +- `hermadd` -- add two Hermite series. +- `hermsub` -- subtract one Hermite series from another. +- `hermmul` -- multiply two Hermite series. +- `hermdiv` -- divide one Hermite series by another. +- `hermval` -- evaluate a Hermite series at given points. +- `hermval2d` -- evaluate a 2D Hermite series at given points. +- `hermval3d` -- evaluate a 3D Hermite series at given points. +- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product. +- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product. + +Calculus +-------- +- `hermder` -- differentiate a Hermite series. +- `hermint` -- integrate a Hermite series. + +Misc Functions +-------------- +- `hermfromroots` -- create a Hermite series with specified roots. +- `hermroots` -- find the roots of a Hermite series. +- `hermvander` -- Vandermonde-like matrix for Hermite polynomials. +- `hermvander2d` -- Vandermonde-like matrix for 2D power series. +- `hermvander3d` -- Vandermonde-like matrix for 3D power series. +- `hermgauss` -- Gauss-Hermite quadrature, points and weights. +- `hermweight` -- Hermite weight function. +- `hermcompanion` -- symmetrized companion matrix in Hermite form. +- `hermfit` -- least-squares fit returning a Hermite series. +- `hermtrim` -- trim leading coefficients from a Hermite series. +- `hermline` -- Hermite series of given straight line. +- `herm2poly` -- convert a Hermite series to a polynomial. +- `poly2herm` -- convert a polynomial to a Hermite series. + +Classes +------- +- `Hermite` -- A Hermite series class. + +See also +-------- +`numpy.polynomial` + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] + +hermtrim = pu.trimcoef + + +def poly2herm(pol): + """ + poly2herm(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herm2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import poly2herm + >>> poly2herm(np.arange(4)) + array([ 1. 
, 2.75 , 0.5 , 0.375]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermadd(hermmulx(res), pol[i]) + return res + + +def herm2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herm + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import herm2poly + >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) + array([ 0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + c[1] *= 2 + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1*(2*(i - 1))) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)*2) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermdomain = np.array([-1, 1]) + +# Hermite coefficients representing zero. +hermzero = np.array([0]) + +# Hermite coefficients representing one. +hermone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermx = np.array([0, 1/2]) + + +def hermline(off, scl): + """ + Hermite series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + polyline, chebline + + Examples + -------- + >>> from numpy.polynomial.hermite import hermline, hermval + >>> hermval(0,hermline(3, 2)) + 3.0 + >>> hermval(1,hermline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl/2]) + else: + return np.array([off]) + + +def hermfromroots(roots): + """ + Generate a Hermite series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Hermite form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Hermite form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. 
If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + polyfromroots, legfromroots, lagfromroots, chebfromroots, + hermefromroots. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfromroots, hermval + >>> coef = hermfromroots((-1, 0, 1)) + >>> hermval((-1, 0, 1), coef) + array([ 0., 0., 0.]) + >>> coef = hermfromroots((-1j, 1j)) + >>> hermval((-1j, 1j), coef) + array([ 0.+0.j, 0.+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [hermline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [hermmul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = hermmul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def hermadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermsub, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermadd + >>> hermadd([1, 2, 3], [1, 2, 3, 4]) + array([ 2., 4., 6., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def hermsub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermadd, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermsub + >>> hermsub([1, 2, 3, 4], [1, 2, 3]) + array([ 0., 0., 0., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def hermmulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. 
+ + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmulx + >>> hermmulx([1, 2, 3]) + array([ 2. , 6.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0]/2 + for i in range(1, len(c)): + prd[i + 1] = c[i]/2 + prd[i - 1] += c[i]*i + return prd + + +def hermmul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermadd, hermsub, hermdiv, hermpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmul + >>> hermmul([1, 2, 3], [0, 1, 2]) + array([ 52., 29., 52., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) + c1 = hermadd(tmp, hermmulx(c1)*2) + return hermadd(c0, hermmulx(c1)*2) + + +def hermdiv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermadd, hermsub, hermmul, hermpow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. 
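+
+    As a quick consistency check (a sketch using the product from the
+    `hermmul` example), dividing that product by one factor recovers the
+    other factor with zero remainder:
+
+    >>> from numpy.polynomial.hermite import hermmul, hermdiv
+    >>> hermdiv(hermmul([1, 2, 3], [0, 1, 2]), [0, 1, 2])
+    (array([ 1.,  2.,  3.]), array([ 0.]))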
+ + Examples + -------- + >>> from numpy.polynomial.hermite import hermdiv + >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 0.])) + >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 2., 2.])) + >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 1., 1.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = hermmul([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, pu.trimseq(rem) + + +def hermpow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermadd, hermsub, hermmul, hermdiv + + Examples + -------- + >>> from numpy.polynomial.hermite import hermpow + >>> hermpow([1, 2, 3], 2) + array([ 81., 52., 82., 12., 9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = hermmul(prd, c) + return prd + + +def hermder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite series. + + Returns the Hermite series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` + while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If `c` is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. 
+ + See Also + -------- + hermint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermder + >>> hermder([ 1. , 0.5, 0.5, 0.5]) + array([ 1., 2., 3.]) + >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) + array([ 1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = (2*j)*c[j] + c = der + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite series. + + Returns the Hermite series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or + ``np.isscalar(scl) == False``. + + See Also + -------- + hermder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? 
Say one is making a linear change of
+    variable :math:`u = ax + b` in an integral relative to `x`. Then
+    :math:`dx = du/a`, so one will need to set `scl` equal to
+    :math:`1/a` - perhaps not what one would have first thought.
+
+    Also note that, in general, the result of integrating a C-series needs
+    to be "reprojected" onto the C-series basis set. Thus, typically,
+    the result of this function is "unintuitive," albeit correct; see
+    Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite import hermint
+    >>> hermint([1,2,3]) # integrate once, value 0 at 0.
+    array([ 1. ,  0.5,  0.5,  0.5])
+    >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
+    array([-0.5       ,  0.5       ,  0.125     ,  0.08333333,  0.0625    ])
+    >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
+    array([ 2. ,  0.5,  0.5,  0.5])
+    >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
+    array([-2. ,  0.5,  0.5,  0.5])
+    >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
+    array([ 1.66666667, -0.5       ,  0.125     ,  0.08333333,  0.0625    ])
+
+    """
+    c = np.array(c, ndmin=1, copy=1)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        c = c.astype(np.double)
+    if not np.iterable(k):
+        k = [k]
+    cnt, iaxis = [int(t) for t in [m, axis]]
+
+    if cnt != m:
+        raise ValueError("The order of integration must be integer")
+    if cnt < 0:
+        raise ValueError("The order of integration must be non-negative")
+    if len(k) > cnt:
+        raise ValueError("Too many integration constants")
+    if iaxis != axis:
+        raise ValueError("The axis must be integer")
+    if not -c.ndim <= iaxis < c.ndim:
+        raise ValueError("The axis is out of range")
+    if iaxis < 0:
+        iaxis += c.ndim
+
+    if cnt == 0:
+        return c
+
+    c = np.rollaxis(c, iaxis)
+    k = list(k) + [0]*(cnt - len(k))
+    for i in range(cnt):
+        n = len(c)
+        c *= scl
+        if n == 1 and np.all(c[0] == 0):
+            c[0] += k[i]
+        else:
+            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
+            tmp[0] = c[0]*0
+            tmp[1] = c[0]/2
+            for j in range(1, n):
+                tmp[j + 1] = c[j]/(2*(j + 1))
+            tmp[0] += k[i] - hermval(lbnd, tmp)
+            c = tmp
+    c = np.rollaxis(c, 0, iaxis + 1)
+    return c
+
+
+def hermval(x, c, tensor=True):
+    """
+    Evaluate a Hermite series at points x.
+
+    If `c` is of length `n + 1`, this function returns the value:
+
+    .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)
+
+    The parameter `x` is converted to an array only if it is a tuple or a
+    list, otherwise it is treated as a scalar. In either case, either `x`
+    or its elements must support multiplication and addition both with
+    themselves and with the elements of `c`.
+
+    If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+    `c` is multidimensional, then the shape of the result depends on the
+    value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+    x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+    scalars have shape ().
+
+    Trailing zeros in the coefficients will be used in the evaluation, so
+    they should be avoided if efficiency is a concern.
+
+    Parameters
+    ----------
+    x : array_like, compatible object
+        If `x` is a list or tuple, it is converted to an ndarray, otherwise
+        it is left unchanged and treated as a scalar. In either case, `x`
+        or its elements must support addition and multiplication with
+        themselves and with the elements of `c`.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree n are contained in c[n]. If `c` is multidimensional the
+        remaining indices enumerate multiple polynomials.
In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermval2d, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval + >>> coef = [1,2,3] + >>> hermval(1, coef) + 11.0 + >>> hermval([[1,2],[3,4]], coef) + array([[ 11., 51.], + [ 115., 203.]]) + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + x2 = x*2 + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1*(2*(nd - 1)) + c1 = tmp + c1*x2 + return c0 + c1*x2 + + +def hermval2d(x, y, c): + """ + Evaluate a 2-D Hermite series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermval, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except: + raise ValueError('x, y are incompatible') + + c = hermval(x, c) + c = hermval(y, c, tensor=False) + return c + + +def hermgrid2d(x, y, c): + """ + Evaluate a 2-D Hermite series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. 
The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + c = hermval(x, c) + c = hermval(y, c) + return c + + +def hermval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermgrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except: + raise ValueError('x, y, z are incompatible') + + c = hermval(x, c) + c = hermval(y, c, tensor=False) + c = hermval(z, c, tensor=False) + return c + + +def hermgrid3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. 
math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermval, hermval2d, hermgrid2d, hermval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    c = hermval(x, c)
+    c = hermval(y, c)
+    c = hermval(z, c)
+    return c
+
+
+def hermvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = H_i(x),
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Hermite polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
+    ``hermval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Hermite series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Hermite polynomial. The dtype will be the same as
+        the converted `x`.
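+
+    Notes
+    -----
+    As a quick consistency check of the equivalence noted above (up to
+    roundoff):
+
+    >>> from numpy.polynomial.hermite import hermvander, hermval
+    >>> x = np.array([-1., 0., 1.])
+    >>> c = np.array([1., 2., 3.])
+    >>> np.allclose(np.dot(hermvander(x, 2), c), hermval(x, c))
+    True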
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite import hermvander
+    >>> x = np.array([-1, 0, 1])
+    >>> hermvander(x, 3)
+    array([[ 1., -2.,  2.,  4.],
+           [ 1.,  0., -2., -0.],
+           [ 1.,  2.,  2., -4.]])
+
+    """
+    ideg = int(deg)
+    if ideg != deg:
+        raise ValueError("deg must be integer")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=0, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        x2 = x*2
+        v[1] = x2
+        for i in range(2, ideg + 1):
+            v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
+    return np.rollaxis(v, 0, v.ndim)
+
+
+def hermvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., deg[1]*i + j] = H_i(x) * H_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Hermite polynomials.
+
+    If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Hermite
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermvander, hermvander3d, hermval2d, hermval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy = ideg
+    x, y = np.array((x, y), copy=0) + 0.0
+
+    vx = hermvander(x, degx)
+    vy = hermvander(y, degy)
+    v = vx[..., None]*vy[..., None,:]
+    return v.reshape(v.shape[:-2] + (-1,))
+
+
+def hermvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Hermite polynomials.
+
+    If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Hermite
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermvander, hermvander2d, hermval2d, hermval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy, degz = ideg
+    x, y, z = np.array((x, y, z), copy=0) + 0.0
+
+    vx = hermvander(x, degx)
+    vy = hermvander(y, degy)
+    vz = hermvander(z, degz)
+    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
+    return v.reshape(v.shape[:-3] + (-1,))
+
+
+def hermfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Hermite series to data.
+
+    Return the coefficients of a Hermite series of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int
+        Degree of the fitting polynomial.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller than
+        this relative to the largest singular value will be ignored. The
+        default value is len(x)*eps, where eps is the relative precision of
+        the float type, about 2e-16 in most cases.
+    full : bool, optional
+        Switch determining nature of return value. When it is False (the
+        default) just the coefficients are returned, when True diagnostic
+        information from the singular value decomposition is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the contribution of each point
+        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
+        weights are chosen so that the errors of the products ``w[i]*y[i]``
+        all have the same variance. The default value is None.
+
+    Returns
+    -------
+    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+        Hermite coefficients ordered from low to high. If `y` was 2-D,
+        the coefficients for the data in column k of `y` are in column
+        `k`.
+ + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, legfit, lagfit, polyfit, hermefit + hermval : Evaluates a Hermite series. + hermvander : Vandermonde matrix of Hermite series. + hermweight : Hermite weight function + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Hermite series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Hermite series are probably most useful when the data can be + approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite + weight. In that case the weight ``sqrt(w(x[i])`` should be used + together with data values ``y[i]/sqrt(w(x[i])``. The weight function is + available as `hermweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + http://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfit, hermval + >>> x = np.linspace(-10, 10) + >>> err = np.random.randn(len(x))/10 + >>> y = hermval(x, [1, 2, 3]) + err + >>> hermfit(x, y, 2) + array([ 0.97902637, 1.99849131, 3.00006 ]) + + """ + order = int(deg) + 1 + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + # set up the least squares matrices in transposed form + lhs = hermvander(x, deg).T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. 
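+        # Broadcasting multiplies each sample column of the transposed
+        # Vandermonde matrix (and each observation in rhs) by its weight,
+        # which sets up the weighted system V(x)*c = w*y described in the
+        # Notes section.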
+ lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def hermcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Hermite basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded::1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-.5*c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., np.sqrt(2.*np.arange(1, n)))) + scl = np.multiply.accumulate(scl) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.sqrt(.5*np.arange(1, n)) + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + return mat + + +def hermroots(c): + """ + Compute the roots of a Hermite series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * H_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, legroots, lagroots, chebroots, hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Hermite series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermroots, hermfromroots + >>> coef = hermfromroots([-1, 0, 1]) + >>> coef + array([ 0. , 0.25 , 0. , 0.125]) + >>> hermroots(coef) + array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-.5*c[0]/c[1]]) + + m = hermcompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def hermgauss(deg): + """ + Gauss-Hermite quadrature. 
+
+    Computes the sample points and weights for Gauss-Hermite quadrature.
+    These sample points and weights will correctly integrate polynomials of
+    degree :math:`2*deg - 1` or less over the interval
+    :math:`[-\infty, \infty]` with the weight function
+    :math:`f(x) = \exp(-x^2)`.
+
+    Parameters
+    ----------
+    deg : int
+        Number of sample points and weights. It must be >= 1.
+
+    Returns
+    -------
+    x : ndarray
+        1-D ndarray containing the sample points.
+    y : ndarray
+        1-D ndarray containing the weights.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    The results have only been tested up to degree 100; higher degrees may
+    be problematic. The weights are determined by using the fact that
+
+    .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))
+
+    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+    is the k'th root of :math:`H_n`, and then scaling the results to get
+    the right value when integrating 1.
+
+    """
+    ideg = int(deg)
+    if ideg != deg or ideg < 1:
+        raise ValueError("deg must be a positive integer")
+
+    # first approximation of roots. We use the fact that the companion
+    # matrix is symmetric in this case in order to obtain better zeros.
+    c = np.array([0]*deg + [1])
+    m = hermcompanion(c)
+    x = la.eigvals(m)
+    x.sort()
+
+    # improve roots by one application of Newton
+    dy = hermval(x, c)
+    df = hermval(x, hermder(c))
+    x -= dy/df
+
+    # compute the weights. We scale the factor to avoid possible numerical
+    # overflow.
+    fm = hermval(x, c[1:])
+    fm /= np.abs(fm).max()
+    df /= np.abs(df).max()
+    w = 1/(fm * df)
+
+    # for Hermite we can also symmetrize
+    w = (w + w[::-1])/2
+    x = (x - x[::-1])/2
+
+    # scale w to get the right value
+    w *= np.sqrt(np.pi) / w.sum()
+
+    return x, w
+
+
+def hermweight(x):
+    """
+    Weight function of the Hermite polynomials.
+
+    The weight function is :math:`\exp(-x^2)` and the interval of
+    integration is :math:`[-\infty, \infty]`. The Hermite polynomials are
+    orthogonal, but not normalized, with respect to this weight function.
+
+    Parameters
+    ----------
+    x : array_like
+       Values at which the weight function will be computed.
+
+    Returns
+    -------
+    w : ndarray
+       The weight function at `x`.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    w = np.exp(-x**2)
+    return w
+
+
+#
+# Hermite series class
+#
+
+class Hermite(ABCPolyBase):
+    """A Hermite series class.
+
+    The Hermite class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed in the `ABCPolyBase` documentation.
+
+    Parameters
+    ----------
+    coef : array_like
+        Hermite coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [-1, 1].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [-1, 1].
+
+        ..
versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(hermadd) + _sub = staticmethod(hermsub) + _mul = staticmethod(hermmul) + _div = staticmethod(hermdiv) + _pow = staticmethod(hermpow) + _val = staticmethod(hermval) + _int = staticmethod(hermint) + _der = staticmethod(hermder) + _fit = staticmethod(hermfit) + _line = staticmethod(hermline) + _roots = staticmethod(hermroots) + _fromroots = staticmethod(hermfromroots) + + # Virtual properties + nickname = 'herm' + domain = np.array(hermdomain) + window = np.array(hermdomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite_e.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite_e.py new file mode 100644 index 0000000000000..6e33dc0bc31c5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite_e.py @@ -0,0 +1,1786 @@ +""" +Objects for dealing with Hermite_e series. + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite_e series, including a `HermiteE` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `hermedomain` -- Hermite_e series default domain, [-1,1]. +- `hermezero` -- Hermite_e series that evaluates identically to 0. +- `hermeone` -- Hermite_e series that evaluates identically to 1. +- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``. + +Arithmetic +---------- +- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. +- `hermeadd` -- add two Hermite_e series. +- `hermesub` -- subtract one Hermite_e series from another. +- `hermemul` -- multiply two Hermite_e series. +- `hermediv` -- divide one Hermite_e series by another. +- `hermeval` -- evaluate a Hermite_e series at given points. +- `hermeval2d` -- evaluate a 2D Hermite_e series at given points. +- `hermeval3d` -- evaluate a 3D Hermite_e series at given points. +- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product. +- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product. + +Calculus +-------- +- `hermeder` -- differentiate a Hermite_e series. +- `hermeint` -- integrate a Hermite_e series. + +Misc Functions +-------------- +- `hermefromroots` -- create a Hermite_e series with specified roots. +- `hermeroots` -- find the roots of a Hermite_e series. +- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials. +- `hermevander2d` -- Vandermonde-like matrix for 2D power series. +- `hermevander3d` -- Vandermonde-like matrix for 3D power series. +- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights. +- `hermeweight` -- Hermite_e weight function. +- `hermecompanion` -- symmetrized companion matrix in Hermite_e form. +- `hermefit` -- least-squares fit returning a Hermite_e series. +- `hermetrim` -- trim leading coefficients from a Hermite_e series. +- `hermeline` -- Hermite_e series of given straight line. +- `herme2poly` -- convert a Hermite_e series to a polynomial. +- `poly2herme` -- convert a polynomial to a Hermite_e series. + +Classes +------- +- `HermiteE` -- A Hermite_e series class. + +See also +-------- +`numpy.polynomial` + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la + +from . 
import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] + +hermetrim = pu.trimcoef + + +def poly2herme(pol): + """ + poly2herme(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herme2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import poly2herme + >>> poly2herme(np.arange(4)) + array([ 2., 10., 2., 3.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermeadd(hermemulx(res), pol[i]) + return res + + +def herme2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herme + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import herme2poly + >>> herme2poly([ 2., 10., 2., 3.]) + array([ 0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1*(i - 1)) + c1 = polyadd(tmp, polymulx(c1)) + return polyadd(c0, polymulx(c1)) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermedomain = np.array([-1, 1]) + +# Hermite coefficients representing zero. +hermezero = np.array([0]) + +# Hermite coefficients representing one. +hermeone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermex = np.array([0, 1]) + + +def hermeline(off, scl): + """ + Hermite series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. 
+ + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + polyline, chebline + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeline + >>> from numpy.polynomial.hermite_e import hermeline, hermeval + >>> hermeval(0,hermeline(3, 2)) + 3.0 + >>> hermeval(1,hermeline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def hermefromroots(roots): + """ + Generate a HermiteE series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in HermiteE form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in HermiteE form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + polyfromroots, legfromroots, lagfromroots, hermfromroots, + chebfromroots. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval + >>> coef = hermefromroots((-1, 0, 1)) + >>> hermeval((-1, 0, 1), coef) + array([ 0., 0., 0.]) + >>> coef = hermefromroots((-1j, 1j)) + >>> hermeval((-1j, 1j), coef) + array([ 0.+0.j, 0.+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [hermeline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [hermemul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = hermemul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def hermeadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermesub, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeadd + >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) + array([ 2., 4., 6., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def hermesub(c1, c2): + """ + Subtract one Hermite series from another. 
+ + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermeadd, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermesub + >>> hermesub([1, 2, 3, 4], [1, 2, 3]) + array([ 0., 0., 0., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def hermemulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))) + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermemulx + >>> hermemulx([1, 2, 3]) + array([ 2., 7., 2., 3.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + for i in range(1, len(c)): + prd[i + 1] = c[i] + prd[i - 1] += c[i]*i + return prd + + +def hermemul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermeadd, hermesub, hermediv, hermepow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. 
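+
+    For instance, squaring the identity series reprojects ``x*x`` onto
+    the basis as ``He_2(x) + He_0(x)``:
+
+    >>> from numpy.polynomial.hermite_e import hermemul
+    >>> hermemul([0, 1], [0, 1])
+    array([ 1.,  0.,  1.])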
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermemul + >>> hermemul([1, 2, 3], [0, 1, 2]) + array([ 14., 15., 28., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermesub(c[-i]*xs, c1*(nd - 1)) + c1 = hermeadd(tmp, hermemulx(c1)) + return hermeadd(c0, hermemulx(c1)) + + +def hermediv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermeadd, hermesub, hermemul, hermepow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermediv + >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 0.])) + >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 1., 2.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = hermemul([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, pu.trimseq(rem) + + +def hermepow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. 
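+        If the input series has degree ``n``, the result has degree
+        ``n*pow``.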
+ + See Also + -------- + hermeadd, hermesub, hermemul, hermediv + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermepow + >>> hermepow([1, 2, 3], 2) + array([ 23., 28., 46., 12., 9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = hermemul(prd, c) + return prd + + +def hermeder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite_e series. + + Returns the series coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` + while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 + is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermeint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeder + >>> hermeder([ 1., 1., 1., 1.]) + array([ 1., 2., 3.]) + >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) + array([ 1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + n = len(c) + if cnt >= n: + return c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite_e series. + + Returns the Hermite_e series coefficients `c` integrated `m` times from + `lbnd` along `axis`. 
At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite_e series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or + ``np.isscalar(scl) == False``. + + See Also + -------- + hermeder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + .. math::`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeint + >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. + array([ 1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) + >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. + array([ 2., 1., 1., 1.]) + >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 + array([-1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) + array([ 1.83333333, 0. 
, 0.5 , 0.33333333, 0.25 ]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j]/(j + 1) + tmp[0] += k[i] - hermeval(lbnd, tmp) + c = tmp + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def hermeval(x, c, tensor=True): + """ + Evaluate an HermiteE series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermeval2d, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. 
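+    For HermiteE polynomials the three-term recurrence behind those steps
+    is ``He_{k+1}(x) = x*He_k(x) - k*He_{k-1}(x)``, applied backwards over
+    the coefficient array.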
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeval + >>> coef = [1,2,3] + >>> hermeval(1, coef) + 3.0 + >>> hermeval([[1,2],[3,4]], coef) + array([[ 3., 14.], + [ 31., 54.]]) + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1*(nd - 1) + c1 = tmp + c1*x + return c0 + c1*x + + +def hermeval2d(x, y, c): + """ + Evaluate a 2-D HermiteE series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermeval, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except: + raise ValueError('x, y are incompatible') + + c = hermeval(x, c) + c = hermeval(y, c, tensor=False) + return c + + +def hermegrid2d(x, y, c): + """ + Evaluate a 2-D HermiteE series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. 
If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d, hermegrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + c = hermeval(x, c) + c = hermeval(y, c) + return c + + +def hermeval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite_e series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermegrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except: + raise ValueError('x, y, z are incompatible') + + c = hermeval(x, c) + c = hermeval(y, c, tensor=False) + c = hermeval(z, c, tensor=False) + return c + + +def hermegrid3d(x, y, z, c): + """ + Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. 
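+    For 1-D `x`, `y`, and `z` this is equivalent to, though generally
+    cheaper than, evaluating ``hermeval3d`` on the point arrays returned
+    by ``np.meshgrid(x, y, z, indexing='ij')``.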
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermeval, hermeval2d, hermegrid2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    c = hermeval(x, c)
+    c = hermeval(y, c)
+    c = hermeval(z, c)
+    return c
+
+
+def hermevander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = He_i(x),
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the HermiteE polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
+    ``hermeval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of HermiteE series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding HermiteE polynomial. The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermevander
+    >>> x = np.array([-1, 0, 1])
+    >>> hermevander(x, 3)
+    array([[ 1., -1.,  0.,  2.],
+           [ 1.,  0., -1., -0.],
+           [ 1.,  1.,  0., -2.]])
+
+    """
+    ideg = int(deg)
+    if ideg != deg:
+        raise ValueError("deg must be integer")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=0, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i-1]*x - v[i-2]*(i - 1))
+    return np.rollaxis(v, 0, v.ndim)
+
+
+def hermevander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the HermiteE polynomials.
+
+    If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermevander, hermevander3d, hermeval2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy = ideg
+    x, y = np.array((x, y), copy=0) + 0.0
+
+    vx = hermevander(x, degx)
+    vy = hermevander(y, degy)
+    v = vx[..., None]*vy[..., None,:]
+    return v.reshape(v.shape[:-2] + (-1,))
+
+
+def hermevander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the HermiteE polynomials.
+
+    If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermevander, hermevander2d, hermeval2d, hermeval3d
+
+    Notes
+    -----
+
+    ..
versionadded::1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy, degz = ideg + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = hermevander(x, degx) + vy = hermevander(y, degy) + vz = hermevander(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def hermefit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Hermite series to data. + + Return the coefficients of a HermiteE series of degree `deg` that is + the least squares fit to the data values `y` given at points `x`. If + `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D + multiple fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, legfit, polyfit, hermfit, polyfit + hermeval : Evaluates a Hermite series. + hermevander : pseudo Vandermonde matrix of Hermite series. + hermeweight : HermiteE weight function. + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. 
+
+    Notes
+    -----
+    The solution is the coefficients of the HermiteE series `p` that
+    minimizes the sum of the weighted squared errors
+
+    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+    where the :math:`w_j` are the weights. This problem is solved by
+    setting up the (typically) overdetermined matrix equation
+
+    .. math:: V(x) * c = w * y,
+
+    where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
+    are the coefficients to be solved for, and the elements of `y` are the
+    observed values. This equation is then solved using the singular value
+    decomposition of `V`.
+
+    If some of the singular values of `V` are so small that they are
+    neglected, then a `RankWarning` will be issued. This means that the
+    coefficient values may be poorly determined. Using a lower order fit
+    will usually get rid of the warning. The `rcond` parameter can also be
+    set to a value smaller than its default, but the resulting fit may be
+    spurious and have large contributions from roundoff error.
+
+    Fits using HermiteE series are probably most useful when the data can
+    be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
+    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
+    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function
+    is available as `hermeweight`.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Curve fitting",
+           http://en.wikipedia.org/wiki/Curve_fitting
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
+    >>> x = np.linspace(-10, 10)
+    >>> err = np.random.randn(len(x))/10
+    >>> y = hermeval(x, [1, 2, 3]) + err
+    >>> hermefit(x, y, 2)
+    array([ 1.01690445,  1.99951418,  2.99948696])
+
+    """
+    order = int(deg) + 1
+    x = np.asarray(x) + 0.0
+    y = np.asarray(y) + 0.0
+
+    # check arguments.
+    if deg < 0:
+        raise ValueError("expected deg >= 0")
+    if x.ndim != 1:
+        raise TypeError("expected 1D vector for x")
+    if x.size == 0:
+        raise TypeError("expected non-empty vector for x")
+    if y.ndim < 1 or y.ndim > 2:
+        raise TypeError("expected 1D or 2D array for y")
+    if len(x) != len(y):
+        raise TypeError("expected x and y to have same length")
+
+    # set up the least squares matrices in transposed form
+    lhs = hermevander(x, deg).T
+    rhs = y.T
+    if w is not None:
+        w = np.asarray(w) + 0.0
+        if w.ndim != 1:
+            raise TypeError("expected 1D vector for w")
+        if len(x) != len(w):
+            raise TypeError("expected x and w to have same length")
+        # apply weights. Don't use inplace operations as they
+        # can cause problems with NA.
+        lhs = lhs * w
+        rhs = rhs * w
+
+    # set rcond
+    if rcond is None:
+        rcond = len(x)*np.finfo(x.dtype).eps
+
+    # Determine the norms of the design matrix columns.
+    if issubclass(lhs.dtype.type, np.complexfloating):
+        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+    else:
+        scl = np.sqrt(np.square(lhs).sum(1))
+    scl[scl == 0] = 1
+
+    # Solve the least squares problem.
+    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
+    c = (c.T/scl).T
+
+    # warn on rank reduction
+    if rank != order and not full:
+        msg = "The fit may be poorly conditioned"
+        warnings.warn(msg, pu.RankWarning)
+
+    if full:
+        return c, [resids, rank, s, rcond]
+    else:
+        return c
+
+
+def hermecompanion(c):
+    """
+    Return the scaled companion matrix of c.
+
+    The basis polynomials are scaled so that the companion matrix is
+    symmetric when `c` is an HermiteE basis polynomial.
This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of HermiteE series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded::1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., np.sqrt(np.arange(1, n)))) + scl = np.multiply.accumulate(scl) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.sqrt(np.arange(1, n)) + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1]) + return mat + + +def hermeroots(c): + """ + Compute the roots of a HermiteE series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * He_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, legroots, lagroots, hermroots, chebroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The HermiteE series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots + >>> coef = hermefromroots([-1, 0, 1]) + >>> coef + array([ 0., 2., 0., 1.]) + >>> hermeroots(coef) + array([-1., 0., 1.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + m = hermecompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def hermegauss(deg): + """ + Gauss-HermiteE quadrature. + + Computes the sample points and weights for Gauss-HermiteE quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\inf, \inf]` + with the weight function :math:`f(x) = \exp(-x^2/2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded::1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`He_n`, and then scaling the results to get + the right value when integrating 1. 
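+
+    Examples
+    --------
+    As a quick sanity check, a two-point rule already integrates ``1``
+    and ``x**2`` exactly against the weight ``exp(-x**2/2)``; both
+    integrals equal ``sqrt(2*pi)``.
+
+    >>> from numpy.polynomial.hermite_e import hermegauss
+    >>> x, w = hermegauss(2)
+    >>> np.allclose(w.sum(), np.sqrt(2*np.pi))
+    True
+    >>> np.allclose((w*x**2).sum(), np.sqrt(2*np.pi))
+    True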
+ + """ + ideg = int(deg) + if ideg != deg or ideg < 1: + raise ValueError("deg must be a non-negative integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = hermecompanion(c) + x = la.eigvals(m) + x.sort() + + # improve roots by one application of Newton + dy = hermeval(x, c) + df = hermeval(x, hermeder(c)) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = hermeval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1/(fm * df) + + # for Hermite_e we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= np.sqrt(2*np.pi) / w.sum() + + return x, w + + +def hermeweight(x): + """Weight function of the Hermite_e polynomials. + + The weight function is :math:`\exp(-x^2/2)` and the interval of + integration is :math:`[-\inf, \inf]`. the HermiteE polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded::1.7.0 + + """ + w = np.exp(-.5*x**2) + return w + + +# +# HermiteE series class +# + +class HermiteE(ABCPolyBase): + """An HermiteE series class. + + The HermiteE class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Laguerre coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(hermeadd) + _sub = staticmethod(hermesub) + _mul = staticmethod(hermemul) + _div = staticmethod(hermediv) + _pow = staticmethod(hermepow) + _val = staticmethod(hermeval) + _int = staticmethod(hermeint) + _der = staticmethod(hermeder) + _fit = staticmethod(hermefit) + _line = staticmethod(hermeline) + _roots = staticmethod(hermeroots) + _fromroots = staticmethod(hermefromroots) + + # Virtual properties + nickname = 'herme' + domain = np.array(hermedomain) + window = np.array(hermedomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/laguerre.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/laguerre.py new file mode 100644 index 0000000000000..8d2705d5d3143 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/laguerre.py @@ -0,0 +1,1781 @@ +""" +Objects for dealing with Laguerre series. + +This module provides a number of objects (mostly functions) useful for +dealing with Laguerre series, including a `Laguerre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `lagdomain` -- Laguerre series default domain, [-1,1]. +- `lagzero` -- Laguerre series that evaluates identically to 0. 
+- `lagone` -- Laguerre series that evaluates identically to 1. +- `lagx` -- Laguerre series for the identity map, ``f(x) = x``. + +Arithmetic +---------- +- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. +- `lagadd` -- add two Laguerre series. +- `lagsub` -- subtract one Laguerre series from another. +- `lagmul` -- multiply two Laguerre series. +- `lagdiv` -- divide one Laguerre series by another. +- `lagval` -- evaluate a Laguerre series at given points. +- `lagval2d` -- evaluate a 2D Laguerre series at given points. +- `lagval3d` -- evaluate a 3D Laguerre series at given points. +- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product. +- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product. + +Calculus +-------- +- `lagder` -- differentiate a Laguerre series. +- `lagint` -- integrate a Laguerre series. + +Misc Functions +-------------- +- `lagfromroots` -- create a Laguerre series with specified roots. +- `lagroots` -- find the roots of a Laguerre series. +- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials. +- `lagvander2d` -- Vandermonde-like matrix for 2D power series. +- `lagvander3d` -- Vandermonde-like matrix for 3D power series. +- `laggauss` -- Gauss-Laguerre quadrature, points and weights. +- `lagweight` -- Laguerre weight function. +- `lagcompanion` -- symmetrized companion matrix in Laguerre form. +- `lagfit` -- least-squares fit returning a Laguerre series. +- `lagtrim` -- trim leading coefficients from a Laguerre series. +- `lagline` -- Laguerre series of given straight line. +- `lag2poly` -- convert a Laguerre series to a polynomial. +- `poly2lag` -- convert a polynomial to a Laguerre series. + +Classes +------- +- `Laguerre` -- A Laguerre series class. + +See also +-------- +`numpy.polynomial` + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] + +lagtrim = pu.trimcoef + + +def poly2lag(pol): + """ + poly2lag(pol) + + Convert a polynomial to a Laguerre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Laguerre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Laguerre + series. + + See Also + -------- + lag2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import poly2lag + >>> poly2lag(np.arange(4)) + array([ 23., -63., 58., -18.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = lagadd(lagmulx(res), pol[i]) + return res + + +def lag2poly(c): + """ + Convert a Laguerre series to a polynomial. 
+ + Convert an array representing the coefficients of a Laguerre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Laguerre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2lag + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lag2poly + >>> lag2poly([ 23., -63., 58., -18.]) + array([ 0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1*(i - 1))/i) + c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i) + return polyadd(c0, polysub(c1, polymulx(c1))) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Laguerre +lagdomain = np.array([0, 1]) + +# Laguerre coefficients representing zero. +lagzero = np.array([0]) + +# Laguerre coefficients representing one. +lagone = np.array([1]) + +# Laguerre coefficients representing the identity x. +lagx = np.array([1, -1]) + + +def lagline(off, scl): + """ + Laguerre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Laguerre series for + ``off + scl*x``. + + See Also + -------- + polyline, chebline + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagline, lagval + >>> lagval(0,lagline(3, 2)) + 3.0 + >>> lagval(1,lagline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off + scl, -scl]) + else: + return np.array([off]) + + +def lagfromroots(roots): + """ + Generate a Laguerre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Laguerre form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Laguerre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + polyfromroots, legfromroots, chebfromroots, hermfromroots, + hermefromroots. 
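+
+    Notes
+    -----
+    The linear factors are multiplied together pairwise rather than one
+    at a time, so the implementation below needs only about
+    ``log2(len(roots))`` rounds of `lagmul` calls on series of roughly
+    equal length.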
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfromroots, lagval + >>> coef = lagfromroots((-1, 0, 1)) + >>> lagval((-1, 0, 1), coef) + array([ 0., 0., 0.]) + >>> coef = lagfromroots((-1j, 1j)) + >>> lagval((-1j, 1j), coef) + array([ 0.+0.j, 0.+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [lagline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [lagmul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = lagmul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def lagadd(c1, c2): + """ + Add one Laguerre series to another. + + Returns the sum of two Laguerre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Laguerre series of their sum. + + See Also + -------- + lagsub, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Laguerre series + is a Laguerre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagadd + >>> lagadd([1, 2, 3], [1, 2, 3, 4]) + array([ 2., 4., 6., 4.]) + + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def lagsub(c1, c2): + """ + Subtract one Laguerre series from another. + + Returns the difference of two Laguerre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their difference. + + See Also + -------- + lagadd, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Laguerre + series is a Laguerre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagsub + >>> lagsub([1, 2, 3, 4], [1, 2, 3]) + array([ 0., 0., 0., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def lagmulx(c): + """Multiply a Laguerre series by x. + + Multiply the Laguerre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + The multiplication uses the recursion relationship for Laguerre + polynomials in the form + + .. 
math:: + + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmulx + >>> lagmulx([1, 2, 3]) + array([ -1., -1., 11., -9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] + prd[1] = -c[0] + for i in range(1, len(c)): + prd[i + 1] = -c[i]*(i + 1) + prd[i] += c[i]*(2*i + 1) + prd[i - 1] -= c[i]*i + return prd + + +def lagmul(c1, c2): + """ + Multiply one Laguerre series by another. + + Returns the product of two Laguerre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their product. + + See Also + -------- + lagadd, lagsub, lagdiv, lagpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Laguerre polynomial basis set. Thus, to express + the product as a Laguerre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmul + >>> lagmul([1, 2, 3], [0, 1, 2]) + array([ 8., -13., 38., -51., 36.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) + return lagadd(c0, lagsub(c1, lagmulx(c1))) + + +def lagdiv(c1, c2): + """ + Divide one Laguerre series by another. + + Returns the quotient-with-remainder of two Laguerre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Laguerre series coefficients representing the quotient and + remainder. + + See Also + -------- + lagadd, lagsub, lagmul, lagpow + + Notes + ----- + In general, the (polynomial) division of one Laguerre series by another + results in quotient and remainder terms that are not in the Laguerre + polynomial basis set. Thus, to express these results as a Laguerre + series, it is necessary to "reproject" the results onto the Laguerre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. 
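+    In all cases ``lagadd(lagmul(quo, c2), rem)`` reproduces `c1` up to
+    roundoff.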
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagdiv + >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 0.])) + >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) + (array([ 1., 2., 3.]), array([ 1., 1.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = lagmul([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, pu.trimseq(rem) + + +def lagpow(c, pow, maxpower=16): + """Raise a Laguerre series to a power. + + Returns the Laguerre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Laguerre series of power. + + See Also + -------- + lagadd, lagsub, lagmul, lagdiv + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagpow + >>> lagpow([1, 2, 3], 2) + array([ 14., -16., 56., -72., 54.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = lagmul(prd, c) + return prd + + +def lagder(c, m=1, scl=1, axis=0): + """ + Differentiate a Laguerre series. + + Returns the Laguerre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Laguerre series of the derivative. + + See Also + -------- + lagint + + Notes + ----- + In general, the result of differentiating a Laguerre series does not + resemble the same operation on a power series. 
Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagder + >>> lagder([ 1., 1., 1., -3.]) + array([ 1., 2., 3.]) + >>> lagder([ 1., 0., 0., -4., 3.], m=2) + array([ 1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 1, -1): + der[j - 1] = -c[j] + c[j - 1] += c[j] + der[0] = -c[1] + c = der + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Laguerre series. + + Returns the Laguerre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Laguerre series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or + ``np.isscalar(scl) == False``. + + See Also + -------- + lagder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + .. 
math::`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagint + >>> lagint([1,2,3]) + array([ 1., 1., 1., -3.]) + >>> lagint([1,2,3], m=2) + array([ 1., 0., 0., -4., 3.]) + >>> lagint([1,2,3], k=1) + array([ 2., 1., 1., -3.]) + >>> lagint([1,2,3], lbnd=-1) + array([ 11.5, 1. , 1. , -3. ]) + >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) + array([ 11.16666667, -5. , -3. , 2. ]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] + tmp[1] = -c[0] + for j in range(1, n): + tmp[j] += c[j] + tmp[j + 1] = -c[j] + tmp[0] += k[i] - lagval(lbnd, tmp) + c = tmp + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def lagval(x, c, tensor=True): + """ + Evaluate a Laguerre series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. 
The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + lagval2d, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval + >>> coef = [1,2,3] + >>> lagval(1, coef) + -0.5 + >>> lagval([[1,2],[3,4]], coef) + array([[-0.5, -4. ], + [-4.5, -2. ]]) + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*((2*nd - 1) - x))/nd + return c0 + c1*(1 - x) + + +def lagval2d(x, y, c): + """ + Evaluate a 2-D Laguerre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + lagval, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except: + raise ValueError('x, y are incompatible') + + c = lagval(x, c) + c = lagval(y, c, tensor=False) + return c + + +def laggrid2d(x, y, c): + """ + Evaluate a 2-D Laguerre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. 
In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in `c[i,j]`. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + lagval, lagval2d, lagval3d, laggrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + c = lagval(x, c) + c = lagval(y, c) + return c + + +def lagval3d(x, y, z, c): + """ + Evaluate a 3-D Laguerre series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimension polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + lagval, lagval2d, laggrid2d, laggrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except: + raise ValueError('x, y, z are incompatible') + + c = lagval(x, c) + c = lagval(y, c, tensor=False) + c = lagval(z, c, tensor=False) + return c + + +def laggrid3d(x, y, z, c): + """ + Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. 
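+
+    For illustration, 1-D inputs of lengths 2, 3, and 4 combined with a
+    coefficient array of shape (2, 2, 2) produce a grid of values of
+    shape (2, 3, 4), consistent with the shape rule described below:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import laggrid3d
+    >>> laggrid3d(np.zeros(2), np.zeros(3), np.zeros(4),
+    ...           np.ones((2, 2, 2))).shape
+    (2, 3, 4)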
+ + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + lagval, lagval2d, laggrid2d, lagval3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + c = lagval(x, c) + c = lagval(y, c) + c = lagval(z, c) + return c + + +def lagvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = L_i(x) + + where `0 <= i <= deg`. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Laguerre polynomial. + + If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the + array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and + ``lagval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Laguerre series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Laguerre polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagvander + >>> x = np.array([0, 1, 2]) + >>> lagvander(x, 3) + array([[ 1. , 1. , 1. , 1. ], + [ 1. , 0. , -0.5 , -0.66666667], + [ 1. , -1. , -1. , -0.33333333]]) + + """ + ideg = int(deg) + if ideg != deg: + raise ValueError("deg must be integer") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=0, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + v[1] = 1 - x + for i in range(2, ideg + 1): + v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i + return np.rollaxis(v, 0, v.ndim) + + +def lagvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. 
math:: V[..., deg[1]*i + j] = L_i(x) * L_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Laguerre polynomials.
+
+    If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Laguerre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    lagvander, lagvander3d, lagval2d, lagval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy = ideg
+    x, y = np.array((x, y), copy=0) + 0.0
+
+    vx = lagvander(x, degx)
+    vy = lagvander(y, degy)
+    v = vx[..., None]*vy[..., None,:]
+    return v.reshape(v.shape[:-2] + (-1,))
+
+
+def lagvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Laguerre polynomials.
+
+    If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Laguerre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagvander, lagvander2d, lagval2d, lagval3d
+
+    Notes
+    -----
+
+    ..
versionadded::1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy, degz = ideg + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = lagvander(x, degx) + vy = lagvander(y, degy) + vz = lagvander(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def lagfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Laguerre series to data. + + Return the coefficients of a Laguerre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Laguerre coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, legfit, polyfit, hermfit, hermefit + lagval : Evaluates a Laguerre series. + lagvander : pseudo Vandermonde matrix of Laguerre series. + lagweight : Laguerre weight function. + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. 
+ + Notes + ----- + The solution is the coefficients of the Laguerre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Laguerre series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre + weight. In that case the weight ``sqrt(w(x[i])`` should be used + together with data values ``y[i]/sqrt(w(x[i])``. The weight function is + available as `lagweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + http://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfit, lagval + >>> x = np.linspace(0, 10) + >>> err = np.random.randn(len(x))/10 + >>> y = lagval(x, [1, 2, 3]) + err + >>> lagfit(x, y, 2) + array([ 0.96971004, 2.00193749, 3.00288744]) + + """ + order = int(deg) + 1 + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + # set up the least squares matrices in transposed form + lhs = lagvander(x, deg).T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def lagcompanion(c): + """ + Return the companion matrix of c. + + The usual companion matrix of the Laguerre polynomials is already + symmetric when `c` is a basis Laguerre polynomial, so no scaling is + applied. + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to high + degree. 
+
+    Returns
+    -------
+    mat : ndarray
+        Companion matrix of dimensions (deg, deg).
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        raise ValueError('Series must have maximum degree of at least 1.')
+    if len(c) == 2:
+        return np.array([[1 + c[0]/c[1]]])
+
+    n = len(c) - 1
+    mat = np.zeros((n, n), dtype=c.dtype)
+    top = mat.reshape(-1)[1::n+1]
+    mid = mat.reshape(-1)[0::n+1]
+    bot = mat.reshape(-1)[n::n+1]
+    top[...] = -np.arange(1, n)
+    mid[...] = 2.*np.arange(n) + 1.
+    bot[...] = top
+    mat[:, -1] += (c[:-1]/c[-1])*n
+    return mat
+
+
+def lagroots(c):
+    """
+    Compute the roots of a Laguerre series.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * L_i(x).
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the series. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    polyroots, legroots, chebroots, hermroots, hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    The Laguerre series basis polynomials aren't powers of `x` so the
+    results of this function may seem unintuitive.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagroots, lagfromroots
+    >>> coef = lagfromroots([0, 1, 2])
+    >>> coef
+    array([ 2., -8., 12., -6.])
+    >>> lagroots(coef)
+    array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) <= 1:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([1 + c[0]/c[1]])
+
+    m = lagcompanion(c)
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+def laggauss(deg):
+    """
+    Gauss-Laguerre quadrature.
+
+    Computes the sample points and weights for Gauss-Laguerre quadrature.
+    These sample points and weights will correctly integrate polynomials of
+    degree :math:`2*deg - 1` or less over the interval :math:`[0, \infty]`
+    with the weight function :math:`f(x) = \exp(-x)`.
+
+    Parameters
+    ----------
+    deg : int
+        Number of sample points and weights. It must be >= 1.
+
+    Returns
+    -------
+    x : ndarray
+        1-D ndarray containing the sample points.
+    w : ndarray
+        1-D ndarray containing the weights.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    The results have only been tested up to degree 100; higher degrees may
+    be problematic. The weights are determined by using the fact that
+
+    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
+
+    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+    is the k'th root of :math:`L_n`, and then scaling the results to get
+    the right value when integrating 1.
+
+    """
+    ideg = int(deg)
+    if ideg != deg or ideg < 1:
+        raise ValueError("deg must be a positive integer")
+
+    # first approximation of roots. We use the fact that the companion
+    # matrix is symmetric in this case in order to obtain better zeros.
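+    # (The companion matrix built below is the one lagcompanion provides,
+    # as in lagroots; its eigenvalues serve as initial root estimates and
+    # are refined by the single Newton step that follows. With
+    # x, w = laggauss(n), np.dot(w, f(x)) then approximates the integral
+    # of f(t)*exp(-t) over [0, inf), exactly for polynomial f of degree
+    # <= 2*n - 1, per the docstring above.)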
+ c = np.array([0]*deg + [1]) + m = lagcompanion(c) + x = la.eigvals(m) + x.sort() + + # improve roots by one application of Newton + dy = lagval(x, c) + df = lagval(x, lagder(c)) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = lagval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1/(fm * df) + + # scale w to get the right value, 1 in this case + w /= w.sum() + + return x, w + + +def lagweight(x): + """Weight function of the Laguerre polynomials. + + The weight function is :math:`exp(-x)` and the interval of integration + is :math:`[0, \inf]`. The Laguerre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded::1.7.0 + + """ + w = np.exp(-x) + return w + +# +# Laguerre series class +# + +class Laguerre(ABCPolyBase): + """A Laguerre series class. + + The Laguerre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Laguerre coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(X) + 3*L_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [0, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [0, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(lagadd) + _sub = staticmethod(lagsub) + _mul = staticmethod(lagmul) + _div = staticmethod(lagdiv) + _pow = staticmethod(lagpow) + _val = staticmethod(lagval) + _int = staticmethod(lagint) + _der = staticmethod(lagder) + _fit = staticmethod(lagfit) + _line = staticmethod(lagline) + _roots = staticmethod(lagroots) + _fromroots = staticmethod(lagfromroots) + + # Virtual properties + nickname = 'lag' + domain = np.array(lagdomain) + window = np.array(lagdomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/legendre.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/legendre.py new file mode 100644 index 0000000000000..d2de282692d8d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/legendre.py @@ -0,0 +1,1809 @@ +""" +Legendre Series (:mod: `numpy.polynomial.legendre`) +=================================================== + +.. currentmodule:: numpy.polynomial.polynomial + +This module provides a number of objects (mostly functions) useful for +dealing with Legendre series, including a `Legendre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + legdomain Legendre series default domain, [-1,1]. + legzero Legendre series that evaluates identically to 0. + legone Legendre series that evaluates identically to 1. + legx Legendre series for the identity map, ``f(x) = x``. + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + legmulx multiply a Legendre series in P_i(x) by x. + legadd add two Legendre series. 
+ legsub subtract one Legendre series from another. + legmul multiply two Legendre series. + legdiv divide one Legendre series by another. + legpow raise a Legendre series to an positive integer power + legval evaluate a Legendre series at given points. + legval2d evaluate a 2D Legendre series at given points. + legval3d evaluate a 3D Legendre series at given points. + leggrid2d evaluate a 2D Legendre series on a Cartesian product. + leggrid3d evaluate a 3D Legendre series on a Cartesian product. + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + legder differentiate a Legendre series. + legint integrate a Legendre series. + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + legfromroots create a Legendre series with specified roots. + legroots find the roots of a Legendre series. + legvander Vandermonde-like matrix for Legendre polynomials. + legvander2d Vandermonde-like matrix for 2D power series. + legvander3d Vandermonde-like matrix for 3D power series. + leggauss Gauss-Legendre quadrature, points and weights. + legweight Legendre weight function. + legcompanion symmetrized companion matrix in Legendre form. + legfit least-squares fit returning a Legendre series. + legtrim trim leading coefficients from a Legendre series. + legline Legendre series representing given straight line. + leg2poly convert a Legendre series to a polynomial. + poly2leg convert a polynomial to a Legendre series. + +Classes +------- + Legendre A Legendre series class. + +See also +-------- +numpy.polynomial.polynomial +numpy.polynomial.chebyshev +numpy.polynomial.laguerre +numpy.polynomial.hermite +numpy.polynomial.hermite_e + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import numpy as np +import numpy.linalg as la + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] + +legtrim = pu.trimcoef + + +def poly2leg(pol): + """ + Convert a polynomial to a Legendre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Legendre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Legendre + series. + + See Also + -------- + leg2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(np.arange(4)) + >>> p + Polynomial([ 0., 1., 2., 3.], [-1., 1.]) + >>> c = P.Legendre(P.poly2leg(p.coef)) + >>> c + Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = legadd(legmulx(res), pol[i]) + return res + + +def leg2poly(c): + """ + Convert a Legendre series to a polynomial. 
+ + Convert an array representing the coefficients of a Legendre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Legendre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2leg + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> c = P.Legendre(range(4)) + >>> c + Legendre([ 0., 1., 2., 3.], [-1., 1.]) + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.]) + >>> P.leg2poly(range(4)) + array([-1. , -3.5, 3. , 7.5]) + + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1*(i - 1))/i) + c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) + return polyadd(c0, polymulx(c1)) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Legendre +legdomain = np.array([-1, 1]) + +# Legendre coefficients representing zero. +legzero = np.array([0]) + +# Legendre coefficients representing one. +legone = np.array([1]) + +# Legendre coefficients representing the identity x. +legx = np.array([0, 1]) + + +def legline(off, scl): + """ + Legendre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Legendre series for + ``off + scl*x``. + + See Also + -------- + polyline, chebline + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legline(3,2) + array([3, 2]) + >>> L.legval(-3, L.legline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def legfromroots(roots): + """ + Generate a Legendre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Legendre form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Legendre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + polyfromroots, chebfromroots, lagfromroots, hermfromroots, + hermefromroots. 
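+
+    Notes
+    -----
+    The implementation below combines the linear factors by repeated
+    pairwise multiplication (a divide-and-conquer product), which keeps
+    the intermediate series short rather than accumulating one root at
+    a time.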
+ + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.4, 0. , 0.4]) + >>> j = complex(0,1) + >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [legline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [legmul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = legmul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def legadd(c1, c2): + """ + Add one Legendre series to another. + + Returns the sum of two Legendre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Legendre series of their sum. + + See Also + -------- + legsub, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Legendre series + is a Legendre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legadd(c1,c2) + array([ 4., 4., 4.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def legsub(c1, c2): + """ + Subtract one Legendre series from another. + + Returns the difference of two Legendre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their difference. + + See Also + -------- + legadd, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Legendre + series is a Legendre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legsub(c1,c2) + array([-2., 0., 2.]) + >>> L.legsub(c2,c1) # -C.legsub(c1,c2) + array([ 2., 0., -2.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def legmulx(c): + """Multiply a Legendre series by x. + + Multiply the Legendre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + The multiplication uses the recursion relationship for Legendre + polynomials in the form + + .. 
math:: + + xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + for i in range(1, len(c)): + j = i + 1 + k = i - 1 + s = i + j + prd[j] = (c[i]*j)/s + prd[k] += (c[i]*i)/s + return prd + + +def legmul(c1, c2): + """ + Multiply one Legendre series by another. + + Returns the product of two Legendre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their product. + + See Also + -------- + legadd, legsub, legdiv, legpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Legendre polynomial basis set. Thus, to express + the product as a Legendre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2) + >>> P.legmul(c1,c2) # multiplication requires "reprojection" + array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) + return legadd(c0, legmulx(c1)) + + +def legdiv(c1, c2): + """ + Divide one Legendre series by another. + + Returns the quotient-with-remainder of two Legendre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + quo, rem : ndarrays + Of Legendre series coefficients representing the quotient and + remainder. + + See Also + -------- + legadd, legsub, legmul, legpow + + Notes + ----- + In general, the (polynomial) division of one Legendre series by another + results in quotient and remainder terms that are not in the Legendre + polynomial basis set. Thus, to express these results as a Legendre + series, it is necessary to "reproject" the results onto the Legendre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. 
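+
+    As a quick consistency check, multiplying the quotient by the
+    divisor and adding the remainder recovers the dividend (the values
+    here match the Examples below):
+
+    >>> from numpy.polynomial import legendre as L
+    >>> quo, rem = L.legdiv((1, 2, 3), (3, 2, 1))
+    >>> L.legadd(L.legmul(quo, (3, 2, 1)), rem)
+    array([ 1., 2., 3.])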
+ + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not + (array([ 3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> L.legdiv(c2,c1) # neither "intuitive" + (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = legmul([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, pu.trimseq(rem) + + +def legpow(c, pow, maxpower=16): + """Raise a Legendre series to a power. + + Returns the Legendre series `c` raised to the power `pow`. The + arguement `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Legendre series of power. + + See Also + -------- + legadd, legsub, legmul, legdiv + + Examples + -------- + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = legmul(prd, c) + return prd + + +def legder(c, m=1, scl=1, axis=0): + """ + Differentiate a Legendre series. + + Returns the Legendre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Legendre series of the derivative. + + See Also + -------- + legint + + Notes + ----- + In general, the result of differentiating a Legendre series does not + resemble the same operation on a power series. 
Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3,4) + >>> L.legder(c) + array([ 6., 9., 20.]) + >>> L.legder(c, 3) + array([ 60.]) + >>> L.legder(c, scl=-1) + array([ -6., -9., -20.]) + >>> L.legder(c, 2,-1) + array([ 9., 60.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j - 1)*c[j] + c[j - 2] += c[j] + if n > 1: + der[1] = 3*c[2] + der[0] = c[1] + c = der + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Legendre series. + + Returns the Legendre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Legendre series coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or + ``np.isscalar(scl) == False``. + + See Also + -------- + legder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. 
Then + .. math::`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3) + >>> L.legint(c) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) + >>> L.legint(c, 3) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + >>> L.legint(c, k=3) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) + >>> L.legint(c, lbnd=-2) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) + >>> L.legint(c, scl=2) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of integration must be integer") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/3 + for j in range(2, n): + t = c[j]/(2*j + 1) + tmp[j + 1] = t + tmp[j - 1] -= t + tmp[0] += k[i] - legval(lbnd, tmp) + c = tmp + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def legval(x, c, tensor=True): + """ + Evaluate a Legendre series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. 
+ tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + legval2d, leggrid2d, legval3d, leggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*x*(2*nd - 1))/nd + return c0 + c1*x + + +def legval2d(x, y, c): + """ + Evaluate a 2-D Legendre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Legendre series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + legval, leggrid2d, legval3d, leggrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except: + raise ValueError('x, y are incompatible') + + c = legval(x, c) + c = legval(y, c, tensor=False) + return c + + +def leggrid2d(x, y, c): + """ + Evaluate a 2-D Legendre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. 
In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in `c[i,j]`. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + legval, legval2d, legval3d, leggrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + c = legval(x, c) + c = legval(y, c) + return c + + +def legval3d(x, y, z, c): + """ + Evaluate a 3-D Legendre series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + legval, legval2d, leggrid2d, leggrid3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except: + raise ValueError('x, y, z are incompatible') + + c = legval(x, c) + c = legval(y, c, tensor=False) + c = legval(z, c, tensor=False) + return c + + +def leggrid3d(x, y, z, c): + """ + Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. 
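+
+    As an illustrative sketch (the array sizes here are arbitrary, not part
+    of the API), the shape rule ``c.shape[3:] + x.shape + y.shape +
+    z.shape`` described below can be checked directly:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> x, y, z = np.ones(5), np.ones(3), np.ones(4)
+    >>> L.leggrid3d(x, y, z, np.ones((2, 2, 2))).shape
+    (5, 3, 4)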
+ + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + legval, legval2d, leggrid2d, legval3d + + Notes + ----- + + .. versionadded::1.7.0 + + """ + c = legval(x, c) + c = legval(y, c) + c = legval(z, c) + return c + + +def legvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = L_i(x) + + where `0 <= i <= deg`. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Legendre polynomial. + + If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the + array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and + ``legval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Legendre series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Legendre polynomial. The dtype will be the same as + the converted `x`. + + """ + ideg = int(deg) + if ideg != deg: + raise ValueError("deg must be integer") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=0, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. This is not as accurate + # as reverse recursion in this application but it is more efficient. + v[0] = x*0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i + return np.rollaxis(v, 0, v.ndim) + + +def legvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. 
math:: V[..., deg[1]*i + j] = L_i(x) * L_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Legendre polynomials.
+
+    If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`.  The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    legvander, legvander3d, legval2d, legval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy = ideg
+    x, y = np.array((x, y), copy=0) + 0.0
+
+    vx = legvander(x, degx)
+    vy = legvander(y, degy)
+    v = vx[..., None]*vy[..., None,:]
+    return v.reshape(v.shape[:-2] + (-1,))
+
+
+def legvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`.  The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Legendre polynomials.
+
+    If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legvander, legvander2d, legval2d, legval3d
+
+    Notes
+    -----
+
+    ..
versionadded::1.7.0 + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy, degz = ideg + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = legvander(x, degx) + vy = legvander(y, degy) + vz = legvander(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def legfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Legendre series to data. + + Return the coefficients of a Legendre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Legendre coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, polyfit, lagfit, hermfit, hermefit + legval : Evaluates a Legendre series. + legvander : Vandermonde matrix of Legendre series. + legweight : Legendre weight function (= 1). + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. 
+ + Notes + ----- + The solution is the coefficients of the Legendre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Legendre series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + http://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + order = int(deg) + 1 + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + # set up the least squares matrices in transposed form + lhs = legvander(x, deg).T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def legcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Legendre basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. 
versionadded::1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = 1./np.sqrt(2*np.arange(n) + 1) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) + return mat + + +def legroots(c): + """ + Compute the roots of a Legendre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + polyroots, chebroots, lagroots, hermroots, hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such values. + Roots with multiplicity greater than 1 will also show larger errors as + the value of the series near such points is relatively insensitive to + errors in the roots. Isolated roots near the origin can be improved by + a few iterations of Newton's method. + + The Legendre series basis polynomials aren't powers of ``x`` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.legendre as leg + >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots + array([-0.85099543, -0.11407192, 0.51506735]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + m = legcompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +def leggauss(deg): + """ + Gauss-Legendre quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded::1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = int(deg) + if ideg != deg or ideg < 1: + raise ValueError("deg must be a non-negative integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = legcompanion(c) + x = la.eigvals(m) + x.sort() + + # improve roots by one application of Newton + dy = legval(x, c) + df = legval(x, legder(c)) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. 
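+    # In the w_k = c/(L'_n(x_k)*L_{n-1}(x_k)) formula from the Notes,
+    # c[1:] drops the leading zero coefficient, so legval(x, c[1:])
+    # evaluates L_{n-1} at the roots; df already holds L'_n from the
+    # Newton step above.  Both factors are normalized by their maxima
+    # before the product, and the final scaling below fixes the constant c.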
+ fm = legval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1/(fm * df) + + # for Legendre we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= 2. / w.sum() + + return x, w + + +def legweight(x): + """ + Weight function of the Legendre polynomials. + + The weight function is :math:`1` and the interval of integration is + :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded::1.7.0 + + """ + w = x*0.0 + 1.0 + return w + +# +# Legendre series class +# + +class Legendre(ABCPolyBase): + """A Legendre series class. + + The Legendre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Legendre coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(legadd) + _sub = staticmethod(legsub) + _mul = staticmethod(legmul) + _div = staticmethod(legdiv) + _pow = staticmethod(legpow) + _val = staticmethod(legval) + _int = staticmethod(legint) + _der = staticmethod(legder) + _fit = staticmethod(legfit) + _line = staticmethod(legline) + _roots = staticmethod(legroots) + _fromroots = staticmethod(legfromroots) + + # Virtual properties + nickname = 'leg' + domain = np.array(legdomain) + window = np.array(legdomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polynomial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polynomial.py new file mode 100644 index 0000000000000..60e339a1d2ca3 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polynomial.py @@ -0,0 +1,1532 @@ +""" +Objects for dealing with polynomials. + +This module provides a number of objects (mostly functions) useful for +dealing with polynomials, including a `Polynomial` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with polynomial objects is in +the docstring for its "parent" sub-package, `numpy.polynomial`). + +Constants +--------- +- `polydomain` -- Polynomial default domain, [-1,1]. +- `polyzero` -- (Coefficients of the) "zero polynomial." +- `polyone` -- (Coefficients of the) constant polynomial 1. +- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``. + +Arithmetic +---------- +- `polyadd` -- add two polynomials. +- `polysub` -- subtract one polynomial from another. +- `polymul` -- multiply two polynomials. +- `polydiv` -- divide one polynomial by another. +- `polypow` -- raise a polynomial to an positive integer power +- `polyval` -- evaluate a polynomial at given points. +- `polyval2d` -- evaluate a 2D polynomial at given points. +- `polyval3d` -- evaluate a 3D polynomial at given points. 
+- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product. +- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product. + +Calculus +-------- +- `polyder` -- differentiate a polynomial. +- `polyint` -- integrate a polynomial. + +Misc Functions +-------------- +- `polyfromroots` -- create a polynomial with specified roots. +- `polyroots` -- find the roots of a polynomial. +- `polyvander` -- Vandermonde-like matrix for powers. +- `polyvander2d` -- Vandermonde-like matrix for 2D power series. +- `polyvander3d` -- Vandermonde-like matrix for 3D power series. +- `polycompanion` -- companion matrix in power series form. +- `polyfit` -- least-squares fit returning a polynomial. +- `polytrim` -- trim leading coefficients from a polynomial. +- `polyline` -- polynomial representing given straight line. + +Classes +------- +- `Polynomial` -- polynomial class. + +See Also +-------- +`numpy.polynomial` + +""" +from __future__ import division, absolute_import, print_function + +__all__ = [ + 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', + 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', + 'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit', + 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] + +import warnings +import numpy as np +import numpy.linalg as la + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +polytrim = pu.trimcoef + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Polynomial default domain. +polydomain = np.array([-1, 1]) + +# Polynomial coefficients representing zero. +polyzero = np.array([0]) + +# Polynomial coefficients representing one. +polyone = np.array([1]) + +# Polynomial coefficients representing the identity x. +polyx = np.array([0, 1]) + +# +# Polynomial series functions +# + + +def polyline(off, scl): + """ + Returns an array representing a linear polynomial. + + Parameters + ---------- + off, scl : scalars + The "y-intercept" and "slope" of the line, respectively. + + Returns + ------- + y : ndarray + This module's representation of the linear polynomial ``off + + scl*x``. + + See Also + -------- + chebline + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyline(1,-1) + array([ 1, -1]) + >>> P.polyval(1, P.polyline(1,-1)) # should be 0 + 0.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def polyfromroots(roots): + """ + Generate a monic polynomial with given roots. + + Return the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + where the `r_n` are the roots specified in `roots`. If a zero has + multiplicity n, then it must appear in `roots` n times. For instance, + if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, + then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear + in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * x + ... + x^n + + The coefficient of the last term is 1 for monic polynomials in this + form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of the polynomial's coefficients If all the roots are + real, then `out` is also real, otherwise it is complex. (see + Examples below). 
+ + See Also + -------- + chebfromroots, legfromroots, lagfromroots, hermfromroots + hermefromroots + + Notes + ----- + The coefficients are determined by multiplying together linear factors + of the form `(x - r_i)`, i.e. + + .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) + + where ``n == len(roots) - 1``; note that this implies that `1` is always + returned for :math:`a_n`. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x + array([ 0., -1., 0., 1.]) + >>> j = complex(0,1) + >>> P.polyfromroots((-j,j)) # complex returned, though values are real + array([ 1.+0.j, 0.+0.j, 1.+0.j]) + + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = pu.as_series([roots], trim=False) + roots.sort() + p = [polyline(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [polymul(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = polymul(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def polyadd(c1, c2): + """ + Add one polynomial to another. + + Returns the sum of two polynomials `c1` + `c2`. The arguments are + sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + out : ndarray + The coefficient array representing their sum. + + See Also + -------- + polysub, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> sum = P.polyadd(c1,c2); sum + array([ 4., 4., 4.]) + >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) + 28.0 + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def polysub(c1, c2): + """ + Subtract one polynomial from another. + + Returns the difference of two polynomials `c1` - `c2`. The arguments + are sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of coefficients representing their difference. + + See Also + -------- + polyadd, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polysub(c1,c2) + array([-2., 0., 2.]) + >>> P.polysub(c2,c1) # -P.polysub(c1,c2) + array([ 2., 0., -2.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return pu.trimseq(ret) + + +def polymulx(c): + """Multiply a polynomial by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + + .. 
versionadded:: 1.5.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1:] = c + return prd + + +def polymul(c1, c2): + """ + Multiply one polynomial by another. + + Returns the product of two polynomials `c1` * `c2`. The arguments are + sequences of coefficients, from lowest order term to highest, e.g., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of coefficients representing a polynomial, relative to the + "standard" basis, and ordered from lowest order term to highest. + + Returns + ------- + out : ndarray + Of the coefficients of their product. + + See Also + -------- + polyadd, polysub, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polymul(c1,c2) + array([ 3., 8., 14., 8., 3.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + ret = np.convolve(c1, c2) + return pu.trimseq(ret) + + +def polydiv(c1, c2): + """ + Divide one polynomial by another. + + Returns the quotient-with-remainder of two polynomials `c1` / `c2`. + The arguments are sequences of coefficients, from lowest order term + to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + [quo, rem] : ndarrays + Of coefficient series representing the quotient and remainder. + + See Also + -------- + polyadd, polysub, polymul, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polydiv(c1,c2) + (array([ 3.]), array([-8., -4.])) + >>> P.polydiv(c2,c1) + (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + len1 = len(c1) + len2 = len(c2) + if len2 == 1: + return c1/c2[-1], c1[:1]*0 + elif len1 < len2: + return c1[:1]*0, c1 + else: + dlen = len1 - len2 + scl = c2[-1] + c2 = c2[:-1]/scl + i = dlen + j = len1 - 1 + while i >= 0: + c1[i:j] -= c2*c1[j] + i -= 1 + j -= 1 + return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) + + +def polypow(c, pow, maxpower=None): + """Raise a polynomial to a power. + + Returns the polynomial `c` raised to the power `pow`. The argument + `c` is a sequence of coefficients ordered from low to high. i.e., + [1,2,3] is the series ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c : array_like + 1-D array of array of series coefficients ordered from low to + high degree. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Power series of power. 
+ + See Also + -------- + polyadd, polysub, polymul, polydiv + + Examples + -------- + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = np.convolve(prd, c) + return prd + + +def polyder(c, m=1, scl=1, axis=0): + """ + Differentiate a polynomial. + + Returns the polynomial coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The + argument `c` is an array of coefficients from low to high degree along + each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` + while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is + ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of polynomial coefficients. If c is multidimensional the + different axis correspond to different variables with the degree + in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change + of variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Polynomial coefficients of the derivative. + + See Also + -------- + polyint + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 + >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 + array([ 2., 6., 12.]) + >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24 + array([ 24.]) + >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2 + array([ -2., -6., -12.]) + >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x + array([ 6., 24.]) + + """ + c = np.array(c, ndmin=1, copy=1) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + cdt = c.dtype + cnt, iaxis = [int(t) for t in [m, axis]] + + if cnt != m: + raise ValueError("The order of derivation must be integer") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + if iaxis != axis: + raise ValueError("The axis must be integer") + if not -c.ndim <= iaxis < c.ndim: + raise ValueError("The axis is out of range") + if iaxis < 0: + iaxis += c.ndim + + if cnt == 0: + return c + + c = np.rollaxis(c, iaxis) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=cdt) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.rollaxis(c, 0, iaxis + 1) + return c + + +def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a polynomial. + + Returns the polynomial coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. 
("Buyer
+    beware": note that, depending on what one is doing, one may want `scl`
+    to be the reciprocal of what one might expect; for more information,
+    see the Notes section below.) The argument `c` is an array of
+    coefficients, from low to high degree along each axis, e.g., [1,2,3]
+    represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
+    represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
+    ``y``.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of polynomial coefficients, ordered from low to high.
+    m : int, optional
+        Order of integration, must be non-negative. (Default: 1)
+    k : {[], list, scalar}, optional
+        Integration constant(s). The value of the first integral at zero
+        is the first value in the list, the value of the second integral
+        at zero is the second value, etc. If ``k == []`` (the default),
+        all constants are set to zero. If ``m == 1``, a single scalar can
+        be given instead of a list.
+    lbnd : scalar, optional
+        The lower bound of the integral. (Default: 0)
+    scl : scalar, optional
+        Following each integration the result is *multiplied* by `scl`
+        before the integration constant is added. (Default: 1)
+    axis : int, optional
+        Axis over which the integral is taken. (Default: 0).
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    S : ndarray
+        Coefficient array of the integral.
+
+    Raises
+    ------
+    ValueError
+        If ``m < 0`` or ``len(k) > m``.
+
+    See Also
+    --------
+    polyder
+
+    Notes
+    -----
+    Note that the result of each integration is *multiplied* by `scl`. Why
+    is this important to note? Say one is making a linear change of
+    variable :math:`u = ax + b` in an integral relative to `x`. Then
+    :math:`dx = du/a`, so one will need to set `scl` equal to
+    :math:`1/a` - perhaps not what one would have first thought.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = (1,2,3)
+    >>> P.polyint(c)  # should return array([0, 1, 1, 1])
+    array([ 0.,  1.,  1.,  1.])
+    >>> P.polyint(c,3)  # should return array([0, 0, 0, 1/6, 1/12, 1/20])
+    array([ 0.        ,  0.        ,  0.        ,  0.16666667,  0.08333333,
+            0.05      ])
+    >>> P.polyint(c,k=3)  # should return array([3, 1, 1, 1])
+    array([ 3.,  1.,  1.,  1.])
+    >>> P.polyint(c,lbnd=-2)  # should return array([6, 1, 1, 1])
+    array([ 6.,  1.,  1.,  1.])
+    >>> P.polyint(c,scl=-2)  # should return array([0, -2, -2, -2])
+    array([ 0., -2., -2., -2.])
+
+    """
+    c = np.array(c, ndmin=1, copy=1)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        # astype doesn't preserve mask attribute.
+        c = c + 0.0
+    cdt = c.dtype
+    if not np.iterable(k):
+        k = [k]
+    cnt, iaxis = [int(t) for t in [m, axis]]
+
+    if cnt != m:
+        raise ValueError("The order of integration must be integer")
+    if cnt < 0:
+        raise ValueError("The order of integration must be non-negative")
+    if len(k) > cnt:
+        raise ValueError("Too many integration constants")
+    if iaxis != axis:
+        raise ValueError("The axis must be integer")
+    if not -c.ndim <= iaxis < c.ndim:
+        raise ValueError("The axis is out of range")
+    if iaxis < 0:
+        iaxis += c.ndim
+
+    if cnt == 0:
+        return c
+
+    k = list(k) + [0]*(cnt - len(k))
+    c = np.rollaxis(c, iaxis)
+    for i in range(cnt):
+        n = len(c)
+        c *= scl
+        if n == 1 and np.all(c[0] == 0):
+            c[0] += k[i]
+        else:
+            tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
+            tmp[0] = c[0]*0
+            tmp[1] = c[0]
+            for j in range(1, n):
+                tmp[j + 1] = c[j]/(j + 1)
+            tmp[0] += k[i] - polyval(lbnd, tmp)
+            c = tmp
+    c = np.rollaxis(c, 0, iaxis + 1)
+    return c
+
+
+def polyval(x, c, tensor=True):
+    """
+    Evaluate a polynomial at points x.
+ + If `c` is of length `n + 1`, this function returns the value + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyval2d, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + The evaluation uses Horner's method. + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyval + >>> polyval(1, [1,2,3]) + 6.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyval(a, [1,2,3]) + array([[ 1., 6.], + [ 17., 34.]]) + >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients + >>> coef + array([[0, 1], + [2, 3]]) + >>> polyval([1,2], coef, tensor=True) + array([[ 2., 4.], + [ 4., 7.]]) + >>> polyval([1,2], coef, tensor=False) + array([ 2., 7.]) + + """ + c = np.array(c, ndmin=1, copy=0) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + c0 = c[-1] + x*0 + for i in range(2, len(c) + 1): + c0 = c[-i] + c0*x + return c0 + + +def polyval2d(x, y, c): + """ + Evaluate a 2-D polynomial at points (x, y). + + This function returns the value + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. 
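+
+    As a quick sanity check (the values here are chosen for illustration
+    only), with ``c = [[1, 2], [3, 4]]`` the series is
+    ``1 + 2*y + 3*x + 4*x*y``, so evaluating at ``x = y = 1`` gives 10:
+
+    >>> from numpy.polynomial.polynomial import polyval2d
+    >>> polyval2d(1, 1, [[1, 2], [3, 4]])
+    10.0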
+ + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in `c[i,j]`. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + polyval, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y = np.array((x, y), copy=0) + except: + raise ValueError('x, y are incompatible') + + c = polyval(x, c) + c = polyval(y, c, tensor=False) + return c + + +def polygrid2d(x, y, c): + """ + Evaluate a 2-D polynomial on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + polyval, polyval2d, polyval3d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = polyval(x, c) + c = polyval(y, c) + return c + + +def polyval3d(x, y, z, c): + """ + Evaluate a 3-D polynomial at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. 
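+
+    For example (an illustrative check, with arbitrarily chosen inputs),
+    an all-ones coefficient array of shape (2, 2, 2) sums the eight
+    monomials ``x^i * y^j * z^k``, which equals 8 at ``x = y = z = 1``:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial.polynomial import polyval3d
+    >>> polyval3d(1, 1, 1, np.ones((2, 2, 2)))
+    8.0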
+ + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + polyval, polyval2d, polygrid2d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + try: + x, y, z = np.array((x, y, z), copy=0) + except: + raise ValueError('x, y, z are incompatible') + + c = polyval(x, c) + c = polyval(y, c, tensor=False) + c = polyval(z, c, tensor=False) + return c + + +def polygrid3d(x, y, z, c): + """ + Evaluate a 3-D polynomial on the Cartesian product of x, y and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + polyval, polyval2d, polygrid2d, polyval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + c = polyval(x, c) + c = polyval(y, c) + c = polyval(z, c) + return c + + +def polyvander(x, deg): + """Vandermonde matrix of given degree. + + Returns the Vandermonde matrix of degree `deg` and sample points + `x`. The Vandermonde matrix is defined by + + .. math:: V[..., i] = x^i, + + where `0 <= i <= deg`. The leading indices of `V` index the elements of + `x` and the last index is the power of `x`. 
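+
+    As an illustrative check (sample values are arbitrary), the
+    matrix-vector product reproduces ``polyval``, per the equivalence
+    noted below:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial.polynomial import polyvander, polyval
+    >>> x = np.array([-1.0, 0.0, 0.5])
+    >>> c = np.array([1.0, 2.0, 3.0])
+    >>> np.allclose(np.dot(polyvander(x, 2), c), polyval(x, c))
+    True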
+ + If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the + matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and + ``polyval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of polynomials of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray. + The Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the power of `x`. + The dtype will be the same as the converted `x`. + + See Also + -------- + polyvander2d, polyvander3d + + """ + ideg = int(deg) + if ideg != deg: + raise ValueError("deg must be integer") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=0, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x*0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i-1]*x + return np.rollaxis(v, 0, v.ndim) + + +def polyvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., deg[1]*i + j] = x^i * y^j, + + where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of + `V` index the points `(x, y)` and the last index encodes the powers of + `x` and `y`. + + If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D polynomials + of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + polyvander, polyvander3d. polyval2d, polyval3d + + """ + ideg = [int(d) for d in deg] + is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] + if is_valid != [1, 1]: + raise ValueError("degrees must be non-negative integers") + degx, degy = ideg + x, y = np.array((x, y), copy=0) + 0.0 + + vx = polyvander(x, degx) + vy = polyvander(y, degy) + v = vx[..., None]*vy[..., None,:] + # einsum bug + #v = np.einsum("...i,...j->...ij", vx, vy) + return v.reshape(v.shape[:-2] + (-1,)) + + +def polyvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y, z)`. 
+    If `l, m, n` are the given degrees in `x, y, z`, then the
+    pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the powers of `x`, `y`, and `z`.
+
+    If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyvander, polyvander2d, polyval2d, polyval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    ideg = [int(d) for d in deg]
+    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
+    if is_valid != [1, 1, 1]:
+        raise ValueError("degrees must be non-negative integers")
+    degx, degy, degz = ideg
+    x, y, z = np.array((x, y, z), copy=0) + 0.0
+
+    vx = polyvander(x, degx)
+    vy = polyvander(y, degy)
+    vz = polyvander(z, degz)
+    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
+    # einsum bug
+    #v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz)
+    return v.reshape(v.shape[:-3] + (-1,))
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least-squares fit of a polynomial to data.
+
+    Return the coefficients of a polynomial of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (`M`,)
+        x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
+    y : array_like, shape (`M`,) or (`M`, `K`)
+        y-coordinates of the sample points. Several sets of sample points
+        sharing the same x-coordinates can be (independently) fit with one
+        call to `polyfit` by passing in for `y` a 2-D array that contains
+        one data set per column.
+    deg : int
+        Degree of the polynomial(s) to be fit.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller
+        than `rcond`, relative to the largest singular value, will be
+        ignored. The default value is ``len(x)*eps``, where `eps` is the
+        relative precision of the platform's float type, about 2e-16 in
+        most cases.
+    full : bool, optional
+        Switch determining the nature of the return value.
When ``False`` + (the default) just the coefficients are returned; when ``True``, + diagnostic information from the singular value decomposition (used + to solve the fit's matrix equation) is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) + Polynomial coefficients ordered from low to high. If `y` was 2-D, + the coefficients in column `k` of `coef` represent the polynomial + fit to the data in `y`'s `k`-th column. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + Raises + ------ + RankWarning + Raised if the matrix in the least-squares fit is rank deficient. + The warning is only raised if `full` == False. The warnings can + be turned off by: + + >>> import warnings + >>> warnings.simplefilter('ignore', RankWarning) + + See Also + -------- + chebfit, legfit, lagfit, hermfit, hermefit + polyval : Evaluates a polynomial. + polyvander : Vandermonde matrix for powers. + linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the polynomial `p` that minimizes + the sum of the weighted squared errors + + .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) over-determined matrix equation: + + .. math :: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected (and `full` == ``False``), a `RankWarning` will be raised. + This means that the coefficient values may be poorly determined. + Fitting to a lower order polynomial will usually get rid of the warning + (but may not be what you want, of course; if you have independent + reason(s) for choosing the degree which isn't working, you may have to: + a) reconsider those reasons, and/or b) reconsider the quality of your + data). The `rcond` parameter can also be set to a value smaller than + its default, but the resulting fit may be spurious and have large + contributions from roundoff error. + + Polynomial fits using double precision tend to "fail" at about + (polynomial) degree 20. Fits using Chebyshev or Legendre series are + generally better conditioned, but much can still depend on the + distribution of the sample points and the smoothness of the data. If + the quality of the fit is inadequate, splines may be a good + alternative. 
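+
+    The construction just described can be reproduced directly; a small
+    illustrative sketch of the unweighted case (the actual implementation
+    also rescales the columns of `V` for numerical stability):
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> x = np.linspace(-1, 1, 51)
+    >>> y = x**3 - x
+    >>> V = P.polyvander(x, 3)
+    >>> c = np.linalg.lstsq(V, y, len(x)*np.finfo(x.dtype).eps)[0]
+    >>> np.allclose(c, P.polyfit(x, y, 3))
+    True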
+ + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] + >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 + array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) + >>> stats # note the large SSR, explaining the rather poor results + [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, + 0.28853036]), 1.1324274851176597e-014] + + Same thing without the added noise + + >>> y = x**3 - x + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 + array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16, + 1.00000000e+00]) + >>> stats # note the minuscule SSR + [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, + 0.50443316, 0.28853036]), 1.1324274851176597e-014] + + """ + order = int(deg) + 1 + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + # set up the least squares matrices in transposed form + lhs = polyvander(x, deg).T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, pu.RankWarning) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def polycompanion(c): + """ + Return the companion matrix of c. + + The companion matrix for power series cannot be made symmetric by + scaling the basis, so this function differs from those for the + orthogonal polynomials. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + bot = mat.reshape(-1)[n::n+1] + bot[...] = 1 + mat[:, -1] -= c[:-1]/c[-1] + return mat + + +def polyroots(c): + """ + Compute the roots of a polynomial. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * x^i. 
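+
+    As an illustrative check of the method used here (see Notes), the
+    roots are the eigenvalues of the companion matrix built above:
+
+    >>> import numpy.polynomial.polynomial as poly
+    >>> np.sort(np.linalg.eigvals(poly.polycompanion([-1, 0, 1])))
+    array([-1.,  1.])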
+ + Parameters + ---------- + c : 1-D array_like + 1-D array of polynomial coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the polynomial. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + chebroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the power series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + Examples + -------- + >>> import numpy.polynomial.polynomial as poly + >>> poly.polyroots(poly.polyfromroots((-1,0,1))) + array([-1., 0., 1.]) + >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype + dtype('float64') + >>> j = complex(0,1) + >>> poly.polyroots(poly.polyfromroots((-j,0,j))) + array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + m = polycompanion(c) + r = la.eigvals(m) + r.sort() + return r + + +# +# polynomial class +# + +class Polynomial(ABCPolyBase): + """A power series class. + + The Polynomial class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Polynomial coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(polyadd) + _sub = staticmethod(polysub) + _mul = staticmethod(polymul) + _div = staticmethod(polydiv) + _pow = staticmethod(polypow) + _val = staticmethod(polyval) + _int = staticmethod(polyint) + _der = staticmethod(polyder) + _fit = staticmethod(polyfit) + _line = staticmethod(polyline) + _roots = staticmethod(polyroots) + _fromroots = staticmethod(polyfromroots) + + # Virtual properties + nickname = 'poly' + domain = np.array(polydomain) + window = np.array(polydomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polytemplate.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polytemplate.py new file mode 100644 index 0000000000000..e68dd18ef7bbe --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polytemplate.py @@ -0,0 +1,927 @@ +""" +Template for the Chebyshev and Polynomial classes. + +This module houses a Python string module Template object (see, e.g., +http://docs.python.org/library/string.html#template-strings) used by +the `polynomial` and `chebyshev` modules to implement their respective +`Polynomial` and `Chebyshev` classes. It provides a mechanism for easily +creating additional specific polynomial classes (e.g., Legendre, Jacobi, +etc.) in the future, such that all these classes will have a common API. 
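+
+The template was historically instantiated by substituting the class name,
+nickname, and default domain into the string and exec-ing the result; a
+minimal sketch (the substitution values here are illustrative):
+
+    exec(polytemplate.substitute(name='Chebyshev', nick='cheb',
+                                 domain='[-1,1]'))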
+ +""" +from __future__ import division, absolute_import, print_function + +import string +import sys +import warnings +from number import Number + +from numpy import ModuleDeprecationWarning + +warnings.warn("The polytemplate module will be removed in Numpy 1.10.0.", + ModuleDeprecationWarning) + +polytemplate = string.Template(''' +from __future__ import division, absolute_import, print_function +import numpy as np +import warnings +from . import polyutils as pu + +class $name(pu.PolyBase) : + """A $name series class. + + $name instances provide the standard Python numerical methods '+', + '-', '*', '//', '%', 'divmod', '**', and '()' as well as the listed + methods. + + Parameters + ---------- + coef : array_like + $name coefficients, in increasing order. For example, + ``(1, 2, 3)`` implies ``P_0 + 2P_1 + 3P_2`` where the + ``P_i`` are a graded polynomial basis. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to + the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is $domain. + window : (2,) array_like, optional + Window, see ``domain`` for its use. The default value is $domain. + .. versionadded:: 1.6.0 + + Attributes + ---------- + coef : (N,) ndarray + $name coefficients, from low to high. + domain : (2,) ndarray + Domain that is mapped to ``window``. + window : (2,) ndarray + Window that ``domain`` is mapped to. + + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + Notes + ----- + It is important to specify the domain in many cases, for instance in + fitting data, because many of the important properties of the + polynomial basis only hold in a specified interval and consequently + the data must be mapped into that interval in order to benefit. + + Examples + -------- + + """ + # Limit runaway size. T_n^m has degree n*2^m + maxpower = 16 + # Default domain + domain = np.array($domain) + # Default window + window = np.array($domain) + # Don't let participate in array operations. Value doesn't matter. + __array_priority__ = 1000 + # Not hashable + __hash__ = None + + def has_samecoef(self, other): + """Check if coefficients match. + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + Notes + ----- + .. versionadded:: 1.6.0 + + """ + if len(self.coef) != len(other.coef): + return False + elif not np.all(self.coef == other.coef): + return False + else: + return True + + def has_samedomain(self, other): + """Check if domains match. + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. + + Notes + ----- + .. versionadded:: 1.6.0 + + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + Notes + ----- + .. 
versionadded:: 1.6.0 + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + Parameters + ---------- + other : object + Class instance. + + Returns + ------- + bool : boolean + True if other is same class as self + + Notes + ----- + .. versionadded:: 1.7.0 + + """ + return isinstance(other, self.__class__) + + def __init__(self, coef, domain=$domain, window=$domain) : + [coef, dom, win] = pu.as_series([coef, domain, window], trim=False) + if len(dom) != 2 : + raise ValueError("Domain has wrong number of elements.") + if len(win) != 2 : + raise ValueError("Window has wrong number of elements.") + self.coef = coef + self.domain = dom + self.window = win + + def __repr__(self): + format = "%s(%s, %s, %s)" + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + return format % ('$name', coef, domain, window) + + def __str__(self) : + format = "%s(%s)" + coef = str(self.coef) + return format % ('$nick', coef) + + # Pickle and copy + + def __getstate__(self) : + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + return ret + + def __setstate__(self, dict) : + self.__dict__ = dict + + # Call + + def __call__(self, arg) : + off, scl = pu.mapparms(self.domain, self.window) + arg = off + scl*arg + return ${nick}val(arg, self.coef) + + def __iter__(self) : + return iter(self.coef) + + def __len__(self) : + return len(self.coef) + + # Numeric properties. + + def __neg__(self) : + return self.__class__(-self.coef, self.domain, self.window) + + def __pos__(self) : + return self + + def __add__(self, other) : + """Returns sum""" + if isinstance(other, pu.PolyBase): + if not self.has_sametype(other): + raise TypeError("Polynomial types differ") + elif not self.has_samedomain(other): + raise TypeError("Domains differ") + elif not self.has_samewindow(other): + raise TypeError("Windows differ") + else: + coef = ${nick}add(self.coef, other.coef) + else : + try : + coef = ${nick}add(self.coef, other) + except : + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __sub__(self, other) : + """Returns difference""" + if isinstance(other, pu.PolyBase): + if not self.has_sametype(other): + raise TypeError("Polynomial types differ") + elif not self.has_samedomain(other): + raise TypeError("Domains differ") + elif not self.has_samewindow(other): + raise TypeError("Windows differ") + else: + coef = ${nick}sub(self.coef, other.coef) + else : + try : + coef = ${nick}sub(self.coef, other) + except : + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __mul__(self, other) : + """Returns product""" + if isinstance(other, pu.PolyBase): + if not self.has_sametype(other): + raise TypeError("Polynomial types differ") + elif not self.has_samedomain(other): + raise TypeError("Domains differ") + elif not self.has_samewindow(other): + raise TypeError("Windows differ") + else: + coef = ${nick}mul(self.coef, other.coef) + else : + try : + coef = ${nick}mul(self.coef, other) + except : + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __div__(self, other): + # set to __floordiv__, /, for now. + return self.__floordiv__(other) + + def __truediv__(self, other) : + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. 
+ # It is hard to see where n would come from, though. + if not isinstance(other, Number) or isinstance(other, bool): + form = "unsupported types for true division: '%s', '%s'" + raise TypeError(form % (type(self), type(other))) + return self.__floordiv__(other) + + def __floordiv__(self, other) : + """Returns the quotient.""" + if isinstance(other, pu.PolyBase): + if not self.has_sametype(other): + raise TypeError("Polynomial types differ") + elif not self.has_samedomain(other): + raise TypeError("Domains differ") + elif not self.has_samewindow(other): + raise TypeError("Windows differ") + else: + quo, rem = ${nick}div(self.coef, other.coef) + else : + try : + quo, rem = ${nick}div(self.coef, other) + except : + return NotImplemented + return self.__class__(quo, self.domain, self.window) + + def __mod__(self, other) : + """Returns the remainder.""" + if isinstance(other, pu.PolyBase): + if not self.has_sametype(other): + raise TypeError("Polynomial types differ") + elif not self.has_samedomain(other): + raise TypeError("Domains differ") + elif not self.has_samewindow(other): + raise TypeError("Windows differ") + else: + quo, rem = ${nick}div(self.coef, other.coef) + else : + try : + quo, rem = ${nick}div(self.coef, other) + except : + return NotImplemented + return self.__class__(rem, self.domain, self.window) + + def __divmod__(self, other) : + """Returns quo, remainder""" + if isinstance(other, self.__class__) : + if not self.has_samedomain(other): + raise TypeError("Domains are not equal") + elif not self.has_samewindow(other): + raise TypeError("Windows are not equal") + else: + quo, rem = ${nick}div(self.coef, other.coef) + else : + try : + quo, rem = ${nick}div(self.coef, other) + except : + return NotImplemented + quo = self.__class__(quo, self.domain, self.window) + rem = self.__class__(rem, self.domain, self.window) + return quo, rem + + def __pow__(self, other) : + try : + coef = ${nick}pow(self.coef, other, maxpower = self.maxpower) + except : + raise + return self.__class__(coef, self.domain, self.window) + + def __radd__(self, other) : + try : + coef = ${nick}add(other, self.coef) + except : + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rsub__(self, other): + try : + coef = ${nick}sub(other, self.coef) + except : + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rmul__(self, other) : + try : + coef = ${nick}mul(other, self.coef) + except : + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rdiv__(self, other): + # set to __floordiv__ /. + return self.__rfloordiv__(other) + + def __rtruediv__(self, other) : + # An instance of PolyBase is not considered a + # Number. 
+ return NotImplemented + + def __rfloordiv__(self, other) : + try : + quo, rem = ${nick}div(other, self.coef) + except: + return NotImplemented + return self.__class__(quo, self.domain, self.window) + + def __rmod__(self, other) : + try : + quo, rem = ${nick}div(other, self.coef) + except : + return NotImplemented + return self.__class__(rem, self.domain, self.window) + + def __rdivmod__(self, other) : + try : + quo, rem = ${nick}div(other, self.coef) + except : + return NotImplemented + quo = self.__class__(quo, self.domain, self.window) + rem = self.__class__(rem, self.domain, self.window) + return quo, rem + + # Enhance me + # some augmented arithmetic operations could be added here + + def __eq__(self, other) : + res = isinstance(other, self.__class__) \ + and self.has_samecoef(other) \ + and self.has_samedomain(other) \ + and self.has_samewindow(other) + return res + + def __ne__(self, other) : + return not self.__eq__(other) + + # + # Extra methods. + # + + def copy(self) : + """Return a copy. + + Return a copy of the current $name instance. + + Returns + ------- + new_instance : $name + Copy of current instance. + + """ + return self.__class__(self.coef, self.domain, self.window) + + def degree(self) : + """The degree of the series. + + Notes + ----- + .. versionadded:: 1.5.0 + + """ + return len(self) - 1 + + def cutdeg(self, deg) : + """Truncate series to the given degree. + + Reduce the degree of the $name series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + Parameters + ---------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. + + Returns + ------- + new_instance : $name + New instance of $name with reduced degree. + + Notes + ----- + .. versionadded:: 1.5.0 + + """ + return self.truncate(deg + 1) + + def trim(self, tol=0) : + """Remove small leading coefficients + + Remove leading coefficients until a coefficient is reached whose + absolute value greater than `tol` or the beginning of the series is + reached. If all the coefficients would be removed the series is set to + ``[0]``. A new $name instance is returned with the new coefficients. + The current instance remains unchanged. + + Parameters + ---------- + tol : non-negative number. + All trailing coefficients less than `tol` will be removed. + + Returns + ------- + new_instance : $name + Contains the new set of coefficients. + + """ + coef = pu.trimcoef(self.coef, tol) + return self.__class__(coef, self.domain, self.window) + + def truncate(self, size) : + """Truncate series to length `size`. + + Reduce the $name series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small. + + Parameters + ---------- + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. + + Returns + ------- + new_instance : $name + New instance of $name with truncated coefficients. 
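+
+        For example (an illustrative sketch using the concrete Polynomial
+        subclass):
+
+        >>> from numpy.polynomial import Polynomial
+        >>> Polynomial([1, 2, 3]).truncate(2).coef
+        array([ 1.,  2.])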
+ + """ + isize = int(size) + if isize != size or isize < 1 : + raise ValueError("size must be a positive integer") + if isize >= len(self.coef) : + coef = self.coef + else : + coef = self.coef[:isize] + return self.__class__(coef, self.domain, self.window) + + def convert(self, domain=None, kind=None, window=None) : + """Convert to different class and/or domain. + + Parameters + ---------- + domain : array_like, optional + The domain of the converted series. If the value is None, + the default domain of `kind` is used. + kind : class, optional + The polynomial series type class to which the current instance + should be converted. If kind is None, then the class of the + current instance is used. + window : array_like, optional + The window of the converted series. If the value is None, + the default window of `kind` is used. + + Returns + ------- + new_series_instance : `kind` + The returned class can be of different type than the current + instance and/or have a different domain. + + Notes + ----- + Conversion between domains and class types can result in + numerically ill defined series. + + Examples + -------- + + """ + if kind is None: + kind = $name + if domain is None: + domain = kind.domain + if window is None: + window = kind.window + return self(kind.identity(domain, window=window)) + + def mapparms(self) : + """Return the mapping parameters. + + The returned values define a linear map ``off + scl*x`` that is + applied to the input arguments before the series is evaluated. The + map depends on the ``domain`` and ``window``; if the current + ``domain`` is equal to the ``window`` the resulting map is the + identity. If the coefficients of the ``$name`` instance are to be + used by themselves outside this class, then the linear function + must be substituted for the ``x`` in the standard representation of + the base polynomials. + + Returns + ------- + off, scl : floats or complex + The mapping function is defined by ``off + scl*x``. + + Notes + ----- + If the current domain is the interval ``[l_1, r_1]`` and the window + is ``[l_2, r_2]``, then the linear mapping function ``L`` is + defined by the equations:: + + L(l_1) = l_2 + L(r_1) = r_2 + + """ + return pu.mapparms(self.domain, self.window) + + def integ(self, m=1, k=[], lbnd=None) : + """Integrate. + + Return an instance of $name that is the definite integral of the + current series. Refer to `${nick}int` for full documentation. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + k : array_like + Integration constants. The first constant is applied to the + first integration, the second to the second, and so on. The + list of values must less than or equal to `m` in length and any + missing values are set to zero. + lbnd : Scalar + The lower bound of the definite integral. + + Returns + ------- + integral : $name + The integral of the series using the same domain. + + See Also + -------- + ${nick}int : similar function. + ${nick}der : similar function for derivative. + + """ + off, scl = self.mapparms() + if lbnd is None : + lbnd = 0 + else : + lbnd = off + scl*lbnd + coef = ${nick}int(self.coef, m, k, lbnd, 1./scl) + return self.__class__(coef, self.domain, self.window) + + def deriv(self, m=1): + """Differentiate. + + Return an instance of $name that is the derivative of the current + series. Refer to `${nick}der` for full documentation. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. 
+ + Returns + ------- + derivative : $name + The derivative of the series using the same domain. + + See Also + -------- + ${nick}der : similar function. + ${nick}int : similar function for integration. + + """ + off, scl = self.mapparms() + coef = ${nick}der(self.coef, m, scl) + return self.__class__(coef, self.domain, self.window) + + def roots(self) : + """Return list of roots. + + Return ndarray of roots for this series. See `${nick}roots` for + full documentation. Note that the accuracy of the roots is likely to + decrease the further outside the domain they lie. + + See Also + -------- + ${nick}roots : similar function + ${nick}fromroots : function to go generate series from roots. + + """ + roots = ${nick}roots(self.coef) + return pu.mapdomain(roots, self.window, self.domain) + + def linspace(self, n=100, domain=None): + """Return x,y values at equally spaced points in domain. + + Returns x, y values at `n` linearly spaced points across domain. + Here y is the value of the polynomial at the points x. By default + the domain is the same as that of the $name instance. This method + is intended mostly as a plotting aid. + + Parameters + ---------- + n : int, optional + Number of point pairs to return. The default value is 100. + domain : {None, array_like} + If not None, the specified domain is used instead of that of + the calling instance. It should be of the form ``[beg,end]``. + The default is None. + + Returns + ------- + x, y : ndarrays + ``x`` is equal to linspace(self.domain[0], self.domain[1], n) + ``y`` is the polynomial evaluated at ``x``. + + .. versionadded:: 1.5.0 + + """ + if domain is None: + domain = self.domain + x = np.linspace(domain[0], domain[1], n) + y = self(x) + return x, y + + + + @staticmethod + def fit(x, y, deg, domain=None, rcond=None, full=False, w=None, + window=$domain): + """Least squares fit to data. + + Return a `$name` instance that is the least squares fit to the data + `y` sampled at `x`. Unlike `${nick}fit`, the domain of the returned + instance can be specified and this will often result in a superior + fit with less chance of ill conditioning. Support for NA was added + in version 1.7.0. See `${nick}fit` for full documentation of the + implementation. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial. + domain : {None, [beg, end], []}, optional + Domain to use for the returned $name instance. If ``None``, + then a minimal domain that covers the points `x` is chosen. If + ``[]`` the default domain ``$domain`` is used. The default + value is $domain in numpy 1.4.x and ``None`` in later versions. + The ``[]`` value was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is len(x)*eps, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining nature of return value. When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned. + w : array_like, shape (M,), optional + Weights. 
If not None the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products + ``w[i]*y[i]`` all have the same variance. The default value is + None. + .. versionadded:: 1.5.0 + window : {[beg, end]}, optional + Window to use for the returned $name instance. The default + value is ``$domain`` + .. versionadded:: 1.6.0 + + Returns + ------- + least_squares_fit : instance of $name + The $name instance is the least squares fit to the data and + has the domain specified in the call. + + [residuals, rank, singular_values, rcond] : only if `full` = True + Residuals of the least squares fit, the effective rank of the + scaled Vandermonde matrix and its singular values, and the + specified value of `rcond`. For more details, see + `linalg.lstsq`. + + See Also + -------- + ${nick}fit : similar function + + """ + if domain is None: + domain = pu.getdomain(x) + elif type(domain) is list and len(domain) == 0: + domain = $domain + + if type(window) is list and len(window) == 0: + window = $domain + + xnew = pu.mapdomain(x, domain, window) + res = ${nick}fit(xnew, y, deg, w=w, rcond=rcond, full=full) + if full : + [coef, status] = res + return $name(coef, domain=domain, window=window), status + else : + coef = res + return $name(coef, domain=domain, window=window) + + @staticmethod + def fromroots(roots, domain=$domain, window=$domain) : + """Return $name instance with specified roots. + + Returns an instance of $name representing the product + ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is the + list of roots. + + Parameters + ---------- + roots : array_like + List of roots. + domain : {array_like, None}, optional + Domain for the resulting instance of $name. If none the domain + is the interval from the smallest root to the largest. The + default is $domain. + window : array_like, optional + Window for the resulting instance of $name. The default value + is $domain. + + Returns + ------- + object : $name instance + Series with the specified roots. + + See Also + -------- + ${nick}fromroots : equivalent function + + """ + [roots] = pu.as_series([roots], trim=False) + if domain is None : + domain = pu.getdomain(roots) + deg = len(roots) + off, scl = pu.mapparms(domain, window) + rnew = off + scl*roots + coef = ${nick}fromroots(rnew) / scl**deg + return $name(coef, domain=domain, window=window) + + @staticmethod + def identity(domain=$domain, window=$domain) : + """Identity function. + + If ``p`` is the returned $name object, then ``p(x) == x`` for all + values of x. + + Parameters + ---------- + domain : array_like + The resulting array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. + window : array_like + The resulting array must be if the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the window. + + Returns + ------- + identity : $name instance + + """ + off, scl = pu.mapparms(window, domain) + coef = ${nick}line(off, scl) + return $name(coef, domain, window) + + @staticmethod + def basis(deg, domain=$domain, window=$domain): + """$name polynomial of degree `deg`. + + Returns an instance of the $name polynomial of degree `d`. + + Parameters + ---------- + deg : int + Degree of the $name polynomial. Must be >= 0. + domain : array_like + The resulting array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. 
+ window : array_like + The resulting array must be if the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the window. + + Returns + p : $name instance + + Notes + ----- + .. versionadded:: 1.7.0 + + """ + ideg = int(deg) + if ideg != deg or ideg < 0: + raise ValueError("deg must be non-negative integer") + return $name([0]*ideg + [1], domain, window) + + @staticmethod + def cast(series, domain=$domain, window=$domain): + """Convert instance to equivalent $name series. + + The `series` is expected to be an instance of some polynomial + series of one of the types supported by by the numpy.polynomial + module, but could be some other class that supports the convert + method. + + Parameters + ---------- + series : series + The instance series to be converted. + domain : array_like + The resulting array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. + window : array_like + The resulting array must be if the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the window. + + Returns + p : $name instance + A $name series equal to the `poly` series. + + See Also + -------- + convert -- similar instance method + + Notes + ----- + .. versionadded:: 1.7.0 + + """ + return series.convert(domain, $name, window) + +''') diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polyutils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polyutils.py new file mode 100644 index 0000000000000..9348559edb97a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polyutils.py @@ -0,0 +1,403 @@ +""" +Utililty classes and functions for the polynomial modules. + +This module provides: error and warning objects; a polynomial base class; +and some routines used in both the `polynomial` and `chebyshev` modules. + +Error objects +------------- + +.. autosummary:: + :toctree: generated/ + + PolyError base class for this sub-package's errors. + PolyDomainError raised when domains are mismatched. + +Warning objects +--------------- + +.. autosummary:: + :toctree: generated/ + + RankWarning raised in least-squares fit for rank-deficient matrix. + +Base class +---------- + +.. autosummary:: + :toctree: generated/ + + PolyBase Obsolete base class for the polynomial classes. Do not use. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + as_series convert list of array_likes into 1-D arrays of common type. + trimseq remove trailing zeros. + trimcoef remove small trailing coefficients. + getdomain return the domain appropriate for a given set of abscissae. + mapdomain maps points between domains. + mapparms parameters of the linear map between domains. + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np + +__all__ = [ + 'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq', + 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase'] + +# +# Warnings and Exceptions +# + +class RankWarning(UserWarning): + """Issued by chebfit when the design matrix is rank deficient.""" + pass + +class PolyError(Exception): + """Base class for errors in this module.""" + pass + +class PolyDomainError(PolyError): + """Issued by the generic Poly class when two domains don't match. + + This is raised when an binary operation is passed Poly objects with + different domains. + + """ + pass + +# +# Base class for all polynomial types +# + +class PolyBase(object): + """ + Base class for all polynomial types. 
+ + Deprecated in numpy 1.9.0, use the abstract + ABCPolyBase class instead. Note that the latter + reguires a number of virtual functions to be + implemented. + + """ + pass + +# +# Helper functions to convert inputs to 1-D arrays +# +def trimseq(seq): + """Remove small Poly series coefficients. + + Parameters + ---------- + seq : sequence + Sequence of Poly series coefficients. This routine fails for + empty sequences. + + Returns + ------- + series : sequence + Subsequence with trailing zeros removed. If the resulting sequence + would be empty, return the first element. The returned sequence may + or may not be a view. + + Notes + ----- + Do not lose the type info if the sequence contains unknown objects. + + """ + if len(seq) == 0: + return seq + else: + for i in range(len(seq) - 1, -1, -1): + if seq[i] != 0: + break + return seq[:i+1] + + +def as_series(alist, trim=True): + """ + Return argument as a list of 1-d arrays. + + The returned list contains array(s) of dtype double, complex double, or + object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of + size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays + of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array + raises a Value Error if it is not first reshaped into either a 1-d or 2-d + array. + + Parameters + ---------- + a : array_like + A 1- or 2-d array_like + trim : boolean, optional + When True, trailing zeros are removed from the inputs. + When False, the inputs are passed through intact. + + Returns + ------- + [a1, a2,...] : list of 1-D arrays + A copy of the input data as a list of 1-d arrays. + + Raises + ------ + ValueError + Raised when `as_series` cannot convert its input to 1-d arrays, or at + least one of the resulting arrays is empty. + + Examples + -------- + >>> from numpy import polynomial as P + >>> a = np.arange(4) + >>> P.as_series(a) + [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])] + >>> b = np.arange(6).reshape((2,3)) + >>> P.as_series(b) + [array([ 0., 1., 2.]), array([ 3., 4., 5.])] + + """ + arrays = [np.array(a, ndmin=1, copy=0) for a in alist] + if min([a.size for a in arrays]) == 0: + raise ValueError("Coefficient array is empty") + if any([a.ndim != 1 for a in arrays]): + raise ValueError("Coefficient array is not 1-d") + if trim: + arrays = [trimseq(a) for a in arrays] + + if any([a.dtype == np.dtype(object) for a in arrays]): + ret = [] + for a in arrays: + if a.dtype != np.dtype(object): + tmp = np.empty(len(a), dtype=np.dtype(object)) + tmp[:] = a[:] + ret.append(tmp) + else: + ret.append(a.copy()) + else: + try: + dtype = np.common_type(*arrays) + except: + raise ValueError("Coefficient arrays have no common type") + ret = [np.array(a, copy=1, dtype=dtype) for a in arrays] + return ret + + +def trimcoef(c, tol=0): + """ + Remove "small" "trailing" coefficients from a polynomial. + + "Small" means "small in absolute value" and is controlled by the + parameter `tol`; "trailing" means highest order coefficient(s), e.g., in + ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) + both the 3-rd and 4-th order coefficients would be "trimmed." + + Parameters + ---------- + c : array_like + 1-d array of coefficients, ordered from lowest order to highest. + tol : number, optional + Trailing (i.e., highest order) elements with absolute value less + than or equal to `tol` (default value is zero) are removed. + + Returns + ------- + trimmed : ndarray + 1-d array with trailing zeros removed. 
If the resulting series + would be empty, a series containing a single zero is returned. + + Raises + ------ + ValueError + If `tol` < 0 + + See Also + -------- + trimseq + + Examples + -------- + >>> from numpy import polynomial as P + >>> P.trimcoef((0,0,3,0,5,0,0)) + array([ 0., 0., 3., 0., 5.]) + >>> P.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + array([ 0.]) + >>> i = complex(0,1) # works for complex + >>> P.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + array([ 0.0003+0.j , 0.0010-0.001j]) + + """ + if tol < 0: + raise ValueError("tol must be non-negative") + + [c] = as_series([c]) + [ind] = np.where(np.abs(c) > tol) + if len(ind) == 0: + return c[:1]*0 + else: + return c[:ind[-1] + 1].copy() + +def getdomain(x): + """ + Return a domain suitable for given abscissae. + + Find a domain suitable for a polynomial or Chebyshev series + defined at the values supplied. + + Parameters + ---------- + x : array_like + 1-d array of abscissae whose domain will be determined. + + Returns + ------- + domain : ndarray + 1-d array containing two values. If the inputs are complex, then + the two returned points are the lower left and upper right corners + of the smallest rectangle (aligned with the axes) in the complex + plane containing the points `x`. If the inputs are real, then the + two points are the ends of the smallest interval containing the + points `x`. + + See Also + -------- + mapparms, mapdomain + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> points = np.arange(4)**2 - 5; points + array([-5, -4, -1, 4]) + >>> pu.getdomain(points) + array([-5., 4.]) + >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle + >>> pu.getdomain(c) + array([-1.-1.j, 1.+1.j]) + + """ + [x] = as_series([x], trim=False) + if x.dtype.char in np.typecodes['Complex']: + rmin, rmax = x.real.min(), x.real.max() + imin, imax = x.imag.min(), x.imag.max() + return np.array((complex(rmin, imin), complex(rmax, imax))) + else: + return np.array((x.min(), x.max())) + +def mapparms(old, new): + """ + Linear map parameters between domains. + + Return the parameters of the linear map ``offset + scale*x`` that maps + `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. + + Parameters + ---------- + old, new : array_like + Domains. Each domain must (successfully) convert to a 1-d array + containing precisely two values. + + Returns + ------- + offset, scale : scalars + The map ``L(x) = offset + scale*x`` maps the first domain to the + second. + + See Also + -------- + getdomain, mapdomain + + Notes + ----- + Also works for complex numbers, and thus can be used to calculate the + parameters required to map any line in the complex plane to any other + line therein. + + Examples + -------- + >>> from numpy import polynomial as P + >>> P.mapparms((-1,1),(-1,1)) + (0.0, 1.0) + >>> P.mapparms((1,-1),(-1,1)) + (0.0, -1.0) + >>> i = complex(0,1) + >>> P.mapparms((-i,-1),(1,i)) + ((1+1j), (1+0j)) + + """ + oldlen = old[1] - old[0] + newlen = new[1] - new[0] + off = (old[1]*new[0] - old[0]*new[1])/oldlen + scl = newlen/oldlen + return off, scl + +def mapdomain(x, old, new): + """ + Apply linear map to input points. + + The linear map ``offset + scale*x`` that maps the domain `old` to + the domain `new` is applied to the points `x`. + + Parameters + ---------- + x : array_like + Points to be mapped. If `x` is a subtype of ndarray the subtype + will be preserved. + old, new : array_like + The two domains that determine the map. 
Each must (successfully) + convert to 1-d arrays containing precisely two values. + + Returns + ------- + x_out : ndarray + Array of points of the same shape as `x`, after application of the + linear map between the two domains. + + See Also + -------- + getdomain, mapparms + + Notes + ----- + Effectively, this implements: + + .. math :: + x\\_out = new[0] + m(x - old[0]) + + where + + .. math :: + m = \\frac{new[1]-new[0]}{old[1]-old[0]} + + Examples + -------- + >>> from numpy import polynomial as P + >>> old_domain = (-1,1) + >>> new_domain = (0,2*np.pi) + >>> x = np.linspace(-1,1,6); x + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) + >>> x_out = P.mapdomain(x, old_domain, new_domain); x_out + array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, + 6.28318531]) + >>> x - P.mapdomain(x_out, new_domain, old_domain) + array([ 0., 0., 0., 0., 0., 0.]) + + Also works for complex numbers (and thus can be used to map any line in + the complex plane to any other line therein). + + >>> i = complex(0,1) + >>> old = (-1 - i, 1 + i) + >>> new = (-1 + i, 1 - i) + >>> z = np.linspace(old[0], old[1], 6); z + array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1.0+1.j ]) + >>> new_z = P.mapdomain(z, old, new); new_z + array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) + + """ + x = np.asanyarray(x) + off, scl = mapparms(old, new) + return off + scl*x diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/setup.py new file mode 100644 index 0000000000000..cb59ee1e56d9c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/setup.py @@ -0,0 +1,11 @@ +from __future__ import division, print_function + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('polynomial', parent_package, top_path) + config.add_data_dir('tests') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_chebyshev.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_chebyshev.py new file mode 100644 index 0000000000000..a596905f6771d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_chebyshev.py @@ -0,0 +1,554 @@ +"""Tests for chebyshev module. 
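+
+The coefficient lists T0..T9 below give the first ten Chebyshev
+polynomials of the first kind expressed in the power basis; they serve
+as reference values for the evaluation tests.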
+ +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.polynomial.chebyshev as cheb +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + TestCase, assert_almost_equal, assert_raises, + assert_equal, assert_, run_module_suite) + + +def trim(x): + return cheb.chebtrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestPrivate(TestCase): + + def test__cseries_to_zseries(self): + for i in range(5): + inp = np.array([2] + [1]*i, np.double) + tgt = np.array([.5]*i + [2] + [.5]*i, np.double) + res = cheb._cseries_to_zseries(inp) + assert_equal(res, tgt) + + def test__zseries_to_cseries(self): + for i in range(5): + inp = np.array([.5]*i + [2] + [.5]*i, np.double) + tgt = np.array([2] + [1]*i, np.double) + res = cheb._zseries_to_cseries(inp) + assert_equal(res, tgt) + + +class TestConstants(TestCase): + + def test_chebdomain(self): + assert_equal(cheb.chebdomain, [-1, 1]) + + def test_chebzero(self): + assert_equal(cheb.chebzero, [0]) + + def test_chebone(self): + assert_equal(cheb.chebone, [1]) + + def test_chebx(self): + assert_equal(cheb.chebx, [0, 1]) + + +class TestArithmetic(TestCase): + + def test_chebadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = cheb.chebadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebsub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = cheb.chebsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebmulx(self): + assert_equal(cheb.chebmulx([0]), [0]) + assert_equal(cheb.chebmulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [.5, 0, .5] + assert_equal(cheb.chebmulx(ser), tgt) + + def test_chebmul(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(i + j + 1) + tgt[i + j] += .5 + tgt[abs(i - j)] += .5 + res = cheb.chebmul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebdiv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = cheb.chebadd(ci, cj) + quo, rem = cheb.chebdiv(tgt, ci) + res = cheb.chebadd(cheb.chebmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(TestCase): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 2., 1.5]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_chebval(self): + #check empty input + assert_equal(cheb.chebval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Tlist] + for i in range(10): + msg = "At i=%d" % i + tgt = y[i] + res = cheb.chebval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(cheb.chebval(x, 
[1]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) + + def test_chebval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = cheb.chebval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_chebval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = cheb.chebval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_chebgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = cheb.chebgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_chebgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = cheb.chebgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(TestCase): + + def test_chebint(self): + # check exceptions + assert_raises(ValueError, cheb.chebint, [0], .5) + assert_raises(ValueError, cheb.chebint, [0], -1) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = cheb.chebint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i]) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(cheb.chebval(-1, chebint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1) + res = cheb.chebint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k]) + res = cheb.chebint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) + res = 
cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) + res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T + res = cheb.chebint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c) for c in c2d]) + res = cheb.chebint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) + res = cheb.chebint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(TestCase): + + def test_chebder(self): + # check exceptions + assert_raises(ValueError, cheb.chebder, [0], .5) + assert_raises(ValueError, cheb.chebder, [0], -1) + + # check that zeroth deriviative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = cheb.chebder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T + res = cheb.chebder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebder(c) for c in c2d]) + res = cheb.chebder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(TestCase): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_chebvander(self): + # check for 1d x + x = np.arange(3) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + def test_chebvander2d(self): + # also tests chebval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = cheb.chebvander2d(x1, x2, [1, 2]) + tgt = cheb.chebval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_chebvander3d(self): + # also tests chebval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) + tgt = cheb.chebval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(TestCase): + + def test_chebfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, cheb.chebfit, [1], [1], -1) + assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) + 
assert_raises(TypeError, cheb.chebfit, [], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) + assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = cheb.chebfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + # + coef4 = cheb.chebfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # + coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = cheb.chebfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) + + +class TestCompanion(TestCase): + + def test_raises(self): + assert_raises(ValueError, cheb.chebcompanion, []) + assert_raises(ValueError, cheb.chebcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(cheb.chebcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss(TestCase): + + def test_100(self): + x, w = cheb.chebgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
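# Illustrative sketch (editorial addition, not part of the patch): the test
# below checks orthogonality of the Chebyshev-Gauss rule. As background,
# cheb.chebgauss(deg) returns sample points and weights that integrate
# against the weight 1/sqrt(1 - x**2) on [-1, 1]; for example the exact
# value of the integral of x**2/sqrt(1 - x**2) over [-1, 1] is pi/2, and a
# 50-point rule reproduces it:
import numpy as np
import numpy.polynomial.chebyshev as cheb

x, w = cheb.chebgauss(50)
# sum(w_i * f(x_i)) approximates the integral of f(x)/sqrt(1 - x**2)
assert np.allclose(np.sum(w * x**2), np.pi / 2)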
+ v = cheb.chebvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.pi + assert_almost_equal(w.sum(), tgt) + + +class TestMisc(TestCase): + + def test_chebfromroots(self): + res = cheb.chebfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = [0]*i + [1] + res = cheb.chebfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebroots(self): + assert_almost_equal(cheb.chebroots([1]), []) + assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = cheb.chebroots(cheb.chebfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, cheb.chebtrim, coef, -1) + + # Test results + assert_equal(cheb.chebtrim(coef), coef[:-1]) + assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) + assert_equal(cheb.chebtrim(coef, 2), [0]) + + def test_chebline(self): + assert_equal(cheb.chebline(3, 4), [3, 4]) + + def test_cheb2poly(self): + for i in range(10): + assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) + + def test_poly2cheb(self): + for i in range(10): + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11)[1:-1] + tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) + res = cheb.chebweight(x) + assert_almost_equal(res, tgt) + + def test_chebpts1(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts1, 1.5) + assert_raises(ValueError, cheb.chebpts1, 0) + + #test points + tgt = [0] + assert_almost_equal(cheb.chebpts1(1), tgt) + tgt = [-0.70710678118654746, 0.70710678118654746] + assert_almost_equal(cheb.chebpts1(2), tgt) + tgt = [-0.86602540378443871, 0, 0.86602540378443871] + assert_almost_equal(cheb.chebpts1(3), tgt) + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + assert_almost_equal(cheb.chebpts1(4), tgt) + + def test_chebpts2(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts2, 1.5) + assert_raises(ValueError, cheb.chebpts2, 1) + + #test points + tgt = [-1, 1] + assert_almost_equal(cheb.chebpts2(2), tgt) + tgt = [-1, 0, 1] + assert_almost_equal(cheb.chebpts2(3), tgt) + tgt = [-1, -0.5, .5, 1] + assert_almost_equal(cheb.chebpts2(4), tgt) + tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] + assert_almost_equal(cheb.chebpts2(5), tgt) + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_classes.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_classes.py new file mode 100644 index 0000000000000..cd5a54687939d --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_classes.py @@ -0,0 +1,570 @@ +"""Test inter-conversion of different polynomial classes. + +This tests the convert and cast methods of all the polynomial classes. 
+ +""" +from __future__ import division, absolute_import, print_function + +import operator as op +from numbers import Number + +import numpy as np +from numpy.polynomial import ( + Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite) +from numpy.compat import long + + +classes = ( + Polynomial, Legendre, Chebyshev, Laguerre, + Hermite, HermiteE) + + +def test_class_methods(): + for Poly1 in classes: + for Poly2 in classes: + yield check_conversion, Poly1, Poly2 + yield check_cast, Poly1, Poly2 + for Poly in classes: + yield check_call, Poly + yield check_identity, Poly + yield check_basis, Poly + yield check_fromroots, Poly + yield check_fit, Poly + yield check_equal, Poly + yield check_not_equal, Poly + yield check_add, Poly + yield check_sub, Poly + yield check_mul, Poly + yield check_floordiv, Poly + yield check_truediv, Poly + yield check_mod, Poly + yield check_divmod, Poly + yield check_pow, Poly + yield check_integ, Poly + yield check_deriv, Poly + yield check_roots, Poly + yield check_linspace, Poly + yield check_mapparms, Poly + yield check_degree, Poly + yield check_copy, Poly + yield check_cutdeg, Poly + yield check_truncate, Poly + yield check_trim, Poly + + +# +# helper functions +# +random = np.random.random + + +def assert_poly_almost_equal(p1, p2, msg=""): + try: + assert_(np.all(p1.domain == p2.domain)) + assert_(np.all(p1.window == p2.window)) + assert_almost_equal(p1.coef, p2.coef) + except AssertionError: + msg = "Result: %s\nTarget: %s", (p1, p2) + raise AssertionError(msg) + + +# +# conversion methods that depend on two classes +# + + +def check_conversion(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = p1.convert(kind=Poly2, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +def check_cast(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = Poly2.cast(p1, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +# +# methods that depend on one class +# + + +def check_identity(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + x = np.linspace(d[0], d[1], 11) + p = Poly.identity(domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_almost_equal(p(x), x) + + +def check_basis(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.basis(5, domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_equal(p.coef, [0]*5 + [1]) + + +def check_fromroots(Poly): + # check that requested roots are zeros of a polynomial + # of correct degree, domain, and window. 
+ d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def check_fit(Poly): + + def f(x): + return x*(x - 1)*(x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. + w = np.zeros_like(x) + z = y + random(y.shape)*.25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + assert_almost_equal(p1(x), p2(x)) + + +def check_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def check_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def check_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def check_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, 
op.sub, p1, Polynomial([0])) + + +def check_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def check_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5*p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def check_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. 
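# Illustrative sketch (editorial addition, not part of the patch): what the
# rule above means in practice. Dividing by a plain Number scales the
# coefficients; a polynomial (or bool) denominator raises, and series
# division should go through // or divmod instead:
import numpy as np
from numpy.polynomial import Polynomial

p = Polynomial([2., 4., 6.])
assert np.allclose((p / 2).coef, [1., 2., 3.])  # Number denominator: ok
try:
    p / Polynomial([1.])  # polynomial denominator: rejected
except TypeError:
    pass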
+ p1 = Poly([1,2,3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, long, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [tuple(), list(), dict(), bool(), np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def check_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def check_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p2, 2) + assert_poly_almost_equal(quo, 0.5*p2) + assert_poly_almost_equal(rem, Poly([0])) + quo, rem = divmod(2, p2) + assert_poly_almost_equal(quo, Poly([0])) + assert_poly_almost_equal(rem, Poly([2])) + assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, divmod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, divmod, p1, Polynomial([0])) + + +def check_roots(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + tgt = np.sort(random((5,))) + res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) + assert_almost_equal(res, tgt) + # default domain and window + res = np.sort(Poly.fromroots(tgt).roots()) + 
assert_almost_equal(res, tgt)
+
+
+def check_degree(Poly):
+    p = Poly.basis(5)
+    assert_equal(p.degree(), 5)
+
+
+def check_copy(Poly):
+    p1 = Poly.basis(5)
+    p2 = p1.copy()
+    assert_(p1 == p2)
+    assert_(p1 is not p2)
+    assert_(p1.coef is not p2.coef)
+    assert_(p1.domain is not p2.domain)
+    assert_(p1.window is not p2.window)
+
+
+def check_integ(Poly):
+    P = Polynomial
+    # Check defaults
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+    # Check with k
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ(k=1))
+    p2 = P.cast(p0.integ(2, k=[1, 1]))
+    assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
+    # Check with lbnd
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ(lbnd=1))
+    p2 = P.cast(p0.integ(2, lbnd=1))
+    assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
+    # Check scaling
+    d = 2*Poly.domain
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+
+
+def check_deriv(Poly):
+    # Check that the derivative is the inverse of integration. It is
+    # assumed that the integration has been checked elsewhere.
+    d = Poly.domain + random((2,))*.25
+    w = Poly.window + random((2,))*.25
+    p1 = Poly([1, 2, 3], domain=d, window=w)
+    p2 = p1.integ(2, k=[1, 2])
+    p3 = p1.integ(1, k=[1])
+    assert_almost_equal(p2.deriv(1).coef, p3.coef)
+    assert_almost_equal(p2.deriv(2).coef, p1.coef)
+    # default domain and window
+    p1 = Poly([1, 2, 3])
+    p2 = p1.integ(2, k=[1, 2])
+    p3 = p1.integ(1, k=[1])
+    assert_almost_equal(p2.deriv(1).coef, p3.coef)
+    assert_almost_equal(p2.deriv(2).coef, p1.coef)
+
+
+def check_linspace(Poly):
+    d = Poly.domain + random((2,))*.25
+    w = Poly.window + random((2,))*.25
+    p = Poly([1, 2, 3], domain=d, window=w)
+    # check default domain
+    xtgt = np.linspace(d[0], d[1], 20)
+    ytgt = p(xtgt)
+    xres, yres = p.linspace(20)
+    assert_almost_equal(xres, xtgt)
+    assert_almost_equal(yres, ytgt)
+    # check specified domain
+    xtgt = np.linspace(0, 2, 20)
+    ytgt = p(xtgt)
+    xres, yres = p.linspace(20, domain=[0, 2])
+    assert_almost_equal(xres, xtgt)
+    assert_almost_equal(yres, ytgt)
+
+
+def check_pow(Poly):
+    d = Poly.domain + random((2,))*.25
+    w = Poly.window + random((2,))*.25
+    tgt = Poly([1], domain=d, window=w)
+    tst = Poly([1, 2, 3], domain=d, window=w)
+    for i in range(5):
+        assert_poly_almost_equal(tst**i, tgt)
+        tgt = tgt * tst
+    # default domain and window
+    tgt = Poly([1])
+    tst = Poly([1, 2, 3])
+    for i in range(5):
+        assert_poly_almost_equal(tst**i, tgt)
+        tgt = tgt * tst
+    # check error for invalid powers
+    assert_raises(ValueError, op.pow, tgt, 1.5)
+    assert_raises(ValueError, op.pow, tgt, -1)
+
+
+def check_call(Poly):
+    P = Polynomial
+    d = Poly.domain
+    x = np.linspace(d[0], d[1], 11)
+
+    # Check defaults
+    p = Poly.cast(P([1, 2, 3]))
+    tgt = 1 + x*(2 + 3*x)
+    res = p(x)
+    assert_almost_equal(res, tgt)
+
+
+def check_cutdeg(Poly):
+    p = Poly([1, 2, 3])
+    assert_raises(ValueError, p.cutdeg, .5)
+    assert_raises(ValueError, p.cutdeg, -1)
+    assert_equal(len(p.cutdeg(3)), 3)
+    assert_equal(len(p.cutdeg(2)), 3)
+    assert_equal(len(p.cutdeg(1)), 2)
+    assert_equal(len(p.cutdeg(0)), 1)
+
+
+def check_truncate(Poly):
+    p = Poly([1, 2, 3])
+    assert_raises(ValueError, p.truncate, .5)
+
assert_raises(ValueError, p.truncate, 0) + assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def check_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def check_mapparms(Poly): + # check with defaults. Should be identity. + d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2*d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite.py new file mode 100644 index 0000000000000..e67625a881395 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite.py @@ -0,0 +1,516 @@ +"""Tests for hermite module. + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.polynomial.hermite as herm +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + TestCase, assert_almost_equal, assert_raises, + assert_equal, assert_, run_module_suite) + +H0 = np.array([1]) +H1 = np.array([0, 2]) +H2 = np.array([-2, 0, 4]) +H3 = np.array([0, -12, 0, 8]) +H4 = np.array([12, 0, -48, 0, 16]) +H5 = np.array([0, 120, 0, -160, 0, 32]) +H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) +H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) +H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) +H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) + +Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] + + +def trim(x): + return herm.hermtrim(x, tol=1e-6) + + +class TestConstants(TestCase): + + def test_hermdomain(self): + assert_equal(herm.hermdomain, [-1, 1]) + + def test_hermzero(self): + assert_equal(herm.hermzero, [0]) + + def test_hermone(self): + assert_equal(herm.hermone, [1]) + + def test_hermx(self): + assert_equal(herm.hermx, [0, .5]) + + +class TestArithmetic(TestCase): + x = np.linspace(-3, 3, 100) + + def test_hermadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herm.hermadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermsub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herm.hermsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermmulx(self): + assert_equal(herm.hermmulx([0]), [0]) + assert_equal(herm.hermmulx([1]), [0, .5]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, .5] + assert_equal(herm.hermmulx(ser), tgt) + + def test_hermmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herm.hermval(self.x, pol1) + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + pol2 = [0]*j + [1] + val2 = herm.hermval(self.x, pol2) + pol3 = herm.hermmul(pol1, pol2) + val3 = herm.hermval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermdiv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = 
[0]*j + [1] + tgt = herm.hermadd(ci, cj) + quo, rem = herm.hermdiv(tgt, ci) + res = herm.hermadd(herm.hermmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(TestCase): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 1., .75]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermval(self): + #check empty input + assert_equal(herm.hermval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Hlist] + for i in range(10): + msg = "At i=%d" % i + tgt = y[i] + res = herm.hermval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herm.hermval(x, [1]).shape, dims) + assert_equal(herm.hermval(x, [1, 0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) + + def test_hermval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herm.hermval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herm.hermval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herm.hermgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herm.hermgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(TestCase): + + def test_hermint(self): + # check exceptions + assert_raises(ValueError, herm.hermint, [0], .5) + assert_raises(ValueError, herm.hermint, [0], -1) + assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herm.hermint([0], m=i, k=k) + assert_almost_equal(res, [0, .5]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i]) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herm.hermval(-1, hermint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + 
[2/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1) + res = herm.hermint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k]) + res = herm.hermint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) + res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], scl=2) + res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T + res = herm.hermint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c) for c in c2d]) + res = herm.hermint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) + res = herm.hermint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(TestCase): + + def test_hermder(self): + # check exceptions + assert_raises(ValueError, herm.hermder, [0], .5) + assert_raises(ValueError, herm.hermder, [0], -1) + + # check that zeroth deriviative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herm.hermder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T + res = herm.hermder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermder(c) for c in c2d]) + res = herm.hermder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(TestCase): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermvander(self): + # check for 1d x + x = np.arange(3) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + def test_hermvander2d(self): + # also tests hermval2d for non-square coefficient array + x1, x2, x3 = self.x + c = 
np.random.random((2, 3)) + van = herm.hermvander2d(x1, x2, [1, 2]) + tgt = herm.hermval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermvander3d(self): + # also tests hermval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) + tgt = herm.hermval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(TestCase): + + def test_hermfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, herm.hermfit, [1], [1], -1) + assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) + assert_raises(TypeError, herm.hermfit, [], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) + assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herm.hermfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + # + coef4 = herm.hermfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # + coef2d = herm.hermfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herm.hermfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) + + +class TestCompanion(TestCase): + + def test_raises(self): + assert_raises(ValueError, herm.hermcompanion, []) + assert_raises(ValueError, herm.hermcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(herm.hermcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) + + +class TestGauss(TestCase): + + def test_100(self): + x, w = herm.hermgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
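# Illustrative sketch (editorial addition, not part of the patch): the test
# below checks orthogonality under the Hermite-Gauss rule. As background,
# herm.hermgauss(deg) integrates against the physicists' weight exp(-x**2)
# on the whole real line; e.g. the integral of x**2 * exp(-x**2) is
# sqrt(pi)/2, and a 50-point rule reproduces it:
import numpy as np
import numpy.polynomial.hermite as herm

x, w = herm.hermgauss(50)
assert np.allclose(np.sum(w * x**2), np.sqrt(np.pi) / 2)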
+ v = herm.hermvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc(TestCase): + + def test_hermfromroots(self): + res = herm.hermfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herm.hermfromroots(roots) + res = herm.hermval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herm.herm2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermroots(self): + assert_almost_equal(herm.hermroots([1]), []) + assert_almost_equal(herm.hermroots([1, 1]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herm.hermroots(herm.hermfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herm.hermtrim, coef, -1) + + # Test results + assert_equal(herm.hermtrim(coef), coef[:-1]) + assert_equal(herm.hermtrim(coef, 1), coef[:-3]) + assert_equal(herm.hermtrim(coef, 2), [0]) + + def test_hermline(self): + assert_equal(herm.hermline(3, 4), [3, 2]) + + def test_herm2poly(self): + for i in range(10): + assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) + + def test_poly2herm(self): + for i in range(10): + assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-x**2) + res = herm.hermweight(x) + assert_almost_equal(res, tgt) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite_e.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite_e.py new file mode 100644 index 0000000000000..f8601a82846a5 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite_e.py @@ -0,0 +1,517 @@ +"""Tests for hermite_e module. 
+ +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.polynomial.hermite_e as herme +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + TestCase, assert_almost_equal, assert_raises, + assert_equal, assert_, run_module_suite) + +He0 = np.array([1]) +He1 = np.array([0, 1]) +He2 = np.array([-1, 0, 1]) +He3 = np.array([0, -3, 0, 1]) +He4 = np.array([3, 0, -6, 0, 1]) +He5 = np.array([0, 15, 0, -10, 0, 1]) +He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) +He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) +He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) +He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) + +Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] + + +def trim(x): + return herme.hermetrim(x, tol=1e-6) + + +class TestConstants(TestCase): + + def test_hermedomain(self): + assert_equal(herme.hermedomain, [-1, 1]) + + def test_hermezero(self): + assert_equal(herme.hermezero, [0]) + + def test_hermeone(self): + assert_equal(herme.hermeone, [1]) + + def test_hermex(self): + assert_equal(herme.hermex, [0, 1]) + + +class TestArithmetic(TestCase): + x = np.linspace(-3, 3, 100) + + def test_hermeadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herme.hermeadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermesub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herme.hermesub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermemulx(self): + assert_equal(herme.hermemulx([0]), [0]) + assert_equal(herme.hermemulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, 1] + assert_equal(herme.hermemulx(ser), tgt) + + def test_hermemul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herme.hermeval(self.x, pol1) + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + pol2 = [0]*j + [1] + val2 = herme.hermeval(self.x, pol2) + pol3 = herme.hermemul(pol1, pol2) + val3 = herme.hermeval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermediv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = herme.hermeadd(ci, cj) + quo, rem = herme.hermediv(tgt, ci) + res = herme.hermeadd(herme.hermemul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(TestCase): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([4., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermeval(self): + #check empty input + assert_equal(herme.hermeval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Helist] + for i in range(10): + msg = "At i=%d" % i + tgt = y[i] + res = herme.hermeval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herme.hermeval(x, [1]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) + + 
def test_hermeval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herme.hermeval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermeval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herme.hermeval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermegrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herme.hermegrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermegrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herme.hermegrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(TestCase): + + def test_hermeint(self): + # check exceptions + assert_raises(ValueError, herme.hermeint, [0], .5) + assert_raises(ValueError, herme.hermeint, [0], -1) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herme.hermeint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i]) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herme.hermeval(-1, hermeint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1) + res = herme.hermeint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k]) + res = herme.hermeint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) + res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) + 
assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) + res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T + res = herme.hermeint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c) for c in c2d]) + res = herme.hermeint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) + res = herme.hermeint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(TestCase): + + def test_hermeder(self): + # check exceptions + assert_raises(ValueError, herme.hermeder, [0], .5) + assert_raises(ValueError, herme.hermeder, [0], -1) + + # check that zeroth deriviative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herme.hermeder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder( + herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T + res = herme.hermeder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeder(c) for c in c2d]) + res = herme.hermeder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(TestCase): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermevander(self): + # check for 1d x + x = np.arange(3) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + def test_hermevander2d(self): + # also tests hermeval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herme.hermevander2d(x1, x2, [1, 2]) + tgt = herme.hermeval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermevander3d(self): + # also tests hermeval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) + tgt = herme.hermeval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(TestCase): + + def test_hermefit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, herme.hermefit, [1], [1], -1) + assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) + 
assert_raises(TypeError, herme.hermefit, [], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) + assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herme.hermefit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + # + coef4 = herme.hermefit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # + coef2d = herme.hermefit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herme.hermefit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) + + +class TestCompanion(TestCase): + + def test_raises(self): + assert_raises(ValueError, herme.hermecompanion, []) + assert_raises(ValueError, herme.hermecompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(herme.hermecompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) + + +class TestGauss(TestCase): + + def test_100(self): + x, w = herme.hermegauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
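# Illustrative sketch (editorial addition, not part of the patch): the test
# below checks orthogonality under the HermiteE-Gauss rule. As background,
# herme.hermegauss(deg) integrates against the probabilists' weight
# exp(-x**2/2); e.g. the integral of x**2 * exp(-x**2/2) over the real line
# is sqrt(2*pi), and a 50-point rule reproduces it:
import numpy as np
import numpy.polynomial.hermite_e as herme

x, w = herme.hermegauss(50)
assert np.allclose(np.sum(w * x**2), np.sqrt(2*np.pi))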
+ v = herme.hermevander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(2*np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc(TestCase): + + def test_hermefromroots(self): + res = herme.hermefromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herme.hermefromroots(roots) + res = herme.hermeval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herme.herme2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermeroots(self): + assert_almost_equal(herme.hermeroots([1]), []) + assert_almost_equal(herme.hermeroots([1, 1]), [-1]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herme.hermeroots(herme.hermefromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermetrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herme.hermetrim, coef, -1) + + # Test results + assert_equal(herme.hermetrim(coef), coef[:-1]) + assert_equal(herme.hermetrim(coef, 1), coef[:-3]) + assert_equal(herme.hermetrim(coef, 2), [0]) + + def test_hermeline(self): + assert_equal(herme.hermeline(3, 4), [3, 4]) + + def test_herme2poly(self): + for i in range(10): + assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) + + def test_poly2herme(self): + for i in range(10): + assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-.5*x**2) + res = herme.hermeweight(x) + assert_almost_equal(res, tgt) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_laguerre.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_laguerre.py new file mode 100644 index 0000000000000..1dc57a9602945 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_laguerre.py @@ -0,0 +1,513 @@ +"""Tests for laguerre module. 
+ +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.polynomial.laguerre as lag +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + TestCase, assert_almost_equal, assert_raises, + assert_equal, assert_, run_module_suite) + +L0 = np.array([1])/1 +L1 = np.array([1, -1])/1 +L2 = np.array([2, -4, 1])/2 +L3 = np.array([6, -18, 9, -1])/6 +L4 = np.array([24, -96, 72, -16, 1])/24 +L5 = np.array([120, -600, 600, -200, 25, -1])/120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 + +Llist = [L0, L1, L2, L3, L4, L5, L6] + + +def trim(x): + return lag.lagtrim(x, tol=1e-6) + + +class TestConstants(TestCase): + + def test_lagdomain(self): + assert_equal(lag.lagdomain, [0, 1]) + + def test_lagzero(self): + assert_equal(lag.lagzero, [0]) + + def test_lagone(self): + assert_equal(lag.lagone, [1]) + + def test_lagx(self): + assert_equal(lag.lagx, [1, -1]) + + +class TestArithmetic(TestCase): + x = np.linspace(-3, 3, 100) + + def test_lagadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = lag.lagadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagsub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = lag.lagsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagmulx(self): + assert_equal(lag.lagmulx([0]), [0]) + assert_equal(lag.lagmulx([1]), [1, -1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] + assert_almost_equal(lag.lagmulx(ser), tgt) + + def test_lagmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = lag.lagval(self.x, pol1) + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + pol2 = [0]*j + [1] + val2 = lag.lagval(self.x, pol2) + pol3 = lag.lagmul(pol1, pol2) + val3 = lag.lagval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_lagdiv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = lag.lagadd(ci, cj) + quo, rem = lag.lagdiv(tgt, ci) + res = lag.lagadd(lag.lagmul(quo, ci), rem) + assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(TestCase): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([9., -14., 6.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_lagval(self): + #check empty input + assert_equal(lag.lagval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(7): + msg = "At i=%d" % i + tgt = y[i] + res = lag.lagval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(lag.lagval(x, [1]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) + + def test_lagval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = lag.lagval2d(x1, x2, self.c2d) + 
assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_lagval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = lag.lagval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_laggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = lag.laggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_laggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = lag.laggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(TestCase): + + def test_lagint(self): + # check exceptions + assert_raises(ValueError, lag.lagint, [0], .5) + assert_raises(ValueError, lag.lagint, [0], -1) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = lag.lagint([0], m=i, k=k) + assert_almost_equal(res, [1, -1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i]) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(lag.lagval(-1, lagint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1) + res = lag.lagint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k]) + res = lag.lagint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) + res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], scl=2) + res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagint_axis(self): + # check that axis keyword 
works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T + res = lag.lagint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c) for c in c2d]) + res = lag.lagint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) + res = lag.lagint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(TestCase): + + def test_lagder(self): + # check exceptions + assert_raises(ValueError, lag.lagder, [0], .5) + assert_raises(ValueError, lag.lagder, [0], -1) + + # check that zeroth deriviative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = lag.lagder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T + res = lag.lagder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagder(c) for c in c2d]) + res = lag.lagder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(TestCase): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_lagvander(self): + # check for 1d x + x = np.arange(3) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + def test_lagvander2d(self): + # also tests lagval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = lag.lagvander2d(x1, x2, [1, 2]) + tgt = lag.lagval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_lagvander3d(self): + # also tests lagval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) + tgt = lag.lagval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(TestCase): + + def test_lagfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = lag.lagfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + # + coef4 = lag.lagfit(x, y, 4) + 
assert_equal(len(coef4), 5)
+        assert_almost_equal(lag.lagval(x, coef4), y)
+        #
+        coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = lag.lagfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test fits at complex x values whose squares sum to zero
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(lag.lagfit(x, x, 1), [1, -1])
+
+
+class TestCompanion(TestCase):
+
+    def test_raises(self):
+        assert_raises(ValueError, lag.lagcompanion, [])
+        assert_raises(ValueError, lag.lagcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0]*i + [1]
+            assert_(lag.lagcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
+
+
+class TestGauss(TestCase):
+
+    def test_100(self):
+        x, w = lag.laggauss(100)
+
+        # test orthogonality. Note that the results need to be normalized,
+        # otherwise the huge values that can arise from fast growing
+        # functions like Laguerre can be very confusing.
+        v = lag.lagvander(x, 99)
+        vv = np.dot(v.T * w, v)
+        vd = 1/np.sqrt(vv.diagonal())
+        vv = vd[:, None] * vv * vd
+        assert_almost_equal(vv, np.eye(100))
+
+        # check that the integral of 1 is correct
+        tgt = 1.0
+        assert_almost_equal(w.sum(), tgt)
+
+
+class TestMisc(TestCase):
+
+    def test_lagfromroots(self):
+        res = lag.lagfromroots([])
+        assert_almost_equal(trim(res), [1])
+        for i in range(1, 5):
+            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+            pol = lag.lagfromroots(roots)
+            res = lag.lagval(roots, pol)
+            tgt = 0
+            assert_(len(pol) == i + 1)
+            assert_almost_equal(lag.lag2poly(pol)[-1], 1)
+            assert_almost_equal(res, tgt)
+
+    def test_lagroots(self):
+        assert_almost_equal(lag.lagroots([1]), [])
+        assert_almost_equal(lag.lagroots([0, 1]), [1])
+        for i in range(2, 5):
+            tgt = np.linspace(0, 3, i)
+            res = lag.lagroots(lag.lagfromroots(tgt))
+            assert_almost_equal(trim(res), trim(tgt))
+
+    def test_lagtrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, lag.lagtrim, coef, -1)
+
+        # Test results
+        assert_equal(lag.lagtrim(coef), coef[:-1])
+        assert_equal(lag.lagtrim(coef, 1), coef[:-3])
+        assert_equal(lag.lagtrim(coef, 2), [0])
+
+    def test_lagline(self):
+        assert_equal(lag.lagline(3, 4), [7, -4])
+
+    def test_lag2poly(self):
+        for i in range(7):
+            assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i])
+
+    def test_poly2lag(self):
+        for i in range(7):
+            assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1])
+
+    def test_weight(self):
+        x = np.linspace(0, 10, 11)
+        tgt = np.exp(-x)
+        res = lag.lagweight(x)
+        assert_almost_equal(res, tgt)
+
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_legendre.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_legendre.py
new file mode 100644
index 0000000000000..8ac1feb589d40
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_legendre.py
@@ -0,0 +1,517 @@
+"""Tests for legendre module.
+ +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.polynomial.legendre as leg +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + TestCase, assert_almost_equal, assert_raises, + assert_equal, assert_, run_module_suite) + +L0 = np.array([1]) +L1 = np.array([0, 1]) +L2 = np.array([-1, 0, 3])/2 +L3 = np.array([0, -3, 0, 5])/2 +L4 = np.array([3, 0, -30, 0, 35])/8 +L5 = np.array([0, 15, 0, -70, 0, 63])/8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 + +Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] + + +def trim(x): + return leg.legtrim(x, tol=1e-6) + + +class TestConstants(TestCase): + + def test_legdomain(self): + assert_equal(leg.legdomain, [-1, 1]) + + def test_legzero(self): + assert_equal(leg.legzero, [0]) + + def test_legone(self): + assert_equal(leg.legone, [1]) + + def test_legx(self): + assert_equal(leg.legx, [0, 1]) + + +class TestArithmetic(TestCase): + x = np.linspace(-1, 1, 100) + + def test_legadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = leg.legadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legsub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = leg.legsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legmulx(self): + assert_equal(leg.legmulx([0]), [0]) + assert_equal(leg.legmulx([1]), [0, 1]) + for i in range(1, 5): + tmp = 2*i + 1 + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] + assert_equal(leg.legmulx(ser), tgt) + + def test_legmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = leg.legval(self.x, pol1) + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + pol2 = [0]*j + [1] + val2 = leg.legval(self.x, pol2) + pol3 = leg.legmul(pol1, pol2) + val3 = leg.legval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_legdiv(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = leg.legadd(ci, cj) + quo, rem = leg.legdiv(tgt, ci) + res = leg.legadd(leg.legmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation(TestCase): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2., 2., 2.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_legval(self): + #check empty input + assert_equal(leg.legval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(10): + msg = "At i=%d" % i + tgt = y[i] + res = leg.legval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(leg.legval(x, [1]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) + + def test_legval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = 
self.y + + #test exceptions + assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = leg.legval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_legval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = leg.legval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_leggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = leg.leggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_leggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = leg.leggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(TestCase): + + def test_legint(self): + # check exceptions + assert_raises(ValueError, leg.legint, [0], .5) + assert_raises(ValueError, leg.legint, [0], -1) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = leg.legint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i]) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(leg.legval(-1, legint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], scl=2) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1) + res = leg.legint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k]) + res = leg.legint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) + res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], 
scl=2) + res = leg.legint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legint(c) for c in c2d.T]).T + res = leg.legint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c) for c in c2d]) + res = leg.legint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) + res = leg.legint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(TestCase): + + def test_legder(self): + # check exceptions + assert_raises(ValueError, leg.legder, [0], .5) + assert_raises(ValueError, leg.legder, [0], -1) + + # check that zeroth deriviative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = leg.legder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legder(c) for c in c2d.T]).T + res = leg.legder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legder(c) for c in c2d]) + res = leg.legder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(TestCase): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_legvander(self): + # check for 1d x + x = np.arange(3) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + def test_legvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = leg.legvander2d(x1, x2, [1, 2]) + tgt = leg.legval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_legvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) + tgt = leg.legval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting(TestCase): + + def test_legfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, leg.legfit, [1], [1], -1) + assert_raises(TypeError, leg.legfit, [[1]], [1], 0) + assert_raises(TypeError, leg.legfit, [], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) + assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) + + # Test fit + x = 
np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = leg.legfit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(leg.legval(x, coef3), y)
+        #
+        coef4 = leg.legfit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(leg.legval(x, coef4), y)
+        #
+        coef2d = leg.legfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = leg.legfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test fits at complex x values whose squares sum to zero
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
+
+
+class TestCompanion(TestCase):
+
+    def test_raises(self):
+        assert_raises(ValueError, leg.legcompanion, [])
+        assert_raises(ValueError, leg.legcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0]*i + [1]
+            assert_(leg.legcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
+
+
+class TestGauss(TestCase):
+
+    def test_100(self):
+        x, w = leg.leggauss(100)
+
+        # test orthogonality. Note that the results need to be normalized,
+        # otherwise the huge values that can arise from fast growing
+        # functions like Laguerre can be very confusing.
+        v = leg.legvander(x, 99)
+        vv = np.dot(v.T * w, v)
+        vd = 1/np.sqrt(vv.diagonal())
+        vv = vd[:, None] * vv * vd
+        assert_almost_equal(vv, np.eye(100))
+
+        # check that the integral of 1 is correct
+        tgt = 2.0
+        assert_almost_equal(w.sum(), tgt)
+
+
+class TestMisc(TestCase):
+
+    def test_legfromroots(self):
+        res = leg.legfromroots([])
+        assert_almost_equal(trim(res), [1])
+        for i in range(1, 5):
+            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+            pol = leg.legfromroots(roots)
+            res = leg.legval(roots, pol)
+            tgt = 0
+            assert_(len(pol) == i + 1)
+            assert_almost_equal(leg.leg2poly(pol)[-1], 1)
+            assert_almost_equal(res, tgt)
+
+    def test_legroots(self):
+        assert_almost_equal(leg.legroots([1]), [])
+        assert_almost_equal(leg.legroots([1, 2]), [-.5])
+        for i in range(2, 5):
+            tgt = np.linspace(-1, 1, i)
+            res = leg.legroots(leg.legfromroots(tgt))
+            assert_almost_equal(trim(res), trim(tgt))
+
+    def test_legtrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, leg.legtrim, coef, -1)
+
+        # Test results
+        assert_equal(leg.legtrim(coef), coef[:-1])
+        assert_equal(leg.legtrim(coef, 1), coef[:-3])
+        assert_equal(leg.legtrim(coef, 2), [0])
+
+    def test_legline(self):
+        assert_equal(leg.legline(3, 4), [3, 4])
+
+    def test_leg2poly(self):
+        for i in range(10):
+            assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i])
+
+    def test_poly2leg(self):
+        for i in range(10):
+            assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1])
+
+    def test_weight(self):
+        x = np.linspace(-1, 1, 11)
+        tgt = 1.
+        res = leg.legweight(x)
+        assert_almost_equal(res, tgt)
+
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polynomial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polynomial.py
new file mode 100644
index 0000000000000..c806a8497492f
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polynomial.py
@@ -0,0 +1,477 @@
+"""Tests for polynomial module.
+ +""" +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.polynomial.polynomial as poly +from numpy.testing import ( + TestCase, assert_almost_equal, assert_raises, + assert_equal, assert_, run_module_suite) + + +def trim(x): + return poly.polytrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestConstants(TestCase): + + def test_polydomain(self): + assert_equal(poly.polydomain, [-1, 1]) + + def test_polyzero(self): + assert_equal(poly.polyzero, [0]) + + def test_polyone(self): + assert_equal(poly.polyone, [1]) + + def test_polyx(self): + assert_equal(poly.polyx, [0, 1]) + + +class TestArithmetic(TestCase): + + def test_polyadd(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = poly.polyadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polysub(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = poly.polysub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polymulx(self): + assert_equal(poly.polymulx([0]), [0]) + assert_equal(poly.polymulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i + 1) + [1] + assert_equal(poly.polymulx(ser), tgt) + + def test_polymul(self): + for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(i + j + 1) + tgt[i + j] += 1 + res = poly.polymul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polydiv(self): + # check zero division + assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) + + # check scalar division + quo, rem = poly.polydiv([2], [2]) + assert_equal((quo, rem), (1, 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) + + # check rest. 
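+        # (that is, verify that quo*ci + rem reconstructs the dividend tgt
+        # over a range of degrees -- the defining property of division)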
+ for i in range(5): + for j in range(5): + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1, 2] + cj = [0]*j + [1, 2] + tgt = poly.polyadd(ci, cj) + quo, rem = poly.polydiv(tgt, ci) + res = poly.polyadd(poly.polymul(quo, ci), rem) + assert_equal(res, tgt, err_msg=msg) + + +class TestEvaluation(TestCase): + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([1., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = poly.polyval(x, [1., 2., 3.]) + + def test_polyval(self): + #check empty input + assert_equal(poly.polyval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(5): + tgt = y[i] + res = poly.polyval(x, [0]*i + [1]) + assert_almost_equal(res, tgt) + tgt = x*(x**2 - 1) + res = poly.polyval(x, [0, -1, 0, 1]) + assert_almost_equal(res, tgt) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyval(x, [1]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) + + def test_polyval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = poly.polyval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_polyval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = poly.polyval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_polygrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = poly.polygrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_polygrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = poly.polygrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral(TestCase): + + def test_polyint(self): + # check exceptions + assert_raises(ValueError, poly.polyint, [0], .5) + assert_raises(ValueError, poly.polyint, [0], -1) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = poly.polyint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + res = poly.polyint(pol, m=1, k=[i]) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + res = poly.polyint(pol, m=1, k=[i], lbnd=-1) + assert_almost_equal(poly.polyval(-1, res), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i 
+ [1] + tgt = [i] + [0]*i + [2/scl] + res = poly.polyint(pol, m=1, k=[i], scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1) + res = poly.polyint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k]) + res = poly.polyint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) + res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], scl=2) + res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T + res = poly.polyint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c) for c in c2d]) + res = poly.polyint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) + res = poly.polyint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative(TestCase): + + def test_polyder(self): + # check exceptions + assert_raises(ValueError, poly.polyder, [0], .5) + assert_raises(ValueError, poly.polyder, [0], -1) + + # check that zeroth deriviative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = poly.polyder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T + res = poly.polyder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyder(c) for c in c2d]) + res = poly.polyder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander(TestCase): + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_polyvander(self): + # check for 1d x + x = np.arange(3) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + def test_polyvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = poly.polyvander2d(x1, 
x2, [1, 2]) + tgt = poly.polyval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_polyvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) + tgt = poly.polyval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestCompanion(TestCase): + + def test_raises(self): + assert_raises(ValueError, poly.polycompanion, []) + assert_raises(ValueError, poly.polycompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(poly.polycompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(poly.polycompanion([1, 2])[0, 0] == -.5) + + +class TestMisc(TestCase): + + def test_polyfromroots(self): + res = poly.polyfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = Tlist[i] + res = poly.polyfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyroots(self): + assert_almost_equal(poly.polyroots([1]), []) + assert_almost_equal(poly.polyroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, poly.polyfit, [1], [1], -1) + assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) + assert_raises(TypeError, poly.polyfit, [], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) + assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = poly.polyfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + # + coef4 = poly.polyfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + # + coef2d = poly.polyfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + yw[0::2] = 0 + wcoef3 = poly.polyfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. 
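+        # (1, 1j, -1, -1j are the 4th roots of unity, so their squares are
+        # 1, -1, 1, -1 and sum to zero; this exercises the degenerate
+        # column-scaling path of the least-squares fit)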
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
+
+    def test_polytrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, poly.polytrim, coef, -1)
+
+        # Test results
+        assert_equal(poly.polytrim(coef), coef[:-1])
+        assert_equal(poly.polytrim(coef, 1), coef[:-3])
+        assert_equal(poly.polytrim(coef, 2), [0])
+
+    def test_polyline(self):
+        assert_equal(poly.polyline(3, 4), [3, 4])
+
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polyutils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polyutils.py
new file mode 100644
index 0000000000000..974e2e09a3886
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polyutils.py
@@ -0,0 +1,109 @@
+"""Tests for polyutils module.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+import numpy.polynomial.polyutils as pu
+from numpy.testing import (
+    TestCase, assert_almost_equal, assert_raises,
+    assert_equal, assert_, run_module_suite)
+
+
+class TestMisc(TestCase):
+
+    def test_trimseq(self):
+        for i in range(5):
+            tgt = [1]
+            res = pu.trimseq([1] + [0]*i)
+            assert_equal(res, tgt)
+
+    def test_as_series(self):
+        # check exceptions
+        assert_raises(ValueError, pu.as_series, [[]])
+        assert_raises(ValueError, pu.as_series, [[[1, 2]]])
+        assert_raises(ValueError, pu.as_series, [[1], ['a']])
+        # check common types
+        types = ['i', 'd', 'O']
+        for i in range(len(types)):
+            for j in range(i):
+                ci = np.ones(1, types[i])
+                cj = np.ones(1, types[j])
+                [resi, resj] = pu.as_series([ci, cj])
+                assert_(resi.dtype.char == resj.dtype.char)
+                assert_(resj.dtype.char == types[i])
+
+    def test_trimcoef(self):
+        coef = [2, -1, 1, 0]
+        # Test exceptions
+        assert_raises(ValueError, pu.trimcoef, coef, -1)
+        # Test results
+        assert_equal(pu.trimcoef(coef), coef[:-1])
+        assert_equal(pu.trimcoef(coef, 1), coef[:-3])
+        assert_equal(pu.trimcoef(coef, 2), [0])
+
+
+class TestDomain(TestCase):
+
+    def test_getdomain(self):
+        # test for real values
+        x = [1, 10, 3, -1]
+        tgt = [-1, 10]
+        res = pu.getdomain(x)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        x = [1 + 1j, 1 - 1j, 0, 2]
+        tgt = [-1j, 2 + 1j]
+        res = pu.getdomain(x)
+        assert_almost_equal(res, tgt)
+
+    def test_mapdomain(self):
+        # test for real values
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = dom2
+        res = pu.mapdomain(dom1, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        dom1 = [0 - 1j, 2 + 1j]
+        dom2 = [-2, 2]
+        tgt = dom2
+        x = dom1
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for multidimensional arrays
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = np.array([dom2, dom2])
+        x = np.array([dom1, dom1])
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test that subtypes are preserved.
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        x = np.matrix([dom1, dom1])
+        res = pu.mapdomain(x, dom1, dom2)
+        assert_(isinstance(res, np.matrix))
+
+    def test_mapparms(self):
+        # test for real values
+        dom1 = [0, 4]
+        dom2 = [1, 3]
+        tgt = [1, .5]
+        res = pu.mapparms(dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+        # test for complex values
+        dom1 = [0 - 1j, 2 + 1j]
+        dom2 = [-2, 2]
+        tgt = [-1 + 1j, 1 - 1j]
+        res = pu.mapparms(dom1, dom2)
+        assert_almost_equal(res, tgt)
+
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_printing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_printing.py
new file mode 100644
index 0000000000000..86cd257328bb4
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_printing.py
@@ -0,0 +1,74 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy.polynomial as poly
+from numpy.testing import TestCase, run_module_suite, assert_equal
+
+
+class test_str(TestCase):
+    def test_polynomial_str(self):
+        res = str(poly.Polynomial([0, 1]))
+        tgt = 'poly([0., 1.])'
+        assert_equal(res, tgt)
+
+    def test_chebyshev_str(self):
+        res = str(poly.Chebyshev([0, 1]))
+        tgt = 'cheb([0., 1.])'
+        assert_equal(res, tgt)
+
+    def test_legendre_str(self):
+        res = str(poly.Legendre([0, 1]))
+        tgt = 'leg([0., 1.])'
+        assert_equal(res, tgt)
+
+    def test_hermite_str(self):
+        res = str(poly.Hermite([0, 1]))
+        tgt = 'herm([0., 1.])'
+        assert_equal(res, tgt)
+
+    def test_hermiteE_str(self):
+        res = str(poly.HermiteE([0, 1]))
+        tgt = 'herme([0., 1.])'
+        assert_equal(res, tgt)
+
+    def test_laguerre_str(self):
+        res = str(poly.Laguerre([0, 1]))
+        tgt = 'lag([0., 1.])'
+        assert_equal(res, tgt)
+
+
+class test_repr(TestCase):
+    def test_polynomial_repr(self):
+        res = repr(poly.Polynomial([0, 1]))
+        tgt = 'Polynomial([0., 1.])'
+        assert_equal(res, tgt)
+
+    def test_chebyshev_repr(self):
+        res = repr(poly.Chebyshev([0, 1]))
+        tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
+        assert_equal(res, tgt)
+
+    def test_legendre_repr(self):
+        res = repr(poly.Legendre([0, 1]))
+        tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
+        assert_equal(res, tgt)
+
+    def test_hermite_repr(self):
+        res = repr(poly.Hermite([0, 1]))
+        tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
+        assert_equal(res, tgt)
+
+    def test_hermiteE_repr(self):
+        res = repr(poly.HermiteE([0, 1]))
+        tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
+        assert_equal(res, tgt)
+
+    def test_laguerre_repr(self):
+        res = repr(poly.Laguerre([0, 1]))
+        tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
+        assert_equal(res, tgt)
+
+
+#
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/__init__.py
new file mode 100644
index 0000000000000..388267c97532c
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/__init__.py
@@ -0,0 +1,122 @@
+"""
+========================
+Random Number Generation
+========================
+
+==================== =========================================================
+Utility functions
+==============================================================================
+random               Uniformly distributed values of a given shape.
+bytes                Uniformly distributed random bytes.
+random_integers      Uniformly distributed integers in a given range.
+random_sample        Uniformly distributed floats in a given range.
+random               Alias for random_sample
+ranf                 Alias for random_sample
+sample               Alias for random_sample
+choice               Generate a weighted random sample from a given array-like
+permutation          Randomly permute a sequence / generate a random sequence.
+shuffle              Randomly permute a sequence in place.
+seed                 Seed the random number generator.
+==================== ========================================================= + +==================== ========================================================= +Compatibility functions +============================================================================== +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. +randint Uniformly distributed integers in a given range. +==================== ========================================================= + +==================== ========================================================= +Univariate distributions +============================================================================== +beta Beta distribution over ``[0, 1]``. +binomial Binomial distribution. +chisquare :math:`\\chi^2` distribution. +exponential Exponential distribution. +f F (Fisher-Snedecor) distribution. +gamma Gamma distribution. +geometric Geometric distribution. +gumbel Gumbel distribution. +hypergeometric Hypergeometric distribution. +laplace Laplace distribution. +logistic Logistic distribution. +lognormal Log-normal distribution. +logseries Logarithmic series distribution. +negative_binomial Negative binomial distribution. +noncentral_chisquare Non-central chi-square distribution. +noncentral_f Non-central F distribution. +normal Normal / Gaussian distribution. +pareto Pareto distribution. +poisson Poisson distribution. +power Power distribution. +rayleigh Rayleigh distribution. +triangular Triangular distribution. +uniform Uniform distribution. +vonmises Von Mises circular distribution. +wald Wald (inverse Gaussian) distribution. +weibull Weibull distribution. +zipf Zipf's distribution over ranked data. +==================== ========================================================= + +==================== ========================================================= +Multivariate distributions +============================================================================== +dirichlet Multivariate generalization of Beta distribution. +multinomial Multivariate generalization of the binomial distribution. +multivariate_normal Multivariate generalization of the normal distribution. +==================== ========================================================= + +==================== ========================================================= +Standard distributions +============================================================================== +standard_cauchy Standard Cauchy-Lorentz distribution. +standard_exponential Standard exponential distribution. +standard_gamma Standard Gamma distribution. +standard_normal Standard normal distribution. +standard_t Standard Student's t-distribution. +==================== ========================================================= + +==================== ========================================================= +Internal functions +============================================================================== +get_state Get tuple representing internal state of generator. +set_state Set state of generator. 
+==================== ========================================================= + +""" +from __future__ import division, absolute_import, print_function + +import warnings + +# To get sub-modules +from .info import __doc__, __all__ + + +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + from .mtrand import * + +# Some aliases: +ranf = random = sample = random_sample +__all__.extend(['ranf', 'random', 'sample']) + +def __RandomState_ctor(): + """Return a RandomState instance. + + This function exists solely to assist (un)pickling. + + Note that the state of the RandomState returned here is irrelevant, as this function's + entire purpose is to return a newly allocated RandomState whose state pickle can set. + Consequently the RandomState returned by this function is a freshly allocated copy + with a seed=0. + + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + +from numpy.testing import Tester +test = Tester().test +bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/info.py new file mode 100644 index 0000000000000..396e623815a83 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/info.py @@ -0,0 +1,135 @@ +""" +======================== +Random Number Generation +======================== + +==================== ========================================================= +Utility functions +============================================================================== +random_sample Uniformly distributed floats over ``[0, 1)``. +random Alias for `random_sample`. +bytes Uniformly distributed random bytes. +random_integers Uniformly distributed integers in a given range. +permutation Randomly permute a sequence / generate a random sequence. +shuffle Randomly permute a sequence in place. +seed Seed the random number generator. +==================== ========================================================= + +==================== ========================================================= +Compatibility functions +============================================================================== +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. +randint Uniformly distributed integers in a given range. +==================== ========================================================= + +==================== ========================================================= +Univariate distributions +============================================================================== +beta Beta distribution over ``[0, 1]``. +binomial Binomial distribution. +chisquare :math:`\\chi^2` distribution. +exponential Exponential distribution. +f F (Fisher-Snedecor) distribution. +gamma Gamma distribution. +geometric Geometric distribution. +gumbel Gumbel distribution. +hypergeometric Hypergeometric distribution. +laplace Laplace distribution. +logistic Logistic distribution. +lognormal Log-normal distribution. +logseries Logarithmic series distribution. +negative_binomial Negative binomial distribution. +noncentral_chisquare Non-central chi-square distribution. +noncentral_f Non-central F distribution. +normal Normal / Gaussian distribution. +pareto Pareto distribution. +poisson Poisson distribution. +power Power distribution. +rayleigh Rayleigh distribution. +triangular Triangular distribution. +uniform Uniform distribution. 
+vonmises Von Mises circular distribution. +wald Wald (inverse Gaussian) distribution. +weibull Weibull distribution. +zipf Zipf's distribution over ranked data. +==================== ========================================================= + +==================== ========================================================= +Multivariate distributions +============================================================================== +dirichlet Multivariate generalization of Beta distribution. +multinomial Multivariate generalization of the binomial distribution. +multivariate_normal Multivariate generalization of the normal distribution. +==================== ========================================================= + +==================== ========================================================= +Standard distributions +============================================================================== +standard_cauchy Standard Cauchy-Lorentz distribution. +standard_exponential Standard exponential distribution. +standard_gamma Standard Gamma distribution. +standard_normal Standard normal distribution. +standard_t Standard Student's t-distribution. +==================== ========================================================= + +==================== ========================================================= +Internal functions +============================================================================== +get_state Get tuple representing internal state of generator. +set_state Set state of generator. +==================== ========================================================= + +""" +from __future__ import division, absolute_import, print_function + +depends = ['core'] + +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random_integers', + 'random_sample', + 'rayleigh', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf' +] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/mtrand.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/mtrand.py new file mode 100644 index 0000000000000..28939761af345 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/mtrand.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__, 'mtrand.cpython-34m.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/randomkit.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/randomkit.h new file mode 100644 index 0000000000000..e049488eeb14a --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/randomkit.h @@ -0,0 +1,189 @@ +/* Random kit 1.3 */ + +/* + * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, 
including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
+
+/* @(#) $Jeannot: randomkit.h,v 1.24 2005/07/21 22:14:09 js Exp $ */
+
+/*
+ * Typical use:
+ *
+ * {
+ *  rk_state state;
+ *  unsigned long seed = 1, random_value;
+ *
+ *  rk_seed(seed, &state); // Initialize the RNG
+ *  ...
+ *  random_value = rk_random(&state); // Generate random values in [0..RK_MAX]
+ * }
+ *
+ * Instead of rk_seed, you can use rk_randomseed which will get a random seed
+ * from /dev/urandom (or the clock, if /dev/urandom is unavailable):
+ *
+ * {
+ *  rk_state state;
+ *  unsigned long random_value;
+ *
+ *  rk_randomseed(&state); // Initialize the RNG with a random seed
+ *  ...
+ *  random_value = rk_random(&state); // Generate random values in [0..RK_MAX]
+ * }
+ */
+
+/*
+ * Useful macro:
+ * RK_DEV_RANDOM: the device used for random seeding.
+ *                defaults to "/dev/urandom"
+ */
+
+#include <stddef.h>
+
+#ifndef _RANDOMKIT_
+#define _RANDOMKIT_
+
+#define RK_STATE_LEN 624
+
+typedef struct rk_state_
+{
+    unsigned long key[RK_STATE_LEN];
+    int pos;
+    int has_gauss; /* !=0: gauss contains a gaussian deviate */
+    double gauss;
+
+    /* The rk_state structure has been extended to store the following
+     * information for the binomial generator. If the input values of n or p
+     * are different than nsave and psave, then the other parameters will be
+     * recomputed. RTK 2005-09-02 */
+
+    int has_binomial; /* !=0: following parameters initialized for
+                              binomial */
+    double psave;
+    long nsave;
+    double r;
+    double q;
+    double fm;
+    long m;
+    double p1;
+    double xm;
+    double xl;
+    double xr;
+    double c;
+    double laml;
+    double lamr;
+    double p2;
+    double p3;
+    double p4;
+
+}
+rk_state;
+
+typedef enum {
+    RK_NOERR = 0, /* no error */
+    RK_ENODEV = 1, /* no RK_DEV_RANDOM device */
+    RK_ERR_MAX = 2
+} rk_error;
+
+/* error strings */
+extern char *rk_strerror[RK_ERR_MAX];
+
+/* Maximum generated random value */
+#define RK_MAX 0xFFFFFFFFUL
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Initialize the RNG state using the given seed.
+ */
+extern void rk_seed(unsigned long seed, rk_state *state);
+
+/*
+ * Initialize the RNG state using a random seed.
+ * Uses /dev/random or, when unavailable, the clock (see randomkit.c).
+ * Returns RK_NOERR when no errors occur.
+ * Returns RK_ENODEV when the use of RK_DEV_RANDOM failed (for example because
+ * there is no such device). In this case, the RNG was initialized using the
+ * clock.
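+ * (On Windows no such device exists; randomkit.c is expected to fall back
+ * on the Win32 crypto API instead, which is why setup.py, later in this
+ * patch, links Advapi32 when _WIN32 is defined.)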
+ */
+extern rk_error rk_randomseed(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and RK_MAX inclusive
+ */
+extern unsigned long rk_random(rk_state *state);
+
+/*
+ * Returns a random long between 0 and LONG_MAX inclusive
+ */
+extern long rk_long(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and ULONG_MAX inclusive
+ */
+extern unsigned long rk_ulong(rk_state *state);
+
+/*
+ * Returns a random unsigned long between 0 and max inclusive.
+ */
+extern unsigned long rk_interval(unsigned long max, rk_state *state);
+
+/*
+ * Returns a random double between 0.0 and 1.0, 1.0 excluded.
+ */
+extern double rk_double(rk_state *state);
+
+/*
+ * fill the buffer with size random bytes
+ */
+extern void rk_fill(void *buffer, size_t size, rk_state *state);
+
+/*
+ * fill the buffer with random bytes from the random device
+ * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is
+ * On Unix, if strong is defined, RK_DEV_RANDOM is used. If not, RK_DEV_URANDOM
+ * is used instead. This parameter has no effect on Windows.
+ * Warning: on most unixes RK_DEV_RANDOM will wait for enough entropy to answer
+ * which can take a very long time on quiet systems.
+ */
+extern rk_error rk_devfill(void *buffer, size_t size, int strong);
+
+/*
+ * fill the buffer using rk_devfill if the random device is available and using
+ * rk_fill if it is not
+ * parameters have the same meaning as rk_fill and rk_devfill
+ * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is
+ */
+extern rk_error rk_altfill(void *buffer, size_t size, int strong,
+                           rk_state *state);
+
+/*
+ * return a random gaussian deviate with variance unity and zero mean.
+ */
+extern double rk_gauss(rk_state *state);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RANDOMKIT_ */
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/setup.py
new file mode 100644
index 0000000000000..33c12975b662b
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/setup.py
@@ -0,0 +1,74 @@
+from __future__ import division, print_function
+
+from os.path import join, split, dirname
+import os
+import sys
+from distutils.dep_util import newer
+from distutils.msvccompiler import get_build_version as get_msvc_build_version
+
+def needs_mingw_ftime_workaround():
+    # We need the mingw workaround for _ftime if the msvc runtime version is
+    # 7.1 or above and we build with mingw ...
+    # ... but we can't easily detect compiler version outside distutils command
+    # context, so we will need to detect in randomkit whether we build with gcc
+    msver = get_msvc_build_version()
+    if msver and msver >= 8:
+        return True
+
+    return False
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration, get_mathlibs
+    config = Configuration('random', parent_package, top_path)
+
+    def generate_libraries(ext, build_dir):
+        config_cmd = config.get_config_cmd()
+        libs = get_mathlibs()
+        tc = testcode_wincrypt()
+        if config_cmd.try_run(tc):
+            libs.append('Advapi32')
+        ext.libraries.extend(libs)
+        return None
+
+    # enable unix large file support on 32 bit systems
+    # (64 bit off_t, lseek -> lseek64 etc.)
+    defs = [('_FILE_OFFSET_BITS', '64'),
+            ('_LARGEFILE_SOURCE', '1'),
+            ('_LARGEFILE64_SOURCE', '1')]
+    if needs_mingw_ftime_workaround():
+        defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
+
+    libs = []
+    # Configure mtrand
+    config.add_extension('mtrand',
+                         sources=[join('mtrand', x) for x in
+                                  ['mtrand.c', 'randomkit.c', 'initarray.c',
+                                   'distributions.c']]+[generate_libraries],
+                         libraries=libs,
+                         depends=[join('mtrand', '*.h'),
+                                  join('mtrand', '*.pyx'),
+                                  join('mtrand', '*.pxi'),],
+                         define_macros=defs,
+                         )
+
+    config.add_data_files(('.', join('mtrand', 'randomkit.h')))
+    config.add_data_dir('tests')
+
+    return config
+
+def testcode_wincrypt():
+    return """\
+/* check to see if _WIN32 is defined */
+int main(int argc, char *argv[])
+{
+#ifdef _WIN32
+    return 0;
+#else
+    return 1;
+#endif
+}
+"""
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_random.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_random.py
new file mode 100644
index 0000000000000..1bf25a92613c5
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_random.py
@@ -0,0 +1,707 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+from numpy.testing import (
+        TestCase, run_module_suite, assert_, assert_raises, assert_equal,
+        assert_warns)
+from numpy import random
+from numpy.compat import asbytes
+import sys
+
+class TestSeed(TestCase):
+    def test_scalar(self):
+        s = np.random.RandomState(0)
+        assert_equal(s.randint(1000), 684)
+        s = np.random.RandomState(4294967295)
+        assert_equal(s.randint(1000), 419)
+
+    def test_array(self):
+        s = np.random.RandomState(range(10))
+        assert_equal(s.randint(1000), 468)
+        s = np.random.RandomState(np.arange(10))
+        assert_equal(s.randint(1000), 468)
+        s = np.random.RandomState([0])
+        assert_equal(s.randint(1000), 973)
+        s = np.random.RandomState([4294967295])
+        assert_equal(s.randint(1000), 265)
+
+    def test_invalid_scalar(self):
+        # seed must be an unsigned 32 bit integer
+        assert_raises(TypeError, np.random.RandomState, -0.5)
+        assert_raises(ValueError, np.random.RandomState, -1)
+
+    def test_invalid_array(self):
+        # seeds must be unsigned 32 bit integers
+        assert_raises(TypeError, np.random.RandomState, [-0.5])
+        assert_raises(ValueError, np.random.RandomState, [-1])
+        assert_raises(ValueError, np.random.RandomState, [4294967296])
+        assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
+        assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
+
+class TestBinomial(TestCase):
+    def test_n_zero(self):
+        # Tests the corner case of n == 0 for the binomial distribution.
+        # binomial(0, p) should be zero for any p in [0, 1].
+        # This test addresses issue #3480.
+        zeros = np.zeros(2, dtype='int')
+        for p in [0, .5, 1]:
+            assert_(random.binomial(0, p) == 0)
+            np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
+
+    def test_p_is_nan(self):
+        # Issue #4571.
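+        # nan compares false against both bounds of the [0, 1] range check,
+        # so an explicit ValueError is expected rather than silent acceptance.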
+ assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial(TestCase): + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, np.random.multinomial, 1, p, + np.float(1)) + + +class TestSetState(TestCase): + def setUp(self): + self.seed = 1234567890 + self.prng = random.RandomState(self.seed) + self.state = self.prng.get_state() + + def test_basic(self): + old = self.prng.tomaxint(16) + self.prng.set_state(self.state) + new = self.prng.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.prng.standard_normal(size=3) + self.prng.set_state(self.state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.prng.standard_normal() + state = self.prng.get_state() + old = self.prng.standard_normal(size=3) + self.prng.set_state(state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.prng.standard_normal(size=16) + self.prng.set_state(old_state) + x2 = self.prng.standard_normal(size=16) + self.prng.set_state(self.state) + x3 = self.prng.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. 
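+        # n=0.5 is deliberately non-integral: negative_binomial accepts
+        # real-valued n, so the call must succeed without truncating n to int.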
+ self.prng.negative_binomial(0.5, 0.5) + +class TestRandomDist(TestCase): + # Make sure the random distrobution return the correct value for a + # given seed + + def setUp(self): + self.seed = 1234567890 + + def test_rand(self): + np.random.seed(self.seed) + actual = np.random.rand(3, 2) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn(self): + np.random.seed(self.seed) + actual = np.random.randn(3, 2) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_randint(self): + np.random.seed(self.seed) + actual = np.random.randint(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + np.testing.assert_array_equal(actual, desired) + + def test_random_integers(self): + np.random.seed(self.seed) + actual = np.random.random_integers(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + np.testing.assert_array_equal(actual, desired) + + def test_random_sample(self): + np.random.seed(self.seed) + actual = np.random.random_sample((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_choice_uniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + np.testing.assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + np.testing.assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + np.testing.assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False, + p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + np.testing.assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + np.random.seed(self.seed) + actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + np.testing.assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = np.random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False, + p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(np.random.choice(2, replace=True))) + assert_(np.isscalar(np.random.choice(2, replace=False))) + 
assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) + assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) + assert_(np.isscalar(np.random.choice([1, 2], replace=True))) + assert_(np.random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, replace=True) is a) + + # Check 0-d array + s = tuple() + assert_(not np.isscalar(np.random.choice(2, s, replace=True))) + assert_(not np.isscalar(np.random.choice(2, s, replace=False))) + assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True))) + assert_(np.random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_(np.random.choice(6, s, replace=True).shape, s) + assert_(np.random.choice(6, s, replace=False).shape, s) + assert_(np.random.choice(6, s, replace=True, p=p).shape, s) + assert_(np.random.choice(6, s, replace=False, p=p).shape, s) + assert_(np.random.choice(np.arange(6), s, replace=True).shape, s) + + def test_bytes(self): + np.random.seed(self.seed) + actual = np.random.bytes(10) + desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5') + np.testing.assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays, and multidimensional versions of both: + for conv in [lambda x: x, + np.asarray, + lambda x: [(i, i) for i in x], + lambda x: np.asarray([(i, i) for i in x])]: + np.random.seed(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + np.random.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + np.testing.assert_array_equal(actual, desired) + + def test_shuffle_flexible(self): + # gh-4270 + arr = [(0, 1), (2, 3)] + dt = np.dtype([('a', np.int32, 1), ('b', np.int32, 1)]) + nparr = np.array(arr, dtype=dt) + a, b = nparr[0].copy(), nparr[1].copy() + for i in range(50): + np.random.shuffle(nparr) + assert_(a in nparr) + assert_(b in nparr) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + ma = np.ma.count_masked(a) + mb = np.ma.count_masked(b) + for i in range(50): + np.random.shuffle(a) + self.assertEqual(ma, np.ma.count_masked(a)) + np.random.shuffle(b) + self.assertEqual(mb, np.ma.count_masked(b)) + + def test_beta(self): + np.random.seed(self.seed) + actual = np.random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + np.random.seed(self.seed) + actual = np.random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + np.testing.assert_array_equal(actual, desired) + + def test_chisquare(self): + np.random.seed(self.seed) + actual = np.random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=13) + + def 
test_dirichlet(self): + np.random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, np.random.dirichlet, p, np.float(1)) + + def test_exponential(self): + np.random.seed(self.seed) + actual = np.random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_f(self): + np.random.seed(self.seed) + actual = np.random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + np.random.seed(self.seed) + actual = np.random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=14) + + def test_geometric(self): + np.random.seed(self.seed) + actual = np.random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + np.testing.assert_array_equal(actual, desired) + + def test_gumbel(self): + np.random.seed(self.seed) + actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_hypergeometric(self): + np.random.seed(self.seed) + actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + np.testing.assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = np.random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + np.testing.assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + np.testing.assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = np.random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + np.testing.assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + np.testing.assert_array_equal(actual, desired) + + def test_laplace(self): + 
np.random.seed(self.seed) + actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_logistic(self): + np.random.seed(self.seed) + actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + np.random.seed(self.seed) + actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=13) + + def test_logseries(self): + np.random.seed(self.seed) + actual = np.random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + np.testing.assert_array_equal(actual, desired) + + def test_multinomial(self): + np.random.seed(self.seed) + actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + np.testing.assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + np.random.seed(self.seed) + mean = (.123456789, 10) + # Hmm... not even symmetric. + cov = [[1, 0], [1, 0]] + size = (3, 2) + actual = np.random.multivariate_normal(mean, cov, size) + desired = np.array([[[-1.47027513018564449, 10.], + [-1.65915081534845532, 10.]], + [[-2.29186329304599745, 10.], + [-1.77505606019580053, 10.]], + [[-0.54970369430044119, 10.], + [0.29768848031692957, 10.]]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = np.random.multivariate_normal(mean, cov) + desired = np.array([-0.79441224511977482, 10.]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance raises warning + mean = [0, 0] + cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]] + assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + + def test_negative_binomial(self): + np.random.seed(self.seed) + actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + np.testing.assert_array_equal(actual, desired) + + def test_noncentral_chisquare(self): + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + np.random.seed(self.seed) + actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + np.random.seed(self.seed) + 
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_pareto(self): + np.random.seed(self.seed) + actual = np.random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + np.random.seed(self.seed) + actual = np.random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + np.testing.assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, np.random.poisson, lamneg) + assert_raises(ValueError, np.random.poisson, [lamneg]*10) + assert_raises(ValueError, np.random.poisson, lambig) + assert_raises(ValueError, np.random.poisson, [lambig]*10) + + def test_power(self): + np.random.seed(self.seed) + actual = np.random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + np.random.seed(self.seed) + actual = np.random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_cauchy(self): + np.random.seed(self.seed) + actual = np.random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + np.random.seed(self.seed) + actual = np.random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + np.random.seed(self.seed) + actual = np.random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_normal(self): + np.random.seed(self.seed) + actual = np.random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + 
np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + np.random.seed(self.seed) + actual = np.random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + np.random.seed(self.seed) + actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + np.random.seed(self.seed) + actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises(self): + np.random.seed(self.seed) + actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + np.random.seed(self.seed) + r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + np.testing.assert_(np.isfinite(r).all()) + + def test_wald(self): + np.random.seed(self.seed) + actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + np.random.seed(self.seed) + actual = np.random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=15) + + def test_zipf(self): + np.random.seed(self.seed) + actual = np.random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + np.testing.assert_array_equal(actual, desired) + + +class TestThread(object): + # make sure each state produces the same sequence even in threads + def setUp(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(np.random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(np.random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if (np.intp().dtype.itemsize == 4 and + (sys.platform == "win32" or + sys.platform.startswith("gnukfreebsd"))): + np.testing.assert_array_almost_equal(out1, out2) + else: + np.testing.assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] 
= state.normal(size=10000) + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1/6.]*6, size=10000) + self.check_function(gen_random, sz=(10000,6)) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_regression.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_regression.py new file mode 100644 index 0000000000000..ccffd033e55c9 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_regression.py @@ -0,0 +1,86 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import (TestCase, run_module_suite, assert_, + assert_array_equal) +from numpy import random +from numpy.compat import long +import numpy as np + + +class TestRegression(TestCase): + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = random.mtrand.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + np.random.seed(0) + rvsn = np.random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / float(N) + msg = "Frequency was %f, should be > 0.45" % freq + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / float(N) + msg = "Frequency was %f, should be < 0.23" % freq + assert_(freq < 0.23, msg) + + def test_permutation_longs(self): + np.random.seed(1234) + a = np.random.permutation(12) + np.random.seed(1234) + b = np.random.permutation(long(12)) + assert_array_equal(a, b) + + def test_randint_range(self): + # Test for ticket #1690 + lmax = np.iinfo('l').max + lmin = np.iinfo('l').min + try: + random.randint(lmin, lmax) + except: + raise AssertionError + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + np.random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = np.random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + np.random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. 
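+        # All three spellings of `size` below should be accepted equivalently.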
+    np.random.multivariate_normal([0], [[0]], size=1)
+    np.random.multivariate_normal([0], [[0]], size=np.int_(1))
+    np.random.multivariate_normal([0], [[0]], size=np.int64(1))
+
+if __name__ == "__main__":
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/setup.py
new file mode 100644
index 0000000000000..2c3846271b6e0
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/setup.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+from __future__ import division, print_function
+
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('numpy', parent_package, top_path)
+    config.add_subpackage('distutils')
+    config.add_subpackage('testing')
+    config.add_subpackage('f2py')
+    config.add_subpackage('core')
+    config.add_subpackage('lib')
+    config.add_subpackage('fft')
+    config.add_subpackage('linalg')
+    config.add_subpackage('random')
+    config.add_subpackage('ma')
+    config.add_subpackage('matrixlib')
+    config.add_subpackage('compat')
+    config.add_subpackage('polynomial')
+    config.add_subpackage('doc')
+    config.add_data_dir('doc')
+    config.add_data_dir('tests')
+    config.make_config_py() # installs __config__.py
+    return config
+
+if __name__ == '__main__':
+    print('This is the wrong setup.py file to run')
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/__init__.py
new file mode 100644
index 0000000000000..258cbe928b3ce
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/__init__.py
@@ -0,0 +1,16 @@
+"""Common test support for all numpy test scripts.
+
+This single module should provide all the common functionality for numpy tests
+in a single location, so that test scripts can just import it and work right
+away.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from unittest import TestCase
+
+from . import decorators as dec
+from .utils import *
+from .nosetester import NoseTester as Tester
+from .nosetester import run_module_suite
+test = Tester().test
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/decorators.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/decorators.py
new file mode 100644
index 0000000000000..8a4cfb4809cbd
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/decorators.py
@@ -0,0 +1,271 @@
+"""
+Decorators for labeling and modifying behavior of test objects.
+
+Decorators that merely return a modified version of the original
+function object are straightforward. Decorators that return a new
+function object need to use
+::
+
+  nose.tools.make_decorator(original_function)(decorator)
+
+in returning the decorator, in order to preserve meta-data such as
+function name, setup and teardown functions and so on - see
+``nose.tools`` for more information.
+
+"""
+from __future__ import division, absolute_import, print_function

+import warnings
+import collections
+
+
+def slow(t):
+    """
+    Label a test as 'slow'.
+
+    The exact definition of a slow test is obviously both subjective and
+    hardware-dependent, but in general any individual test that requires more
+    than a second or two should be labeled as slow (the whole suite consists
+    of thousands of tests, so even a second is significant).
+
+    Parameters
+    ----------
+    t : callable
+        The test to label as slow.
+
+    Returns
+    -------
+    t : callable
+        The decorated test `t`.
+ + Examples + -------- + The `numpy.testing` module includes ``import decorators as dec``. + A test can be decorated as slow like this:: + + from numpy.testing import * + + @dec.slow + def test_big(self): + print 'Big, slow test' + + """ + + t.slow = True + return t + +def setastest(tf=True): + """ + Signals to nose that this function is or is not a test. + + Parameters + ---------- + tf : bool + If True, specifies that the decorated callable is a test. + If False, specifies that the decorated callable is not a test. + Default is True. + + Notes + ----- + This decorator can't use the nose namespace, because it can be + called from a non-test module. See also ``istest`` and ``nottest`` in + ``nose.tools``. + + Examples + -------- + `setastest` can be used in the following way:: + + from numpy.testing.decorators import setastest + + @setastest(False) + def func_with_test_in_name(arg1, arg2): + pass + + """ + def set_test(t): + t.__test__ = tf + return t + return set_test + +def skipif(skip_condition, msg=None): + """ + Make function raise SkipTest exception if a given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. + + Parameters + ---------- + skip_condition : bool or callable + Flag to determine whether to skip the decorated test. + msg : str, optional + Message to give on raising a SkipTest exception. Default is None. + + Returns + ------- + decorator : function + Decorator which, when applied to a function, causes SkipTest + to be raised when `skip_condition` is True, and the function + to be called normally otherwise. + + Notes + ----- + The decorator itself is decorated with the ``nose.tools.make_decorator`` + function in order to transmit function name, and various other metadata. + + """ + + def skip_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + + # Allow for both boolean or callable skip conditions. + if isinstance(skip_condition, collections.Callable): + skip_val = lambda : skip_condition() + else: + skip_val = lambda : skip_condition + + def get_msg(func,msg=None): + """Skip message with information about function being skipped.""" + if msg is None: + out = 'Test skipped due to test condition' + else: + out = msg + + return "Skipping test: %s: %s" % (func.__name__, out) + + # We need to define *two* skippers because Python doesn't allow both + # return with value and yield inside the same function. + def skipper_func(*args, **kwargs): + """Skipper for normal test functions.""" + if skip_val(): + raise nose.SkipTest(get_msg(f, msg)) + else: + return f(*args, **kwargs) + + def skipper_gen(*args, **kwargs): + """Skipper for test generators.""" + if skip_val(): + raise nose.SkipTest(get_msg(f, msg)) + else: + for x in f(*args, **kwargs): + yield x + + # Choose the right skipper to use when building the actual decorator. + if nose.util.isgenerator(f): + skipper = skipper_gen + else: + skipper = skipper_func + + return nose.tools.make_decorator(f)(skipper) + + return skip_decorator + + +def knownfailureif(fail_condition, msg=None): + """ + Make function raise KnownFailureTest exception if given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. 
This is useful for tests that may require costly
+    imports, to delay the cost until the test suite is actually executed.
+
+    Parameters
+    ----------
+    fail_condition : bool or callable
+        Flag to determine whether to mark the decorated test as a known
+        failure (if True) or not (if False).
+    msg : str, optional
+        Message to give on raising a KnownFailureTest exception.
+        Default is None.
+
+    Returns
+    -------
+    decorator : function
+        Decorator, which, when applied to a function, causes
+        KnownFailureTest to be raised when `fail_condition` is True, and the
+        function to be called normally otherwise.
+
+    Notes
+    -----
+    The decorator itself is decorated with the ``nose.tools.make_decorator``
+    function in order to transmit function name, and various other metadata.
+
+    """
+    if msg is None:
+        msg = 'Test skipped due to known failure'
+
+    # Allow for both boolean or callable known failure conditions.
+    if isinstance(fail_condition, collections.Callable):
+        fail_val = lambda : fail_condition()
+    else:
+        fail_val = lambda : fail_condition
+
+    def knownfail_decorator(f):
+        # Local import to avoid a hard nose dependency and only incur the
+        # import time overhead at actual test-time.
+        import nose
+        from .noseclasses import KnownFailureTest
+        def knownfailer(*args, **kwargs):
+            if fail_val():
+                raise KnownFailureTest(msg)
+            else:
+                return f(*args, **kwargs)
+        return nose.tools.make_decorator(f)(knownfailer)
+
+    return knownfail_decorator
+
+def deprecated(conditional=True):
+    """
+    Filter deprecation warnings while running the test suite.
+
+    This decorator can be used to filter DeprecationWarning's, to avoid
+    printing them during the test suite run, while checking that the test
+    actually raises a DeprecationWarning.
+
+    Parameters
+    ----------
+    conditional : bool or callable, optional
+        Flag to determine whether to mark test as deprecated or not. If the
+        condition is a callable, it is used at runtime to dynamically make the
+        decision. Default is True.
+
+    Returns
+    -------
+    decorator : function
+        The `deprecated` decorator itself.
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    """
+    def deprecate_decorator(f):
+        # Local import to avoid a hard nose dependency and only incur the
+        # import time overhead at actual test-time.
+        import nose
+        from .noseclasses import KnownFailureTest
+
+        def _deprecated_imp(*args, **kwargs):
+            # Record all warnings raised by f, then verify that the first
+            # one is a DeprecationWarning.
+            with warnings.catch_warnings(record=True) as l:
+                warnings.simplefilter('always')
+                f(*args, **kwargs)
+                if not len(l) > 0:
+                    raise AssertionError("No warning raised when calling %s"
+                                         % f.__name__)
+                if not l[0].category is DeprecationWarning:
+                    raise AssertionError("First warning for %s is not a "
+                                         "DeprecationWarning (is %s)"
+                                         % (f.__name__, l[0]))
+
+        if isinstance(conditional, collections.Callable):
+            cond = conditional()
+        else:
+            cond = conditional
+        if cond:
+            return nose.tools.make_decorator(f)(_deprecated_imp)
+        else:
+            return f
+    return deprecate_decorator
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/noseclasses.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/noseclasses.py
new file mode 100644
index 0000000000000..cb757a13f2071
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/noseclasses.py
@@ -0,0 +1,353 @@
+# These classes implement a doctest runner plugin for nose, a "known failure"
+# error class, and a customized TestProgram for NumPy.
+ +# Because this module imports nose directly, it should not +# be used except by nosetester.py to avoid a general NumPy +# dependency on nose. +from __future__ import division, absolute_import, print_function + +import os +import doctest + +import nose +from nose.plugins import doctests as npd +from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin +from nose.plugins.base import Plugin +from nose.util import src +import numpy +from .nosetester import get_package_name +import inspect + +# Some of the classes in this module begin with 'Numpy' to clearly distinguish +# them from the plethora of very similar names from nose/unittest/doctest + +#----------------------------------------------------------------------------- +# Modified version of the one in the stdlib, that fixes a python bug (doctests +# not found in extension modules, http://bugs.python.org/issue3158) +class NumpyDocTestFinder(doctest.DocTestFinder): + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + #print '_fm C1' # dbg + return True + elif inspect.isfunction(object): + #print '_fm C2' # dbg + return module.__dict__ is object.__globals__ + elif inspect.isbuiltin(object): + #print '_fm C2-1' # dbg + return module.__name__ == object.__module__ + elif inspect.isclass(object): + #print '_fm C3' # dbg + return module.__name__ == object.__module__ + elif inspect.ismethod(object): + # This one may be a bug in cython that fails to correctly set the + # __module__ attribute of methods, but since the same error is easy + # to make by extension code writers, having this safety in place + # isn't such a bad idea + #print '_fm C3-1' # dbg + return module.__name__ == object.__self__.__class__.__module__ + elif inspect.getmodule(object) is not None: + #print '_fm C4' # dbg + #print 'C4 mod',module,'obj',object # dbg + return module is inspect.getmodule(object) + elif hasattr(object, '__module__'): + #print '_fm C5' # dbg + return module.__name__ == object.__module__ + elif isinstance(object, property): + #print '_fm C6' # dbg + return True # [XX] no way not be sure. + else: + raise ValueError("object must be a class or function") + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. + """ + + doctest.DocTestFinder._find(self, tests, obj, name, module, + source_lines, globs, seen) + + # Below we re-run pieces of the above method with manual modifications, + # because the original code is buggy and fails to correctly identify + # doctests in extension modules. + + # Local shorthands + from inspect import isroutine, isclass, ismodule, isfunction, \ + ismethod + + # Look for tests in a module's contained objects. + if ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + valname1 = '%s.%s' % (name, valname) + if ( (isroutine(val) or isclass(val)) + and self._from_module(module, val) ): + + self._find(tests, val, valname1, module, source_lines, + globs, seen) + + + # Look for tests in a class's contained objects. + if isclass(obj) and self._recurse: + #print 'RECURSE into class:',obj # dbg + for valname, val in obj.__dict__.items(): + #valname1 = '%s.%s' % (name, valname) # dbg + #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg + # Special handling for staticmethod/classmethod. 
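+                # (Unwrap the descriptors so the underlying functions, and
+                # hence their docstrings, are visible to the test finder.)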
+ if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).__func__ + + # Recurse to methods, properties, and nested classes. + if ((isfunction(val) or isclass(val) or + ismethod(val) or isinstance(val, property)) and + self._from_module(module, val)): + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + +# second-chance checker; if the default comparison doesn't +# pass, then see if the expected output string contains flags that +# tell us to ignore the output +class NumpyOutputChecker(doctest.OutputChecker): + def check_output(self, want, got, optionflags): + ret = doctest.OutputChecker.check_output(self, want, got, + optionflags) + if not ret: + if "#random" in want: + return True + + # it would be useful to normalize endianness so that + # bigendian machines don't fail all the tests (and there are + # actually some bigendian examples in the doctests). Let's try + # making them all little endian + got = got.replace("'>", "'<") + want= want.replace("'>", "'<") + + # try to normalize out 32 and 64 bit default int sizes + for sz in [4, 8]: + got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') + 'numpy' + + """ + + fullpath = filepath[:] + pkg_name = [] + while 'site-packages' in filepath or 'dist-packages' in filepath: + filepath, p2 = os.path.split(filepath) + if p2 in ('site-packages', 'dist-packages'): + break + pkg_name.append(p2) + + # if package name determination failed, just default to numpy/scipy + if not pkg_name: + if 'scipy' in fullpath: + return 'scipy' + else: + return 'numpy' + + # otherwise, reverse to get correct order and return + pkg_name.reverse() + + # don't include the outer egg directory + if pkg_name[0].endswith('.egg'): + pkg_name.pop(0) + + return '.'.join(pkg_name) + +def import_nose(): + """ Import nose only when needed. + """ + fine_nose = True + minimum_nose_version = (0, 10, 0) + try: + import nose + except ImportError: + fine_nose = False + else: + if nose.__versioninfo__ < minimum_nose_version: + fine_nose = False + + if not fine_nose: + msg = 'Need nose >= %d.%d.%d for tests - see ' \ + 'http://somethingaboutorange.com/mrl/projects/nose' % \ + minimum_nose_version + + raise ImportError(msg) + + return nose + +def run_module_suite(file_to_run=None, argv=None): + """ + Run a test module. + + Equivalent to calling ``$ nosetests `` from + the command line + + Parameters + ---------- + file_to_run: str, optional + Path to test module, or None. + By default, run the module from which this function is called. + argv: list of strings + Arguments to be passed to the nose test runner. ``argv[0]`` is + ignored. All command line arguments accepted by ``nosetests`` + will work. + + .. versionadded:: 1.9.0 + + Examples + -------- + Adding the following:: + + if __name__ == "__main__" : + run_module_suite(argv=sys.argv) + + at the end of a test module will run the tests when that module is + called in the python interpreter. + + Alternatively, calling:: + + >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") + + from an interpreter will run all the test routine in 'test_matlib.py'. 
+ """ + if file_to_run is None: + f = sys._getframe(1) + file_to_run = f.f_locals.get('__file__', None) + if file_to_run is None: + raise AssertionError + + if argv is None: + argv = ['', file_to_run] + else: + argv = argv + [file_to_run] + + nose = import_nose() + from .noseclasses import KnownFailure + nose.run(argv=argv, addplugins=[KnownFailure()]) + + +class NoseTester(object): + """ + Nose test runner. + + This class is made available as numpy.testing.Tester, and a test function + is typically added to a package's __init__.py like so:: + + from numpy.testing import Tester + test = Tester().test + + Calling this test function finds and runs all tests associated with the + package and all its sub-packages. + + Attributes + ---------- + package_path : str + Full path to the package to test. + package_name : str + Name of the package to test. + + Parameters + ---------- + package : module, str or None, optional + The package to test. If a string, this should be the full path to + the package. If None (default), `package` is set to the module from + which `NoseTester` is initialized. + raise_warnings : str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of 'warn' during the test execution. Valid strings are: + + - "develop" : equals ``(DeprecationWarning, RuntimeWarning)`` + - "release" : equals ``()``, don't raise on any warnings. + + See Notes for more details. + + Notes + ----- + The default for `raise_warnings` is + ``(DeprecationWarning, RuntimeWarning)`` for the master branch of NumPy, + and ``()`` for maintenance branches and released versions. The purpose + of this switching behavior is to catch as many warnings as possible + during development, but not give problems for packaging of released + versions. + + """ + # Stuff to exclude from tests. These are from numpy.distutils + excludes = ['f2py_ext', + 'f2py_f90_ext', + 'gen_ext', + 'pyrex_ext', + 'swig_ext'] + + def __init__(self, package=None, raise_warnings="release"): + package_name = None + if package is None: + f = sys._getframe(1) + package_path = f.f_locals.get('__file__', None) + if package_path is None: + raise AssertionError + package_path = os.path.dirname(package_path) + package_name = f.f_locals.get('__name__', None) + elif isinstance(package, type(os)): + package_path = os.path.dirname(package.__file__) + package_name = getattr(package, '__name__', None) + else: + package_path = str(package) + + self.package_path = package_path + + # Find the package name under test; this name is used to limit coverage + # reporting (if enabled). + if package_name is None: + package_name = get_package_name(package_path) + self.package_name = package_name + + # Set to "release" in constructor in maintenance branches. + self.raise_warnings = raise_warnings + + def _test_argv(self, label, verbose, extra_argv): + ''' Generate argv for nosetest command + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + see ``test`` docstring + verbose : int, optional + Verbosity value for test outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. 
+ + Returns + ------- + argv : list + command line arguments that will be passed to nose + ''' + argv = [__file__, self.package_path, '-s'] + if label and label != 'full': + if not isinstance(label, basestring): + raise TypeError('Selection label should be a string') + if label == 'fast': + label = 'not slow' + argv += ['-A', label] + argv += ['--verbosity', str(verbose)] + + # When installing with setuptools, and also in some other cases, the + # test_*.py files end up marked +x executable. Nose, by default, does + # not run files marked with +x as they might be scripts. However, in + # our case nose only looks for test_*.py files under the package + # directory, which should be safe. + argv += ['--exe'] + + if extra_argv: + argv += extra_argv + return argv + + def _show_system_info(self): + nose = import_nose() + + import numpy + print("NumPy version %s" % numpy.__version__) + npdir = os.path.dirname(numpy.__file__) + print("NumPy is installed in %s" % npdir) + + if 'scipy' in self.package_name: + import scipy + print("SciPy version %s" % scipy.__version__) + spdir = os.path.dirname(scipy.__file__) + print("SciPy is installed in %s" % spdir) + + pyversion = sys.version.replace('\n', '') + print("Python version %s" % pyversion) + print("nose version %d.%d.%d" % nose.__versioninfo__) + + def _get_custom_doctester(self): + """ Return instantiated plugin for doctests + + Allows subclassing of this class to override doctester + + A return value of None means use the nose builtin doctest plugin + """ + from .noseclasses import NumpyDoctest + return NumpyDoctest() + + def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False): + """ + Run tests for module using nose. + + This method does the heavy lifting for the `test` method. It takes all + the same arguments, for details see `test`. + + See Also + -------- + test + + """ + # fail with nice error message if nose is not present + import_nose() + # compile argv + argv = self._test_argv(label, verbose, extra_argv) + # bypass tests noted for exclude + for ename in self.excludes: + argv += ['--exclude', ename] + # our way of doing coverage + if coverage: + argv+=['--cover-package=%s' % self.package_name, '--with-coverage', + '--cover-tests', '--cover-erase'] + # construct list of plugins + import nose.plugins.builtin + from .noseclasses import KnownFailure, Unplugger + plugins = [KnownFailure()] + plugins += [p() for p in nose.plugins.builtin.plugins] + # add doctesting if required + doctest_argv = '--with-doctest' in argv + if doctests == False and doctest_argv: + doctests = True + plug = self._get_custom_doctester() + if plug is None: + # use standard doctesting + if doctests and not doctest_argv: + argv += ['--with-doctest'] + else: # custom doctesting + if doctest_argv: # in fact the unplugger would take care of this + argv.remove('--with-doctest') + plugins += [Unplugger('doctest'), plug] + if doctests: + argv += ['--with-' + plug.name] + return argv, plugins + + def test(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, + raise_warnings=None): + """ + Run tests for module using nose. + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + Identifies the tests to run. This can be a string to pass to + the nosetests executable with the '-A' option, or one of several + special values. Special values are: + * 'fast' - the default - which corresponds to the ``nosetests -A`` + option of 'not slow'. 
+ * 'full' - fast (as above) and slow tests as in the + 'no -A' option to nosetests - this is the same as ''. + * None or '' - run all tests. + attribute_identifier - string passed directly to nosetests as '-A'. + verbose : int, optional + Verbosity value for test outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + doctests : bool, optional + If True, run doctests in module. Default is False. + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + (This requires the `coverage module: + `_). + raise_warnings : str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of 'warn' during the test execution. Valid strings are: + + - "develop" : equals ``(DeprecationWarning, RuntimeWarning)`` + - "release" : equals ``()``, don't raise on any warnings. + + Returns + ------- + result : object + Returns the result of running the tests as a + ``nose.result.TextTestResult`` object. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for it. + For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + Running unit tests for numpy.lib + ... + Ran 976 tests in 3.933s + + OK + + >>> result.errors #doctest: +SKIP + [] + >>> result.knownfail #doctest: +SKIP + [] + """ + + # cap verbosity at 3 because nose becomes *very* verbose beyond that + verbose = min(verbose, 3) + + from . import utils + utils.verbose = verbose + + if doctests: + print("Running unit tests and doctests for %s" % self.package_name) + else: + print("Running unit tests for %s" % self.package_name) + + self._show_system_info() + + # reset doctest state on every run + import doctest + doctest.master = None + + if raise_warnings is None: + raise_warnings = self.raise_warnings + + _warn_opts = dict(develop=(DeprecationWarning, RuntimeWarning), + release=()) + if raise_warnings in _warn_opts.keys(): + raise_warnings = _warn_opts[raise_warnings] + + with warnings.catch_warnings(): + # Reset the warning filters to the default state, + # so that running the tests is more repeatable. + warnings.resetwarnings() + # If deprecation warnings are not set to 'error' below, + # at least set them to 'warn'. + warnings.filterwarnings('always', category=DeprecationWarning) + # Force the requested warnings to raise + for warningtype in raise_warnings: + warnings.filterwarnings('error', category=warningtype) + # Filter out annoying import messages. + warnings.filterwarnings('ignore', message='Not importing directory') + warnings.filterwarnings("ignore", message="numpy.dtype size changed") + warnings.filterwarnings("ignore", message="numpy.ufunc size changed") + warnings.filterwarnings("ignore", category=ModuleDeprecationWarning) + warnings.filterwarnings("ignore", category=FutureWarning) + # Filter out boolean '-' deprecation messages. This allows + # older versions of scipy to test without a flood of messages. + warnings.filterwarnings("ignore", message=".*boolean negative.*") + warnings.filterwarnings("ignore", message=".*boolean subtract.*") + + from .noseclasses import NumpyTestProgram + + argv, plugins = self.prepare_test_args( + label, verbose, extra_argv, doctests, coverage) + t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) + + return t.result + + def bench(self, label='fast', verbose=1, extra_argv=None): + """ + Run benchmarks for module using nose. 
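+
+    (Editor's note, an added hedged aside: discovery is driven by the
+    ``--match`` regex that the method body below passes to nose, so only
+    names beginning with "bench" are collected.)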
+ + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + Identifies the benchmarks to run. This can be a string to pass to + the nosetests executable with the '-A' option, or one of several + special values. Special values are: + * 'fast' - the default - which corresponds to the ``nosetests -A`` + option of 'not slow'. + * 'full' - fast (as above) and slow benchmarks as in the + 'no -A' option to nosetests - this is the same as ''. + * None or '' - run all tests. + attribute_identifier - string passed directly to nosetests as '-A'. + verbose : int, optional + Verbosity value for benchmark outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + + Returns + ------- + success : bool + Returns True if running the benchmarks works, False if an error + occurred. + + Notes + ----- + Benchmarks are like tests, but have names starting with "bench" instead + of "test", and can be found under the "benchmarks" sub-directory of the + module. + + Each NumPy module exposes `bench` in its namespace to run all benchmarks + for it. + + Examples + -------- + >>> success = np.lib.bench() #doctest: +SKIP + Running benchmarks for numpy.lib + ... + using 562341 items: + unique: + 0.11 + unique1d: + 0.11 + ratio: 1.0 + nUnique: 56230 == 56230 + ... + OK + + >>> success #doctest: +SKIP + True + + """ + + print("Running benchmarks for %s" % self.package_name) + self._show_system_info() + + argv = self._test_argv(label, verbose, extra_argv) + argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] + + # import nose or make informative error + nose = import_nose() + + # get plugin to disable doctests + from .noseclasses import Unplugger + add_plugins = [Unplugger('doctest')] + + return nose.run(argv=argv, addplugins=add_plugins) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/print_coercion_tables.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/print_coercion_tables.py new file mode 100644 index 0000000000000..bde82a666fa8b --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/print_coercion_tables.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +"""Prints type-coercion tables for the built-in NumPy types + +""" +from __future__ import division, absolute_import, print_function + +import numpy as np + +# Generic object that can be added, but doesn't do anything else +class GenericObject(object): + def __init__(self, v): + self.v = v + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + dtype = np.dtype('O') + +def print_cancast_table(ntypes): + print('X', end=' ') + for char in ntypes: print(char, end=' ') + print() + for row in ntypes: + print(row, end=' ') + for col in ntypes: + print(int(np.can_cast(row, col)), end=' ') + print() + +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): + print('+', end=' ') + for char in ntypes: print(char, end=' ') + print() + for row in ntypes: + if row == 'O': + rowtype = GenericObject + else: + rowtype = np.obj2sctype(row) + + print(row, end=' ') + for col in ntypes: + if col == 'O': + coltype = GenericObject + else: + coltype = np.obj2sctype(col) + try: + if firstarray: + rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) + else: + rowvalue = rowtype(inputfirstvalue) + colvalue = coltype(inputsecondvalue) + if use_promote_types: + char = np.promote_types(rowvalue.dtype, colvalue.dtype).char + else: + value = np.add(rowvalue, colvalue) + 
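+                    # Editor's note (added comment, not in the original patch):
+                    # np.add is the probe operation; the dtype character of its
+                    # result is what gets printed in the table below.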
if isinstance(value, np.ndarray): + char = value.dtype.char + else: + char = np.dtype(type(value)).char + except ValueError: + char = '!' + except OverflowError: + char = '@' + except TypeError: + char = '#' + print(char, end=' ') + print() + +print("can cast") +print_cancast_table(np.typecodes['All']) +print() +print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") +print() +print("scalar + scalar") +print_coercion_table(np.typecodes['All'], 0, 0, False) +print() +print("scalar + neg scalar") +print_coercion_table(np.typecodes['All'], 0, -1, False) +print() +print("array + scalar") +print_coercion_table(np.typecodes['All'], 0, 0, True) +print() +print("array + neg scalar") +print_coercion_table(np.typecodes['All'], 0, -1, True) +print() +print("promote_types") +print_coercion_table(np.typecodes['All'], 0, 0, False, True) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/setup.py new file mode 100644 index 0000000000000..595e48925fffd --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/setup.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +from __future__ import division, print_function + + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('testing', parent_package, top_path) + + config.add_data_dir('tests') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(maintainer = "NumPy Developers", + maintainer_email = "numpy-dev@numpy.org", + description = "NumPy test module", + url = "http://www.numpy.org", + license = "NumPy License (BSD Style)", + configuration = configuration, + ) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_decorators.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_decorators.py new file mode 100644 index 0000000000000..36c7cc7bb29cc --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_decorators.py @@ -0,0 +1,185 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +from numpy.testing import * +from numpy.testing.noseclasses import KnownFailureTest +import nose + +def test_slow(): + @dec.slow + def slow_func(x, y, z): + pass + + assert_(slow_func.slow) + +def test_setastest(): + @dec.setastest() + def f_default(a): + pass + + @dec.setastest(True) + def f_istest(a): + pass + + @dec.setastest(False) + def f_isnottest(a): + pass + + assert_(f_default.__test__) + assert_(f_istest.__test__) + assert_(not f_isnottest.__test__) + +class DidntSkipException(Exception): + pass + +def test_skip_functions_hardcoded(): + @dec.skipif(True) + def f1(x): + raise DidntSkipException + + try: + f1('a') + except DidntSkipException: + raise Exception('Failed to skip') + except nose.SkipTest: + pass + + @dec.skipif(False) + def f2(x): + raise DidntSkipException + + try: + f2('a') + except DidntSkipException: + pass + except nose.SkipTest: + raise Exception('Skipped when not expected to') + + +def test_skip_functions_callable(): + def skip_tester(): + return skip_flag == 'skip me!' + + @dec.skipif(skip_tester) + def f1(x): + raise DidntSkipException + + try: + skip_flag = 'skip me!' + f1('a') + except DidntSkipException: + raise Exception('Failed to skip') + except nose.SkipTest: + pass + + @dec.skipif(skip_tester) + def f2(x): + raise DidntSkipException + + try: + skip_flag = 'five is right out!' 
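+        # Editor's note (added comment): skip_tester() closes over skip_flag
+        # and reads it at call time; 'five is right out!' does not match
+        # 'skip me!', so f2 is not skipped and DidntSkipException is expected.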
+        f2('a')
+    except DidntSkipException:
+        pass
+    except nose.SkipTest:
+        raise Exception('Skipped when not expected to')
+
+
+def test_skip_generators_hardcoded():
+    @dec.knownfailureif(True, "This test is known to fail")
+    def g1(x):
+        for i in range(x):
+            yield i
+
+    try:
+        for j in g1(10):
+            pass
+    except KnownFailureTest:
+        pass
+    else:
+        raise Exception('Failed to mark as known failure')
+
+
+    @dec.knownfailureif(False, "This test is NOT known to fail")
+    def g2(x):
+        for i in range(x):
+            yield i
+        raise DidntSkipException('FAIL')
+
+    try:
+        for j in g2(10):
+            pass
+    except KnownFailureTest:
+        raise Exception('Marked incorrectly as known failure')
+    except DidntSkipException:
+        pass
+
+
+def test_skip_generators_callable():
+    def skip_tester():
+        return skip_flag == 'skip me!'
+
+    @dec.knownfailureif(skip_tester, "This test is known to fail")
+    def g1(x):
+        for i in range(x):
+            yield i
+
+    try:
+        skip_flag = 'skip me!'
+        for j in g1(10):
+            pass
+    except KnownFailureTest:
+        pass
+    else:
+        raise Exception('Failed to mark as known failure')
+
+
+    @dec.knownfailureif(skip_tester, "This test is NOT known to fail")
+    def g2(x):
+        for i in range(x):
+            yield i
+        raise DidntSkipException('FAIL')
+
+    try:
+        skip_flag = 'do not skip'
+        for j in g2(10):
+            pass
+    except KnownFailureTest:
+        raise Exception('Marked incorrectly as known failure')
+    except DidntSkipException:
+        pass
+
+
+def test_deprecated():
+    @dec.deprecated(True)
+    def non_deprecated_func():
+        pass
+
+    @dec.deprecated()
+    def deprecated_func():
+        import warnings
+        warnings.warn("TEST: deprecated func", DeprecationWarning)
+
+    @dec.deprecated()
+    def deprecated_func2():
+        import warnings
+        warnings.warn("AHHHH")
+        raise ValueError
+
+    @dec.deprecated()
+    def deprecated_func3():
+        import warnings
+        warnings.warn("AHHHH")
+
+    # marked as deprecated, but does not raise DeprecationWarning
+    assert_raises(AssertionError, non_deprecated_func)
+    # should be silent
+    deprecated_func()
+    # fails if deprecated decorator just disables test. See #1453.
+    assert_raises(ValueError, deprecated_func2)
+    # first warning is not a DeprecationWarning
+    assert_raises(AssertionError, deprecated_func3)
+
+
+if __name__ == '__main__':
+    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_doctesting.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_doctesting.py
new file mode 100644
index 0000000000000..43f9fb6cebba5
--- /dev/null
+++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_doctesting.py
@@ -0,0 +1,56 @@
+""" Doctests for NumPy-specific nose/doctest modifications
+
+"""
+from __future__ import division, absolute_import, print_function
+
+# try the #random directive on the output line
+def check_random_directive():
+    '''
+    >>> 2+2
+    #random: may vary on your system
+    '''
+
+# check the implicit "import numpy as np"
+def check_implicit_np():
+    '''
+    >>> np.array([1,2,3])
+    array([1, 2, 3])
+    '''
+
+# there's some extraneous whitespace around the correct responses
+def check_whitespace_enabled():
+    '''
+    # whitespace after the 3
+    >>> 1+2
+    3
+
+    # whitespace before the 7
+    >>> 3+4
+    7
+    '''
+
+def check_empty_output():
+    """ Check that a doctest with no output does not cause an error.
+ + This is related to nose bug 445; the numpy plugin changed the + doctest-result-variable default and therefore hit this bug: + http://code.google.com/p/python-nose/issues/detail?id=445 + + >>> a = 10 + """ + +def check_skip(): + """ Check skip directive + + The test below should not run + + >>> 1/0 #doctest: +SKIP + """ + + +if __name__ == '__main__': + # Run tests outside numpy test rig + import nose + from numpy.testing.noseclasses import NumpyDoctest + argv = ['', __file__, '--with-numpydoctest'] + nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_utils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_utils.py new file mode 100644 index 0000000000000..41a48ea65dd53 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_utils.py @@ -0,0 +1,558 @@ +from __future__ import division, absolute_import, print_function + +import warnings +import sys + +import numpy as np +from numpy.testing import * +import unittest + +class _GenericTest(object): + def _test_equal(self, a, b): + self._assert_func(a, b) + + def _test_not_equal(self, a, b): + try: + self._assert_func(a, b) + passed = True + except AssertionError: + pass + else: + raise AssertionError("a and b are found equal but are not") + + def test_array_rank1_eq(self): + """Test two equal array of rank 1 are found equal.""" + a = np.array([1, 2]) + b = np.array([1, 2]) + + self._test_equal(a, b) + + def test_array_rank1_noteq(self): + """Test two different array of rank 1 are found not equal.""" + a = np.array([1, 2]) + b = np.array([2, 2]) + + self._test_not_equal(a, b) + + def test_array_rank2_eq(self): + """Test two equal array of rank 2 are found equal.""" + a = np.array([[1, 2], [3, 4]]) + b = np.array([[1, 2], [3, 4]]) + + self._test_equal(a, b) + + def test_array_diffshape(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array([1, 2]) + b = np.array([[1, 2], [1, 2]]) + + self._test_not_equal(a, b) + + def test_objarray(self): + """Test object arrays.""" + a = np.array([1, 1], dtype=np.object) + self._test_equal(a, 1) + + def test_array_likes(self): + self._test_equal([1, 2, 3], (1, 2, 3)) + +class TestArrayEqual(_GenericTest, unittest.TestCase): + def setUp(self): + self._assert_func = assert_array_equal + + def test_generic_rank1(self): + """Test rank 1 array for all dtypes.""" + def foo(t): + a = np.empty(2, t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_generic_rank3(self): + """Test rank 3 array for all dtypes.""" + def foo(t): + a = np.empty((4, 2, 3), t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_nan_array(self): + """Test arrays with nan values in them.""" + a = np.array([1, 2, np.nan]) + b = np.array([1, 2, np.nan]) + + self._test_equal(a, b) + + c = np.array([1, 2, 3]) + self._test_not_equal(c, b) + + def test_string_arrays(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array(['floupi', 'floupa']) + b = np.array(['floupi', 'floupa']) + + self._test_equal(a, b) + + c = np.array(['floupipi', 'floupa']) + + 
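+        # Editor's note (added comment): 'floupipi' differs from b's 'floupi',
+        # so the arrays must compare not-equal below.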
self._test_not_equal(c, b) + + def test_recarrays(self): + """Test record arrays.""" + a = np.empty(2, [('floupi', np.float), ('floupa', np.float)]) + a['floupi'] = [1, 2] + a['floupa'] = [1, 2] + b = a.copy() + + self._test_equal(a, b) + + c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)]) + c['floupipi'] = a['floupi'].copy() + c['floupa'] = a['floupa'].copy() + + self._test_not_equal(c, b) + +class TestBuildErrorMessage(unittest.TestCase): + def test_build_err_msg_defaults(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ ' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([ 1.00002, ' + '2.00003, 3.00004])') + self.assertEqual(a, b) + + def test_build_err_msg_no_verbose(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, verbose=False) + b = '\nItems are not equal: There is a mismatch' + self.assertEqual(a, b) + + def test_build_err_msg_custom_names(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) + b = ('\nItems are not equal: There is a mismatch\n FOO: array([ ' + '1.00001, 2.00002, 3.00003])\n BAR: array([ 1.00002, 2.00003, ' + '3.00004])') + self.assertEqual(a, b) + + def test_build_err_msg_custom_precision(self): + x = np.array([1.000000001, 2.00002, 3.00003]) + y = np.array([1.000000002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, precision=10) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ ' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([ ' + '1.000000002, 2.00003 , 3.00004 ])') + self.assertEqual(a, b) + +class TestEqual(TestArrayEqual): + def setUp(self): + self._assert_func = assert_equal + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(np.PZERO, np.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + +class TestArrayAlmostEqual(_GenericTest, unittest.TestCase): + def setUp(self): + self._assert_func = assert_array_almost_equal + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + self.assertRaises(AssertionError, + lambda: self._assert_func(x, y, decimal=5)) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) 
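+        # Editor's note (added comment): nan must compare almost-equal to nan,
+        # but never to a finite value or to inf, as asserted next.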
+ self._assert_func(anan, anan) + self.assertRaises(AssertionError, + lambda : self._assert_func(anan, aone)) + self.assertRaises(AssertionError, + lambda : self._assert_func(anan, ainf)) + self.assertRaises(AssertionError, + lambda : self._assert_func(ainf, anan)) + + def test_inf(self): + a = np.array([[1., 2.], [3., 4.]]) + b = a.copy() + a[0, 0] = np.inf + self.assertRaises(AssertionError, + lambda : self._assert_func(a, b)) + + def test_subclass(self): + a = np.array([[1., 2.], [3., 4.]]) + b = np.ma.masked_array([[1., 2.], [0., 4.]], + [[False, False], [True, False]]) + assert_array_almost_equal(a, b) + assert_array_almost_equal(b, a) + assert_array_almost_equal(b, b) + +class TestAlmostEqual(_GenericTest, unittest.TestCase): + def setUp(self): + self._assert_func = assert_almost_equal + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + self.assertRaises(AssertionError, + lambda : self._assert_func(np.nan, 1)) + self.assertRaises(AssertionError, + lambda : self._assert_func(np.nan, np.inf)) + self.assertRaises(AssertionError, + lambda : self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + self.assertRaises(AssertionError, + lambda : self._assert_func(np.inf, 1)) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + + def test_error_message(self): + """Check the message is formatted correctly for the decimal value""" + x = np.array([1.00000000001, 2.00000000002, 3.00003]) + y = np.array([1.00000000002, 2.00000000003, 3.00004]) + + # test with a different amount of decimal digits + # note that we only check for the formatting of the arrays themselves + b = ('x: array([ 1.00000000001, 2.00000000002, 3.00003 ' + ' ])\n y: array([ 1.00000000002, 2.00000000003, 3.00004 ])') + try: + self._assert_func(x, y, decimal=12) + except AssertionError as e: + # remove anything that's not the array string + self.assertEqual(str(e).split('%)\n ')[1], b) + + # with the default value of decimal digits, only the 3rd element differs + # note that we only check for the formatting of the arrays themselves + b = ('x: array([ 1. , 2. , 3.00003])\n y: array([ 1. , ' + '2. 
, 3.00004])')
+        try:
+            self._assert_func(x, y)
+        except AssertionError as e:
+            # remove anything that's not the array string
+            self.assertEqual(str(e).split('%)\n ')[1], b)
+
+class TestApproxEqual(unittest.TestCase):
+    def setUp(self):
+        self._assert_func = assert_approx_equal
+
+    def test_simple_arrays(self):
+        x = np.array([1234.22])
+        y = np.array([1234.23])
+
+        self._assert_func(x, y, significant=5)
+        self._assert_func(x, y, significant=6)
+        self.assertRaises(AssertionError,
+                          lambda: self._assert_func(x, y, significant=7))
+
+    def test_simple_items(self):
+        x = 1234.22
+        y = 1234.23
+
+        self._assert_func(x, y, significant=4)
+        self._assert_func(x, y, significant=5)
+        self._assert_func(x, y, significant=6)
+        self.assertRaises(AssertionError,
+                          lambda: self._assert_func(x, y, significant=7))
+
+    def test_nan_array(self):
+        anan = np.array(np.nan)
+        aone = np.array(1)
+        ainf = np.array(np.inf)
+        self._assert_func(anan, anan)
+        self.assertRaises(AssertionError,
+                          lambda : self._assert_func(anan, aone))
+        self.assertRaises(AssertionError,
+                          lambda : self._assert_func(anan, ainf))
+        self.assertRaises(AssertionError,
+                          lambda : self._assert_func(ainf, anan))
+
+    def test_nan_items(self):
+        anan = np.array(np.nan)
+        aone = np.array(1)
+        ainf = np.array(np.inf)
+        self._assert_func(anan, anan)
+        self.assertRaises(AssertionError,
+                          lambda : self._assert_func(anan, aone))
+        self.assertRaises(AssertionError,
+                          lambda : self._assert_func(anan, ainf))
+        self.assertRaises(AssertionError,
+                          lambda : self._assert_func(ainf, anan))
+
+class TestRaises(unittest.TestCase):
+    def setUp(self):
+        class MyException(Exception):
+            pass
+
+        self.e = MyException
+
+    def raises_exception(self, e):
+        raise e
+
+    def does_not_raise_exception(self):
+        pass
+
+    def test_correct_catch(self):
+        f = raises(self.e)(self.raises_exception)(self.e)
+
+    def test_wrong_exception(self):
+        try:
+            f = raises(self.e)(self.raises_exception)(RuntimeError)
+        except RuntimeError:
+            return
+        else:
+            raise AssertionError("should have caught RuntimeError")
+
+    def test_catch_no_raise(self):
+        try:
+            f = raises(self.e)(self.does_not_raise_exception)()
+        except AssertionError:
+            return
+        else:
+            raise AssertionError("should have raised an AssertionError")
+
+class TestWarns(unittest.TestCase):
+    def test_warn(self):
+        def f():
+            warnings.warn("yo")
+            return 3
+
+        before_filters = sys.modules['warnings'].filters[:]
+        assert_equal(assert_warns(UserWarning, f), 3)
+        after_filters = sys.modules['warnings'].filters
+
+        assert_raises(AssertionError, assert_no_warnings, f)
+        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
+
+        # Check that the warnings state is unchanged
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve warnings state")
+
+    def test_warn_wrong_warning(self):
+        def f():
+            warnings.warn("yo", DeprecationWarning)
+
+        failed = False
+        filters = sys.modules['warnings'].filters[:]
+        try:
+            try:
+                # Should raise an AssertionError
+                assert_warns(UserWarning, f)
+                failed = True
+            except AssertionError:
+                pass
+        finally:
+            sys.modules['warnings'].filters = filters
+
+        if failed:
+            raise AssertionError("wrong warning caught by assert_warns")
+
+class TestAssertAllclose(unittest.TestCase):
+    def test_simple(self):
+        x = 1e-3
+        y = 1e-9
+
+        assert_allclose(x, y, atol=1)
+        self.assertRaises(AssertionError, assert_allclose, x, y)
+
+        a = np.array([x, y, x, y])
+        b = np.array([x, y, x, x])
+
+        assert_allclose(a, b, atol=1)
+        self.assertRaises(AssertionError, assert_allclose, a, b)
+
+        b[-1] = y * (1 + 1e-8)
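+        # Editor's note (added comment): a 1e-8 relative perturbation sits
+        # inside assert_allclose's default rtol of 1e-7, so the next call
+        # passes until rtol is tightened to 1e-9.
+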
assert_allclose(a, b) + self.assertRaises(AssertionError, assert_allclose, a, b, + rtol=1e-9) + + assert_allclose(6, 10, rtol=0.5) + self.assertRaises(AssertionError, assert_allclose, 10, 6, rtol=0.5) + + def test_min_int(self): + a = np.array([np.iinfo(np.int_).min], dtype=np.int_) + # Should not raise: + assert_allclose(a, a) + + +class TestArrayAlmostEqualNulp(unittest.TestCase): + @dec.knownfailureif(True, "Github issue #347") + def test_simple(self): + np.random.seed(12345) + for i in range(100): + dev = np.random.randn(10) + x = np.ones(10) + y = x + dev * np.finfo(np.float64).eps + assert_array_almost_equal_nulp(x, y, nulp=2 * np.max(dev)) + + def test_simple2(self): + x = np.random.randn(10) + y = 2 * x + def failure(): + return assert_array_almost_equal_nulp(x, y, + nulp=1000) + self.assertRaises(AssertionError, failure) + + def test_big_float32(self): + x = (1e10 * np.random.randn(10)).astype(np.float32) + y = x + 1 + assert_array_almost_equal_nulp(x, y, nulp=1000) + + def test_big_float64(self): + x = 1e10 * np.random.randn(10) + y = x + 1 + def failure(): + assert_array_almost_equal_nulp(x, y, nulp=1000) + self.assertRaises(AssertionError, failure) + + def test_complex(self): + x = np.random.randn(10) + 1j * np.random.randn(10) + y = x + 1 + def failure(): + assert_array_almost_equal_nulp(x, y, nulp=1000) + self.assertRaises(AssertionError, failure) + + def test_complex2(self): + x = np.random.randn(10) + y = np.array(x, np.complex) + 1e-16 * np.random.randn(10) + + assert_array_almost_equal_nulp(x, y, nulp=1000) + +class TestULP(unittest.TestCase): + def test_equal(self): + x = np.random.randn(10) + assert_array_max_ulp(x, x, maxulp=0) + + def test_single(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float32) + x += 0.01 * np.random.randn(10).astype(np.float32) + eps = np.finfo(np.float32).eps + assert_array_max_ulp(x, x+eps, maxulp=20) + + def test_double(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float64) + x += 0.01 * np.random.randn(10).astype(np.float64) + eps = np.finfo(np.float64).eps + assert_array_max_ulp(x, x+eps, maxulp=200) + + def test_inf(self): + for dt in [np.float32, np.float64]: + inf = np.array([np.inf]).astype(dt) + big = np.array([np.finfo(dt).max]) + assert_array_max_ulp(inf, big, maxulp=200) + + def test_nan(self): + # Test that nan is 'far' from small, tiny, inf, max and min + for dt in [np.float32, np.float64]: + if dt == np.float32: + maxulp = 1e6 + else: + maxulp = 1e12 + inf = np.array([np.inf]).astype(dt) + nan = np.array([np.nan]).astype(dt) + big = np.array([np.finfo(dt).max]) + tiny = np.array([np.finfo(dt).tiny]) + zero = np.array([np.PZERO]).astype(dt) + nzero = np.array([np.NZERO]).astype(dt) + self.assertRaises(AssertionError, + lambda: assert_array_max_ulp(nan, inf, + maxulp=maxulp)) + self.assertRaises(AssertionError, + lambda: assert_array_max_ulp(nan, big, + maxulp=maxulp)) + self.assertRaises(AssertionError, + lambda: assert_array_max_ulp(nan, tiny, + maxulp=maxulp)) + self.assertRaises(AssertionError, + lambda: assert_array_max_ulp(nan, zero, + maxulp=maxulp)) + self.assertRaises(AssertionError, + lambda: assert_array_max_ulp(nan, nzero, + maxulp=maxulp)) +if __name__ == '__main__': + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/utils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/utils.py new file mode 100644 index 0000000000000..4f45f62f4b2be --- /dev/null +++ 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/utils.py @@ -0,0 +1,1715 @@ +""" +Utility function to facilitate testing. + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import re +import operator +import warnings +from functools import partial +import shutil +import contextlib +from tempfile import mkdtemp +from .nosetester import import_nose +from numpy.core import float32, empty, arange, array_repr, ndarray + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +__all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException'] + + +verbose = 0 + + +def assert_(val, msg='') : + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + if not val : + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + +def gisnan(x): + """like isnan, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isnan and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isnan + st = isnan(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isnan not supported for this type") + return st + +def gisfinite(x): + """like isfinite, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isfinite and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isfinite, errstate + with errstate(invalid='ignore'): + st = isfinite(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isfinite not supported for this type") + return st + +def gisinf(x): + """like isinf, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isinf and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isinf, errstate + with errstate(invalid='ignore'): + st = isinf(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isinf not supported for this type") + return st + +def rand(*args): + """Returns an array of random numbers with the given shape. + + This only uses the standard library, so it is useful for testing purposes. 
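+
+    A hypothetical call (editor's sketch, not from the original patch):
+    ``rand(2, 3)`` returns a (2, 3) float64 array whose entries are drawn
+    one by one from ``random.random()``.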
+ """ + import random + from numpy.core import zeros, float64 + results = zeros(args, float64) + f = results.flat + for i in range(len(f)): + f[i] = random.random() + return results + +if sys.platform[:5]=='linux': + def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()), + _load_time=[]): + """ Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. """ + import time + if not _load_time: + _load_time.append(time.time()) + try: + f=open(_proc_pid_stat, 'r') + l = f.readline().split(' ') + f.close() + return int(l[13]) + except: + return int(100*(time.time()-_load_time[0])) + + def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())): + """ Return virtual memory size in bytes of the running python. + """ + try: + f=open(_proc_pid_stat, 'r') + l = f.readline().split(' ') + f.close() + return int(l[22]) + except: + return +else: + # os.getpid is not in all platforms available. + # Using time is safe but inaccurate, especially when process + # was suspended or sleeping. + def jiffies(_load_time=[]): + """ Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. [Emulation with time.time]. """ + import time + if not _load_time: + _load_time.append(time.time()) + return int(100*(time.time()-_load_time[0])) + def memusage(): + """ Return memory usage of running python. [Not implemented]""" + raise NotImplementedError + +if os.name=='nt' and sys.version[:3] > '2.3': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance = None, + inum=-1, format = None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + # My older explanation for this was that the "AddCounter" process forced + # the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter) ) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) + +def build_err_msg(arrays, err_msg, header='Items are not equal:', + verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): + msg = ['\n' + header] + if err_msg: + if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + msg = [msg[0] + ' ' + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + r_func = partial(array_repr, precision=precision) + else: + r_func = repr + + try: + r = r_func(a) + except: + r = '[repr failed]' + if r.count('\n') > 3: + r = '\n'.join(r.splitlines()[:3]) + r += '...' 
+ msg.append(' %s: %s' % (names[i], r)) + return '\n'.join(msg) + +def assert_equal(actual,desired,err_msg='',verbose=True): + """ + Raises an AssertionError if two objects are not equal. + + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal. + + Examples + -------- + >>> np.testing.assert_equal([4,5], [4,6]) + ... + : + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + """ + if isinstance(desired, dict): + if not isinstance(actual, dict) : + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): + if k not in actual : + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) + return + from numpy.core import ndarray, isscalar, signbit + from numpy.lib import iscomplexobj, real, imag + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # Inf/nan/negative zero handling + try: + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + isdesnan = gisnan(desired) + isactnan = gisnan(actual) + if isdesnan or isactnan: + if not (isdesnan and isactnan): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + elif desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + # If TypeError or ValueError raised while using isnan and co, just handle + # as before + except (TypeError, ValueError, NotImplementedError): + pass + + # Explicitly use __eq__ for comparison, ticket #2552 + if not (desired == actual): + raise AssertionError(msg) + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an 
error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. + + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + +def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test is equivalent to ``abs(desired-actual) < 0.5 * 10**(-decimal)``. + + Given two objects (numbers or ndarrays), check that all elements of these + objects are almost equal. An exception is raised at conflicting values. + For ndarrays this delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> import numpy.testing as npt + >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) + >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + ... + : + Items are not equal: + ACTUAL: 2.3333333333333002 + DESIRED: 2.3333333399999998 + + >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) + ... + : + Arrays are not almost equal + + (mismatch 50.0%) + x: array([ 1. , 2.33333333]) + y: array([ 1. 
, 2.33333334]) + + """ + from numpy.core import ndarray + from numpy.lib import iscomplexobj, real, imag + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(_build_err_msg()) + else: + if not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if round(abs(desired - actual), decimal) != 0 : + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, + significant=8) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, + significant=8) + ... 
+    AssertionError:
+    Items are not equal to 8 significant digits:
+     ACTUAL: 1.234567e-021
+     DESIRED: 1.2345672000000001e-021
+
+    The evaluated condition that raises the exception is
+
+    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
+    True
+
+    """
+    import numpy as np
+
+    (actual, desired) = map(float, (actual, desired))
+    if desired == actual:
+        return
+    # Normalize the numbers to be in the range (-10.0, 10.0)
+    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+    with np.errstate(invalid='ignore'):
+        scale = 0.5*(np.abs(desired) + np.abs(actual))
+        scale = np.power(10, np.floor(np.log10(scale)))
+    try:
+        sc_desired = desired/scale
+    except ZeroDivisionError:
+        sc_desired = 0.0
+    try:
+        sc_actual = actual/scale
+    except ZeroDivisionError:
+        sc_actual = 0.0
+    msg = build_err_msg([actual, desired], err_msg,
+                        header='Items are not equal to %d significant digits:' %
+                               significant,
+                        verbose=verbose)
+    try:
+        # If one of desired/actual is not finite, handle it specially here:
+        # check that both are nan if any is a nan, and test for equality
+        # otherwise
+        if not (gisfinite(desired) and gisfinite(actual)):
+            if gisnan(desired) or gisnan(actual):
+                if not (gisnan(desired) and gisnan(actual)):
+                    raise AssertionError(msg)
+            else:
+                if not desired == actual:
+                    raise AssertionError(msg)
+            return
+    except (TypeError, NotImplementedError):
+        pass
+    if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)) :
+        raise AssertionError(msg)
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
+                         header='', precision=6):
+    from numpy.core import array, isnan, isinf, any, all, inf
+    x = array(x, copy=False, subok=True)
+    y = array(y, copy=False, subok=True)
+
+    def isnumber(x):
+        return x.dtype.char in '?bhilqpBHILQPefdgFDG'
+
+    def chk_same_position(x_id, y_id, hasval='nan'):
+        """Handling nan/inf: check that x and y have the nan/inf at the same
+        locations."""
+        try:
+            assert_array_equal(x_id, y_id)
+        except AssertionError:
+            msg = build_err_msg([x, y],
+                                err_msg + '\nx and y %s location mismatch:' \
+                                % (hasval), verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+
+    try:
+        cond = (x.shape==() or y.shape==()) or x.shape == y.shape
+        if not cond:
+            msg = build_err_msg([x, y],
+                                err_msg
+                                + '\n(shapes %s, %s mismatch)' % (x.shape,
+                                                                  y.shape),
+                                verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            if not cond :
+                raise AssertionError(msg)
+
+        if isnumber(x) and isnumber(y):
+            x_isnan, y_isnan = isnan(x), isnan(y)
+            x_isinf, y_isinf = isinf(x), isinf(y)
+
+            # Validate that the special values are in the same place
+            if any(x_isnan) or any(y_isnan):
+                chk_same_position(x_isnan, y_isnan, hasval='nan')
+            if any(x_isinf) or any(y_isinf):
+                # Check +inf and -inf separately, since they are different
+                chk_same_position(x == +inf, y == +inf, hasval='+inf')
+                chk_same_position(x == -inf, y == -inf, hasval='-inf')
+
+            # Combine all the special values
+            x_id, y_id = x_isnan, y_isnan
+            x_id |= x_isinf
+            y_id |= y_isinf
+
+            # Only do the comparison if actual values are left
+            if all(x_id):
+                return
+
+            if any(x_id):
+                val = comparison(x[~x_id], y[~y_id])
+            else:
+                val = comparison(x, y)
+        else:
+            val = comparison(x, y)
+
+        if isinstance(val, bool):
+            cond = val
+            reduced = [0]
+        else:
+            reduced = val.ravel()
+            cond = reduced.all()
+            reduced = reduced.tolist()
+        if not cond:
+            match = 100-100.0*reduced.count(1)/len(reduced)
+            msg = build_err_msg([x, y],
+                                err_msg
+                                + '\n(mismatch %s%%)' %
(match,),
+                                verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            if not cond :
+                raise AssertionError(msg)
+    except ValueError as e:
+        import traceback
+        efmt = traceback.format_exc()
+        header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
+
+        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
+                            names=('x', 'y'), precision=precision)
+        raise ValueError(msg)
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two array_like objects are not equal.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of these objects are equal. An exception is raised at
+    shape mismatch or conflicting values. In contrast to the standard usage
+    in numpy, NaNs are compared like numbers, no assertion is raised if
+    both objects have NaNs in the same positions.
+
+    The usual caution for verifying equality with floating point numbers is
+    advised.
+
+    Parameters
+    ----------
+    x : array_like
+        The actual object to check.
+    y : array_like
+        The desired, expected object.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired objects are not equal.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    The first assert does not raise an exception:
+
+    >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
+    ...                               [np.exp(0),2.33333, np.nan])
+
+    The assert fails due to numerical imprecision with floats:
+
+    >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
+    ...                               [1, np.sqrt(np.pi)**2, np.nan])
+    ...
+    AssertionError:
+    Arrays are not equal
+
+    (mismatch 50.0%)
+     x: array([ 1.        ,  3.14159265,         NaN])
+     y: array([ 1.        ,  3.14159265,         NaN])
+
+    Use `assert_allclose` or one of the nulp (number of floating point values)
+    functions for these cases instead:
+
+    >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
+    ...                            [1, np.sqrt(np.pi)**2, np.nan],
+    ...                            rtol=1e-10, atol=0)
+
+    """
+    assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
+                         verbose=verbose, header='Arrays are not equal')
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two objects are not equal up to desired
+    precision.
+
+    .. note:: It is recommended to use one of `assert_allclose`,
+              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+              instead of this function for more consistent floating point
+              comparisons.
+
+    The test verifies identical shapes and verifies values with
+    ``abs(desired-actual) < 0.5 * 10**(-decimal)``.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of these objects are almost equal. An exception is raised at
+    shape mismatch or conflicting values. In contrast to the standard usage
+    in numpy, NaNs are compared like numbers, no assertion is raised if
+    both objects have NaNs in the same positions.
+
+    Parameters
+    ----------
+    x : array_like
+        The actual object to check.
+    y : array_like
+        The desired, expected object.
+    decimal : int, optional
+        Desired precision, default is 6.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+ + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + the first assert does not raise an exception + + >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], + [1.0,2.333,np.nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33339,np.nan], decimal=5) + ... + : + AssertionError: + Arrays are not almost equal + + (mismatch 50.0%) + x: array([ 1. , 2.33333, NaN]) + y: array([ 1. , 2.33339, NaN]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33333, 5], decimal=5) + : + ValueError: + Arrays are not almost equal + x: array([ 1. , 2.33333, NaN]) + y: array([ 1. , 2.33333, 5. ]) + + """ + from numpy.core import around, number, float_, result_type, array + from numpy.core.numerictypes import issubdtype + from numpy.core.fromnumeric import any as npany + def compare(x, y): + try: + if npany(gisinf(x)) or npany( gisinf(y)): + xinfid = gisinf(x) + yinfid = gisinf(y) + if not xinfid == yinfid: + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except (TypeError, NotImplementedError): + pass + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) + y = array(y, dtype=dtype, copy=False, subok=True) + z = abs(x-y) + + if not issubdtype(z.dtype, number): + z = z.astype(float_) # handle object arrays + + return around(z, decimal) <= 10.0**(-decimal) + + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header=('Arrays are not almost equal to %d decimals' % decimal), + precision=decimal) + + +def assert_array_less(x, y, err_msg='', verbose=True): + """ + Raises an AssertionError if two array_like objects are not ordered by less + than. + + Given two array_like objects, check that the shape is equal and all + elements of the first object are strictly smaller than those of the + second object. An exception is raised at shape mismatch or incorrectly + ordered values. Shape mismatch does not raise if an object has zero + dimension. In contrast to the standard usage in numpy, NaNs are + compared, no assertion is raised if both objects have NaNs in the same + positions. + + + + Parameters + ---------- + x : array_like + The smaller object to check. + y : array_like + The larger object to compare. + err_msg : string + The error message to be printed in case of failure. + verbose : bool + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_array_equal: tests objects for equality + assert_array_almost_equal: test objects for equality up to precision + + + + Examples + -------- + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) + ... + : + Arrays are not less-ordered + (mismatch 50.0%) + x: array([ 1., 1., NaN]) + y: array([ 1., 2., NaN]) + + >>> np.testing.assert_array_less([1.0, 4.0], 3) + ... + : + Arrays are not less-ordered + (mismatch 50.0%) + x: array([ 1., 4.]) + y: array(3) + + >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) + ... 
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two array_like objects are not ordered by less
+    than.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of the first object are strictly smaller than those of the
+    second object. An exception is raised at shape mismatch or incorrectly
+    ordered values. Shape mismatch does not raise if an object has zero
+    dimension. In contrast to the standard usage in numpy, NaNs are
+    compared: no assertion is raised if both objects have NaNs in the same
+    positions.
+
+    Parameters
+    ----------
+    x : array_like
+        The smaller object to check.
+    y : array_like
+        The larger object to compare.
+    err_msg : string
+        The error message to be printed in case of failure.
+    verbose : bool
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired objects are not ordered by less than.
+
+    See Also
+    --------
+    assert_array_equal: tests objects for equality
+    assert_array_almost_equal: test objects for equality up to precision
+
+    Examples
+    --------
+    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
+    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
+    ...
+    <type 'exceptions.ValueError'>:
+    Arrays are not less-ordered
+    (mismatch 50.0%)
+     x: array([  1.,   1.,  NaN])
+     y: array([  1.,   2.,  NaN])
+
+    >>> np.testing.assert_array_less([1.0, 4.0], 3)
+    ...
+    <type 'exceptions.ValueError'>:
+    Arrays are not less-ordered
+    (mismatch 50.0%)
+     x: array([ 1.,  4.])
+     y: array(3)
+
+    >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
+    ...
+    <type 'exceptions.ValueError'>:
+    Arrays are not less-ordered
+    (shapes (3,), (1,) mismatch)
+     x: array([ 1.,  2.,  3.])
+     y: array([4])
+
+    """
+    assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
+                         verbose=verbose,
+                         header='Arrays are not less-ordered')
+
+def runstring(astr, dict):
+    exec(astr, dict)
+
+def assert_string_equal(actual, desired):
+    """
+    Test if two strings are equal.
+
+    If the given strings are equal, `assert_string_equal` does nothing.
+    If they are not equal, an AssertionError is raised, and the diff
+    between the strings is shown.
+
+    Parameters
+    ----------
+    actual : str
+        The string to test for equality against the expected string.
+    desired : str
+        The expected string.
+
+    Examples
+    --------
+    >>> np.testing.assert_string_equal('abc', 'abc')
+    >>> np.testing.assert_string_equal('abc', 'abcd')
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ...
+    AssertionError: Differences in strings:
+    - abc+ abcd?    +
+
+    """
+    # delay import of difflib to reduce startup time
+    import difflib
+
+    if not isinstance(actual, str):
+        raise AssertionError(repr(type(actual)))
+    if not isinstance(desired, str):
+        raise AssertionError(repr(type(desired)))
+    if re.match(r'\A'+desired+r'\Z', actual, re.M):
+        return
+
+    diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
+    diff_list = []
+    while diff:
+        d1 = diff.pop(0)
+        if d1.startswith('  '):
+            continue
+        if d1.startswith('- '):
+            l = [d1]
+            d2 = diff.pop(0)
+            if d2.startswith('? '):
+                l.append(d2)
+                d2 = diff.pop(0)
+            if not d2.startswith('+ '):
+                raise AssertionError(repr(d2))
+            l.append(d2)
+            d3 = diff.pop(0)
+            if d3.startswith('? '):
+                l.append(d3)
+            else:
+                diff.insert(0, d3)
+            if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
+                continue
+            diff_list.extend(l)
+            continue
+        raise AssertionError(repr(d1))
+    if not diff_list:
+        return
+    msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
+    if actual != desired:
+        raise AssertionError(msg)
+
+
+def rundocs(filename=None, raise_on_error=True):
+    """
+    Run doctests found in the given file.
+
+    By default `rundocs` raises an AssertionError on failure.
+
+    Parameters
+    ----------
+    filename : str
+        The path to the file for which the doctests are run.
+    raise_on_error : bool
+        Whether to raise an AssertionError when a doctest fails. Default is
+        True.
+
+    Notes
+    -----
+    The doctests can be run by the user/developer by adding the ``doctests``
+    argument to the ``test()`` call.
For example, to run all tests (including + doctests) for `numpy.lib`: + + >>> np.lib.test(doctests=True) #doctest: +SKIP + """ + import doctest, imp + if filename is None: + f = sys._getframe(1) + filename = f.f_globals['__file__'] + name = os.path.splitext(os.path.basename(filename))[0] + path = [os.path.dirname(filename)] + file, pathname, description = imp.find_module(name, path) + try: + m = imp.load_module(name, file, pathname, description) + finally: + file.close() + + tests = doctest.DocTestFinder().find(m) + runner = doctest.DocTestRunner(verbose=False) + + msg = [] + if raise_on_error: + out = lambda s: msg.append(s) + else: + out = None + + for test in tests: + runner.run(test, out=out) + + if runner.failures > 0 and raise_on_error: + raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + + +def raises(*args,**kwargs): + nose = import_nose() + return nose.tools.raises(*args,**kwargs) + + +def assert_raises(*args,**kwargs): + """ + assert_raises(exception_class, callable, *args, **kwargs) + + Fail unless an exception of class exception_class is thrown + by callable when invoked with arguments args and keyword + arguments kwargs. If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + """ + nose = import_nose() + return nose.tools.assert_raises(*args,**kwargs) + + +assert_raises_regex_impl = None + + +def assert_raises_regex(exception_class, expected_regexp, + callable_obj=None, *args, **kwargs): + """ + Fail unless an exception of class exception_class and with message that + matches expected_regexp is thrown by callable when invoked with arguments + args and keyword arguments kwargs. + + Name of this function adheres to Python 3.2+ reference, but should work in + all versions down to 2.6. 
+ + """ + nose = import_nose() + + global assert_raises_regex_impl + if assert_raises_regex_impl is None: + try: + # Python 3.2+ + assert_raises_regex_impl = nose.tools.assert_raises_regex + except AttributeError: + try: + # 2.7+ + assert_raises_regex_impl = nose.tools.assert_raises_regexp + except AttributeError: + # 2.6 + + # This class is copied from Python2.7 stdlib almost verbatim + class _AssertRaisesContext(object): + """A context manager used to implement TestCase.assertRaises* methods.""" + + def __init__(self, expected, expected_regexp=None): + self.expected = expected + self.expected_regexp = expected_regexp + + def failureException(self, msg): + return AssertionError(msg) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_type is None: + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + raise self.failureException( + "{0} not raised".format(exc_name)) + if not issubclass(exc_type, self.expected): + # let unexpected exceptions pass through + return False + self.exception = exc_value # store for later retrieval + if self.expected_regexp is None: + return True + + expected_regexp = self.expected_regexp + if isinstance(expected_regexp, basestring): + expected_regexp = re.compile(expected_regexp) + if not expected_regexp.search(str(exc_value)): + raise self.failureException( + '"%s" does not match "%s"' % + (expected_regexp.pattern, str(exc_value))) + return True + + def impl(cls, regex, callable_obj, *a, **kw): + mgr = _AssertRaisesContext(cls, regex) + if callable_obj is None: + return mgr + with mgr: + callable_obj(*a, **kw) + assert_raises_regex_impl = impl + + return assert_raises_regex_impl(exception_class, expected_regexp, + callable_obj, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. + + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. + + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. + + """ + if testmatch is None: + testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith('_'): + setattr(cls, funcname, decorator(function)) + return + + +def measure(code_str,times=1,label=None): + """ + Return elapsed time for executing code in the namespace of the caller. + + The supplied code string is compiled with the Python builtin ``compile``. + The precision of the timing is 10 milli-seconds. 
If the code will execute + fast on this timescale, it can be executed many times to get reasonable + timing accuracy. + + Parameters + ---------- + code_str : str + The code to be timed. + times : int, optional + The number of times the code is executed. Default is 1. The code is + only compiled once. + label : str, optional + A label to identify `code_str` with. This is passed into ``compile`` + as the second argument (for run-time error messages). + + Returns + ------- + elapsed : float + Total elapsed time in seconds for executing `code_str` `times` times. + + Examples + -------- + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', + ... times=times) + >>> print "Time for a single execution : ", etime / times, "s" + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, + 'Test name: %s ' % label, + 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01*elapsed + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. + """ + import numpy as np + a = np.arange(100 * 100) + b = np.arange(100*100).reshape(100, 100) + c = b + + i = 1 + + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + + assert_(sys.getrefcount(i) >= rc) + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, + err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)``. + It compares the difference between `actual` and `desired` to + ``atol + rtol * abs(desired)``. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> assert_allclose(x, y, rtol=1e-5, atol=0) + + """ + import numpy as np + def compare(x, y): + return np.allclose(x, y, rtol=rtol, atol=atol) + + actual, desired = np.asanyarray(actual), np.asanyarray(desired) + header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) + assert_array_compare(compare, actual, desired, err_msg=str(err_msg), + verbose=verbose, header=header) + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. + nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. 
+ spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulps * spacing(max(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) + Traceback (most recent call last): + ... + AssertionError: X and Y are not equal to 1 ULP (max is 2) + + """ + import numpy as np + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x-y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = "X and Y are not equal to %d ULP" % nulp + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) + raise AssertionError(msg) + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. + + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. + + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) + + """ + import numpy as np + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError("Arrays are not almost equal up to %g ULP" % \ + maxulp) + return ret + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. 
+ + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) + 1.0 + """ + import numpy as np + if dtype: + x = np.array(x, dtype=dtype) + y = np.array(y, dtype=dtype) + else: + x = np.array(x) + y = np.array(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array(x, dtype=t) + y = np.array(y, dtype=t) + + if not x.shape == y.shape: + raise ValueError("x and y do not have the same shape: %s - %s" % \ + (x.shape, y.shape)) + + def _diff(rx, ry, vdt): + diff = np.array(rx-ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx<0] + else: + if rx < 0: + rx = comp - rx + + return rx + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation of + x.""" + import numpy as np + if x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-2**31)) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-2**63)) + else: + raise ValueError("Unsupported dtype %s" % x.dtype) + +# The following two classes are copied from python 2.6 warnings module (context +# manager) +class WarningMessage(object): + + """ + Holds the result of a single showwarning() call. + + Deprecated in 1.8.0 + + Notes + ----- + `WarningMessage` is copied from the Python 2.6 warnings module, + so it can be used in NumPy with older Python versions. + + """ + + _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", + "line") + + def __init__(self, message, category, filename, lineno, file=None, + line=None): + local_values = locals() + for attr in self._WARNING_DETAILS: + setattr(self, attr, local_values[attr]) + if category: + self._category_name = category.__name__ + else: + self._category_name = None + + def __str__(self): + return ("{message : %r, category : %r, filename : %r, lineno : %s, " + "line : %r}" % (self.message, self._category_name, + self.filename, self.lineno, self.line)) + +class WarningManager(object): + """ + A context manager that copies and restores the warnings filter upon + exiting the context. + + The 'record' argument specifies whether warnings should be captured by a + custom implementation of ``warnings.showwarning()`` and be appended to a + list returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + + The 'module' argument is to specify an alternative module to the module + named 'warnings' and imported under that name. This argument is only useful + when testing the warnings module itself. + + Deprecated in 1.8.0 + + Notes + ----- + `WarningManager` is a copy of the ``catch_warnings`` context manager + from the Python 2.6 warnings module, with slight modifications. + It is copied so it can be used in NumPy with older Python versions. 
+ + """ + def __init__(self, record=False, module=None): + self._record = record + if module is None: + self._module = sys.modules['warnings'] + else: + self._module = module + self._entered = False + + def __enter__(self): + if self._entered: + raise RuntimeError("Cannot enter %r twice" % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._showwarning = self._module.showwarning + if self._record: + log = [] + def showwarning(*args, **kwargs): + log.append(WarningMessage(*args, **kwargs)) + self._module.showwarning = showwarning + return log + else: + return None + + def __exit__(self): + if not self._entered: + raise RuntimeError("Cannot exit %r without entering first" % self) + self._module.filters = self._filters + self._module.showwarning = self._showwarning + + +def assert_warns(warning_class, func, *args, **kw): + """ + Fail unless the given callable throws the specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught, and the + test case will be deemed to have suffered an error. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. + + """ + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + result = func(*args, **kw) + if not len(l) > 0: + raise AssertionError("No warning raised when calling %s" + % func.__name__) + if not l[0].category is warning_class: + raise AssertionError("First warning for %s is not a " \ + "%s( is %s)" % (func.__name__, warning_class, l[0])) + return result + +def assert_no_warnings(func, *args, **kw): + """ + Fail if the given callable produces any warnings. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. 
+ + """ + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + result = func(*args, **kw) + if len(l) > 0: + raise AssertionError("Got warnings when calling %s: %s" + % (func.__name__, l)) + return result + + +def _gen_alignment_data(dtype=float32, type='binary', max_size=24): + """ + generator producing data with different alignment and offsets + to test simd vectorization + + Parameters + ---------- + dtype : dtype + data type to produce + type : string + 'unary': create data for unary operations, creates one input + and output array + 'binary': create data for unary operations, creates two input + and output array + max_size : integer + maximum size of data to produce + + Returns + ------- + if type is 'unary' yields one output, one input array and a message + containing information on the data + if type is 'binary' yields one output array, two input array and a message + containing information on the data + + """ + ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' + bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' + for o in range(3): + for s in range(o + 2, max(o + 3, max_size)): + if type == 'unary': + inp = lambda : arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') + yield inp(), inp(), ufmt % (o, o, s, dtype, 'in place') + yield out[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'out of place') + yield inp()[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'aliased') + yield inp()[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'aliased') + if type == 'binary': + inp1 = lambda :arange(s, dtype=dtype)[o:] + inp2 = lambda :arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'out of place') + yield inp1(), inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'in place1') + yield inp2(), inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'in place2') + yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'out of place') + yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'aliased') + + +class IgnoreException(Exception): + "Ignoring this exception due to disabled feature" + + +@contextlib.contextmanager +def tempdir(*args, **kwargs): + """Context manager to provide a temporary test folder. + + All arguments are passed as this to the underlying tempfile.mkdtemp + function. 
+ + """ + tmpdir = mkdtemp(*args, **kwargs) + yield tmpdir + shutil.rmtree(tmpdir) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_ctypeslib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_ctypeslib.py new file mode 100644 index 0000000000000..8e9c6c0bd9c76 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_ctypeslib.py @@ -0,0 +1,102 @@ +from __future__ import division, absolute_import, print_function + +import sys + +import numpy as np +from numpy.ctypeslib import ndpointer, load_library +from numpy.distutils.misc_util import get_shared_lib_extension +from numpy.testing import * + +try: + cdll = load_library('multiarray', np.core.multiarray.__file__) + _HAS_CTYPE = True +except ImportError: + _HAS_CTYPE = False + +class TestLoadLibrary(TestCase): + @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation") + @dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin") + def test_basic(self): + try: + cdll = load_library('multiarray', + np.core.multiarray.__file__) + except ImportError as e: + msg = "ctypes is not available on this python: skipping the test" \ + " (import error was: %s)" % str(e) + print(msg) + + @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation") + @dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin") + def test_basic2(self): + """Regression for #801: load_library with a full library name + (including extension) does not work.""" + try: + try: + so = get_shared_lib_extension(is_python_ext=True) + cdll = load_library('multiarray%s' % so, + np.core.multiarray.__file__) + except ImportError: + print("No distutils available, skipping test.") + except ImportError as e: + msg = "ctypes is not available on this python: skipping the test" \ + " (import error was: %s)" % str(e) + print(msg) + +class TestNdpointer(TestCase): + def test_dtype(self): + dt = np.intc + p = ndpointer(dtype=dt) + self.assertTrue(p.from_param(np.array([1], dt))) + dt = 'i4') + p = ndpointer(dtype=dt) + p.from_param(np.array([1], dt)) + self.assertRaises(TypeError, p.from_param, + np.array([1], dt.newbyteorder('swap'))) + dtnames = ['x', 'y'] + dtformats = [np.intc, np.float64] + dtdescr = {'names' : dtnames, 'formats' : dtformats} + dt = np.dtype(dtdescr) + p = ndpointer(dtype=dt) + self.assertTrue(p.from_param(np.zeros((10,), dt))) + samedt = np.dtype(dtdescr) + p = ndpointer(dtype=samedt) + self.assertTrue(p.from_param(np.zeros((10,), dt))) + dt2 = np.dtype(dtdescr, align=True) + if dt.itemsize != dt2.itemsize: + self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2)) + else: + self.assertTrue(p.from_param(np.zeros((10,), dt2))) + + def test_ndim(self): + p = ndpointer(ndim=0) + self.assertTrue(p.from_param(np.array(1))) + self.assertRaises(TypeError, p.from_param, np.array([1])) + p = ndpointer(ndim=1) + self.assertRaises(TypeError, p.from_param, np.array(1)) + self.assertTrue(p.from_param(np.array([1]))) + p = ndpointer(ndim=2) + self.assertTrue(p.from_param(np.array([[1]]))) + + def test_shape(self): + p = ndpointer(shape=(1, 2)) + self.assertTrue(p.from_param(np.array([[1, 2]]))) + self.assertRaises(TypeError, p.from_param, np.array([[1], [2]])) + p = ndpointer(shape=()) + self.assertTrue(p.from_param(np.array(1))) + + def test_flags(self): + x = np.array([[1, 2], [3, 4]], order='F') + p = ndpointer(flags='FORTRAN') + self.assertTrue(p.from_param(x)) + p = ndpointer(flags='CONTIGUOUS') + self.assertRaises(TypeError, p.from_param, x) + p = 
ndpointer(flags=x.flags.num) + self.assertTrue(p.from_param(x)) + self.assertRaises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_matlib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_matlib.py new file mode 100644 index 0000000000000..0bc8548baa7f9 --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_matlib.py @@ -0,0 +1,55 @@ +from __future__ import division, absolute_import, print_function + +import numpy as np +import numpy.matlib +from numpy.testing import assert_array_equal, assert_, run_module_suite + +def test_empty(): + x = np.matlib.empty((2,)) + assert_(isinstance(x, np.matrix)) + assert_(x.shape, (1, 2)) + +def test_ones(): + assert_array_equal(np.matlib.ones((2, 3)), + np.matrix([[ 1., 1., 1.], + [ 1., 1., 1.]])) + + assert_array_equal(np.matlib.ones(2), np.matrix([[ 1., 1.]])) + +def test_zeros(): + assert_array_equal(np.matlib.zeros((2, 3)), + np.matrix([[ 0., 0., 0.], + [ 0., 0., 0.]])) + + assert_array_equal(np.matlib.zeros(2), np.matrix([[ 0., 0.]])) + +def test_identity(): + x = np.matlib.identity(2, dtype=np.int) + assert_array_equal(x, np.matrix([[1, 0], [0, 1]])) + +def test_eye(): + x = np.matlib.eye(3, k=1, dtype=int) + assert_array_equal(x, np.matrix([[ 0, 1, 0], + [ 0, 0, 1], + [ 0, 0, 0]])) + +def test_rand(): + x = np.matlib.rand(3) + # check matrix type, array would have shape (3,) + assert_(x.ndim == 2) + +def test_randn(): + x = np.matlib.randn(3) + # check matrix type, array would have shape (3,) + assert_(x.ndim == 2) + +def test_repmat(): + a1 = np.arange(4) + x = np.matlib.repmat(a1, 2, 2) + y = np.array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + assert_array_equal(x, y) + + +if __name__ == "__main__": + run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/version.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/version.py new file mode 100644 index 0000000000000..2eebbacb5df6c --- /dev/null +++ b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/version.py @@ -0,0 +1,10 @@ + +# THIS FILE IS GENERATED FROM NUMPY SETUP.PY +short_version = '1.9.0' +version = '1.9.0' +full_version = '1.9.0' +git_revision = '07601a64cdfeb1c0247bde1294ad6380413cab66' +release = True + +if not release: + version = full_version diff --git a/pandas/core/format.py b/pandas/core/format.py index 2773cc0c135c1..f46621d4b86bd 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -1169,7 +1169,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None, mode='w', nanRep=None, encoding=None, quoting=None, line_terminator='\n', chunksize=None, engine=None, tupleize_cols=False, quotechar='"', date_format=None, - doublequote=True, escapechar=None): + doublequote=True, escapechar=None, decimal='.'): self.engine = engine # remove for 0.13 self.obj = obj @@ -1181,6 +1181,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None, self.sep = sep self.na_rep = na_rep self.float_format = float_format + self.decimal = decimal self.header = header self.index = index @@ -1509,6 +1510,7 @@ def _save_chunk(self, start_i, end_i): b = self.blocks[i] d = b.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format, + decimal=self.decimal, date_format=self.date_format) for col_loc, col in zip(b.mgr_locs, d): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 223cb4fe78e94..4aa9e9a713955 100644 --- a/pandas/core/frame.py +++ 
b/pandas/core/frame.py
@@ -1073,7 +1073,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
                mode='w', encoding=None, quoting=None,
                quotechar='"', line_terminator='\n', chunksize=None,
                tupleize_cols=False, date_format=None, doublequote=True,
-               escapechar=None, **kwds):
+               escapechar=None, decimal='.', **kwds):
         r"""Write DataFrame to a comma-separated values (csv) file

         Parameters
@@ -1126,6 +1126,8 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
         date_format : string, default None
             Format string for datetime objects
         cols : kwarg only alias of columns [deprecated]
+        decimal : string, default '.'
+            Character recognized as decimal separator. E.g. use ',' for European data
         """
         formatter = fmt.CSVFormatter(self, path_or_buf,
@@ -1140,7 +1142,8 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
                                      tupleize_cols=tupleize_cols,
                                      date_format=date_format,
                                      doublequote=doublequote,
-                                     escapechar=escapechar)
+                                     escapechar=escapechar,
+                                     decimal=decimal)

         formatter.save()
         if path_or_buf is None:
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 354ccd2c94583..e2978bd75d4ff 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1161,7 +1161,7 @@ def _try_cast(self, element):
         except:  # pragma: no cover
             return element

-    def to_native_types(self, slicer=None, na_rep='', float_format=None,
+    def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
                         **kwargs):
         """ convert to our native types format, slicing if desired """

@@ -1171,10 +1171,17 @@ def to_native_types(self, slicer=None, na_rep='', float_format=None,
         values = np.array(values, dtype=object)
         mask = isnull(values)
         values[mask] = na_rep
+        if not float_format and decimal != '.':
+            float_format = '%f'
         if float_format:
             imask = (~mask).ravel()
             values.flat[imask] = np.array(
                 [float_format % val for val in values.ravel()[imask]])
+        if decimal != '.':
+            imask = (~mask).ravel()
+            values.flat[imask] = np.array(
+                [val.replace('.', decimal, 1) for val in values.ravel()[imask]])
+
         return values.tolist()

     def should_store(self, value):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 37f66fc56ea56..ef9093c89a713 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2239,7 +2239,7 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None,
     def to_csv(self, path, index=True, sep=",", na_rep='',
                float_format=None, header=False,
                index_label=None, mode='w', nanRep=None, encoding=None,
-               date_format=None):
+               date_format=None, decimal='.'):
         """
         Write Series to a comma-separated values (csv) file

@@ -2267,6 +2267,8 @@ def to_csv(self, path, index=True, sep=",", na_rep='',
             non-ascii, for python versions prior to 3
         date_format: string, default None
             Format string for datetime objects.
+        decimal: string, default '.'
+            Character recognized as decimal separator. E.g. use ',' for European data
         """
         from pandas.core.frame import DataFrame
         df = DataFrame(self)
@@ -2274,7 +2276,7 @@ def to_csv(self, path, index=True, sep=",", na_rep='',
         result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
                            float_format=float_format, header=header,
                            index_label=index_label, mode=mode, nanRep=nanRep,
-                           encoding=encoding, date_format=date_format)
+                           encoding=encoding, date_format=date_format, decimal=decimal)

         if path is None:
             return result
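Assuming the pandas hunks above apply as written, the new keyword would be exercised roughly as below. This snippet is a sketch, not part of the patch; ``sep=';'`` is picked so the ',' decimal separator cannot collide with the field separator, and the ``1,250000``-style output reflects the ``'%f'`` fallback that ``to_native_types`` now applies when no ``float_format`` is given:

    try:
        from io import StringIO          # Python 3
    except ImportError:
        from StringIO import StringIO    # Python 2

    import pandas as pd

    df = pd.DataFrame({'a': [1.25, 2.5]})
    buf = StringIO()
    df.to_csv(buf, sep=';', decimal=',')
    print(buf.getvalue())
    # expected output, roughly:
    # ;a
    # 0;1,250000
    # 1;2,500000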
From c0985eca4b5d10fe7959f5a2885551e75d3b52da Mon Sep 17 00:00:00 2001
From: bertrandhaut
Date: Fri, 3 Oct 2014 09:54:09 +0200
Subject: [PATCH 2/6] remove numpy directory

---
 .../EGG-INFO/PKG-INFO | 39 -
 .../EGG-INFO/SOURCES.txt | 1033 ---
 .../EGG-INFO/dependency_links.txt | 1 -
 .../EGG-INFO/native_libs.txt | 14 -
 .../EGG-INFO/not-zip-safe | 1 -
 .../EGG-INFO/scripts/f2py | 24 -
 .../EGG-INFO/top_level.txt | 1 -
 .../numpy/__config__.py | 36 -
 .../numpy/__init__.py | 216 -
 .../numpy/_import_tools.py | 348 -
 .../numpy/add_newdocs.py | 7526 -----------------
 .../numpy/compat/__init__.py | 20 -
 .../numpy/compat/_inspect.py | 221 -
 .../numpy/compat/py3k.py | 89 -
 .../numpy/compat/setup.py | 12 -
 .../numpy/core/__init__.py | 78 -
 .../numpy/core/_dummy.py | 7 -
 .../numpy/core/_internal.py | 570 --
 .../numpy/core/_methods.py | 134 -
 .../numpy/core/arrayprint.py | 752 --
 .../numpy/core/cversions.py | 15 -
 .../numpy/core/defchararray.py | 2687 ------
 .../numpy/core/fromnumeric.py | 2930 -------
 .../numpy/core/function_base.py | 188 -
 .../numpy/core/generate_numpy_api.py | 259 -
 .../numpy/core/getlimits.py | 306 -
 .../core/include/numpy/__multiarray_api.h | 1721 ----
 .../numpy/core/include/numpy/__ufunc_api.h | 328 -
 .../numpy/_neighborhood_iterator_imp.h | 90 -
 .../numpy/core/include/numpy/_numpyconfig.h | 32 -
 .../numpy/core/include/numpy/arrayobject.h | 11 -
 .../numpy/core/include/numpy/arrayscalars.h | 175 -
 .../numpy/core/include/numpy/halffloat.h | 69 -
 .../core/include/numpy/multiarray_api.txt | 2442 ------
 .../numpy/core/include/numpy/ndarrayobject.h | 237 -
 .../numpy/core/include/numpy/ndarraytypes.h | 1820 ----
 .../numpy/core/include/numpy/noprefix.h | 209 -
 .../include/numpy/npy_1_7_deprecated_api.h | 130 -
 .../numpy/core/include/numpy/npy_3kcompat.h | 506 --
 .../numpy/core/include/numpy/npy_common.h | 1046 ---
 .../numpy/core/include/numpy/npy_cpu.h | 122 -
 .../numpy/core/include/numpy/npy_endian.h | 49 -
 .../numpy/core/include/numpy/npy_interrupt.h | 117 -
 .../numpy/core/include/numpy/npy_math.h | 479 --
 .../include/numpy/npy_no_deprecated_api.h | 19 -
 .../numpy/core/include/numpy/npy_os.h | 30 -
 .../numpy/core/include/numpy/numpyconfig.h | 35 -
 .../numpy/core/include/numpy/old_defines.h | 187 -
 .../numpy/core/include/numpy/oldnumeric.h | 23 -
 .../numpy/core/include/numpy/ufunc_api.txt | 321 -
 .../numpy/core/include/numpy/ufuncobject.h | 375 -
 .../numpy/core/include/numpy/utils.h | 19 -
 .../numpy/core/info.py | 87 -
 .../numpy/core/lib/npy-pkg-config/mlib.ini | 12 -
 .../numpy/core/lib/npy-pkg-config/npymath.ini | 20 -
 .../numpy/core/machar.py | 338 -
 .../numpy/core/memmap.py | 308 -
 .../numpy/core/multiarray.py | 7 -
 .../numpy/core/multiarray_tests.py | 7 -
 .../numpy/core/numeric.py | 2842 -------
 .../numpy/core/numerictypes.py | 1042 ---
 .../numpy/core/operand_flag_tests.py | 7 -
 .../numpy/core/records.py | 808 --
 .../numpy/core/scalarmath.py | 7 -
 .../numpy/core/setup.py | 1013 ---
 .../numpy/core/setup_common.py | 321 -
 .../numpy/core/shape_base.py | 277 -
 .../numpy/core/struct_ufunc_test.py | 7 -
 .../numpy/core/test_rational.py | 7 -
.../numpy/core/tests/data/astype_copy.pkl | Bin 716 -> 0 bytes .../core/tests/data/recarray_from_file.fits | Bin 8640 -> 0 bytes .../numpy/core/tests/test_abc.py | 45 - .../numpy/core/tests/test_api.py | 514 -- .../numpy/core/tests/test_arrayprint.py | 167 - .../numpy/core/tests/test_blasdot.py | 172 - .../numpy/core/tests/test_datetime.py | 1771 ---- .../numpy/core/tests/test_defchararray.py | 642 -- .../numpy/core/tests/test_deprecations.py | 512 -- .../numpy/core/tests/test_dtype.py | 542 -- .../numpy/core/tests/test_einsum.py | 573 -- .../numpy/core/tests/test_errstate.py | 51 - .../numpy/core/tests/test_function_base.py | 111 - .../numpy/core/tests/test_getlimits.py | 86 - .../numpy/core/tests/test_half.py | 439 - .../numpy/core/tests/test_indexerrors.py | 127 - .../numpy/core/tests/test_indexing.py | 983 --- .../numpy/core/tests/test_item_selection.py | 70 - .../numpy/core/tests/test_machar.py | 30 - .../numpy/core/tests/test_memmap.py | 127 - .../numpy/core/tests/test_multiarray.py | 4482 ---------- .../core/tests/test_multiarray_assignment.py | 80 - .../numpy/core/tests/test_nditer.py | 2630 ------ .../numpy/core/tests/test_numeric.py | 2091 ----- .../numpy/core/tests/test_numerictypes.py | 377 - .../numpy/core/tests/test_print.py | 245 - .../numpy/core/tests/test_records.py | 176 - .../numpy/core/tests/test_regression.py | 2108 ----- .../numpy/core/tests/test_scalarinherit.py | 34 - .../numpy/core/tests/test_scalarmath.py | 275 - .../numpy/core/tests/test_scalarprint.py | 30 - .../numpy/core/tests/test_shape_base.py | 250 - .../numpy/core/tests/test_ufunc.py | 1153 --- .../numpy/core/tests/test_umath.py | 1665 ---- .../numpy/core/tests/test_umath_complex.py | 537 -- .../numpy/core/tests/test_unicode.py | 357 - .../numpy/core/umath.py | 7 - .../numpy/core/umath_tests.py | 7 - .../numpy/ctypeslib.py | 426 - .../numpy/distutils/__config__.py | 36 - .../numpy/distutils/__init__.py | 39 - .../numpy/distutils/__version__.py | 6 - .../numpy/distutils/ccompiler.py | 656 -- .../numpy/distutils/command/__init__.py | 43 - .../numpy/distutils/command/autodist.py | 43 - .../numpy/distutils/command/bdist_rpm.py | 24 - .../numpy/distutils/command/build.py | 39 - .../numpy/distutils/command/build_clib.py | 284 - .../numpy/distutils/command/build_ext.py | 503 -- .../numpy/distutils/command/build_py.py | 33 - .../numpy/distutils/command/build_scripts.py | 51 - .../numpy/distutils/command/build_src.py | 806 -- .../numpy/distutils/command/config.py | 476 -- .../distutils/command/config_compiler.py | 125 - .../numpy/distutils/command/develop.py | 17 - .../numpy/distutils/command/egg_info.py | 11 - .../numpy/distutils/command/install.py | 82 - .../numpy/distutils/command/install_clib.py | 39 - .../numpy/distutils/command/install_data.py | 26 - .../distutils/command/install_headers.py | 27 - .../numpy/distutils/command/sdist.py | 29 - .../numpy/distutils/compat.py | 10 - .../numpy/distutils/conv_template.py | 337 - .../numpy/distutils/core.py | 210 - .../numpy/distutils/cpuinfo.py | 693 -- .../numpy/distutils/environment.py | 72 - .../numpy/distutils/exec_command.py | 618 -- .../numpy/distutils/extension.py | 90 - .../numpy/distutils/fcompiler/__init__.py | 989 --- .../numpy/distutils/fcompiler/absoft.py | 160 - .../numpy/distutils/fcompiler/compaq.py | 128 - .../numpy/distutils/fcompiler/g95.py | 45 - .../numpy/distutils/fcompiler/gnu.py | 390 - .../numpy/distutils/fcompiler/hpux.py | 45 - .../numpy/distutils/fcompiler/ibm.py | 96 - .../numpy/distutils/fcompiler/intel.py | 205 - 
.../numpy/distutils/fcompiler/lahey.py | 49 - .../numpy/distutils/fcompiler/mips.py | 58 - .../numpy/distutils/fcompiler/nag.py | 45 - .../numpy/distutils/fcompiler/none.py | 31 - .../numpy/distutils/fcompiler/pathf95.py | 38 - .../numpy/distutils/fcompiler/pg.py | 60 - .../numpy/distutils/fcompiler/sun.py | 52 - .../numpy/distutils/fcompiler/vast.py | 56 - .../numpy/distutils/from_template.py | 256 - .../numpy/distutils/info.py | 6 - .../numpy/distutils/intelccompiler.py | 45 - .../numpy/distutils/lib2def.py | 116 - .../numpy/distutils/line_endings.py | 76 - .../numpy/distutils/log.py | 93 - .../numpy/distutils/mingw32ccompiler.py | 582 -- .../numpy/distutils/misc_util.py | 2271 ----- .../numpy/distutils/npy_pkg_config.py | 464 - .../numpy/distutils/numpy_distribution.py | 19 - .../numpy/distutils/pathccompiler.py | 23 - .../numpy/distutils/setup.py | 17 - .../numpy/distutils/system_info.py | 2242 ----- .../distutils/tests/f2py_ext/__init__.py | 1 - .../numpy/distutils/tests/f2py_ext/setup.py | 13 - .../numpy/distutils/tests/f2py_ext/src/fib1.f | 18 - .../distutils/tests/f2py_ext/src/fib2.pyf | 9 - .../tests/f2py_ext/tests/test_fib2.py | 13 - .../distutils/tests/f2py_f90_ext/__init__.py | 1 - .../tests/f2py_f90_ext/include/body.f90 | 5 - .../distutils/tests/f2py_f90_ext/setup.py | 18 - .../tests/f2py_f90_ext/src/foo_free.f90 | 6 - .../tests/f2py_f90_ext/tests/test_foo.py | 12 - .../numpy/distutils/tests/gen_ext/__init__.py | 1 - .../numpy/distutils/tests/gen_ext/setup.py | 48 - .../tests/gen_ext/tests/test_fib3.py | 12 - .../distutils/tests/pyrex_ext/__init__.py | 1 - .../distutils/tests/pyrex_ext/primes.pyx | 22 - .../numpy/distutils/tests/pyrex_ext/setup.py | 14 - .../tests/pyrex_ext/tests/test_primes.py | 14 - .../numpy/distutils/tests/setup.py | 16 - .../distutils/tests/swig_ext/__init__.py | 1 - .../numpy/distutils/tests/swig_ext/setup.py | 20 - .../distutils/tests/swig_ext/src/example.i | 14 - .../numpy/distutils/tests/swig_ext/src/zoo.cc | 23 - .../numpy/distutils/tests/swig_ext/src/zoo.h | 9 - .../numpy/distutils/tests/swig_ext/src/zoo.i | 10 - .../tests/swig_ext/tests/test_example.py | 18 - .../tests/swig_ext/tests/test_example2.py | 16 - .../distutils/tests/test_exec_command.py | 92 - .../distutils/tests/test_fcompiler_gnu.py | 53 - .../distutils/tests/test_fcompiler_intel.py | 36 - .../numpy/distutils/tests/test_misc_util.py | 75 - .../distutils/tests/test_npy_pkg_config.py | 98 - .../numpy/distutils/unixccompiler.py | 113 - .../numpy/doc/__init__.py | 28 - .../numpy/doc/basics.py | 146 - .../numpy/doc/broadcasting.py | 178 - .../numpy/doc/byteswapping.py | 147 - .../numpy/doc/constants.py | 393 - .../numpy/doc/creation.py | 144 - .../numpy/doc/glossary.py | 418 - .../numpy/doc/howtofind.py | 10 - .../numpy/doc/indexing.py | 437 - .../numpy/doc/internals.py | 163 - .../numpy/doc/io.py | 10 - .../numpy/doc/jargon.py | 10 - .../numpy/doc/methods_vs_functions.py | 10 - .../numpy/doc/misc.py | 226 - .../numpy/doc/performance.py | 10 - .../numpy/doc/structured_arrays.py | 223 - .../numpy/doc/subclassing.py | 560 -- .../numpy/doc/ufuncs.py | 138 - .../numpy/dual.py | 71 - .../numpy/f2py/__init__.py | 49 - .../numpy/f2py/__version__.py | 10 - .../numpy/f2py/auxfuncs.py | 711 -- .../numpy/f2py/capi_maps.py | 773 -- .../numpy/f2py/cb_rules.py | 539 -- .../numpy/f2py/cfuncs.py | 1224 --- .../numpy/f2py/common_rules.py | 132 - .../numpy/f2py/crackfortran.py | 2868 ------- .../numpy/f2py/diagnose.py | 149 - .../numpy/f2py/f2py2e.py | 598 -- .../numpy/f2py/f2py_testing.py | 46 - 
.../numpy/f2py/f90mod_rules.py | 246 - .../numpy/f2py/func2subr.py | 291 - .../numpy/f2py/info.py | 6 - .../numpy/f2py/rules.py | 1448 ---- .../numpy/f2py/setup.py | 129 - .../numpy/f2py/src/fortranobject.h | 162 - .../f2py/tests/src/assumed_shape/.f2py_f2cmap | 1 - .../f2py/tests/src/assumed_shape/foo_free.f90 | 34 - .../f2py/tests/src/assumed_shape/foo_mod.f90 | 41 - .../f2py/tests/src/assumed_shape/foo_use.f90 | 19 - .../tests/src/assumed_shape/precision.f90 | 4 - .../numpy/f2py/tests/src/kind/foo.f90 | 20 - .../numpy/f2py/tests/src/mixed/foo.f | 5 - .../numpy/f2py/tests/src/mixed/foo_fixed.f90 | 8 - .../numpy/f2py/tests/src/mixed/foo_free.f90 | 8 - .../numpy/f2py/tests/src/size/foo.f90 | 44 - .../numpy/f2py/tests/test_array_from_pyobj.py | 559 -- .../numpy/f2py/tests/test_assumed_shape.py | 37 - .../numpy/f2py/tests/test_callback.py | 132 - .../numpy/f2py/tests/test_kind.py | 36 - .../numpy/f2py/tests/test_mixed.py | 41 - .../numpy/f2py/tests/test_return_character.py | 142 - .../numpy/f2py/tests/test_return_complex.py | 169 - .../numpy/f2py/tests/test_return_integer.py | 178 - .../numpy/f2py/tests/test_return_logical.py | 187 - .../numpy/f2py/tests/test_return_real.py | 203 - .../numpy/f2py/tests/test_size.py | 47 - .../numpy/f2py/tests/util.py | 353 - .../numpy/f2py/use_rules.py | 109 - .../numpy/fft/__init__.py | 11 - .../numpy/fft/fftpack.py | 1169 --- .../numpy/fft/fftpack_lite.py | 7 - .../numpy/fft/helper.py | 224 - .../numpy/fft/info.py | 179 - .../numpy/fft/setup.py | 20 - .../numpy/fft/tests/test_fftpack.py | 75 - .../numpy/fft/tests/test_helper.py | 78 - .../numpy/lib/__init__.py | 46 - .../numpy/lib/_compiled_base.py | 7 - .../numpy/lib/_datasource.py | 666 -- .../numpy/lib/_iotools.py | 891 -- .../numpy/lib/_version.py | 156 - .../numpy/lib/arraypad.py | 1475 ---- .../numpy/lib/arraysetops.py | 463 - .../numpy/lib/arrayterator.py | 226 - .../numpy/lib/financial.py | 737 -- .../numpy/lib/format.py | 730 -- .../numpy/lib/function_base.py | 3872 --------- .../numpy/lib/index_tricks.py | 869 -- .../numpy/lib/info.py | 151 - .../numpy/lib/nanfunctions.py | 1158 --- .../numpy/lib/npyio.py | 1912 ----- .../numpy/lib/polynomial.py | 1271 --- .../numpy/lib/recfunctions.py | 1003 --- .../numpy/lib/scimath.py | 566 -- .../numpy/lib/setup.py | 23 - .../numpy/lib/shape_base.py | 865 -- .../numpy/lib/stride_tricks.py | 123 - .../numpy/lib/tests/test__datasource.py | 351 - .../numpy/lib/tests/test__iotools.py | 326 - .../numpy/lib/tests/test__version.py | 57 - .../numpy/lib/tests/test_arraypad.py | 560 -- .../numpy/lib/tests/test_arraysetops.py | 301 - .../numpy/lib/tests/test_arrayterator.py | 52 - .../numpy/lib/tests/test_financial.py | 160 - .../numpy/lib/tests/test_format.py | 706 -- .../numpy/lib/tests/test_function_base.py | 2131 ----- .../numpy/lib/tests/test_index_tricks.py | 289 - .../numpy/lib/tests/test_io.py | 1736 ---- .../numpy/lib/tests/test_nanfunctions.py | 758 -- .../numpy/lib/tests/test_polynomial.py | 177 - .../numpy/lib/tests/test_recfunctions.py | 705 -- .../numpy/lib/tests/test_regression.py | 265 - .../numpy/lib/tests/test_shape_base.py | 368 - .../numpy/lib/tests/test_stride_tricks.py | 238 - .../numpy/lib/tests/test_twodim_base.py | 504 -- .../numpy/lib/tests/test_type_check.py | 328 - .../numpy/lib/tests/test_ufunclike.py | 65 - .../numpy/lib/tests/test_utils.py | 65 - .../numpy/lib/twodim_base.py | 1003 --- .../numpy/lib/type_check.py | 605 -- .../numpy/lib/ufunclike.py | 177 - .../numpy/lib/user_array.py | 277 - .../numpy/lib/utils.py | 1176 --- 
.../numpy/linalg/__init__.py | 55 - .../numpy/linalg/_umath_linalg.py | 7 - .../numpy/linalg/info.py | 37 - .../numpy/linalg/lapack_lite.py | 7 - .../numpy/linalg/linalg.py | 2136 ----- .../numpy/linalg/setup.py | 56 - .../numpy/linalg/tests/test_build.py | 53 - .../numpy/linalg/tests/test_deprecations.py | 24 - .../numpy/linalg/tests/test_linalg.py | 1153 --- .../numpy/linalg/tests/test_regression.py | 90 - .../numpy/ma/__init__.py | 58 - .../numpy/ma/bench.py | 166 - .../numpy/ma/core.py | 7321 ---------------- .../numpy/ma/extras.py | 1923 ----- .../numpy/ma/mrecords.py | 734 -- .../numpy/ma/setup.py | 20 - .../numpy/ma/tests/test_core.py | 3684 -------- .../numpy/ma/tests/test_extras.py | 947 --- .../numpy/ma/tests/test_mrecords.py | 521 -- .../numpy/ma/tests/test_old_ma.py | 869 -- .../numpy/ma/tests/test_regression.py | 75 - .../numpy/ma/tests/test_subclassing.py | 236 - .../numpy/ma/testutils.py | 240 - .../numpy/ma/timer_comparison.py | 459 - .../numpy/ma/version.py | 14 - .../numpy/matlib.py | 358 - .../numpy/matrixlib/__init__.py | 12 - .../numpy/matrixlib/defmatrix.py | 1094 --- .../numpy/matrixlib/setup.py | 15 - .../numpy/matrixlib/tests/test_defmatrix.py | 400 - .../numpy/matrixlib/tests/test_multiarray.py | 18 - .../numpy/matrixlib/tests/test_numeric.py | 10 - .../numpy/matrixlib/tests/test_regression.py | 34 - .../numpy/polynomial/__init__.py | 27 - .../numpy/polynomial/_polybase.py | 962 --- .../numpy/polynomial/chebyshev.py | 2056 ----- .../numpy/polynomial/hermite.py | 1789 ---- .../numpy/polynomial/hermite_e.py | 1786 ---- .../numpy/polynomial/laguerre.py | 1781 ---- .../numpy/polynomial/legendre.py | 1809 ---- .../numpy/polynomial/polynomial.py | 1532 ---- .../numpy/polynomial/polytemplate.py | 927 -- .../numpy/polynomial/polyutils.py | 403 - .../numpy/polynomial/setup.py | 11 - .../numpy/polynomial/tests/test_chebyshev.py | 554 -- .../numpy/polynomial/tests/test_classes.py | 570 -- .../numpy/polynomial/tests/test_hermite.py | 516 -- .../numpy/polynomial/tests/test_hermite_e.py | 517 -- .../numpy/polynomial/tests/test_laguerre.py | 513 -- .../numpy/polynomial/tests/test_legendre.py | 517 -- .../numpy/polynomial/tests/test_polynomial.py | 477 -- .../numpy/polynomial/tests/test_polyutils.py | 109 - .../numpy/polynomial/tests/test_printing.py | 74 - .../numpy/random/__init__.py | 122 - .../numpy/random/info.py | 135 - .../numpy/random/mtrand.py | 7 - .../numpy/random/randomkit.h | 189 - .../numpy/random/setup.py | 74 - .../numpy/random/tests/test_random.py | 707 -- .../numpy/random/tests/test_regression.py | 86 - .../numpy/setup.py | 27 - .../numpy/testing/__init__.py | 16 - .../numpy/testing/decorators.py | 271 - .../numpy/testing/noseclasses.py | 353 - .../numpy/testing/nosetester.py | 504 -- .../numpy/testing/print_coercion_tables.py | 89 - .../numpy/testing/setup.py | 20 - .../numpy/testing/tests/test_decorators.py | 185 - .../numpy/testing/tests/test_doctesting.py | 56 - .../numpy/testing/tests/test_utils.py | 558 -- .../numpy/testing/utils.py | 1715 ---- .../numpy/tests/test_ctypeslib.py | 102 - .../numpy/tests/test_matlib.py | 55 - .../numpy/version.py | 10 - 386 files changed, 166455 deletions(-) delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/PKG-INFO delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/SOURCES.txt delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/dependency_links.txt delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/native_libs.txt delete mode 100644 
numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/not-zip-safe
delete mode 100755 numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/scripts/f2py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/top_level.txt
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__config__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/_import_tools.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/add_newdocs.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/_inspect.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/py3k.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_dummy.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_internal.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_methods.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/arrayprint.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/cversions.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/defchararray.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/fromnumeric.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/function_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/generate_numpy_api.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/getlimits.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__multiarray_api.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__ufunc_api.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_neighborhood_iterator_imp.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_numpyconfig.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayobject.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayscalars.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/halffloat.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/multiarray_api.txt
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarrayobject.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarraytypes.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/noprefix.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_1_7_deprecated_api.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_3kcompat.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_common.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_cpu.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_endian.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_interrupt.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_math.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_no_deprecated_api.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_os.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/numpyconfig.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/old_defines.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/oldnumeric.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufunc_api.txt
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufuncobject.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/utils.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/mlib.ini
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/npymath.ini
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/machar.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/memmap.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray_tests.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numeric.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numerictypes.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/operand_flag_tests.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/records.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/scalarmath.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup_common.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/shape_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/struct_ufunc_test.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/test_rational.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/data/astype_copy.pkl
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/data/recarray_from_file.fits
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_abc.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_api.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_arrayprint.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_blasdot.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_datetime.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_defchararray.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_deprecations.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_dtype.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_einsum.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_errstate.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_function_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_getlimits.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_half.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexerrors.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexing.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_item_selection.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_machar.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_memmap.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray_assignment.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_nditer.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numeric.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numerictypes.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_print.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_records.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_regression.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarinherit.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarmath.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarprint.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_shape_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_ufunc.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath_complex.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_unicode.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath_tests.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ctypeslib.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__config__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__version__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/ccompiler.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/autodist.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/bdist_rpm.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_clib.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_ext.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_py.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_scripts.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_src.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config_compiler.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/develop.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/egg_info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_clib.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_data.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_headers.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/sdist.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/compat.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/conv_template.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/core.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/cpuinfo.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/environment.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/exec_command.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/extension.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/absoft.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/compaq.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/g95.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/gnu.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/hpux.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/ibm.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/intel.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/lahey.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/mips.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/nag.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/none.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pathf95.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pg.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/sun.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/vast.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/from_template.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/intelccompiler.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/lib2def.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/line_endings.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/log.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/mingw32ccompiler.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/misc_util.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/npy_pkg_config.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/numpy_distribution.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/pathccompiler.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/system_info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib1.f
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib2.pyf
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/tests/test_fib2.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/include/body.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/tests/test_fib3.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/primes.pyx
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/tests/test_primes.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/example.i
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.cc
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.i
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example2.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_exec_command.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_gnu.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_intel.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_misc_util.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_npy_pkg_config.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/unixccompiler.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/basics.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/broadcasting.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/byteswapping.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/constants.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/creation.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/glossary.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/howtofind.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/indexing.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/internals.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/io.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/jargon.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/methods_vs_functions.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/misc.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/performance.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/structured_arrays.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/subclassing.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/ufuncs.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/dual.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__version__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/auxfuncs.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/capi_maps.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cb_rules.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cfuncs.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/common_rules.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/crackfortran.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/diagnose.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py2e.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py_testing.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f90mod_rules.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/func2subr.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/rules.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/src/fortranobject.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_free.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_mod.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_use.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/precision.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/kind/foo.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo.f
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_fixed.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_free.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/size/foo.f90
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_array_from_pyobj.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_assumed_shape.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_callback.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_kind.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_mixed.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_character.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_complex.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_integer.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_logical.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_real.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_size.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/util.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/use_rules.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack_lite.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/helper.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_fftpack.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_helper.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_compiled_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_datasource.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_iotools.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_version.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraypad.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraysetops.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arrayterator.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/financial.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/format.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/function_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/index_tricks.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/nanfunctions.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/npyio.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/polynomial.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/recfunctions.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/scimath.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/shape_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/stride_tricks.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__datasource.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__iotools.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__version.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraypad.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraysetops.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arrayterator.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_financial.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_format.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_function_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_index_tricks.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_io.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_nanfunctions.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_polynomial.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_recfunctions.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_regression.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_shape_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_stride_tricks.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_twodim_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_type_check.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_ufunclike.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_utils.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/twodim_base.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/type_check.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/ufunclike.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/user_array.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/utils.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/_umath_linalg.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/lapack_lite.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/linalg.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_build.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_deprecations.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_linalg.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_regression.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/bench.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/core.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/extras.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/mrecords.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_core.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_extras.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_mrecords.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_old_ma.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_regression.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_subclassing.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/testutils.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/timer_comparison.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/version.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matlib.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/defmatrix.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_defmatrix.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_multiarray.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_numeric.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_regression.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/_polybase.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/chebyshev.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite_e.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/laguerre.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/legendre.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polynomial.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polytemplate.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polyutils.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_chebyshev.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_classes.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite_e.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_laguerre.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_legendre.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polynomial.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polyutils.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_printing.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/info.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/mtrand.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/randomkit.h
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_random.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_regression.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/__init__.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/decorators.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/noseclasses.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/nosetester.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/print_coercion_tables.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/setup.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_decorators.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_doctesting.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_utils.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/utils.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_ctypeslib.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_matlib.py
delete mode 100644 numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/version.py

diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/PKG-INFO b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/PKG-INFO
deleted file mode 100644
index 477879ef61f2d..0000000000000
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/PKG-INFO
+++ /dev/null
@@ -1,39 +0,0 @@
-Metadata-Version: 1.1
-Name: numpy
-Version: 1.9.0
-Summary: NumPy: array processing for numbers, strings, records, and objects.
-Home-page: http://www.numpy.org
-Author: NumPy Developers
-Author-email: numpy-discussion@scipy.org
-License: BSD
-Download-URL: http://sourceforge.net/projects/numpy/files/NumPy/
-Description: NumPy is a general-purpose array-processing package designed to
- efficiently manipulate large multi-dimensional arrays of arbitrary
- records without sacrificing too much speed for small multi-dimensional
- arrays. NumPy is built on the Numeric code base and adds features
- introduced by numarray as well as an extended C-API and the ability to
- create arrays of arbitrary type which also makes NumPy suitable for
- interfacing with general-purpose data-base applications.
-
- There are also basic facilities for discrete fourier transform,
- basic linear algebra and random number generation.
-
-
-Platform: Windows
-Platform: Linux
-Platform: Solaris
-Platform: Mac OS-X
-Platform: Unix
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Science/Research
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved
-Classifier: Programming Language :: C
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Topic :: Software Development
-Classifier: Topic :: Scientific/Engineering
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: POSIX
-Classifier: Operating System :: Unix
-Classifier: Operating System :: MacOS
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/SOURCES.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/SOURCES.txt
deleted file mode 100644
index 44e348eeedb4a..0000000000000
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/SOURCES.txt
+++ /dev/null
@@ -1,1033 +0,0 @@
-BENTO_BUILD.txt
-COMPATIBILITY
-DEV_README.txt
-INSTALL.txt
-LICENSE.txt
-MANIFEST.in
-README.txt
-THANKS.txt
-setup.cfg
-setup.py
-setupegg.py
-site.cfg.example
-doc/Makefile
-doc/postprocess.py
-doc/f2py/BUGS.txt
-doc/f2py/FAQ.txt
-doc/f2py/HISTORY.txt
-doc/f2py/Makefile
-doc/f2py/OLDNEWS.txt
-doc/f2py/README.txt
-doc/f2py/Release-1.x.txt
-doc/f2py/Release-2.x.txt
-doc/f2py/Release-3.x.txt
-doc/f2py/Release-4.x.txt
-doc/f2py/TESTING.txt
-doc/f2py/THANKS.txt
-doc/f2py/TODO.txt
-doc/f2py/apps.tex
-doc/f2py/bugs.tex
-doc/f2py/collectinput.py
-doc/f2py/commands.tex
-doc/f2py/default.css
-doc/f2py/docutils.conf
-doc/f2py/f2py.1
-doc/f2py/f2py2e.tex
-doc/f2py/fortranobject.tex
-doc/f2py/hello.f
-doc/f2py/index.html
-doc/f2py/intro.tex
-doc/f2py/multiarrays.txt
-doc/f2py/notes.tex
-doc/f2py/oldnews.html
-doc/f2py/options.tex
-doc/f2py/pyforttest.pyf
-doc/f2py/pytest.py
-doc/f2py/python9.tex
-doc/f2py/signaturefile.tex
-doc/f2py/simple.f
-doc/f2py/simple_session.dat
-doc/f2py/using_F_compiler.txt
-doc/f2py/win32_notes.txt
-doc/f2py/ex1/arr.f
-doc/f2py/ex1/bar.f
-doc/f2py/ex1/foo.f
-doc/f2py/ex1/foobar-smart.f90
-doc/f2py/ex1/foobar.f90
-doc/f2py/ex1/foobarmodule.tex
-doc/f2py/ex1/runme
-doc/f2py/f2python9-final/README.txt
-doc/f2py/f2python9-final/aerostructure.jpg
-doc/f2py/f2python9-final/flow.jpg
-doc/f2py/f2python9-final/mk_html.sh
-doc/f2py/f2python9-final/mk_pdf.sh
-doc/f2py/f2python9-final/mk_ps.sh
-doc/f2py/f2python9-final/structure.jpg
-doc/f2py/f2python9-final/src/examples/exp1.f
-doc/f2py/f2python9-final/src/examples/exp1mess.txt
-doc/f2py/f2python9-final/src/examples/exp1session.txt
-doc/f2py/f2python9-final/src/examples/foo.pyf
-doc/f2py/f2python9-final/src/examples/foom.pyf
-doc/f2py/multiarray/array_from_pyobj.c
-doc/f2py/multiarray/bar.c
-doc/f2py/multiarray/foo.f
-doc/f2py/multiarray/fortran_array_from_pyobj.txt
-doc/f2py/multiarray/fun.pyf
-doc/f2py/multiarray/run.pyf
-doc/f2py/multiarray/transpose.txt
-doc/release/1.3.0-notes.rst
-doc/release/1.4.0-notes.rst
-doc/release/1.5.0-notes.rst
-doc/release/1.6.0-notes.rst
-doc/release/1.6.1-notes.rst
-doc/release/1.6.2-notes.rst
-doc/release/1.7.0-notes.rst
-doc/release/1.7.1-notes.rst
-doc/release/1.7.2-notes.rst
-doc/release/1.8.0-notes.rst
-doc/release/1.8.1-notes.rst
-doc/release/1.8.2-notes.rst
-doc/release/1.9.0-notes.rst
-doc/release/time_based_proposal.rst
-doc/scipy-sphinx-theme/.git
-doc/scipy-sphinx-theme/.gitignore
-doc/scipy-sphinx-theme/Makefile
-doc/scipy-sphinx-theme/README.rst
-doc/scipy-sphinx-theme/conf.py
-doc/scipy-sphinx-theme/index.rst
-doc/scipy-sphinx-theme/test_autodoc.rst
-doc/scipy-sphinx-theme/test_autodoc_2.rst
-doc/scipy-sphinx-theme/test_autodoc_3.rst
-doc/scipy-sphinx-theme/test_autodoc_4.rst
-doc/scipy-sphinx-theme/test_optimize.rst
-doc/scipy-sphinx-theme/_static/scipyshiny_small.png
-doc/scipy-sphinx-theme/_theme/scipy/layout.html
-doc/scipy-sphinx-theme/_theme/scipy/searchbox.html
-doc/scipy-sphinx-theme/_theme/scipy/sourcelink.html
-doc/scipy-sphinx-theme/_theme/scipy/theme.conf
-doc/scipy-sphinx-theme/_theme/scipy/static/scipy.css_t
-doc/scipy-sphinx-theme/_theme/scipy/static/css/extend.css
-doc/scipy-sphinx-theme/_theme/scipy/static/css/pygments.css
-doc/scipy-sphinx-theme/_theme/scipy/static/css/scipy-central.css
-doc/scipy-sphinx-theme/_theme/scipy/static/css/spc-bootstrap.css
-doc/scipy-sphinx-theme/_theme/scipy/static/css/spc-extend.css
-doc/scipy-sphinx-theme/_theme/scipy/static/img/all-icons.svg
-doc/scipy-sphinx-theme/_theme/scipy/static/img/contents.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/create-new-account-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/external-link-icon-shrunk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/external-link-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/external-link-icon.svg
-doc/scipy-sphinx-theme/_theme/scipy/static/img/external-link-list-icon-tiniest.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/external-link-list-icon-tiny.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/external-link-list-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/glyphicons-halflings-white.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/glyphicons-halflings.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/important-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/information-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/internet-web-browser.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/multiple-file-icon-shrunk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/multiple-file-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/multiple-file-icon.svg
-doc/scipy-sphinx-theme/_theme/scipy/static/img/multiple-file-list-icon-tiny.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/multiple-file-list-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/navigation.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/person-list-icon-tiny.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/person-list-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/scipy-logo.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/scipy_org_logo.gif
-doc/scipy-sphinx-theme/_theme/scipy/static/img/scipycentral_logo.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/scipyshiny_small.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/send-email-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/single-file-icon-shrunk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/single-file-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/single-file-icon.svg
-doc/scipy-sphinx-theme/_theme/scipy/static/img/single-file-list-icon-tiniest.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/single-file-list-icon-tiny.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/single-file-list-icon.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/transparent-pixel.gif
-doc/scipy-sphinx-theme/_theme/scipy/static/img/ui-anim_basic_16x16.gif
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ad.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ae.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-af.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ag.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ai.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-al.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-am.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ao.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-aq.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ar.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-as.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-at.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-au.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-aw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-az.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ba.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bb.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bd.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-be.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bf.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bh.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bi.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bj.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bl.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bo.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-br.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bs.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bt.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-by.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-bz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ca.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cc.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cd.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cf.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ch.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ci.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ck.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cl.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-co.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cu.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cv.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cx.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cy.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-cz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-de.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-dj.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-dk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-dm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-do.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-dz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ec.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ee.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-eg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-er.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-es.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-et.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-fi.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-fj.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-fk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-fm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-fo.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-fr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ga.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gb.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gd.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ge.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gf.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gh.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gi.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gl.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gq.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gs.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gt.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gu.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-gy.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-hk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-hm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-hn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-hr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ht.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-hu.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-id.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ie.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-il.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-im.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-in.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-io.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-iq.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ir.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-is.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-it.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-je.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-jm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-jo.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-jp.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ke.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kh.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ki.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-km.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kp.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ky.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-kz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-la.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lb.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lc.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-li.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ls.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lt.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lu.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-lv.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ly.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ma.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mc.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-md.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-me.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mf.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mh.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ml.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mo.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mp.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mq.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ms.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mt.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mu.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mv.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mx.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-my.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-mz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-na.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nc.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ne.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nf.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ng.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ni.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nl.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-no.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-np.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nu.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-nz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-om.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pa.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pe.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pf.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ph.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pl.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ps.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pt.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-pw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-py.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-qa.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-re.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ro.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-rs.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ru.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-rw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sa.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sb.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sc.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sd.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-se.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sh.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-si.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sj.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sl.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-so.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-st.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sv.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sy.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-sz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tc.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-td.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tf.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-th.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tj.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tk.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tl.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-to.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tr.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tt.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tv.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-tz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ua.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ug.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-um.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-us.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-uy.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-uz.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-va.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vc.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ve.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vg.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vi.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vn.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-vu.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-wf.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ws.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-ye.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-za.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-zm.png
-doc/scipy-sphinx-theme/_theme/scipy/static/img/flags/flag-zw.png
-doc/scipy-sphinx-theme/_theme/scipy/static/js/copybutton.js
-doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-bootstrap.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-content.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-extend.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-footer.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-header.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-rightsidebar.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-utils.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/accordion.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/alerts.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/bootstrap.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/breadcrumbs.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/button-groups.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/buttons.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/carousel.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/close.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/code.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/component-animations.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/dropdowns.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/forms.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/grid.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/hero-unit.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/labels-badges.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/layouts.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/media.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/mixins.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/modals.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/navbar.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/navs.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/pager.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/pagination.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/popovers.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/progress-bars.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/reset.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-1200px-min.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-767px-max.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-768px-979px.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-navbar.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-utilities.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/scaffolding.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/sprites.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/tables.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/thumbnails.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/tooltip.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/type.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/utilities.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/variables.less
-doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/wells.less
-doc/source/about.rst
-doc/source/bugs.rst
-doc/source/conf.py
-doc/source/contents.rst
-doc/source/glossary.rst
-doc/source/license.rst
-doc/source/release.rst
-doc/source/_templates/indexcontent.html
-doc/source/_templates/indexsidebar.html
-doc/source/_templates/layout.html
-doc/source/_templates/autosummary/class.rst
-doc/source/dev/gitwash_links.txt
-doc/source/dev/index.rst
-doc/source/dev/gitwash/branch_list.png
-doc/source/dev/gitwash/branch_list_compare.png
-doc/source/dev/gitwash/configure_git.rst
-doc/source/dev/gitwash/development_setup.rst
-doc/source/dev/gitwash/development_workflow.rst
-doc/source/dev/gitwash/dot2_dot3.rst
-doc/source/dev/gitwash/following_latest.rst
-doc/source/dev/gitwash/forking_button.png
-doc/source/dev/gitwash/git_development.rst
-doc/source/dev/gitwash/git_intro.rst
-doc/source/dev/gitwash/git_links.inc
-doc/source/dev/gitwash/git_resources.rst
-doc/source/dev/gitwash/index.rst
-doc/source/dev/gitwash/pull_button.png
-doc/source/f2py/advanced.rst
-doc/source/f2py/allocarr.f90
-doc/source/f2py/allocarr_session.dat
-doc/source/f2py/array.f
-doc/source/f2py/array_session.dat
-doc/source/f2py/calculate.f
-doc/source/f2py/calculate_session.dat
-doc/source/f2py/callback.f
-doc/source/f2py/callback2.pyf
-doc/source/f2py/callback_session.dat
-doc/source/f2py/common.f
-doc/source/f2py/common_session.dat
-doc/source/f2py/compile_session.dat
-doc/source/f2py/distutils.rst
-doc/source/f2py/extcallback.f
-doc/source/f2py/extcallback_session.dat
-doc/source/f2py/fib1.f
-doc/source/f2py/fib1.pyf
-doc/source/f2py/fib2.pyf
-doc/source/f2py/fib3.f
-doc/source/f2py/ftype.f
-doc/source/f2py/ftype_session.dat
-doc/source/f2py/getting-started.rst
-doc/source/f2py/index.rst
-doc/source/f2py/moddata.f90
-doc/source/f2py/moddata_session.dat
-doc/source/f2py/python-usage.rst
-doc/source/f2py/run_main_session.dat
-doc/source/f2py/scalar.f
-doc/source/f2py/scalar_session.dat
-doc/source/f2py/setup_example.py
-doc/source/f2py/signature-file.rst
-doc/source/f2py/spam.pyf
-doc/source/f2py/spam_session.dat
-doc/source/f2py/string.f
-doc/source/f2py/string_session.dat
-doc/source/f2py/usage.rst
-doc/source/f2py/var.pyf
-doc/source/f2py/var_session.dat
-doc/source/neps/datetime-proposal.rst
-doc/source/neps/datetime-proposal3.rst
-doc/source/neps/deferred-ufunc-evaluation.rst
-doc/source/neps/generalized-ufuncs.rst
-doc/source/neps/groupby_additions.rst
-doc/source/neps/index.rst
-doc/source/neps/math_config_clean.rst
-doc/source/neps/missing-data.rst
-doc/source/neps/new-iterator-ufunc.rst
-doc/source/neps/newbugtracker.rst
-doc/source/neps/npy-format.rst
-doc/source/neps/structured_array_extensions.rst
-doc/source/neps/ufunc-overrides.rst
-doc/source/neps/warnfix.rst
-doc/source/reference/arrays.classes.rst
-doc/source/reference/arrays.datetime.rst
-doc/source/reference/arrays.dtypes.rst
-doc/source/reference/arrays.indexing.rst
-doc/source/reference/arrays.interface.rst
-doc/source/reference/arrays.ndarray.rst
-doc/source/reference/arrays.nditer.rst
-doc/source/reference/arrays.rst
-doc/source/reference/arrays.scalars.rst
-doc/source/reference/c-api.array.rst
-doc/source/reference/c-api.config.rst
-doc/source/reference/c-api.coremath.rst
-doc/source/reference/c-api.deprecations.rst
-doc/source/reference/c-api.dtype.rst
-doc/source/reference/c-api.generalized-ufuncs.rst
-doc/source/reference/c-api.iterator.rst
-doc/source/reference/c-api.rst
-doc/source/reference/c-api.types-and-structures.rst
-doc/source/reference/c-api.ufunc.rst
-doc/source/reference/distutils.rst
-doc/source/reference/index.rst
-doc/source/reference/internals.code-explanations.rst
-doc/source/reference/internals.rst
-doc/source/reference/maskedarray.baseclass.rst
-doc/source/reference/maskedarray.generic.rst
-doc/source/reference/maskedarray.rst
-doc/source/reference/routines.array-creation.rst
-doc/source/reference/routines.array-manipulation.rst
-doc/source/reference/routines.bitwise.rst
-doc/source/reference/routines.char.rst
-doc/source/reference/routines.ctypeslib.rst
-doc/source/reference/routines.datetime.rst
-doc/source/reference/routines.dtype.rst
-doc/source/reference/routines.dual.rst
-doc/source/reference/routines.emath.rst
-doc/source/reference/routines.err.rst
-doc/source/reference/routines.fft.rst
-doc/source/reference/routines.financial.rst
-doc/source/reference/routines.functional.rst
-doc/source/reference/routines.help.rst
-doc/source/reference/routines.indexing.rst
-doc/source/reference/routines.io.rst
-doc/source/reference/routines.linalg.rst
-doc/source/reference/routines.logic.rst
-doc/source/reference/routines.ma.rst
-doc/source/reference/routines.math.rst
-doc/source/reference/routines.matlib.rst
-doc/source/reference/routines.numarray.rst
-doc/source/reference/routines.oldnumeric.rst
-doc/source/reference/routines.other.rst
-doc/source/reference/routines.padding.rst
-doc/source/reference/routines.polynomials.chebyshev.rst
-doc/source/reference/routines.polynomials.classes.rst
-doc/source/reference/routines.polynomials.hermite.rst
-doc/source/reference/routines.polynomials.hermite_e.rst
-doc/source/reference/routines.polynomials.laguerre.rst
-doc/source/reference/routines.polynomials.legendre.rst
-doc/source/reference/routines.polynomials.package.rst
-doc/source/reference/routines.polynomials.poly1d.rst
-doc/source/reference/routines.polynomials.polynomial.rst
-doc/source/reference/routines.polynomials.rst
-doc/source/reference/routines.random.rst
-doc/source/reference/routines.rst
-doc/source/reference/routines.set.rst
-doc/source/reference/routines.sort.rst
-doc/source/reference/routines.statistics.rst
-doc/source/reference/routines.testing.rst
-doc/source/reference/routines.window.rst
-doc/source/reference/swig.interface-file.rst
-doc/source/reference/swig.rst
-doc/source/reference/swig.testing.rst
-doc/source/reference/ufuncs.rst
-doc/source/reference/figures/dtype-hierarchy.dia
-doc/source/reference/figures/dtype-hierarchy.pdf
-doc/source/reference/figures/dtype-hierarchy.png
-doc/source/reference/figures/threefundamental.fig
-doc/source/reference/figures/threefundamental.pdf
-doc/source/reference/figures/threefundamental.png
-doc/source/user/basics.broadcasting.rst
-doc/source/user/basics.byteswapping.rst
-doc/source/user/basics.creation.rst
-doc/source/user/basics.indexing.rst
-doc/source/user/basics.io.genfromtxt.rst
-doc/source/user/basics.io.rst
-doc/source/user/basics.rec.rst
-doc/source/user/basics.rst
-doc/source/user/basics.subclassing.rst
-doc/source/user/basics.types.rst
-doc/source/user/c-info.beyond-basics.rst
-doc/source/user/c-info.how-to-extend.rst
-doc/source/user/c-info.python-as-glue.rst
-doc/source/user/c-info.rst
-doc/source/user/c-info.ufunc-tutorial.rst
-doc/source/user/howtofind.rst
-doc/source/user/index.rst
-doc/source/user/install.rst
-doc/source/user/introduction.rst
-doc/source/user/misc.rst
-doc/source/user/performance.rst
-doc/source/user/whatisnumpy.rst
-doc/sphinxext/.git
-doc/sphinxext/.gitignore
-doc/sphinxext/.travis.yml
-doc/sphinxext/LICENSE.txt
-doc/sphinxext/MANIFEST.in
-doc/sphinxext/README.rst
-doc/sphinxext/setup.py
-doc/sphinxext/numpydoc/__init__.py
-doc/sphinxext/numpydoc/comment_eater.py
-doc/sphinxext/numpydoc/compiler_unparse.py
-doc/sphinxext/numpydoc/docscrape.py
-doc/sphinxext/numpydoc/docscrape_sphinx.py
-doc/sphinxext/numpydoc/linkcode.py
-doc/sphinxext/numpydoc/numpydoc.py
-doc/sphinxext/numpydoc/phantom_import.py
-doc/sphinxext/numpydoc/plot_directive.py
-doc/sphinxext/numpydoc/traitsdoc.py
-doc/sphinxext/numpydoc/tests/test_docscrape.py
-doc/sphinxext/numpydoc/tests/test_linkcode.py
-doc/sphinxext/numpydoc/tests/test_phantom_import.py
-doc/sphinxext/numpydoc/tests/test_plot_directive.py
-doc/sphinxext/numpydoc/tests/test_traitsdoc.py
-numpy/__init__.py
-numpy/_import_tools.py
-numpy/add_newdocs.py
-numpy/ctypeslib.py
-numpy/dual.py
-numpy/matlib.py
-numpy/setup.py
-numpy/version.py
-numpy.egg-info/PKG-INFO
-numpy.egg-info/SOURCES.txt
-numpy.egg-info/dependency_links.txt
-numpy.egg-info/top_level.txt
-numpy/compat/__init__.py
-numpy/compat/_inspect.py
-numpy/compat/py3k.py
-numpy/compat/setup.py
-numpy/core/__init__.py
-numpy/core/_internal.py
-numpy/core/_methods.py
-numpy/core/arrayprint.py
-numpy/core/cversions.py
-numpy/core/defchararray.py
-numpy/core/fromnumeric.py
-numpy/core/function_base.py
-numpy/core/getlimits.py
-numpy/core/info.py
-numpy/core/machar.py
-numpy/core/memmap.py
-numpy/core/mlib.ini.in
-numpy/core/npymath.ini.in
-numpy/core/numeric.py
-numpy/core/numerictypes.py
-numpy/core/records.py
-numpy/core/setup.py
-numpy/core/setup_common.py
-numpy/core/shape_base.py
-numpy/core/blasdot/_dotblas.c
-numpy/core/blasdot/cblas.h
-numpy/core/code_generators/__init__.py
-numpy/core/code_generators/cversions.txt
-numpy/core/code_generators/genapi.py -numpy/core/code_generators/generate_numpy_api.py -numpy/core/code_generators/generate_ufunc_api.py -numpy/core/code_generators/generate_umath.py -numpy/core/code_generators/numpy_api.py -numpy/core/code_generators/ufunc_docstrings.py -numpy/core/include/numpy/_neighborhood_iterator_imp.h -numpy/core/include/numpy/_numpyconfig.h.in -numpy/core/include/numpy/arrayobject.h -numpy/core/include/numpy/arrayscalars.h -numpy/core/include/numpy/halffloat.h -numpy/core/include/numpy/ndarrayobject.h -numpy/core/include/numpy/ndarraytypes.h -numpy/core/include/numpy/noprefix.h -numpy/core/include/numpy/npy_1_7_deprecated_api.h -numpy/core/include/numpy/npy_3kcompat.h -numpy/core/include/numpy/npy_common.h -numpy/core/include/numpy/npy_cpu.h -numpy/core/include/numpy/npy_endian.h -numpy/core/include/numpy/npy_interrupt.h -numpy/core/include/numpy/npy_math.h -numpy/core/include/numpy/npy_no_deprecated_api.h -numpy/core/include/numpy/npy_os.h -numpy/core/include/numpy/numpyconfig.h -numpy/core/include/numpy/old_defines.h -numpy/core/include/numpy/oldnumeric.h -numpy/core/include/numpy/ufuncobject.h -numpy/core/include/numpy/utils.h -numpy/core/include/numpy/fenv/fenv.c -numpy/core/include/numpy/fenv/fenv.h -numpy/core/src/dummymodule.c -numpy/core/src/multiarray/_datetime.h -numpy/core/src/multiarray/alloc.c -numpy/core/src/multiarray/alloc.h -numpy/core/src/multiarray/array_assign.c -numpy/core/src/multiarray/array_assign.h -numpy/core/src/multiarray/array_assign_array.c -numpy/core/src/multiarray/array_assign_scalar.c -numpy/core/src/multiarray/arrayobject.c -numpy/core/src/multiarray/arrayobject.h -numpy/core/src/multiarray/arraytypes.h -numpy/core/src/multiarray/buffer.c -numpy/core/src/multiarray/buffer.h -numpy/core/src/multiarray/calculation.c -numpy/core/src/multiarray/calculation.h -numpy/core/src/multiarray/common.c -numpy/core/src/multiarray/common.h -numpy/core/src/multiarray/conversion_utils.c -numpy/core/src/multiarray/conversion_utils.h -numpy/core/src/multiarray/convert.c -numpy/core/src/multiarray/convert.h -numpy/core/src/multiarray/convert_datatype.c -numpy/core/src/multiarray/convert_datatype.h -numpy/core/src/multiarray/ctors.c -numpy/core/src/multiarray/ctors.h -numpy/core/src/multiarray/datetime.c -numpy/core/src/multiarray/datetime_busday.c -numpy/core/src/multiarray/datetime_busday.h -numpy/core/src/multiarray/datetime_busdaycal.c -numpy/core/src/multiarray/datetime_busdaycal.h -numpy/core/src/multiarray/datetime_strings.c -numpy/core/src/multiarray/datetime_strings.h -numpy/core/src/multiarray/descriptor.c -numpy/core/src/multiarray/descriptor.h -numpy/core/src/multiarray/dtype_transfer.c -numpy/core/src/multiarray/flagsobject.c -numpy/core/src/multiarray/getset.c -numpy/core/src/multiarray/getset.h -numpy/core/src/multiarray/hashdescr.c -numpy/core/src/multiarray/hashdescr.h -numpy/core/src/multiarray/item_selection.c -numpy/core/src/multiarray/item_selection.h -numpy/core/src/multiarray/iterators.c -numpy/core/src/multiarray/iterators.h -numpy/core/src/multiarray/mapping.c -numpy/core/src/multiarray/mapping.h -numpy/core/src/multiarray/methods.c -numpy/core/src/multiarray/methods.h -numpy/core/src/multiarray/multiarraymodule.c -numpy/core/src/multiarray/multiarraymodule.h -numpy/core/src/multiarray/nditer_api.c -numpy/core/src/multiarray/nditer_constr.c -numpy/core/src/multiarray/nditer_impl.h -numpy/core/src/multiarray/nditer_pywrap.c -numpy/core/src/multiarray/nditer_pywrap.h -numpy/core/src/multiarray/number.c 
-numpy/core/src/multiarray/number.h -numpy/core/src/multiarray/numpymemoryview.c -numpy/core/src/multiarray/numpymemoryview.h -numpy/core/src/multiarray/numpyos.c -numpy/core/src/multiarray/numpyos.h -numpy/core/src/multiarray/refcount.c -numpy/core/src/multiarray/refcount.h -numpy/core/src/multiarray/scalarapi.c -numpy/core/src/multiarray/scalartypes.h -numpy/core/src/multiarray/sequence.c -numpy/core/src/multiarray/sequence.h -numpy/core/src/multiarray/shape.c -numpy/core/src/multiarray/shape.h -numpy/core/src/multiarray/ucsnarrow.c -numpy/core/src/multiarray/ucsnarrow.h -numpy/core/src/multiarray/usertypes.c -numpy/core/src/multiarray/usertypes.h -numpy/core/src/npymath/_signbit.c -numpy/core/src/npymath/halffloat.c -numpy/core/src/npymath/ieee754.c.src -numpy/core/src/npymath/npy_math.c.src -numpy/core/src/npymath/npy_math_common.h -numpy/core/src/npymath/npy_math_complex.c.src -numpy/core/src/npymath/npy_math_private.h -numpy/core/src/npysort/binsearch.c.src -numpy/core/src/npysort/heapsort.c.src -numpy/core/src/npysort/mergesort.c.src -numpy/core/src/npysort/npysort_common.h -numpy/core/src/npysort/quicksort.c.src -numpy/core/src/npysort/selection.c.src -numpy/core/src/private/lowlevel_strided_loops.h -numpy/core/src/private/npy_binsearch.h.src -numpy/core/src/private/npy_config.h -numpy/core/src/private/npy_fpmath.h -numpy/core/src/private/npy_partition.h.src -numpy/core/src/private/npy_pycompat.h -numpy/core/src/private/npy_sort.h -numpy/core/src/private/ufunc_override.h -numpy/core/src/umath/reduction.c -numpy/core/src/umath/reduction.h -numpy/core/src/umath/simd.inc.src -numpy/core/src/umath/ufunc_object.c -numpy/core/src/umath/ufunc_object.h -numpy/core/src/umath/ufunc_type_resolution.c -numpy/core/src/umath/ufunc_type_resolution.h -numpy/core/src/umath/umathmodule.c -numpy/distutils/__init__.py -numpy/distutils/__version__.py -numpy/distutils/ccompiler.py -numpy/distutils/compat.py -numpy/distutils/conv_template.py -numpy/distutils/core.py -numpy/distutils/cpuinfo.py -numpy/distutils/environment.py -numpy/distutils/exec_command.py -numpy/distutils/extension.py -numpy/distutils/from_template.py -numpy/distutils/info.py -numpy/distutils/intelccompiler.py -numpy/distutils/lib2def.py -numpy/distutils/line_endings.py -numpy/distutils/log.py -numpy/distutils/mingw32ccompiler.py -numpy/distutils/misc_util.py -numpy/distutils/npy_pkg_config.py -numpy/distutils/numpy_distribution.py -numpy/distutils/pathccompiler.py -numpy/distutils/setup.py -numpy/distutils/system_info.py -numpy/distutils/unixccompiler.py -numpy/distutils/command/__init__.py -numpy/distutils/command/autodist.py -numpy/distutils/command/bdist_rpm.py -numpy/distutils/command/build.py -numpy/distutils/command/build_clib.py -numpy/distutils/command/build_ext.py -numpy/distutils/command/build_py.py -numpy/distutils/command/build_scripts.py -numpy/distutils/command/build_src.py -numpy/distutils/command/config.py -numpy/distutils/command/config_compiler.py -numpy/distutils/command/develop.py -numpy/distutils/command/egg_info.py -numpy/distutils/command/install.py -numpy/distutils/command/install_clib.py -numpy/distutils/command/install_data.py -numpy/distutils/command/install_headers.py -numpy/distutils/command/sdist.py -numpy/distutils/fcompiler/__init__.py -numpy/distutils/fcompiler/absoft.py -numpy/distutils/fcompiler/compaq.py -numpy/distutils/fcompiler/g95.py -numpy/distutils/fcompiler/gnu.py -numpy/distutils/fcompiler/hpux.py -numpy/distutils/fcompiler/ibm.py -numpy/distutils/fcompiler/intel.py 
-numpy/distutils/fcompiler/lahey.py -numpy/distutils/fcompiler/mips.py -numpy/distutils/fcompiler/nag.py -numpy/distutils/fcompiler/none.py -numpy/distutils/fcompiler/pathf95.py -numpy/distutils/fcompiler/pg.py -numpy/distutils/fcompiler/sun.py -numpy/distutils/fcompiler/vast.py -numpy/doc/__init__.py -numpy/doc/basics.py -numpy/doc/broadcasting.py -numpy/doc/byteswapping.py -numpy/doc/constants.py -numpy/doc/creation.py -numpy/doc/glossary.py -numpy/doc/howtofind.py -numpy/doc/indexing.py -numpy/doc/internals.py -numpy/doc/io.py -numpy/doc/jargon.py -numpy/doc/methods_vs_functions.py -numpy/doc/misc.py -numpy/doc/performance.py -numpy/doc/structured_arrays.py -numpy/doc/subclassing.py -numpy/doc/ufuncs.py -numpy/f2py/__init__.py -numpy/f2py/__version__.py -numpy/f2py/auxfuncs.py -numpy/f2py/capi_maps.py -numpy/f2py/cb_rules.py -numpy/f2py/cfuncs.py -numpy/f2py/common_rules.py -numpy/f2py/crackfortran.py -numpy/f2py/diagnose.py -numpy/f2py/f2py2e.py -numpy/f2py/f2py_testing.py -numpy/f2py/f90mod_rules.py -numpy/f2py/func2subr.py -numpy/f2py/info.py -numpy/f2py/rules.py -numpy/f2py/setup.py -numpy/f2py/use_rules.py -numpy/fft/__init__.py -numpy/fft/fftpack.c -numpy/fft/fftpack.h -numpy/fft/fftpack.py -numpy/fft/fftpack_litemodule.c -numpy/fft/helper.py -numpy/fft/info.py -numpy/fft/setup.py -numpy/lib/__init__.py -numpy/lib/_datasource.py -numpy/lib/_iotools.py -numpy/lib/_version.py -numpy/lib/arraypad.py -numpy/lib/arraysetops.py -numpy/lib/arrayterator.py -numpy/lib/financial.py -numpy/lib/format.py -numpy/lib/function_base.py -numpy/lib/index_tricks.py -numpy/lib/info.py -numpy/lib/nanfunctions.py -numpy/lib/npyio.py -numpy/lib/polynomial.py -numpy/lib/recfunctions.py -numpy/lib/scimath.py -numpy/lib/setup.py -numpy/lib/shape_base.py -numpy/lib/stride_tricks.py -numpy/lib/twodim_base.py -numpy/lib/type_check.py -numpy/lib/ufunclike.py -numpy/lib/user_array.py -numpy/lib/utils.py -numpy/lib/src/_compiled_base.c -numpy/linalg/__init__.py -numpy/linalg/info.py -numpy/linalg/lapack_litemodule.c -numpy/linalg/linalg.py -numpy/linalg/setup.py -numpy/linalg/umath_linalg.c.src -numpy/linalg/lapack_lite/blas_lite.c -numpy/linalg/lapack_lite/dlamch.c -numpy/linalg/lapack_lite/dlapack_lite.c -numpy/linalg/lapack_lite/f2c.h -numpy/linalg/lapack_lite/f2c_lite.c -numpy/linalg/lapack_lite/python_xerbla.c -numpy/linalg/lapack_lite/zlapack_lite.c -numpy/ma/__init__.py -numpy/ma/bench.py -numpy/ma/core.py -numpy/ma/extras.py -numpy/ma/mrecords.py -numpy/ma/setup.py -numpy/ma/testutils.py -numpy/ma/timer_comparison.py -numpy/ma/version.py -numpy/matrixlib/__init__.py -numpy/matrixlib/defmatrix.py -numpy/matrixlib/setup.py -numpy/polynomial/__init__.py -numpy/polynomial/_polybase.py -numpy/polynomial/chebyshev.py -numpy/polynomial/hermite.py -numpy/polynomial/hermite_e.py -numpy/polynomial/laguerre.py -numpy/polynomial/legendre.py -numpy/polynomial/polynomial.py -numpy/polynomial/polytemplate.py -numpy/polynomial/polyutils.py -numpy/polynomial/setup.py -numpy/random/__init__.py -numpy/random/info.py -numpy/random/setup.py -numpy/random/mtrand/Python.pxi -numpy/random/mtrand/distributions.c -numpy/random/mtrand/distributions.h -numpy/random/mtrand/generate_mtrand_c.py -numpy/random/mtrand/initarray.c -numpy/random/mtrand/initarray.h -numpy/random/mtrand/mtrand.c -numpy/random/mtrand/mtrand.pyx -numpy/random/mtrand/mtrand_py_helper.h -numpy/random/mtrand/numpy.pxd -numpy/random/mtrand/randomkit.c -numpy/random/mtrand/randomkit.h -numpy/testing/__init__.py -numpy/testing/decorators.py 
-numpy/testing/noseclasses.py -numpy/testing/nosetester.py -numpy/testing/print_coercion_tables.py -numpy/testing/setup.py -numpy/testing/utils.py -tools/swig/Makefile -tools/swig/README -tools/swig/numpy.i -tools/swig/pyfragments.swg -tools/swig/test/Array.i -tools/swig/test/Array1.cxx -tools/swig/test/Array1.h -tools/swig/test/Array2.cxx -tools/swig/test/Array2.h -tools/swig/test/Farray.cxx -tools/swig/test/Farray.h -tools/swig/test/Farray.i -tools/swig/test/Fortran.cxx -tools/swig/test/Fortran.h -tools/swig/test/Fortran.i -tools/swig/test/Makefile -tools/swig/test/Matrix.cxx -tools/swig/test/Matrix.h -tools/swig/test/Matrix.i -tools/swig/test/SuperTensor.cxx -tools/swig/test/SuperTensor.h -tools/swig/test/SuperTensor.i -tools/swig/test/Tensor.cxx -tools/swig/test/Tensor.h -tools/swig/test/Tensor.i -tools/swig/test/Vector.cxx -tools/swig/test/Vector.h -tools/swig/test/Vector.i -tools/swig/test/setup.py -tools/swig/test/testArray.py -tools/swig/test/testFarray.py -tools/swig/test/testFortran.py -tools/swig/test/testMatrix.py -tools/swig/test/testSuperTensor.py -tools/swig/test/testTensor.py -tools/swig/test/testVector.py \ No newline at end of file diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/dependency_links.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/dependency_links.txt deleted file mode 100644 index 8b137891791fe..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/native_libs.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/native_libs.txt deleted file mode 100644 index ad1c477646763..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/native_libs.txt +++ /dev/null @@ -1,14 +0,0 @@ -numpy/random/mtrand.cpython-34m.so -numpy/core/struct_ufunc_test.cpython-34m.so -numpy/core/test_rational.cpython-34m.so -numpy/core/umath.cpython-34m.so -numpy/core/_dummy.cpython-34m.so -numpy/core/operand_flag_tests.cpython-34m.so -numpy/core/umath_tests.cpython-34m.so -numpy/core/multiarray_tests.cpython-34m.so -numpy/core/scalarmath.cpython-34m.so -numpy/core/multiarray.cpython-34m.so -numpy/linalg/_umath_linalg.cpython-34m.so -numpy/linalg/lapack_lite.cpython-34m.so -numpy/fft/fftpack_lite.cpython-34m.so -numpy/lib/_compiled_base.cpython-34m.so diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/not-zip-safe b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/not-zip-safe deleted file mode 100644 index 8b137891791fe..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/not-zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/scripts/f2py b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/scripts/f2py deleted file mode 100755 index bd9406e00c184..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/scripts/f2py +++ /dev/null @@ -1,24 +0,0 @@ -#!/home/berti/anaconda3/envs/test_pandas/bin/python -# See http://cens.ioc.ee/projects/f2py2e/ -import os, sys -for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]: - try: - i=sys.argv.index("--"+mode) - del sys.argv[i] - break - except ValueError: pass -os.environ["NO_SCIPY_IMPORT"]="f2py" -if mode=="g3-numpy": - sys.stderr.write("G3 f2py support is not implemented, yet.\n") - sys.exit(1) -elif mode=="2e-numeric": - from f2py2e import main -elif mode=="2e-numarray": - sys.argv.append("-DNUMARRAY") - from f2py2e import main -elif mode=="2e-numpy": - from numpy.f2py import main -else: - sys.stderr.write("Unknown mode: " + repr(mode) + "\n") - 
sys.exit(1) -main() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/top_level.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/top_level.txt deleted file mode 100644 index 24ce15ab7ead3..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/EGG-INFO/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -numpy diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__config__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__config__.py deleted file mode 100644 index 67c0a0f242bf6..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__config__.py +++ /dev/null @@ -1,36 +0,0 @@ -# This file is generated by /tmp/easy_install-kn_oavq3/numpy-1.9.0/setup.py -# It contains system_info results at the time of building this package. -__all__ = ["get_info","show"] - -atlas_blas_info={} -lapack_info={} -openblas_info={} -lapack_src_info={} -atlas_blas_threads_info={} -blas_src_info={} -lapack_mkl_info={} -blas_info={} -atlas_threads_info={} -mkl_info={} -atlas_info={} -openblas_lapack_info={} -blas_opt_info={} -blas_mkl_info={} -lapack_opt_info={} - -def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - -def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - \ No newline at end of file diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__init__.py deleted file mode 100644 index 772c75b630db4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/__init__.py +++ /dev/null @@ -1,216 +0,0 @@ -""" -NumPy -===== - -Provides - 1. An array object of arbitrary homogeneous items - 2. Fast mathematical operations over arrays - 3. Linear Algebra, Fourier Transforms, Random Number Generation - -How to use the documentation ----------------------------- -Documentation is available in two forms: docstrings provided -with the code, and a loose standing reference guide, available from -`the NumPy homepage `_. - -We recommend exploring the docstrings using -`IPython `_, an advanced Python shell with -TAB-completion and introspection capabilities. See below for further -instructions. - -The docstring examples assume that `numpy` has been imported as `np`:: - - >>> import numpy as np - -Code snippets are indicated by three greater-than signs:: - - >>> x = 42 - >>> x = x + 1 - -Use the built-in ``help`` function to view a function's docstring:: - - >>> help(np.sort) - ... # doctest: +SKIP - -For some objects, ``np.info(obj)`` may provide additional help. This is -particularly true if you see the line "Help on ufunc object:" at the top -of the help() page. Ufuncs are implemented in C, not Python, for speed. -The native Python help() does not know how to view their help, but our -np.info() function does. - -To search for documents containing a keyword, do:: - - >>> np.lookfor('keyword') - ... # doctest: +SKIP - -General-purpose documents like a glossary and help on the basic concepts -of numpy are available under the ``doc`` sub-module:: - - >>> from numpy import doc - >>> help(doc) - ... # doctest: +SKIP - -Available subpackages ---------------------- -doc - Topical documentation on broadcasting, indexing, etc. -lib - Basic functions used by several sub-packages. 
-random - Core Random Tools -linalg - Core Linear Algebra Tools -fft - Core FFT routines -polynomial - Polynomial tools -testing - Numpy testing tools -f2py - Fortran to Python Interface Generator. -distutils - Enhancements to distutils with support for - Fortran compilers support and more. - -Utilities ---------- -test - Run numpy unittests -show_config - Show numpy build configuration -dual - Overwrite certain functions with high-performance Scipy tools -matlib - Make everything matrices. -__version__ - Numpy version string - -Viewing documentation using IPython ------------------------------------ -Start IPython with the NumPy profile (``ipython -p numpy``), which will -import `numpy` under the alias `np`. Then, use the ``cpaste`` command to -paste examples into the shell. To see which functions are available in -`numpy`, type ``np.`` (where ```` refers to the TAB key), or use -``np.*cos*?`` (where ```` refers to the ENTER key) to narrow -down the list. To view the docstring for a function, use -``np.cos?`` (to view the docstring) and ``np.cos??`` (to view -the source code). - -Copies vs. in-place operation ------------------------------ -Most of the functions in `numpy` return a copy of the array argument -(e.g., `np.sort`). In-place versions of these functions are often -available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. -Exceptions to this rule are documented. - -""" -from __future__ import division, absolute_import, print_function - -import sys - - -class ModuleDeprecationWarning(DeprecationWarning): - """Module deprecation warning. - - The nose tester turns ordinary Deprecation warnings into test failures. - That makes it hard to deprecate whole modules, because they get - imported by default. So this is a special Deprecation warning that the - nose tester will let pass without making tests fail. - - """ - pass - - -class VisibleDeprecationWarning(UserWarning): - """Visible deprecation warning. - - By default, python will not show deprecation warnings, so this class - can be used when a very visible warning is helpful, for example because - the usage is most likely a user bug. - - """ - pass - - -# oldnumeric and numarray were removed in 1.9. In case some packages import -# but do not use them, we define them here for backward compatibility. -oldnumeric = 'removed' -numarray = 'removed' - - -# We first need to detect if we're being called as part of the numpy setup -# procedure itself in a reliable manner. -try: - __NUMPY_SETUP__ -except NameError: - __NUMPY_SETUP__ = False - - -if __NUMPY_SETUP__: - import sys as _sys - _sys.stderr.write('Running from numpy source directory.\n') - del _sys -else: - try: - from numpy.__config__ import show as show_config - except ImportError: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) - from .version import git_revision as __git_revision__ - from .version import version as __version__ - - from ._import_tools import PackageLoader - - def pkgload(*packages, **options): - loader = PackageLoader(infunc=True) - return loader(*packages, **options) - - from . import add_newdocs - __all__ = ['add_newdocs', - 'ModuleDeprecationWarning', - 'VisibleDeprecationWarning'] - - pkgload.__doc__ = PackageLoader.__call__.__doc__ - - from .testing import Tester - test = Tester().test - bench = Tester().bench - - from . import core - from .core import * - from . import compat - from . 
import lib - from .lib import * - from . import linalg - from . import fft - from . import polynomial - from . import random - from . import ctypeslib - from . import ma - from . import matrixlib as _mat - from .matrixlib import * - from .compat import long - - # Make these accessible from numpy name-space - # but not imported in from numpy import * - if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str - else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - from .core import round, abs, max, min - - __all__.extend(['__version__', 'pkgload', 'PackageLoader', - 'show_config']) - __all__.extend(core.__all__) - __all__.extend(_mat.__all__) - __all__.extend(lib.__all__) - __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma']) - - # Filter annoying Cython warnings that serve no good purpose. - import warnings - warnings.filterwarnings("ignore", message="numpy.dtype size changed") - warnings.filterwarnings("ignore", message="numpy.ufunc size changed") diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/_import_tools.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/_import_tools.py deleted file mode 100644 index 5262173596240..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/_import_tools.py +++ /dev/null @@ -1,348 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys - -__all__ = ['PackageLoader'] - -class PackageLoader(object): - def __init__(self, verbose=False, infunc=False): - """ Manages loading packages. - """ - - if infunc: - _level = 2 - else: - _level = 1 - self.parent_frame = frame = sys._getframe(_level) - self.parent_name = eval('__name__', frame.f_globals, frame.f_locals) - parent_path = eval('__path__', frame.f_globals, frame.f_locals) - if isinstance(parent_path, str): - parent_path = [parent_path] - self.parent_path = parent_path - if '__all__' not in frame.f_locals: - exec('__all__ = []', frame.f_globals, frame.f_locals) - self.parent_export_names = eval('__all__', frame.f_globals, frame.f_locals) - - self.info_modules = {} - self.imported_packages = [] - self.verbose = None - - def _get_info_files(self, package_dir, parent_path, parent_package=None): - """ Return list of (package name,info.py file) from parent_path subdirectories. - """ - from glob import glob - files = glob(os.path.join(parent_path, package_dir, 'info.py')) - for info_file in glob(os.path.join(parent_path, package_dir, 'info.pyc')): - if info_file[:-1] not in files: - files.append(info_file) - info_files = [] - for info_file in files: - package_name = os.path.dirname(info_file[len(parent_path)+1:])\ - .replace(os.sep, '.') - if parent_package: - package_name = parent_package + '.' + package_name - info_files.append((package_name, info_file)) - info_files.extend(self._get_info_files('*', - os.path.dirname(info_file), - package_name)) - return info_files - - def _init_info_modules(self, packages=None): - """Initialize info_modules = {: }. 
- """ - import imp - info_files = [] - info_modules = self.info_modules - - if packages is None: - for path in self.parent_path: - info_files.extend(self._get_info_files('*', path)) - else: - for package_name in packages: - package_dir = os.path.join(*package_name.split('.')) - for path in self.parent_path: - names_files = self._get_info_files(package_dir, path) - if names_files: - info_files.extend(names_files) - break - else: - try: - exec('import %s.info as info' % (package_name)) - info_modules[package_name] = info - except ImportError as msg: - self.warn('No scipy-style subpackage %r found in %s. '\ - 'Ignoring: %s'\ - % (package_name, ':'.join(self.parent_path), msg)) - - for package_name, info_file in info_files: - if package_name in info_modules: - continue - fullname = self.parent_name +'.'+ package_name - if info_file[-1]=='c': - filedescriptor = ('.pyc', 'rb', 2) - else: - filedescriptor = ('.py', 'U', 1) - - try: - info_module = imp.load_module(fullname+'.info', - open(info_file, filedescriptor[1]), - info_file, - filedescriptor) - except Exception as msg: - self.error(msg) - info_module = None - - if info_module is None or getattr(info_module, 'ignore', False): - info_modules.pop(package_name, None) - else: - self._init_info_modules(getattr(info_module, 'depends', [])) - info_modules[package_name] = info_module - - return - - def _get_sorted_names(self): - """ Return package names sorted in the order as they should be - imported due to dependence relations between packages. - """ - - depend_dict = {} - for name, info_module in self.info_modules.items(): - depend_dict[name] = getattr(info_module, 'depends', []) - package_names = [] - - for name in list(depend_dict.keys()): - if not depend_dict[name]: - package_names.append(name) - del depend_dict[name] - - while depend_dict: - for name, lst in list(depend_dict.items()): - new_lst = [n for n in lst if n in depend_dict] - if not new_lst: - package_names.append(name) - del depend_dict[name] - else: - depend_dict[name] = new_lst - - return package_names - - def __call__(self,*packages, **options): - """Load one or more packages into parent package top-level namespace. - - This function is intended to shorten the need to import many - subpackages, say of scipy, constantly with statements such as - - import scipy.linalg, scipy.fftpack, scipy.etc... - - Instead, you can say: - - import scipy - scipy.pkgload('linalg','fftpack',...) - - or - - scipy.pkgload() - - to load all of them in one call. - - If a name which doesn't exist in scipy's namespace is - given, a warning is shown. - - Parameters - ---------- - *packages : arg-tuple - the names (one or more strings) of all the modules one - wishes to load into the top-level namespace. - verbose= : integer - verbosity level [default: -1]. - verbose=-1 will suspend also warnings. - force= : bool - when True, force reloading loaded packages [default: False]. 
- postpone= : bool - when True, don't load packages [default: False] - - """ - frame = self.parent_frame - self.info_modules = {} - if options.get('force', False): - self.imported_packages = [] - self.verbose = verbose = options.get('verbose', -1) - postpone = options.get('postpone', None) - self._init_info_modules(packages or None) - - self.log('Imports to %r namespace\n----------------------------'\ - % self.parent_name) - - for package_name in self._get_sorted_names(): - if package_name in self.imported_packages: - continue - info_module = self.info_modules[package_name] - global_symbols = getattr(info_module, 'global_symbols', []) - postpone_import = getattr(info_module, 'postpone_import', False) - if (postpone and not global_symbols) \ - or (postpone_import and postpone is not None): - continue - - old_object = frame.f_locals.get(package_name, None) - - cmdstr = 'import '+package_name - if self._execcmd(cmdstr): - continue - self.imported_packages.append(package_name) - - if verbose!=-1: - new_object = frame.f_locals.get(package_name) - if old_object is not None and old_object is not new_object: - self.warn('Overwriting %s=%s (was %s)' \ - % (package_name, self._obj2repr(new_object), - self._obj2repr(old_object))) - - if '.' not in package_name: - self.parent_export_names.append(package_name) - - for symbol in global_symbols: - if symbol=='*': - symbols = eval('getattr(%s,"__all__",None)'\ - % (package_name), - frame.f_globals, frame.f_locals) - if symbols is None: - symbols = eval('dir(%s)' % (package_name), - frame.f_globals, frame.f_locals) - symbols = [s for s in symbols if not s.startswith('_')] - else: - symbols = [symbol] - - if verbose!=-1: - old_objects = {} - for s in symbols: - if s in frame.f_locals: - old_objects[s] = frame.f_locals[s] - - cmdstr = 'from '+package_name+' import '+symbol - if self._execcmd(cmdstr): - continue - - if verbose!=-1: - for s, old_object in old_objects.items(): - new_object = frame.f_locals[s] - if new_object is not old_object: - self.warn('Overwriting %s=%s (was %s)' \ - % (s, self._obj2repr(new_object), - self._obj2repr(old_object))) - - if symbol=='*': - self.parent_export_names.extend(symbols) - else: - self.parent_export_names.append(symbol) - - return - - def _execcmd(self, cmdstr): - """ Execute command in parent_frame.""" - frame = self.parent_frame - try: - exec (cmdstr, frame.f_globals, frame.f_locals) - except Exception as msg: - self.error('%s -> failed: %s' % (cmdstr, msg)) - return True - else: - self.log('%s -> success' % (cmdstr)) - return - - def _obj2repr(self, obj): - """ Return repr(obj) with""" - module = getattr(obj, '__module__', None) - file = getattr(obj, '__file__', None) - if module is not None: - return repr(obj) + ' from ' + module - if file is not None: - return repr(obj) + ' from ' + file - return repr(obj) - - def log(self, mess): - if self.verbose>1: - print(str(mess), file=sys.stderr) - def warn(self, mess): - if self.verbose>=0: - print(str(mess), file=sys.stderr) - def error(self, mess): - if self.verbose!=-1: - print(str(mess), file=sys.stderr) - - def _get_doc_title(self, info_module): - """ Get the title from a package info.py file. 
- """ - title = getattr(info_module, '__doc_title__', None) - if title is not None: - return title - title = getattr(info_module, '__doc__', None) - if title is not None: - title = title.lstrip().split('\n', 1)[0] - return title - return '* Not Available *' - - def _format_titles(self,titles,colsep='---'): - display_window_width = 70 # How to determine the correct value in runtime?? - lengths = [len(name)-name.find('.')-1 for (name, title) in titles]+[0] - max_length = max(lengths) - lines = [] - for (name, title) in titles: - name = name[name.find('.')+1:] - w = max_length - len(name) - words = title.split() - line = '%s%s %s' % (name, w*' ', colsep) - tab = len(line) * ' ' - while words: - word = words.pop(0) - if len(line)+len(word)>display_window_width: - lines.append(line) - line = tab - line += ' ' + word - else: - lines.append(line) - return '\n'.join(lines) - - def get_pkgdocs(self): - """ Return documentation summary of subpackages. - """ - import sys - self.info_modules = {} - self._init_info_modules(None) - - titles = [] - symbols = [] - for package_name, info_module in self.info_modules.items(): - global_symbols = getattr(info_module, 'global_symbols', []) - fullname = self.parent_name +'.'+ package_name - note = '' - if fullname not in sys.modules: - note = ' [*]' - titles.append((fullname, self._get_doc_title(info_module) + note)) - if global_symbols: - symbols.append((package_name, ', '.join(global_symbols))) - - retstr = self._format_titles(titles) +\ - '\n [*] - using a package requires explicit import (see pkgload)' - - - if symbols: - retstr += """\n\nGlobal symbols from subpackages"""\ - """\n-------------------------------\n""" +\ - self._format_titles(symbols, '-->') - - return retstr - -class PackageLoaderDebug(PackageLoader): - def _execcmd(self, cmdstr): - """ Execute command in parent_frame.""" - frame = self.parent_frame - print('Executing', repr(cmdstr), '...', end=' ') - sys.stdout.flush() - exec (cmdstr, frame.f_globals, frame.f_locals) - print('ok') - sys.stdout.flush() - return - -if int(os.environ.get('NUMPY_IMPORT_DEBUG', '0')): - PackageLoader = PackageLoaderDebug diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/add_newdocs.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/add_newdocs.py deleted file mode 100644 index 09311a5364d4f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/add_newdocs.py +++ /dev/null @@ -1,7526 +0,0 @@ -""" -This is only meant to add docs to objects defined in C-extension modules. -The purpose is to allow easier editing of the docstrings without -requiring a re-compile. - -NOTE: Many of the methods of ndarray have corresponding functions. - If you update these docstrings, please keep also the ones in - core/fromnumeric.py, core/defmatrix.py up-to-date. - -""" -from __future__ import division, absolute_import, print_function - -from numpy.lib import add_newdoc - -############################################################################### -# -# flatiter -# -# flatiter needs a toplevel description -# -############################################################################### - -add_newdoc('numpy.core', 'flatiter', - """ - Flat iterator object to iterate over arrays. - - A `flatiter` iterator is returned by ``x.flat`` for any array `x`. - It allows iterating over the array as if it were a 1-D array, - either in a for-loop or by calling its `next` method. - - Iteration is done in C-contiguous style, with the last index varying the - fastest. 
The iterator can also be indexed using basic slicing or - advanced indexing. - - See Also - -------- - ndarray.flat : Return a flat iterator over an array. - ndarray.flatten : Returns a flattened copy of an array. - - Notes - ----- - A `flatiter` iterator can not be constructed directly from Python code - by calling the `flatiter` constructor. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> type(fl) - <type 'numpy.flatiter'> - >>> for item in fl: - ... print item - ... - 0 - 1 - 2 - 3 - 4 - 5 - - >>> fl[2:4] - array([2, 3]) - - """) - -# flatiter attributes - -add_newdoc('numpy.core', 'flatiter', ('base', - """ - A reference to the array that is iterated over. - - Examples - -------- - >>> x = np.arange(5) - >>> fl = x.flat - >>> fl.base is x - True - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('coords', - """ - An N-dimensional tuple of current coordinates. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> fl.coords - (0, 0) - >>> fl.next() - 0 - >>> fl.coords - (0, 1) - - """)) - - - -add_newdoc('numpy.core', 'flatiter', ('index', - """ - Current flat index into the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> fl = x.flat - >>> fl.index - 0 - >>> fl.next() - 0 - >>> fl.index - 1 - - """)) - -# flatiter functions - -add_newdoc('numpy.core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator - - """)) - - -add_newdoc('numpy.core', 'flatiter', ('copy', - """ - copy() - - Get a copy of the iterator as a 1-D array. - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> fl = x.flat - >>> fl.copy() - array([0, 1, 2, 3, 4, 5]) - - """)) - - -############################################################################### -# -# nditer -# -############################################################################### - -add_newdoc('numpy.core', 'nditer', - """ - Efficient multi-dimensional iterator object to iterate over arrays. - To get started using this object, see the - :ref:`introductory guide to array iteration <arrays.nditer>`. - - Parameters - ---------- - op : ndarray or sequence of array_like - The array(s) to iterate over. - flags : sequence of str, optional - Flags to control the behavior of the iterator. - - * "buffered" enables buffering when required. - * "c_index" causes a C-order index to be tracked. - * "f_index" causes a Fortran-order index to be tracked. - * "multi_index" causes a multi-index, or a tuple of indices - with one per iteration dimension, to be tracked. - * "common_dtype" causes all the operands to be converted to - a common data type, with copying or buffering as necessary. - * "delay_bufalloc" delays allocation of the buffers until - a reset() call is made. Allows "allocate" operands to - be initialized before their values are copied into the buffers. - * "external_loop" causes the `values` given to be - one-dimensional arrays with multiple values instead of - zero-dimensional arrays. - * "grow_inner" allows the `value` array sizes to be made - larger than the buffer size when both "buffered" and - "external_loop" are used. - * "ranged" allows the iterator to be restricted to a sub-range - of the iterindex values. - * "refs_ok" enables iteration of reference types, such as - object arrays. - * "reduce_ok" enables iteration of "readwrite" operands - which are broadcasted, also known as reduction operands. - * "zerosize_ok" allows `itersize` to be zero.
- op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - "readonly", "readwrite", or "writeonly" must be specified. - - * "readonly" indicates the operand will only be read from. - * "readwrite" indicates the operand will be read from and written to. - * "writeonly" indicates the operand will only be written to. - * "no_broadcast" prevents the operand from being broadcasted. - * "contig" forces the operand data to be contiguous. - * "aligned" forces the operand data to be aligned. - * "nbo" forces the operand data to be in native byte order. - * "copy" allows a temporary read-only copy if required. - * "updateifcopy" allows a temporary read-write copy if required. - * "allocate" causes the array to be allocated if it is None - in the `op` parameter. - * "no_subtype" prevents an "allocate" operand from using a subtype. - * "arraymask" indicates that this operand is the mask to use - for selecting elements when writing to operands with the - 'writemasked' flag set. The iterator does not enforce this, - but when writing from a buffer back to the array, it only - copies those elements indicated by this mask. - * 'writemasked' indicates that only elements where the chosen - 'arraymask' operand is True will be written to. - op_dtypes : dtype or tuple of dtype(s), optional - The required data type(s) of the operands. If copying or buffering - is enabled, the data will be converted to/from their original types. - order : {'C', 'F', 'A', 'K'}, optional - Controls the iteration order. 'C' means C order, 'F' means - Fortran order, 'A' means 'F' order if all the arrays are Fortran - contiguous, 'C' order otherwise, and 'K' means as close to the - order the array elements appear in memory as possible. This also - affects the element memory order of "allocate" operands, as they - are allocated to be compatible with iteration order. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur when making a copy - or buffering. Setting this to 'unsafe' is not recommended, - as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - op_axes : list of list of ints, optional - If provided, is a list of ints or None for each operands. - The list of axes for an operand is a mapping from the dimensions - of the iterator to the dimensions of the operand. A value of - -1 can be placed for entries, causing that dimension to be - treated as "newaxis". - itershape : tuple of ints, optional - The desired shape of the iterator. This allows "allocate" operands - with a dimension mapped by op_axes not corresponding to a dimension - of a different operand to get a value not equal to 1 for that - dimension. - buffersize : int, optional - When buffering is enabled, controls the size of the temporary - buffers. Set to 0 for the default value. - - Attributes - ---------- - dtypes : tuple of dtype(s) - The data types of the values provided in `value`. This may be - different from the operand data types if buffering is enabled. - finished : bool - Whether the iteration over the operands is finished or not. 
- has_delayed_bufalloc : bool - If True, the iterator was created with the "delay_bufalloc" flag, - and no reset() function was called on it yet. - has_index : bool - If True, the iterator was created with either the "c_index" or - the "f_index" flag, and the property `index` can be used to - retrieve it. - has_multi_index : bool - If True, the iterator was created with the "multi_index" flag, - and the property `multi_index` can be used to retrieve it. - index : - When the "c_index" or "f_index" flag was used, this property - provides access to the index. Raises a ValueError if accessed - and `has_index` is False. - iterationneedsapi : bool - Whether iteration requires access to the Python API, for example - if one of the operands is an object array. - iterindex : int - An index which matches the order of iteration. - itersize : int - Size of the iterator. - itviews : - Structured view(s) of `operands` in memory, matching the reordered - and optimized iterator access pattern. - multi_index : - When the "multi_index" flag was used, this property - provides access to the index. Raises a ValueError if accessed - and `has_multi_index` is False. - ndim : int - The iterator's dimension. - nop : int - The number of iterator operands. - operands : tuple of operand(s) - The array(s) to be iterated over. - shape : tuple of ints - Shape tuple, the shape of the iterator. - value : - Value of `operands` at current iteration. Normally, this is a - tuple of array scalars, but if the flag "external_loop" is used, - it is a tuple of one dimensional arrays. - - Notes - ----- - `nditer` supersedes `flatiter`. The iterator implementation behind - `nditer` is also exposed by the Numpy C API. - - The Python exposure supplies two iteration interfaces, one which follows - the Python iterator protocol, and another which mirrors the C-style - do-while pattern. The native Python approach is better in most cases, but - if you need the iterator's coordinates or index, use the C-style pattern.
- - Examples - -------- - Here is how we might write an ``iter_add`` function, using the - Python iterator protocol:: - - def iter_add_py(x, y, out=None): - addop = np.add - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - for (a, b, c) in it: - addop(a, b, out=c) - return it.operands[2] - - Here is the same function, but following the C-style pattern:: - - def iter_add(x, y, out=None): - addop = np.add - - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - - while not it.finished: - addop(it[0], it[1], out=it[2]) - it.iternext() - - return it.operands[2] - - Here is an example outer product function:: - - def outer_it(x, y, out=None): - mulop = np.multiply - - it = np.nditer([x, y, out], ['external_loop'], - [['readonly'], ['readonly'], ['writeonly', 'allocate']], - op_axes=[range(x.ndim)+[-1]*y.ndim, - [-1]*x.ndim+range(y.ndim), - None]) - - for (a, b, c) in it: - mulop(a, b, out=c) - - return it.operands[2] - - >>> a = np.arange(2)+1 - >>> b = np.arange(3)+1 - >>> outer_it(a,b) - array([[1, 2, 3], - [2, 4, 6]]) - - Here is an example function which operates like a "lambda" ufunc:: - - def luf(lamdaexpr, *args, **kwargs): - "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)" - nargs = len(args) - op = (kwargs.get('out',None),) + args - it = np.nditer(op, ['buffered','external_loop'], - [['writeonly','allocate','no_broadcast']] + - [['readonly','nbo','aligned']]*nargs, - order=kwargs.get('order','K'), - casting=kwargs.get('casting','safe'), - buffersize=kwargs.get('buffersize',0)) - while not it.finished: - it[0] = lamdaexpr(*it[1:]) - it.iternext() - return it.operands[0] - - >>> a = np.arange(5) - >>> b = np.ones(5) - >>> luf(lambda i,j:i*i + j/2, a, b) - array([ 0.5, 1.5, 4.5, 9.5, 16.5]) - - """) - -# nditer methods - -add_newdoc('numpy.core', 'nditer', ('copy', - """ - copy() - - Get a copy of the iterator in its current state. - - Examples - -------- - >>> x = np.arange(10) - >>> y = x + 1 - >>> it = np.nditer([x, y]) - >>> it.next() - (array(0), array(1)) - >>> it2 = it.copy() - >>> it2.next() - (array(1), array(2)) - - """)) - -add_newdoc('numpy.core', 'nditer', ('debug_print', - """ - debug_print() - - Print the current state of the `nditer` instance and debug info to stdout. - - """)) - -add_newdoc('numpy.core', 'nditer', ('enable_external_loop', - """ - enable_external_loop() - - When the "external_loop" was not used during construction, but - is desired, this modifies the iterator to behave as if the flag - was specified. - - """)) - -add_newdoc('numpy.core', 'nditer', ('iternext', - """ - iternext() - - Check whether iterations are left, and perform a single internal iteration - without returning the result. Used in the C-style pattern do-while - pattern. For an example, see `nditer`. - - Returns - ------- - iternext : bool - Whether or not there are iterations left. - - """)) - -add_newdoc('numpy.core', 'nditer', ('remove_axis', - """ - remove_axis(i) - - Removes axis `i` from the iterator. Requires that the flag "multi_index" - be enabled. - - """)) - -add_newdoc('numpy.core', 'nditer', ('remove_multi_index', - """ - remove_multi_index() - - When the "multi_index" flag was specified, this removes it, allowing - the internal iteration structure to be optimized further. - - """)) - -add_newdoc('numpy.core', 'nditer', ('reset', - """ - reset() - - Reset the iterator to its initial state. 
- - """)) - - - -############################################################################### -# -# broadcast -# -############################################################################### - -add_newdoc('numpy.core', 'broadcast', - """ - Produce an object that mimics broadcasting. - - Parameters - ---------- - in1, in2, ... : array_like - Input parameters. - - Returns - ------- - b : broadcast object - Broadcast the input parameters against one another, and - return an object that encapsulates the result. - Amongst others, it has ``shape`` and ``nd`` properties, and - may be used as an iterator. - - Examples - -------- - Manually adding two vectors, using broadcasting: - - >>> x = np.array([[1], [2], [3]]) - >>> y = np.array([4, 5, 6]) - >>> b = np.broadcast(x, y) - - >>> out = np.empty(b.shape) - >>> out.flat = [u+v for (u,v) in b] - >>> out - array([[ 5., 6., 7.], - [ 6., 7., 8.], - [ 7., 8., 9.]]) - - Compare against built-in broadcasting: - - >>> x + y - array([[5, 6, 7], - [6, 7, 8], - [7, 8, 9]]) - - """) - -# attributes - -add_newdoc('numpy.core', 'broadcast', ('index', - """ - current index in broadcasted result - - Examples - -------- - >>> x = np.array([[1], [2], [3]]) - >>> y = np.array([4, 5, 6]) - >>> b = np.broadcast(x, y) - >>> b.index - 0 - >>> b.next(), b.next(), b.next() - ((1, 4), (1, 5), (1, 6)) - >>> b.index - 3 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('iters', - """ - tuple of iterators along ``self``'s "components." - - Returns a tuple of `numpy.flatiter` objects, one for each "component" - of ``self``. - - See Also - -------- - numpy.flatiter - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> row, col = b.iters - >>> row.next(), col.next() - (1, 4) - - """)) - -add_newdoc('numpy.core', 'broadcast', ('nd', - """ - Number of dimensions of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.nd - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('numiter', - """ - Number of iterators possessed by the broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.numiter - 2 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('shape', - """ - Shape of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.shape - (3, 3) - - """)) - -add_newdoc('numpy.core', 'broadcast', ('size', - """ - Total size of broadcasted result. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.size - 9 - - """)) - -add_newdoc('numpy.core', 'broadcast', ('reset', - """ - reset() - - Reset the broadcasted result's iterator(s). 
- - Parameters - ---------- - None - - Returns - ------- - None - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]]) - >>> b = np.broadcast(x, y) - >>> b.index - 0 - >>> b.next(), b.next(), b.next() - ((1, 4), (2, 4), (3, 4)) - >>> b.index - 3 - >>> b.reset() - >>> b.index - 0 - - """)) - -############################################################################### -# -# numpy functions -# -############################################################################### - -add_newdoc('numpy.core.multiarray', 'array', - """ - array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0) - - Create an array. - - Parameters - ---------- - object : array_like - An array, any object exposing the array interface, an - object whose __array__ method returns an array, or any - (nested) sequence. - dtype : data-type, optional - The desired data-type for the array. If not given, then - the type will be determined as the minimum type required - to hold the objects in the sequence. This argument can only - be used to 'upcast' the array. For downcasting, use the - .astype(t) method. - copy : bool, optional - If true (default), then the object is copied. Otherwise, a copy - will only be made if __array__ returns a copy, if obj is a - nested sequence, or if a copy is needed to satisfy any of the other - requirements (`dtype`, `order`, etc.). - order : {'C', 'F', 'A'}, optional - Specify the order of the array. If order is 'C' (default), then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'F', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). If order is 'A', then the returned array may - be in any order (either C-, Fortran-contiguous, or even - discontiguous). - subok : bool, optional - If True, then sub-classes will be passed-through, otherwise - the returned array will be forced to be a base-class array (default). - ndmin : int, optional - Specifies the minimum number of dimensions that the resulting - array should have. Ones will be pre-pended to the shape as - needed to meet this requirement. - - Returns - ------- - out : ndarray - An array object satisfying the specified requirements. - - See Also - -------- - empty, empty_like, zeros, zeros_like, ones, ones_like, fill - - Examples - -------- - >>> np.array([1, 2, 3]) - array([1, 2, 3]) - - Upcasting: - - >>> np.array([1, 2, 3.0]) - array([ 1., 2., 3.]) - - More than one dimension: - - >>> np.array([[1, 2], [3, 4]]) - array([[1, 2], - [3, 4]]) - - Minimum dimensions 2: - - >>> np.array([1, 2, 3], ndmin=2) - array([[1, 2, 3]]) - - Type provided: - - >>> np.array([1, 2, 3], dtype=complex) - array([ 1.+0.j, 2.+0.j, 3.+0.j]) - - Data-type consisting of more than one element: - - >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')]) - >>> x['a'] - array([1, 3]) - - Creating an array from sub-classes: - - >>> np.array(np.mat('1 2; 3 4')) - array([[1, 2], - [3, 4]]) - - >>> np.array(np.mat('1 2; 3 4'), subok=True) - matrix([[1, 2], - [3, 4]]) - - """) - -add_newdoc('numpy.core.multiarray', 'empty', - """ - empty(shape, dtype=float, order='C') - - Return a new array of given shape and type, without initializing entries. - - Parameters - ---------- - shape : int or tuple of int - Shape of the empty array - dtype : data-type, optional - Desired output data-type. - order : {'C', 'F'}, optional - Whether to store multi-dimensional data in C (row-major) or - Fortran (column-major) order in memory.
- - Returns - ------- - out : ndarray - Array of uninitialized (arbitrary) data with the given - shape, dtype, and order. - - See Also - -------- - empty_like, zeros, ones - - Notes - ----- - `empty`, unlike `zeros`, does not set the array values to zero, - and may therefore be marginally faster. On the other hand, it requires - the user to manually set all the values in the array, and should be - used with caution. - - Examples - -------- - >>> np.empty([2, 2]) - array([[ -9.74499359e+001, 6.69583040e-309], - [ 2.13182611e-314, 3.06959433e-309]]) #random - - >>> np.empty([2, 2], dtype=int) - array([[-1073741821, -1067949133], - [ 496041986, 19249760]]) #random - - """) - -add_newdoc('numpy.core.multiarray', 'empty_like', - """ - empty_like(a, dtype=None, order='K', subok=True) - - Return a new array with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of the - returned array. - dtype : data-type, optional - .. versionadded:: 1.6.0 - Overrides the data type of the result. - order : {'C', 'F', 'A', or 'K'}, optional - .. versionadded:: 1.6.0 - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of ``a`` as closely - as possible. - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - - Returns - ------- - out : ndarray - Array of uninitialized (arbitrary) data with the same - shape and type as `a`. - - See Also - -------- - ones_like : Return an array of ones with shape and type of input. - zeros_like : Return an array of zeros with shape and type of input. - empty : Return a new uninitialized array. - ones : Return a new array setting values to one. - zeros : Return a new array setting values to zero. - - Notes - ----- - This function does *not* initialize the returned array; to do that use - `zeros_like` or `ones_like` instead. It may be marginally faster than - the functions that do set the array values. - - Examples - -------- - >>> a = ([1,2,3], [4,5,6]) # a is array-like - >>> np.empty_like(a) - array([[-1073741821, -1073741821, 3], #random - [ 0, 0, -1073741821]]) - >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) - >>> np.empty_like(a) - array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random - [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) - - """) - - -add_newdoc('numpy.core.multiarray', 'scalar', - """ - scalar(dtype, obj) - - Return a new scalar array of the given type initialized with obj. - - This function is meant mainly for pickle support. `dtype` must be a - valid data-type descriptor. If `dtype` corresponds to an object - descriptor, then `obj` can be any object, otherwise `obj` must be a - string. If `obj` is not given, it will be interpreted as None for object - type and as zeros for all other types. - - """) - -add_newdoc('numpy.core.multiarray', 'zeros', - """ - zeros(shape, dtype=float, order='C') - - Return a new array of given shape and type, filled with zeros. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default is - `numpy.float64`. 
- order : {'C', 'F'}, optional - Whether to store multidimensional data in C- or Fortran-contiguous - (row- or column-wise) order in memory. - - Returns - ------- - out : ndarray - Array of zeros with the given shape, dtype, and order. - - See Also - -------- - zeros_like : Return an array of zeros with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - empty_like : Return an empty array with shape and type of input. - ones : Return a new array setting values to one. - empty : Return a new uninitialized array. - - Examples - -------- - >>> np.zeros(5) - array([ 0., 0., 0., 0., 0.]) - - >>> np.zeros((5,), dtype=numpy.int) - array([0, 0, 0, 0, 0]) - - >>> np.zeros((2, 1)) - array([[ 0.], - [ 0.]]) - - >>> s = (2,2) - >>> np.zeros(s) - array([[ 0., 0.], - [ 0., 0.]]) - - >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype - array([(0, 0), (0, 0)], - dtype=[('x', '<i4'), ('y', '<i4')]) - - """) - -add_newdoc('numpy.core.multiarray', 'count_nonzero', - """ - count_nonzero(a) - - Counts the number of non-zero values in the array ``a``. - - Parameters - ---------- - a : array_like - The array for which to count non-zeros. - - Returns - ------- - count : int - Number of non-zero values in the array. - - See Also - -------- - nonzero : Return the coordinates of all the non-zero values. - - Examples - -------- - >>> np.count_nonzero(np.eye(4)) - 4 - >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]]) - 5 - """) - -add_newdoc('numpy.core.multiarray', 'set_typeDict', - """set_typeDict(dict) - - Set the internal dictionary that can look up an array type using a - registered code. - - """) - -add_newdoc('numpy.core.multiarray', 'fromstring', - """ - fromstring(string, dtype=float, count=-1, sep='') - - A new 1-D array initialized from raw binary or text data in a string. - - Parameters - ---------- - string : str - A string containing the data. - dtype : data-type, optional - The data type of the array; default: float. For binary input data, - the data must be in exactly this format. - count : int, optional - Read this number of `dtype` elements from the data. If this is - negative (the default), the count will be determined from the - length of the data. - sep : str, optional - If not provided or, equivalently, the empty string, the data will - be interpreted as binary data; otherwise, as ASCII text with - decimal numbers. Also in this latter case, this argument is - interpreted as the string separating numbers in the data; extra - whitespace between elements is also ignored. - - Returns - ------- - arr : ndarray - The constructed array. - - Raises - ------ - ValueError - If the string is not the correct size to satisfy the requested - `dtype` and `count`. - - See Also - -------- - frombuffer, fromfile, fromiter - - Examples - -------- - >>> np.fromstring('\\x01\\x02', dtype=np.uint8) - array([1, 2], dtype=uint8) - >>> np.fromstring('1 2', dtype=int, sep=' ') - array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') - array([1, 2]) - >>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) - array([1, 2, 3], dtype=uint8) - - """) - -add_newdoc('numpy.core.multiarray', 'fromiter', - """ - fromiter(iterable, dtype, count=-1) - - Create a new 1-dimensional array from an iterable object. - - Parameters - ---------- - iterable : iterable object - An iterable object providing data for the array. - dtype : data-type - The data-type of the returned array. - count : int, optional - The number of items to read from *iterable*. The default is -1, - which means all data is read. - - Returns - ------- - out : ndarray - The output array. - - Notes - ----- - Specify `count` to improve performance. It allows ``fromiter`` to - pre-allocate the output array, instead of resizing it on demand. 
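A minimal sketch of the pre-allocation note above, assuming a recent NumPy: passing ``count`` lets ``fromiter`` size its output once up front instead of repeatedly growing it while the iterable is consumed.

    import numpy as np

    # count=10 pre-allocates the result array; without it, fromiter must
    # resize on demand as the generator yields values.
    squares = np.fromiter((i * i for i in range(10)), dtype=np.int64, count=10)
    assert squares[-1] == 81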
- - Examples - -------- - >>> iterable = (x*x for x in range(5)) - >>> np.fromiter(iterable, np.float) - array([ 0., 1., 4., 9., 16.]) - - """) - -add_newdoc('numpy.core.multiarray', 'fromfile', - """ - fromfile(file, dtype=float, count=-1, sep='') - - Construct an array from data in a text or binary file. - - A highly efficient way of reading binary data with a known data-type, - as well as parsing simply formatted text files. Data written using the - `tofile` method can be read using this function. - - Parameters - ---------- - file : file or str - Open file object or filename. - dtype : data-type - Data type of the returned array. - For binary files, it is used to determine the size and byte-order - of the items in the file. - count : int - Number of items to read. ``-1`` means all items (i.e., the complete - file). - sep : str - Separator between items if file is a text file. - Empty ("") separator means the file should be treated as binary. - Spaces (" ") in the separator match zero or more whitespace characters. - A separator consisting only of spaces must match at least one - whitespace. - - See also - -------- - load, save - ndarray.tofile - loadtxt : More flexible way of loading data from a text file. - - Notes - ----- - Do not rely on the combination of `tofile` and `fromfile` for - data storage, as the binary files generated are not platform - independent. In particular, no byte-order or data-type information is - saved. Data can be stored in the platform independent ``.npy`` format - using `save` and `load` instead. - - Examples - -------- - Construct an ndarray: - - >>> dt = np.dtype([('time', [('min', int), ('sec', int)]), - ... ('temp', float)]) - >>> x = np.zeros((1,), dtype=dt) - >>> x['time']['min'] = 10; x['temp'] = 98.25 - >>> x - array([((10, 0), 98.25)], - dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) - - Save the raw data to disk: - - >>> import os - >>> fname = os.tmpnam() - >>> x.tofile(fname) - - Read the raw data from disk: - - >>> np.fromfile(fname, dtype=dt) - array([((10, 0), 98.25)], - dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) - - The recommended way to store and load data: - - >>> np.save(fname, x) - >>> np.load(fname + '.npy') - array([((10, 0), 98.25)], - dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) - - """) - -add_newdoc('numpy.core.multiarray', 'frombuffer', - """ - frombuffer(buffer, dtype=float, count=-1, offset=0) - - Interpret a buffer as a 1-dimensional array. - - Parameters - ---------- - buffer : buffer_like - An object that exposes the buffer interface. - dtype : data-type, optional - Data-type of the returned array; default: float. - count : int, optional - Number of items to read. ``-1`` means all data in the buffer. - offset : int, optional - Start reading the buffer from this offset; default: 0. - - Notes - ----- - If the buffer has data that is not in machine byte-order, this should - be specified as part of the data-type, e.g.:: - - >>> dt = np.dtype(int) - >>> dt = dt.newbyteorder('>') - >>> np.frombuffer(buf, dtype=dt) - - The data of the resulting array will not be byteswapped, but will be - interpreted correctly. - - Examples - -------- - >>> s = 'hello world' - >>> np.frombuffer(s, dtype='S1', count=5, offset=6) - array(['w', 'o', 'r', 'l', 'd'], - dtype='|S1') - - """) - -add_newdoc('numpy.core.multiarray', 'concatenate', - """ - concatenate((a1, a2, ...), axis=0) - - Join a sequence of arrays together. - - Parameters - ---------- - a1, a2, ... : sequence of array_like - The arrays must have the same shape, except in the dimension - corresponding to `axis` (the first, by default). - axis : int, optional - The axis along which the arrays will be joined. Default is 0. - - Returns - ------- - res : ndarray - The concatenated array. - - See Also - -------- - ma.concatenate : Concatenate function that preserves input masks. - array_split : Split an array into multiple sub-arrays of equal or - near-equal size. - split : Split array into a list of multiple sub-arrays of equal size. - hsplit : Split array into multiple sub-arrays horizontally (column wise) - vsplit : Split array into multiple sub-arrays vertically (row wise) - dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). 
- hstack : Stack arrays in sequence horizontally (column wise) - vstack : Stack arrays in sequence vertically (row wise) - dstack : Stack arrays in sequence depth wise (along third dimension) - - Notes - ----- - When one or more of the arrays to be concatenated is a MaskedArray, - this function will return a MaskedArray object instead of an ndarray, - but the input masks are *not* preserved. In cases where a MaskedArray - is expected as input, use the ma.concatenate function from the masked - array module instead. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> b = np.array([[5, 6]]) - >>> np.concatenate((a, b), axis=0) - array([[1, 2], - [3, 4], - [5, 6]]) - >>> np.concatenate((a, b.T), axis=1) - array([[1, 2, 5], - [3, 4, 6]]) - - This function will not preserve masking of MaskedArray inputs. - - >>> a = np.ma.arange(3) - >>> a[1] = np.ma.masked - >>> b = np.arange(2, 5) - >>> a - masked_array(data = [0 -- 2], - mask = [False True False], - fill_value = 999999) - >>> b - array([2, 3, 4]) - >>> np.concatenate([a, b]) - masked_array(data = [0 1 2 2 3 4], - mask = False, - fill_value = 999999) - >>> np.ma.concatenate([a, b]) - masked_array(data = [0 -- 2 2 3 4], - mask = [False True False False False False], - fill_value = 999999) - - """) - -add_newdoc('numpy.core', 'inner', - """ - inner(a, b) - - Inner product of two arrays. - - Ordinary inner product of vectors for 1-D arrays (without complex - conjugation), in higher dimensions a sum product over the last axes. - - Parameters - ---------- - a, b : array_like - If `a` and `b` are nonscalar, their last dimensions must match. - - Returns - ------- - out : ndarray - `out.shape = a.shape[:-1] + b.shape[:-1]` - - Raises - ------ - ValueError - If the last dimension of `a` and `b` has different size. - - See Also - -------- - tensordot : Sum products over arbitrary axes. - dot : Generalised matrix product, using second last dimension of `b`. - einsum : Einstein summation convention. - - Notes - ----- - For vectors (1-D arrays) it computes the ordinary inner-product:: - - np.inner(a, b) = sum(a[:]*b[:]) - - More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: - - np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) - - or explicitly:: - - np.inner(a, b)[i0,...,ir-1,j0,...,js-1] - = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) - - In addition `a` or `b` may be scalars, in which case:: - - np.inner(a,b) = a*b - - Examples - -------- - Ordinary inner product for vectors: - - >>> a = np.array([1,2,3]) - >>> b = np.array([0,1,0]) - >>> np.inner(a, b) - 2 - - A multidimensional example: - - >>> a = np.arange(24).reshape((2,3,4)) - >>> b = np.arange(4) - >>> np.inner(a, b) - array([[ 14, 38, 62], - [ 86, 110, 134]]) - - An example where `b` is a scalar: - - >>> np.inner(np.eye(2), 7) - array([[ 7., 0.], - [ 0., 7.]]) - - """) - -add_newdoc('numpy.core', 'fastCopyAndTranspose', - """_fastCopyAndTranspose(a)""") - -add_newdoc('numpy.core.multiarray', 'correlate', - """cross_correlate(a,v, mode=0)""") - -add_newdoc('numpy.core.multiarray', 'arange', - """ - arange([start,] stop[, step,], dtype=None) - - Return evenly spaced values within a given interval. - - Values are generated within the half-open interval ``[start, stop)`` - (in other words, the interval including `start` but excluding `stop`). - For integer arguments the function is equivalent to the Python built-in - `range <http://docs.python.org/lib/built-in-funcs.html>`_ function, - but returns an ndarray rather than a list. - - When using a non-integer step, such as 0.1, the results will often not - be consistent. 
It is better to use ``linspace`` for these cases. - - Parameters - ---------- - start : number, optional - Start of interval. The interval includes this value. The default - start value is 0. - stop : number - End of interval. The interval does not include this value, except - in some cases where `step` is not an integer and floating point - round-off affects the length of `out`. - step : number, optional - Spacing between values. For any output `out`, this is the distance - between two adjacent values, ``out[i+1] - out[i]``. The default - step size is 1. If `step` is specified, `start` must also be given. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - - Returns - ------- - arange : ndarray - Array of evenly spaced values. - - For floating point arguments, the length of the result is - ``ceil((stop - start)/step)``. Because of floating point overflow, - this rule may result in the last element of `out` being greater - than `stop`. - - See Also - -------- - linspace : Evenly spaced numbers with careful handling of endpoints. - ogrid: Arrays of evenly spaced numbers in N-dimensions. - mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. - - Examples - -------- - >>> np.arange(3) - array([0, 1, 2]) - >>> np.arange(3.0) - array([ 0., 1., 2.]) - >>> np.arange(3,7) - array([3, 4, 5, 6]) - >>> np.arange(3,7,2) - array([3, 5]) - - """) - -add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', - """_get_ndarray_c_version() - - Return the compile time NDARRAY_VERSION number. - - """) - -add_newdoc('numpy.core.multiarray', '_reconstruct', - """_reconstruct(subtype, shape, dtype) - - Construct an empty array. Used by Pickles. - - """) - - -add_newdoc('numpy.core.multiarray', 'set_string_function', - """ - set_string_function(f, repr=1) - - Internal method to set a function to be used when pretty printing arrays. - - """) - -add_newdoc('numpy.core.multiarray', 'set_numeric_ops', - """ - set_numeric_ops(op1=func1, op2=func2, ...) - - Set numerical operators for array objects. - - Parameters - ---------- - op1, op2, ... : callable - Each ``op = func`` pair describes an operator to be replaced. - For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace - addition by modulus 5 addition. - - Returns - ------- - saved_ops : list of callables - A list of all operators, stored before making replacements. - - Notes - ----- - .. WARNING:: - Use with care! Incorrect usage may lead to memory errors. - - A function replacing an operator cannot make use of that operator. - For example, when replacing add, you may not use ``+``. Instead, - directly call ufuncs. - - Examples - -------- - >>> def add_mod5(x, y): - ... return np.add(x, y) % 5 - ... - >>> old_funcs = np.set_numeric_ops(add=add_mod5) - - >>> x = np.arange(12).reshape((3, 4)) - >>> x + x - array([[0, 2, 4, 1], - [3, 0, 2, 4], - [1, 3, 0, 2]]) - - >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators - - """) - -add_newdoc('numpy.core.multiarray', 'where', - """ - where(condition, [x, y]) - - Return elements, either from `x` or `y`, depending on `condition`. - - If only `condition` is given, return ``condition.nonzero()``. - - Parameters - ---------- - condition : array_like, bool - When True, yield `x`, otherwise yield `y`. - x, y : array_like, optional - Values from which to choose. `x` and `y` need to have the same - shape as `condition`. 
- - Returns - ------- - out : ndarray or tuple of ndarrays - If both `x` and `y` are specified, the output array contains - elements of `x` where `condition` is True, and elements from - `y` elsewhere. - - If only `condition` is given, return the tuple - ``condition.nonzero()``, the indices where `condition` is True. - - See Also - -------- - nonzero, choose - - Notes - ----- - If `x` and `y` are given and input arrays are 1-D, `where` is - equivalent to:: - - [xv if c else yv for (c,xv,yv) in zip(condition,x,y)] - - Examples - -------- - >>> np.where([[True, False], [True, True]], - ... [[1, 2], [3, 4]], - ... [[9, 8], [7, 6]]) - array([[1, 8], - [3, 4]]) - - >>> np.where([[0, 1], [1, 0]]) - (array([0, 1]), array([1, 0])) - - >>> x = np.arange(9.).reshape(3, 3) - >>> np.where( x > 5 ) - (array([2, 2, 2]), array([0, 1, 2])) - >>> x[np.where( x > 3.0 )] # Note: result is 1D. - array([ 4., 5., 6., 7., 8.]) - >>> np.where(x < 5, x, -1) # Note: broadcasting. - array([[ 0., 1., 2.], - [ 3., 4., -1.], - [-1., -1., -1.]]) - - Find the indices of elements of `x` that are in `goodvalues`. - - >>> goodvalues = [3, 4, 7] - >>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape) - >>> ix - array([[False, False, False], - [ True, True, False], - [False, True, False]], dtype=bool) - >>> np.where(ix) - (array([1, 1, 2]), array([0, 1, 1])) - - """) - - -add_newdoc('numpy.core.multiarray', 'lexsort', - """ - lexsort(keys, axis=-1) - - Perform an indirect sort using a sequence of keys. - - Given multiple sorting keys, which can be interpreted as columns in a - spreadsheet, lexsort returns an array of integer indices that describes - the sort order by multiple columns. The last key in the sequence is used - for the primary sort order, the second-to-last key for the secondary sort - order, and so on. The keys argument must be a sequence of objects that - can be converted to arrays of the same shape. If a 2D array is provided - for the keys argument, its rows are interpreted as the sorting keys and - sorting is according to the last row, second last row etc. - - Parameters - ---------- - keys : (k, N) array or tuple containing k (N,)-shaped sequences - The `k` different "columns" to be sorted. The last column (or row if - `keys` is a 2D array) is the primary sort key. - axis : int, optional - Axis to be indirectly sorted. By default, sort over the last axis. - - Returns - ------- - indices : (N,) ndarray of ints - Array of indices that sort the keys along the specified axis. - - See Also - -------- - argsort : Indirect sort. - ndarray.sort : In-place sort. - sort : Return a sorted copy of an array. - - Examples - -------- - Sort names: first by surname, then by name. - - >>> surnames = ('Hertz', 'Galilei', 'Hertz') - >>> first_names = ('Heinrich', 'Galileo', 'Gustav') - >>> ind = np.lexsort((first_names, surnames)) - >>> ind - array([1, 2, 0]) - - >>> [surnames[i] + ", " + first_names[i] for i in ind] - ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] - - Sort two columns of numbers: - - >>> a = [1,5,1,4,3,4,4] # First column - >>> b = [9,4,0,4,0,2,1] # Second column - >>> ind = np.lexsort((b,a)) # Sort by a, then by b - >>> print ind - [2 0 4 6 5 3 1] - - >>> [(a[i],b[i]) for i in ind] - [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] - - Note that sorting is first according to the elements of ``a``. - Secondary sorting is according to the elements of ``b``. 
- - A normal ``argsort`` would have yielded: - - >>> [(a[i],b[i]) for i in np.argsort(a)] - [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] - - Structured arrays are sorted lexically by ``argsort``: - - >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], - ... dtype=np.dtype([('x', int), ('y', int)])) - - >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) - array([2, 0, 4, 6, 5, 3, 1]) - - """) - -add_newdoc('numpy.core.multiarray', 'can_cast', - """ - can_cast(from, totype, casting = 'safe') - - Returns True if cast between data types can occur according to the - casting rule. If from is a scalar or array scalar, also returns - True if the scalar value can be cast without overflow or truncation - to an integer. - - Parameters - ---------- - from : dtype, dtype specifier, scalar, or array - Data type, scalar, or array to cast from. - totype : dtype or dtype specifier - Data type to cast to. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - - Returns - ------- - out : bool - True if cast can occur according to the casting rule. - - Notes - ----- - Starting in NumPy 1.9, the can_cast function now returns False in 'safe' - casting mode for integer/float dtype and string dtype if the string dtype - length is not long enough to store the max integer/float value converted - to a string. Previously can_cast in 'safe' mode returned True for - integer/float dtype and a string dtype of any length. - - See also - -------- - dtype, result_type - - Examples - -------- - Basic examples - - >>> np.can_cast(np.int32, np.int64) - True - >>> np.can_cast(np.float64, np.complex) - True - >>> np.can_cast(np.complex, np.float) - False - - >>> np.can_cast('i8', 'f8') - True - >>> np.can_cast('i8', 'f4') - False - >>> np.can_cast('i4', 'S4') - False - - Casting scalars - - >>> np.can_cast(100, 'i1') - True - >>> np.can_cast(150, 'i1') - False - >>> np.can_cast(150, 'u1') - True - - >>> np.can_cast(3.5e100, np.float32) - False - >>> np.can_cast(1000.0, np.float32) - True - - Array scalar checks the value, array does not - - >>> np.can_cast(np.array(1000.0), np.float32) - True - >>> np.can_cast(np.array([1000.0]), np.float32) - False - - Using the casting rules - - >>> np.can_cast('<i8', '<i8', 'no') - True - >>> np.can_cast('<i8', '>i8', 'no') - False - - >>> np.can_cast('<i8', '>i8', 'equiv') - True - >>> np.can_cast('<i4', '>i8', 'equiv') - False - - >>> np.can_cast('<i4', '>i8', 'safe') - True - >>> np.can_cast('<i8', '>i4', 'safe') - False - - >>> np.can_cast('<i8', '>i4', 'same_kind') - True - >>> np.can_cast('<i8', '>u4', 'same_kind') - False - - >>> np.can_cast('<i8', '>u4', 'unsafe') - True - - """) - -add_newdoc('numpy.core.multiarray', 'promote_types', - """ - promote_types(type1, type2) - - Returns the data type with the smallest size and smallest scalar - kind to which both ``type1`` and ``type2`` may be safely cast. - The returned data type is always in native byte order. - - This function is symmetric and associative. - - Parameters - ---------- - type1 : dtype or dtype specifier - First data type. - type2 : dtype or dtype specifier - Second data type. - - Returns - ------- - out : dtype - The promoted data type. - - Notes - ----- - .. versionadded:: 1.6.0 
Starting in NumPy 1.9, the promote_types function now returns a valid string - length when given an integer or float dtype as one argument and a string - dtype as another argument. Previously it always returned the input string - dtype, even if it wasn't long enough to store the max integer/float value - converted to a string. - - See Also - -------- - result_type, dtype, can_cast - - Examples - -------- - >>> np.promote_types('f4', 'f8') - dtype('float64') - - >>> np.promote_types('i8', 'f4') - dtype('float64') - - >>> np.promote_types('>i8', '<c8') - dtype('complex128') - - >>> np.promote_types('i4', 'S8') - dtype('S11') - - """) - -add_newdoc('numpy.core.multiarray', 'min_scalar_type', - """ - min_scalar_type(a) - - For scalar ``a``, returns the data type with the smallest size - and smallest scalar kind which can hold its value. For non-scalar - array ``a``, returns the vector's dtype unmodified. - - Floating point values are not demoted to integers, - and complex values are not demoted to floats. - - Parameters - ---------- - a : scalar or array_like - The value whose minimal data type is to be found. - - Returns - ------- - out : dtype - The minimal data type. - - Notes - ----- - .. versionadded:: 1.6.0 - - See Also - -------- - result_type, promote_types, dtype, can_cast - - Examples - -------- - >>> np.min_scalar_type(10) - dtype('uint8') - - >>> np.min_scalar_type(-260) - dtype('int16') - - >>> np.min_scalar_type(3.1) - dtype('float16') - - >>> np.min_scalar_type(1e50) - dtype('float64') - - >>> np.min_scalar_type(np.arange(4,dtype='f8')) - dtype('float64') - - """) - -add_newdoc('numpy.core.multiarray', 'result_type', - """ - result_type(*arrays_and_dtypes) - - Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. - - Parameters - ---------- - arrays_and_dtypes : list of arrays and dtypes - The operands of some operation whose result type is needed. - - Returns - ------- - out : dtype - The result type. - - See also - -------- - dtype, promote_types, min_scalar_type, can_cast - - Notes - ----- - .. versionadded:: 1.6.0 - - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. - - Otherwise, `min_scalar_type` is called on each array, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. 
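A small sketch of the value-sensitivity just described, assuming a recent NumPy: an integer scalar only forces a wider result type when its value actually needs one.

    import numpy as np

    a = np.arange(7, dtype='i1')                         # int8 array
    assert np.result_type(3, a) == np.dtype('int8')      # 3 fits in int8
    assert np.result_type(300, a) == np.dtype('int16')   # 300 does not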
- - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. - - Examples - -------- - >>> np.result_type(3, np.arange(7, dtype='i1')) - dtype('int8') - - >>> np.result_type('i4', 'c8') - dtype('complex128') - - >>> np.result_type(3.0, -2) - dtype('float64') - - """) - -add_newdoc('numpy.core.multiarray', 'newbuffer', - """ - newbuffer(size) - - Return a new uninitialized buffer object. - - Parameters - ---------- - size : int - Size in bytes of returned buffer object. - - Returns - ------- - newbuffer : buffer object - Returned, uninitialized buffer object of `size` bytes. - - """) - -add_newdoc('numpy.core.multiarray', 'getbuffer', - """ - getbuffer(obj [,offset[, size]]) - - Create a buffer object from the given object referencing a slice of - length size starting at offset. - - Default is the entire buffer. A read-write buffer is attempted followed - by a read-only buffer. - - Parameters - ---------- - obj : object - - offset : int, optional - - size : int, optional - - Returns - ------- - buffer_obj : buffer - - Examples - -------- - >>> buf = np.getbuffer(np.ones(5), 1, 3) - >>> len(buf) - 3 - >>> buf[0] - '\\x00' - >>> buf - - - """) - -add_newdoc('numpy.core', 'dot', - """ - dot(a, b, out=None) - - Dot product of two arrays. - - For 2-D arrays it is equivalent to matrix multiplication, and for 1-D - arrays to inner product of vectors (without complex conjugation). For - N dimensions it is a sum product over the last axis of `a` and - the second-to-last of `b`:: - - dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) - - Parameters - ---------- - a : array_like - First argument. - b : array_like - Second argument. - out : ndarray, optional - Output argument. This must have the exact kind that would be returned - if it was not used. In particular, it must have the right type, must be - C-contiguous, and its dtype must be the dtype that would be returned - for `dot(a,b)`. This is a performance feature. Therefore, if these - conditions are not met, an exception is raised, instead of attempting - to be flexible. - - Returns - ------- - output : ndarray - Returns the dot product of `a` and `b`. If `a` and `b` are both - scalars or both 1-D arrays then a scalar is returned; otherwise - an array is returned. - If `out` is given, then it is returned. - - Raises - ------ - ValueError - If the last dimension of `a` is not the same size as - the second-to-last dimension of `b`. - - See Also - -------- - vdot : Complex-conjugating dot product. - tensordot : Sum products over arbitrary axes. - einsum : Einstein summation convention. - - Examples - -------- - >>> np.dot(3, 4) - 12 - - Neither argument is complex-conjugated: - - >>> np.dot([2j, 3j], [2j, 3j]) - (-13+0j) - - For 2-D arrays it's the matrix product: - - >>> a = [[1, 0], [0, 1]] - >>> b = [[4, 1], [2, 2]] - >>> np.dot(a, b) - array([[4, 1], - [2, 2]]) - - >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) - >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) - >>> np.dot(a, b)[2,3,2,1,2,2] - 499128 - >>> sum(a[2,3,2,:] * b[1,2,:,2]) - 499128 - - """) - -add_newdoc('numpy.core', 'einsum', - """ - einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe') - - Evaluates the Einstein summation convention on the operands. - - Using the Einstein summation convention, many common multi-dimensional - array operations can be represented in a simple fashion. 
This function - provides a way to compute such summations. The best way to understand this - function is to try the examples below, which show how many common NumPy - functions can be implemented as calls to `einsum`. - - Parameters - ---------- - subscripts : str - Specifies the subscripts for summation. - operands : list of array_like - These are the arrays for the operation. - out : ndarray, optional - If provided, the calculation is done into this array. - dtype : data-type, optional - If provided, forces the calculation to use the data type specified. - Note that you may have to also give a more liberal `casting` - parameter to allow the conversions. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the output. 'C' means it should - be C contiguous. 'F' means it should be Fortran contiguous, - 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. - 'K' means it should be as close to the layout of the inputs as - is possible, including arbitrarily permuted axes. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. Setting this to - 'unsafe' is not recommended, as it can adversely affect accumulations. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - - Returns - ------- - output : ndarray - The calculation based on the Einstein summation convention. - - See Also - -------- - dot, inner, outer, tensordot - - Notes - ----- - .. versionadded:: 1.6.0 - - The subscripts string is a comma-separated list of subscript labels, - where each label refers to a dimension of the corresponding operand. - Repeated subscript labels in one operand take the diagonal. For example, - ``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``. - - Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)`` - is equivalent to ``np.inner(a,b)``. If a label appears only once, - it is not summed, so ``np.einsum('i', a)`` produces a view of ``a`` - with no changes. - - The order of labels in the output is by default alphabetical. This - means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while - ``np.einsum('ji', a)`` takes its transpose. - - The output can be controlled by specifying output subscript labels - as well. This specifies the label order, and allows summing to - be disallowed or forced when desired. The call ``np.einsum('i->', a)`` - is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)`` - is like ``np.diag(a)``. The difference is that `einsum` does not - allow broadcasting by default. - - To enable and control broadcasting, use an ellipsis. Default - NumPy-style broadcasting is done by adding an ellipsis - to the left of each term, like ``np.einsum('...ii->...i', a)``. - To take the trace along the first and last axes, - you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix - product with the left-most indices instead of rightmost, you can do - ``np.einsum('ij...,jk...->ik...', a, b)``. - - When there is only one operand, no axes are summed, and no output - parameter is provided, a view into the operand is returned instead - of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` - produces a view. 
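A quick check of the single-operand view behaviour described above, assuming a recent NumPy; the diagonal extraction shares memory with its operand rather than copying it.

    import numpy as np

    a = np.arange(9).reshape(3, 3)
    d = np.einsum('ii->i', a)   # diagonal: no summation, no out=
    # No copy was made: the view's base is expected to be the operand.
    assert d.base is a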
- - An alternative way to provide the subscripts and operands is as - ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples - below have corresponding `einsum` calls with the two parameter methods. - - Examples - -------- - >>> a = np.arange(25).reshape(5,5) - >>> b = np.arange(5) - >>> c = np.arange(6).reshape(2,3) - - >>> np.einsum('ii', a) - 60 - >>> np.einsum(a, [0,0]) - 60 - >>> np.trace(a) - 60 - - >>> np.einsum('ii->i', a) - array([ 0, 6, 12, 18, 24]) - >>> np.einsum(a, [0,0], [0]) - array([ 0, 6, 12, 18, 24]) - >>> np.diag(a) - array([ 0, 6, 12, 18, 24]) - - >>> np.einsum('ij,j', a, b) - array([ 30, 80, 130, 180, 230]) - >>> np.einsum(a, [0,1], b, [1]) - array([ 30, 80, 130, 180, 230]) - >>> np.dot(a, b) - array([ 30, 80, 130, 180, 230]) - >>> np.einsum('...j,j', a, b) - array([ 30, 80, 130, 180, 230]) - - >>> np.einsum('ji', c) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> np.einsum(c, [1,0]) - array([[0, 3], - [1, 4], - [2, 5]]) - >>> c.T - array([[0, 3], - [1, 4], - [2, 5]]) - - >>> np.einsum('..., ...', 3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - >>> np.multiply(3, c) - array([[ 0, 3, 6], - [ 9, 12, 15]]) - - >>> np.einsum('i,i', b, b) - 30 - >>> np.einsum(b, [0], b, [0]) - 30 - >>> np.inner(b,b) - 30 - - >>> np.einsum('i,j', np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.einsum(np.arange(2)+1, [0], b, [1]) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - >>> np.outer(np.arange(2)+1, b) - array([[0, 1, 2, 3, 4], - [0, 2, 4, 6, 8]]) - - >>> np.einsum('i...->...', a) - array([50, 55, 60, 65, 70]) - >>> np.einsum(a, [0,Ellipsis], [Ellipsis]) - array([50, 55, 60, 65, 70]) - >>> np.sum(a, axis=0) - array([50, 55, 60, 65, 70]) - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> np.einsum('ijk,jil->kl', a, b) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> np.tensordot(a,b, axes=([1,0],[0,1])) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - - >>> a = np.arange(6).reshape((3,2)) - >>> b = np.arange(12).reshape((4,3)) - >>> np.einsum('ki,jk->ij', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('ki,...k->i...', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - >>> np.einsum('k...,jk', a, b) - array([[10, 28, 46, 64], - [13, 40, 67, 94]]) - - """) - -add_newdoc('numpy.core', 'alterdot', - """ - Change `dot`, `vdot`, and `inner` to use accelerated BLAS functions. - - Typically, as a user of Numpy, you do not explicitly call this function. If - Numpy is built with an accelerated BLAS, this function is automatically - called when Numpy is imported. - - When Numpy is built with an accelerated BLAS like ATLAS, these functions - are replaced to make use of the faster implementations. The faster - implementations only affect float32, float64, complex64, and complex128 - arrays. Furthermore, the BLAS API only includes matrix-matrix, - matrix-vector, and vector-vector products. Products of arrays with larger - dimensionalities use the built in functions and are not accelerated. - - See Also - -------- - restoredot : `restoredot` undoes the effects of `alterdot`. 
- - """) - -add_newdoc('numpy.core', 'restoredot', - """ - Restore `dot`, `vdot`, and `innerproduct` to the default non-BLAS - implementations. - - Typically, the user will only need to call this when troubleshooting and - installation problem, reproducing the conditions of a build without an - accelerated BLAS, or when being very careful about benchmarking linear - algebra operations. - - See Also - -------- - alterdot : `restoredot` undoes the effects of `alterdot`. - - """) - -add_newdoc('numpy.core', 'vdot', - """ - vdot(a, b) - - Return the dot product of two vectors. - - The vdot(`a`, `b`) function handles complex numbers differently than - dot(`a`, `b`). If the first argument is complex the complex conjugate - of the first argument is used for the calculation of the dot product. - - Note that `vdot` handles multidimensional arrays differently than `dot`: - it does *not* perform a matrix product, but flattens input arguments - to 1-D vectors first. Consequently, it should only be used for vectors. - - Parameters - ---------- - a : array_like - If `a` is complex the complex conjugate is taken before calculation - of the dot product. - b : array_like - Second argument to the dot product. - - Returns - ------- - output : ndarray - Dot product of `a` and `b`. Can be an int, float, or - complex depending on the types of `a` and `b`. - - See Also - -------- - dot : Return the dot product without using the complex conjugate of the - first argument. - - Examples - -------- - >>> a = np.array([1+2j,3+4j]) - >>> b = np.array([5+6j,7+8j]) - >>> np.vdot(a, b) - (70-8j) - >>> np.vdot(b, a) - (70+8j) - - Note that higher-dimensional arrays are flattened! - - >>> a = np.array([[1, 4], [5, 6]]) - >>> b = np.array([[4, 1], [2, 2]]) - >>> np.vdot(a, b) - 30 - >>> np.vdot(b, a) - 30 - >>> 1*4 + 4*1 + 5*2 + 6*2 - 30 - - """) - - -############################################################################## -# -# Documentation for ndarray attributes and methods -# -############################################################################## - - -############################################################################## -# -# ndarray object -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', - """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) - - An array object represents a multidimensional, homogeneous array - of fixed-size items. An associated data-type object describes the - format of each element in the array (its byte-order, how many bytes it - occupies in memory, whether it is an integer, a floating point number, - or something else, etc.) - - Arrays should be constructed using `array`, `zeros` or `empty` (refer - to the See Also section below). The parameters given here refer to - a low-level method (`ndarray(...)`) for instantiating an array. - - For more information, refer to the `numpy` module and examine the - the methods and attributes of an array. - - Parameters - ---------- - (for the __new__ method; see Notes below) - - shape : tuple of ints - Shape of created array. - dtype : data-type, optional - Any object that can be interpreted as a numpy data type. - buffer : object exposing buffer interface, optional - Used to fill the array with data. - offset : int, optional - Offset of array data in buffer. - strides : tuple of ints, optional - Strides of data in memory. - order : {'C', 'F'}, optional - Row-major or column-major order. 
- - Attributes - ---------- - T : ndarray - Transpose of the array. - data : buffer - The array's elements, in memory. - dtype : dtype object - Describes the format of the elements in the array. - flags : dict - Dictionary containing information related to memory use, e.g., - 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. - flat : numpy.flatiter object - Flattened version of the array as an iterator. The iterator - allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for - assignment examples; TODO). - imag : ndarray - Imaginary part of the array. - real : ndarray - Real part of the array. - size : int - Number of elements in the array. - itemsize : int - The memory use of each array element in bytes. - nbytes : int - The total number of bytes required to store the array data, - i.e., ``itemsize * size``. - ndim : int - The array's number of dimensions. - shape : tuple of ints - Shape of the array. - strides : tuple of ints - The step-size required to move from one element to the next in - memory. For example, a contiguous ``(3, 4)`` array of type - ``int16`` in C-order has strides ``(8, 2)``. This implies that - to move from element to element in memory requires jumps of 2 bytes. - To move from row-to-row, one needs to jump 8 bytes at a time - (``2 * 4``). - ctypes : ctypes object - Class containing properties of the array needed for interaction - with ctypes. - base : ndarray - If the array is a view into another array, that array is its `base` - (unless that array is also a view). The `base` array is where the - array data is actually stored. - - See Also - -------- - array : Construct an array. - zeros : Create an array, each element of which is zero. - empty : Create an array, but leave its allocated memory unchanged (i.e., - it contains "garbage"). - dtype : Create a data-type. - - Notes - ----- - There are two modes of creating an array using ``__new__``: - - 1. If `buffer` is None, then only `shape`, `dtype`, and `order` - are used. - 2. If `buffer` is an object exposing the buffer interface, then - all keywords are interpreted. - - No ``__init__`` method is needed because the array is fully initialized - after the ``__new__`` method. - - Examples - -------- - These examples illustrate the low-level `ndarray` constructor. Refer - to the `See Also` section above for easier ways of constructing an - ndarray. - - First mode, `buffer` is None: - - >>> np.ndarray(shape=(2,2), dtype=float, order='F') - array([[ -1.13698227e+002, 4.25087011e-303], - [ 2.88528414e-306, 3.27025015e-309]]) #random - - Second mode: - - >>> np.ndarray((2,), buffer=np.array([1,2,3]), - ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. 
skip first element - array([2, 3]) - - """) - - -############################################################################## -# -# ndarray attributes -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', - """Array protocol: Python side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', - """None.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', - """Array priority.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', - """Array protocol: C-struct side.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_', - """Allow the array to be interpreted as a ctypes object by returning the - data-memory location as an integer - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('base', - """ - Base object if memory is from some other object. - - Examples - -------- - The base of an array that owns its memory is None: - - >>> x = np.array([1,2,3,4]) - >>> x.base is None - True - - Slicing creates a view, whose memory is shared with x: - - >>> y = x[2:] - >>> y.base is x - True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', - """ - An object to simplify the interaction of the array with the ctypes - module. - - This attribute creates an object that makes it easier to use arrays - when calling shared libraries with the ctypes module. The returned - object has, among others, data, shape, and strides attributes (see - Notes below) which themselves return ctypes objects that can be used - as arguments to a shared library. - - Parameters - ---------- - None - - Returns - ------- - c : Python object - Possessing attributes data, shape, strides, etc. - - See Also - -------- - numpy.ctypeslib - - Notes - ----- - Below are the public attributes of this object which were documented - in "Guide to NumPy" (we have omitted undocumented public attributes, - as well as documented private attributes): - - * data: A pointer to the memory area of the array as a Python integer. - This memory area may contain data that is not aligned, or not in correct - byte-order. The memory area may not even be writeable. The array - flags and data-type of this array should be respected when passing this - attribute to arbitrary C-code to avoid trouble that can include Python - crashing. User Beware! The value of this attribute is exactly the same - as self._array_interface_['data'][0]. - - * shape (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the C-integer corresponding to dtype('p') on this - platform. This base-type could be c_int, c_long, or c_longlong - depending on the platform. The c_intp type is defined accordingly in - numpy.ctypeslib. The ctypes array contains the shape of the underlying - array. - - * strides (c_intp*self.ndim): A ctypes array of length self.ndim where - the basetype is the same as for the shape attribute. This ctypes array - contains the strides information from the underlying array. This strides - information is important for showing how many bytes must be jumped to - get to the next element in the array. - - * data_as(obj): Return the data pointer cast to a particular c-types object. - For example, calling self._as_parameter_ is equivalent to - self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a - pointer to a ctypes array of floating-point data: - self.data_as(ctypes.POINTER(ctypes.c_double)). 
- - * shape_as(obj): Return the shape tuple as an array of some other c-types - type. For example: self.shape_as(ctypes.c_short). - - * strides_as(obj): Return the strides tuple as an array of some other - c-types type. For example: self.strides_as(ctypes.c_longlong). - - Be careful using the ctypes attribute - especially on temporary - arrays or arrays constructed on the fly. For example, calling - ``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory - that is invalid because the array created as (a+b) is deallocated - before the next Python statement. You can avoid this problem using - either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will - hold a reference to the array until ct is deleted or re-assigned. - - If the ctypes module is not available, then the ctypes attribute - of array objects still returns something useful, but ctypes objects - are not returned and errors may be raised instead. In particular, - the object will still have the as parameter attribute which will - return an integer equal to the data attribute. - - Examples - -------- - >>> import ctypes - >>> x - array([[0, 1], - [2, 3]]) - >>> x.ctypes.data - 30439712 - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)) - - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents - c_long(0) - >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents - c_longlong(4294967296L) - >>> x.ctypes.shape - - >>> x.ctypes.shape_as(ctypes.c_long) - - >>> x.ctypes.strides - - >>> x.ctypes.strides_as(ctypes.c_longlong) - - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('data', - """Python buffer object pointing to the start of the array's data.""")) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', - """ - Data-type of the array's elements. - - Parameters - ---------- - None - - Returns - ------- - d : numpy dtype object - - See Also - -------- - numpy.dtype - - Examples - -------- - >>> x - array([[0, 1], - [2, 3]]) - >>> x.dtype - dtype('int32') - >>> type(x.dtype) - - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', - """ - The imaginary part of the array. - - Examples - -------- - >>> x = np.sqrt([1+0j, 0+1j]) - >>> x.imag - array([ 0. , 0.70710678]) - >>> x.imag.dtype - dtype('float64') - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', - """ - Length of one array element in bytes. - - Examples - -------- - >>> x = np.array([1,2,3], dtype=np.float64) - >>> x.itemsize - 8 - >>> x = np.array([1,2,3], dtype=np.complex128) - >>> x.itemsize - 16 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', - """ - Information about the memory layout of the array. - - Attributes - ---------- - C_CONTIGUOUS (C) - The data is in a single, C-style contiguous segment. - F_CONTIGUOUS (F) - The data is in a single, Fortran-style contiguous segment. - OWNDATA (O) - The array owns the memory it uses or borrows it from another object. - WRITEABLE (W) - The data area can be written to. Setting this to False locks - the data, making it read-only. A view (slice, etc.) inherits WRITEABLE - from its base array at creation time, but a view of a writeable - array may be subsequently locked while the base array remains writeable. - (The opposite is not true, in that a view of a locked array may not - be made writeable. 
However, currently, locking a base object does not - lock any views that already reference it, so under that circumstance it - is possible to alter the contents of a locked array via a previously - created writeable view onto it.) Attempting to change a non-writeable - array raises a RuntimeError exception. - ALIGNED (A) - The data and all elements are aligned appropriately for the hardware. - UPDATEIFCOPY (U) - This array is a copy of some other array. When this array is - deallocated, the base array will be updated with the contents of - this array. - FNC - F_CONTIGUOUS and not C_CONTIGUOUS. - FORC - F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). - BEHAVED (B) - ALIGNED and WRITEABLE. - CARRAY (CA) - BEHAVED and C_CONTIGUOUS. - FARRAY (FA) - BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. - - Notes - ----- - The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), - or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag - names are only supported in dictionary access. - - Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by - the user, via direct assignment to the attribute or dictionary entry, - or by calling `ndarray.setflags`. - - The array flags cannot be set arbitrarily: - - - UPDATEIFCOPY can only be set ``False``. - - ALIGNED can only be set ``True`` if the data is truly aligned. - - WRITEABLE can only be set ``True`` if the array owns its own memory - or the ultimate owner of the memory exposes a writeable buffer - interface or is a string. - - Arrays can be both C-style and Fortran-style contiguous simultaneously. - This is clear for 1-dimensional arrays, but can also be true for higher - dimensional arrays. - - Even for contiguous arrays a stride for a given dimension - ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` - or the array has no elements. - It does *not* generally hold that ``self.strides[-1] == self.itemsize`` - for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for - Fortran-style contiguous arrays is true. - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', - """ - A 1-D iterator over the array. - - This is a `numpy.flatiter` instance, which acts similarly to, but is not - a subclass of, Python's built-in iterator object. - - See Also - -------- - flatten : Return a copy of the array collapsed into one dimension. - - flatiter - - Examples - -------- - >>> x = np.arange(1, 7).reshape(2, 3) - >>> x - array([[1, 2, 3], - [4, 5, 6]]) - >>> x.flat[3] - 4 - >>> x.T - array([[1, 4], - [2, 5], - [3, 6]]) - >>> x.T.flat[3] - 5 - >>> type(x.flat) - - - An assignment example: - - >>> x.flat = 3; x - array([[3, 3, 3], - [3, 3, 3]]) - >>> x.flat[[1,4]] = 1; x - array([[3, 1, 3], - [3, 1, 3]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', - """ - Total bytes consumed by the elements of the array. - - Notes - ----- - Does not include memory consumed by non-element attributes of the - array object. - - Examples - -------- - >>> x = np.zeros((3,5,2), dtype=np.complex128) - >>> x.nbytes - 480 - >>> np.prod(x.shape) * x.itemsize - 480 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', - """ - Number of array dimensions. - - Examples - -------- - >>> x = np.array([1, 2, 3]) - >>> x.ndim - 1 - >>> y = np.zeros((2, 3, 4)) - >>> y.ndim - 3 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('real', - """ - The real part of the array. 
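For complex arrays this attribute is a view into the same storage, so writes through it propagate; a minimal sketch assuming a recent NumPy:

    import numpy as np

    z = np.array([1 + 2j, 3 + 4j])
    z.real[0] = 9.0          # .real aliases the storage of z
    assert z[0] == 9 + 2j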
- - Examples - -------- - >>> x = np.sqrt([1+0j, 0+1j]) - >>> x.real - array([ 1. , 0.70710678]) - >>> x.real.dtype - dtype('float64') - - See Also - -------- - numpy.real : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', - """ - Tuple of array dimensions. - - Notes - ----- - May be used to "reshape" the array, as long as this would not - require a change in the total number of elements - - Examples - -------- - >>> x = np.array([1, 2, 3, 4]) - >>> x.shape - (4,) - >>> y = np.zeros((2, 3, 4)) - >>> y.shape - (2, 3, 4) - >>> y.shape = (3, 8) - >>> y - array([[ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.]]) - >>> y.shape = (3, 6) - Traceback (most recent call last): - File "", line 1, in - ValueError: total size of new array must be unchanged - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('size', - """ - Number of elements in the array. - - Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's - dimensions. - - Examples - -------- - >>> x = np.zeros((3, 5, 2), dtype=np.complex128) - >>> x.size - 30 - >>> np.prod(x.shape) - 30 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', - """ - Tuple of bytes to step in each dimension when traversing an array. - - The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` - is:: - - offset = sum(np.array(i) * a.strides) - - A more detailed explanation of strides can be found in the - "ndarray.rst" file in the NumPy reference guide. - - Notes - ----- - Imagine an array of 32-bit integers (each 4 bytes):: - - x = np.array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]], dtype=np.int32) - - This array is stored in memory as 40 bytes, one after the other - (known as a contiguous block of memory). The strides of an array tell - us how many bytes we have to skip in memory to move to the next position - along a certain axis. For example, we have to skip 4 bytes (1 value) to - move to the next column, but 20 bytes (5 values) to get to the same - position in the next row. As such, the strides for the array `x` will be - ``(20, 4)``. - - See Also - -------- - numpy.lib.stride_tricks.as_strided - - Examples - -------- - >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) - >>> y - array([[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23]]]) - >>> y.strides - (48, 16, 4) - >>> y[1,1,1] - 17 - >>> offset=sum(y.strides * np.array((1,1,1))) - >>> offset/y.itemsize - 17 - - >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) - >>> x.strides - (32, 4, 224, 1344) - >>> i = np.array([3,5,2,2]) - >>> offset = sum(i * x.strides) - >>> x[3,5,2,2] - 813 - >>> offset / x.itemsize - 813 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('T', - """ - Same as self.transpose(), except that self is returned if - self.ndim < 2. - - Examples - -------- - >>> x = np.array([[1.,2.],[3.,4.]]) - >>> x - array([[ 1., 2.], - [ 3., 4.]]) - >>> x.T - array([[ 1., 3.], - [ 2., 4.]]) - >>> x = np.array([1.,2.,3.,4.]) - >>> x - array([ 1., 2., 3., 4.]) - >>> x.T - array([ 1., 2., 3., 4.]) - - """)) - - -############################################################################## -# -# ndarray methods -# -############################################################################## - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', - """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. 
- - Returns either a new reference to self if dtype is not given or a new array - of provided data type if dtype is different from the current dtype of the - array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', - """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', - """a.__array_wrap__(obj) -> Object of same type as ndarray object a. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', - """a.__copy__([order]) - - Return a copy of the array. - - Parameters - ---------- - order : {'C', 'F', 'A'}, optional - If order is 'C' (False) then the result is contiguous (default). - If order is 'Fortran' (True) then the result has fortran order. - If order is 'Any' (None) then the result has fortran order - only if the array already is in fortran order. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', - """a.__deepcopy__() -> Deep copy of array. - - Used if copy.deepcopy is called on an array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', - """a.__reduce__() - - For pickling. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', - """a.__setstate__(version, shape, dtype, isfortran, rawdata) - - For unpickling. - - Parameters - ---------- - version : int - optional pickle version. If omitted defaults to 0. - shape : tuple - dtype : data-type - isFortran : bool - rawdata : string or list - a binary string with the data (or a list if 'a' is an object array) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('all', - """ - a.all(axis=None, out=None) - - Returns True if all elements evaluate to True. - - Refer to `numpy.all` for full documentation. - - See Also - -------- - numpy.all : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('any', - """ - a.any(axis=None, out=None) - - Returns True if any of the elements of `a` evaluate to True. - - Refer to `numpy.any` for full documentation. - - See Also - -------- - numpy.any : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', - """ - a.argmax(axis=None, out=None) - - Return indices of the maximum values along the given axis. - - Refer to `numpy.argmax` for full documentation. - - See Also - -------- - numpy.argmax : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', - """ - a.argmin(axis=None, out=None) - - Return indices of the minimum values along the given axis of `a`. - - Refer to `numpy.argmin` for detailed documentation. - - See Also - -------- - numpy.argmin : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', - """ - a.argsort(axis=-1, kind='quicksort', order=None) - - Returns the indices that would sort this array. - - Refer to `numpy.argsort` for full documentation. - - See Also - -------- - numpy.argsort : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', - """ - a.argpartition(kth, axis=-1, kind='introselect', order=None) - - Returns the indices that would partition this array. - - Refer to `numpy.argpartition` for full documentation. - - .. 
versionadded:: 1.8.0 - - See Also - -------- - numpy.argpartition : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', - """ - a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) - - Copy of the array, cast to a specified type. - - Parameters - ---------- - dtype : str or dtype - Typecode or data-type to which the array is cast. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout order of the result. - 'C' means C order, 'F' means Fortran order, 'A' - means 'F' order if all the arrays are Fortran contiguous, - 'C' order otherwise, and 'K' means as close to the - order the array elements appear in memory as possible. - Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur. Defaults to 'unsafe' - for backwards compatibility. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - subok : bool, optional - If True, then sub-classes will be passed-through (default), otherwise - the returned array will be forced to be a base-class array. - copy : bool, optional - By default, astype always returns a newly allocated array. If this - is set to false, and the `dtype`, `order`, and `subok` - requirements are satisfied, the input array is returned instead - of a copy. - - Returns - ------- - arr_t : ndarray - Unless `copy` is False and the other conditions for returning the input - array are satisfied (see description for the `copy` input parameter), `arr_t` - is a new array of the same shape as the input array, with dtype, order - given by `dtype`, `order`. - - Notes - ----- - Starting in NumPy 1.9, the astype method returns an error if the string - dtype to cast to is not long enough in 'safe' casting mode to hold the max - value of the integer/float array that is being cast. Previously the casting - was allowed even if the result was truncated. - - Raises - ------ - ComplexWarning - When casting from complex to float or int. To avoid this, - one should use ``a.real.astype(t)``. - - Examples - -------- - >>> x = np.array([1, 2, 2.5]) - >>> x - array([ 1. , 2. , 2.5]) - - >>> x.astype(int) - array([1, 2, 2]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', - """ - a.byteswap(inplace) - - Swap the bytes of the array elements - - Toggle between low-endian and big-endian data representation by - returning a byteswapped array, optionally swapped in-place. - - Parameters - ---------- - inplace : bool, optional - If ``True``, swap bytes in-place, default is ``False``. - - Returns - ------- - out : ndarray - The byteswapped array. If `inplace` is ``True``, this is - a view to self. - - Examples - -------- - >>> A = np.array([1, 256, 8755], dtype=np.int16) - >>> map(hex, A) - ['0x1', '0x100', '0x2233'] - >>> A.byteswap(True) - array([ 256, 1, 13090], dtype=int16) - >>> map(hex, A) - ['0x100', '0x1', '0x3322'] - - Arrays of strings are not swapped - - >>> A = np.array(['ceg', 'fac']) - >>> A.byteswap() - array(['ceg', 'fac'], - dtype='|S3') - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', - """ - a.choose(choices, out=None, mode='raise') - - Use an index array to construct a new array from a set of choices. 
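- - For example (an editorial sketch; the index and choice arrays below are invented for illustration), each index selects, position by position, from the matching choice sequence: - - >>> choices = [[0, 0, 0], [10, 10, 10], [20, 20, 20]] - >>> np.array([2, 0, 1]).choose(choices) - array([20, 0, 10]) 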
- - Refer to `numpy.choose` for full documentation. - - See Also - -------- - numpy.choose : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', - """ - a.clip(a_min, a_max, out=None) - - Return an array whose values are limited to ``[a_min, a_max]``. - - Refer to `numpy.clip` for full documentation. - - See Also - -------- - numpy.clip : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', - """ - a.compress(condition, axis=None, out=None) - - Return selected slices of this array along given axis. - - Refer to `numpy.compress` for full documentation. - - See Also - -------- - numpy.compress : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', - """ - a.conj() - - Complex-conjugate all elements. - - Refer to `numpy.conjugate` for full documentation. - - See Also - -------- - numpy.conjugate : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', - """ - a.conjugate() - - Return the complex conjugate, element-wise. - - Refer to `numpy.conjugate` for full documentation. - - See Also - -------- - numpy.conjugate : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', - """ - a.copy(order='C') - - Return a copy of the array. - - Parameters - ---------- - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the copy. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. (Note that this function and :func:numpy.copy are very - similar, but have different default values for their order= - arguments.) - - See also - -------- - numpy.copy - numpy.copyto - - Examples - -------- - >>> x = np.array([[1,2,3],[4,5,6]], order='F') - - >>> y = x.copy() - - >>> x.fill(0) - - >>> x - array([[0, 0, 0], - [0, 0, 0]]) - - >>> y - array([[1, 2, 3], - [4, 5, 6]]) - - >>> y.flags['C_CONTIGUOUS'] - True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', - """ - a.cumprod(axis=None, dtype=None, out=None) - - Return the cumulative product of the elements along the given axis. - - Refer to `numpy.cumprod` for full documentation. - - See Also - -------- - numpy.cumprod : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', - """ - a.cumsum(axis=None, dtype=None, out=None) - - Return the cumulative sum of the elements along the given axis. - - Refer to `numpy.cumsum` for full documentation. - - See Also - -------- - numpy.cumsum : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', - """ - a.diagonal(offset=0, axis1=0, axis2=1) - - Return specified diagonals. In NumPy 1.9 the returned array is a - read-only view instead of a copy as in previous NumPy versions. In - NumPy 1.10 the read-only restriction will be removed. - - Refer to :func:`numpy.diagonal` for full documentation. - - See Also - -------- - numpy.diagonal : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', - """ - a.dot(b, out=None) - - Dot product of two arrays. - - Refer to `numpy.dot` for full documentation. 
- - See Also - -------- - numpy.dot : equivalent function - - Examples - -------- - >>> a = np.eye(2) - >>> b = np.ones((2, 2)) * 2 - >>> a.dot(b) - array([[ 2., 2.], - [ 2., 2.]]) - - This array method can be conveniently chained: - - >>> a.dot(b).dot(b) - array([[ 8., 8.], - [ 8., 8.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', - """a.dump(file) - - Dump a pickle of the array to the specified file. - The array can be read back with pickle.load or numpy.load. - - Parameters - ---------- - file : str - A string naming the dump file. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', - """ - a.dumps() - - Returns the pickle of the array as a string. - pickle.loads or numpy.loads will convert the string back to an array. - - Parameters - ---------- - None - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', - """ - a.fill(value) - - Fill the array with a scalar value. - - Parameters - ---------- - value : scalar - All elements of `a` will be assigned this value. - - Examples - -------- - >>> a = np.array([1, 2]) - >>> a.fill(0) - >>> a - array([0, 0]) - >>> a = np.empty(2) - >>> a.fill(1) - >>> a - array([ 1., 1.]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', - """ - a.flatten(order='C') - - Return a copy of the array collapsed into one dimension. - - Parameters - ---------- - order : {'C', 'F', 'A'}, optional - Whether to flatten in C (row-major), Fortran (column-major) order, - or preserve the C/Fortran ordering from `a`. - The default is 'C'. - - Returns - ------- - y : ndarray - A copy of the input array, flattened to one dimension. - - See Also - -------- - ravel : Return a flattened array. - flat : A 1-D flat iterator over the array. - - Examples - -------- - >>> a = np.array([[1,2], [3,4]]) - >>> a.flatten() - array([1, 2, 3, 4]) - >>> a.flatten('F') - array([1, 3, 2, 4]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', - """ - a.getfield(dtype, offset=0) - - Returns a field of the given array as a certain type. - - A field is a view of the array data with a given data-type. The values in - the view are determined by the given type and the offset into the current - array in bytes. The offset needs to be such that the view dtype fits in the - array dtype; for example an array of dtype complex128 has 16-byte elements. - If taking a view with a 32-bit integer (4 bytes), the offset needs to be - between 0 and 12 bytes. - - Parameters - ---------- - dtype : str or dtype - The data type of the view. The dtype size of the view can not be larger - than that of the array itself. - offset : int - Number of bytes to skip before beginning the element view. - - Examples - -------- - >>> x = np.diag([1.+1.j]*2) - >>> x[1, 1] = 2 + 4.j - >>> x - array([[ 1.+1.j, 0.+0.j], - [ 0.+0.j, 2.+4.j]]) - >>> x.getfield(np.float64) - array([[ 1., 0.], - [ 0., 2.]]) - - By choosing an offset of 8 bytes we can select the complex part of the - array for our view: - - >>> x.getfield(np.float64, offset=8) - array([[ 1., 0.], - [ 0., 4.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('item', - """ - a.item(*args) - - Copy an element of an array to a standard Python scalar and return it. - - Parameters - ---------- - \\*args : Arguments (variable number and type) - - * none: in this case, the method only works for arrays - with one element (`a.size == 1`), which element is - copied into a standard Python scalar object and returned. 
- - * int_type: this argument is interpreted as a flat index into - the array, specifying which element to copy and return. - - * tuple of int_types: functions as does a single int_type argument, - except that the argument is interpreted as an nd-index into the - array. - - Returns - ------- - z : Standard Python scalar object - A copy of the specified element of the array as a suitable - Python scalar - - Notes - ----- - When the data type of `a` is longdouble or clongdouble, item() returns - a scalar array object because there is no available Python scalar that - would not lose information. Void arrays return a buffer object for item(), - unless fields are defined, in which case a tuple is returned. - - `item` is very similar to a[args], except, instead of an array scalar, - a standard Python scalar is returned. This can be useful for speeding up - access to elements of the array and doing arithmetic on elements of the - array using Python's optimized math. - - Examples - -------- - >>> x = np.random.randint(9, size=(3, 3)) - >>> x - array([[3, 1, 7], - [2, 8, 3], - [8, 5, 3]]) - >>> x.item(3) - 2 - >>> x.item(7) - 5 - >>> x.item((0, 1)) - 1 - >>> x.item((2, 2)) - 3 - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', - """ - a.itemset(*args) - - Insert scalar into an array (scalar is cast to array's dtype, if possible) - - There must be at least 1 argument, and define the last argument - as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster - than ``a[args] = item``. The item should be a scalar value and `args` - must select a single item in the array `a`. - - Parameters - ---------- - \*args : Arguments - If one argument: a scalar, only used in case `a` is of size 1. - If two arguments: the last argument is the value to be set - and must be a scalar, the first argument specifies a single array - element location. It is either an int or a tuple. - - Notes - ----- - Compared to indexing syntax, `itemset` provides some speed increase - for placing a scalar into a particular location in an `ndarray`, - if you must do this. However, generally this is discouraged: - among other problems, it complicates the appearance of the code. - Also, when using `itemset` (and `item`) inside a loop, be sure - to assign the methods to a local variable to avoid the attribute - look-up at each loop iteration. - - Examples - -------- - >>> x = np.random.randint(9, size=(3, 3)) - >>> x - array([[3, 1, 7], - [2, 8, 3], - [8, 5, 3]]) - >>> x.itemset(4, 0) - >>> x.itemset((2, 2), 9) - >>> x - array([[3, 1, 7], - [2, 0, 3], - [8, 5, 9]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setasflat', - """ - a.setasflat(arr) - - Equivalent to a.flat = arr.flat, but is generally more efficient. - This function does not check for overlap, so if ``arr`` and ``a`` - are viewing the same data with different strides, the results will - be unpredictable. - - Parameters - ---------- - arr : array_like - The array to copy into a. - - Examples - -------- - >>> a = np.arange(2*4).reshape(2,4)[:,:-1]; a - array([[0, 1, 2], - [4, 5, 6]]) - >>> b = np.arange(3*3, dtype='f4').reshape(3,3).T[::-1,:-1]; b - array([[ 2., 5.], - [ 1., 4.], - [ 0., 3.]], dtype=float32) - >>> a.setasflat(b) - >>> a - array([[2, 5, 1], - [4, 0, 3]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('max', - """ - a.max(axis=None, out=None) - - Return the maximum along a given axis. - - Refer to `numpy.amax` for full documentation. 
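- - A quick illustration (an editorial sketch; `x` is an invented example array): - - >>> x = np.arange(4).reshape((2, 2)) - >>> x.max() - 3 - >>> x.max(axis=0) - array([2, 3]) 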
- - See Also - -------- - numpy.amax : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', - """ - a.mean(axis=None, dtype=None, out=None) - - Returns the average of the array elements along given axis. - - Refer to `numpy.mean` for full documentation. - - See Also - -------- - numpy.mean : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('min', - """ - a.min(axis=None, out=None) - - Return the minimum along a given axis. - - Refer to `numpy.amin` for full documentation. - - See Also - -------- - numpy.amin : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'may_share_memory', - """ - Determine if two arrays can share memory - - The memory-bounds of a and b are computed. If they overlap then - this function returns True. Otherwise, it returns False. - - A return of True does not necessarily mean that the two arrays - share any element. It just means that they *might*. - - Parameters - ---------- - a, b : ndarray - - Returns - ------- - out : bool - - Examples - -------- - >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) - False - - """) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', - """ - arr.newbyteorder(new_order='S') - - Return the array with the same data viewed with a different byte order. - - Equivalent to:: - - arr.view(arr.dtype.newbyteorder(new_order)) - - Changes are also made in all fields and sub-arrays of the array data - type. - - Parameters - ---------- - new_order : string, optional - Byte order to force; a value from the byte order specifications - above. `new_order` codes can be any of:: - - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) - - The default value ('S') results in swapping the current - byte order. The code does a case-insensitive check on the first - letter of `new_order` for the alternatives above. For example, - any of 'B' or 'b' or 'biggish' are valid to specify big-endian. - - Returns - ------- - new_arr : array - New array object with the dtype reflecting given change to the - byte order. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', - """ - a.nonzero() - - Return the indices of the elements that are non-zero. - - Refer to `numpy.nonzero` for full documentation. - - See Also - -------- - numpy.nonzero : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', - """ - a.prod(axis=None, dtype=None, out=None) - - Return the product of the array elements over the given axis. - - Refer to `numpy.prod` for full documentation. - - See Also - -------- - numpy.prod : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', - """ - a.ptp(axis=None, out=None) - - Peak to peak (maximum - minimum) value along a given axis. - - Refer to `numpy.ptp` for full documentation. - - See Also - -------- - numpy.ptp : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('put', - """ - a.put(indices, values, mode='raise') - - Set ``a.flat[n] = values[n]`` for all `n` in indices. - - Refer to `numpy.put` for full documentation. 
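- - A quick illustration (an editorial sketch; `a` is an invented example array): - - >>> a = np.arange(5) - >>> a.put([0, 2], [-44, -55]) - >>> a - array([-44, 1, -55, 3, 4]) 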
- - See Also - -------- - numpy.put : equivalent function - - """)) - -add_newdoc('numpy.core.multiarray', 'copyto', - """ - copyto(dst, src, casting='same_kind', where=None, preservena=False) - - Copies values from one array to another, broadcasting as necessary. - - Raises a TypeError if the `casting` rule is violated, and if - `where` is provided, it selects which elements to copy. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - dst : ndarray - The array into which values are copied. - src : array_like - The array from which values are copied. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional - Controls what kind of data casting may occur when copying. - - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. - where : array_like of bool, optional - A boolean array which is broadcasted to match the dimensions - of `dst`, and selects elements to copy from `src` to `dst` - wherever it contains the value True. - preservena : bool, optional - If set to True, leaves any NA values in `dst` untouched. This - is similar to the "hard mask" feature in numpy.ma. - - """) - -add_newdoc('numpy.core.multiarray', 'putmask', - """ - putmask(a, mask, values) - - Changes elements of an array based on conditional and input values. - - Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. - - If `values` is not the same size as `a` and `mask` then it will repeat. - This gives behavior different from ``a[mask] = values``. - - .. note:: The `putmask` functionality is also provided by `copyto`, which - can be significantly faster and in addition is NA-aware - (`preservena` keyword). Replacing `putmask` with - ``np.copyto(a, values, where=mask)`` is recommended. - - Parameters - ---------- - a : array_like - Target array. - mask : array_like - Boolean mask array. It has to be the same shape as `a`. - values : array_like - Values to put into `a` where `mask` is True. If `values` is smaller - than `a` it will be repeated. - - See Also - -------- - place, put, take, copyto - - Examples - -------- - >>> x = np.arange(6).reshape(2, 3) - >>> np.putmask(x, x>2, x**2) - >>> x - array([[ 0, 1, 2], - [ 9, 16, 25]]) - - If `values` is smaller than `a` it is repeated: - - >>> x = np.arange(5) - >>> np.putmask(x, x>1, [-33, -44]) - >>> x - array([ 0, 1, -33, -44, -33]) - - """) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', - """ - a.ravel([order]) - - Return a flattened array. - - Refer to `numpy.ravel` for full documentation. - - See Also - -------- - numpy.ravel : equivalent function - - ndarray.flat : a flat iterator on the array. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', - """ - a.repeat(repeats, axis=None) - - Repeat elements of an array. - - Refer to `numpy.repeat` for full documentation. - - See Also - -------- - numpy.repeat : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', - """ - a.reshape(shape, order='C') - - Returns an array containing the same data with a new shape. - - Refer to `numpy.reshape` for full documentation. 
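- - A quick illustration (an editorial sketch; `a` is an invented example array). Note that the result is a view on `a` whenever the data need not be copied: - - >>> a = np.arange(6) - >>> a.reshape((3, 2)) - array([[0, 1], - [2, 3], - [4, 5]]) - >>> a.reshape((3, 2)).base is a - True 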
- - See Also - -------- - numpy.reshape : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', - """ - a.resize(new_shape, refcheck=True) - - Change shape and size of array in-place. - - Parameters - ---------- - new_shape : tuple of ints, or `n` ints - Shape of resized array. - refcheck : bool, optional - If False, reference count will not be checked. Default is True. - - Returns - ------- - None - - Raises - ------ - ValueError - If `a` does not own its own data or references or views to it exist, - and the data memory must be changed. - - SystemError - If the `order` keyword argument is specified. This behaviour is a - bug in NumPy. - - See Also - -------- - resize : Return a new array with the specified shape. - - Notes - ----- - This reallocates space for the data area if necessary. - - Only contiguous arrays (data elements consecutive in memory) can be - resized. - - The purpose of the reference count check is to make sure you - do not use this array as a buffer for another Python object and then - reallocate the memory. However, reference counts can increase in - other ways so if you are sure that you have not shared the memory - for this array with another Python object, then you may safely set - `refcheck` to False. - - Examples - -------- - Shrinking an array: array is flattened (in the order that the data are - stored in memory), resized, and reshaped: - - >>> a = np.array([[0, 1], [2, 3]], order='C') - >>> a.resize((2, 1)) - >>> a - array([[0], - [1]]) - - >>> a = np.array([[0, 1], [2, 3]], order='F') - >>> a.resize((2, 1)) - >>> a - array([[0], - [2]]) - - Enlarging an array: as above, but missing entries are filled with zeros: - - >>> b = np.array([[0, 1], [2, 3]]) - >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple - >>> b - array([[0, 1, 2], - [3, 0, 0]]) - - Referencing an array prevents resizing... - - >>> c = a - >>> a.resize((1, 1)) - Traceback (most recent call last): - ... - ValueError: cannot resize an array that has been referenced ... - - Unless `refcheck` is False: - - >>> a.resize((1, 1), refcheck=False) - >>> a - array([[0]]) - >>> c - array([[0]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('round', - """ - a.round(decimals=0, out=None) - - Return `a` with each element rounded to the given number of decimals. - - Refer to `numpy.around` for full documentation. - - See Also - -------- - numpy.around : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', - """ - a.searchsorted(v, side='left', sorter=None) - - Find indices where elements of v should be inserted in a to maintain order. - - For full documentation, see `numpy.searchsorted` - - See Also - -------- - numpy.searchsorted : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', - """ - a.setfield(val, dtype, offset=0) - - Put a value into a specified place in a field defined by a data-type. - - Place `val` into `a`'s field defined by `dtype` and beginning `offset` - bytes into the field. - - Parameters - ---------- - val : object - Value to be placed in field. - dtype : dtype object - Data-type of the field in which to place `val`. - offset : int, optional - The number of bytes into the field at which to place `val`. 
- - Returns - ------- - None - - See Also - -------- - getfield - - Examples - -------- - >>> x = np.eye(3) - >>> x.getfield(np.float64) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - >>> x.setfield(3, np.int32) - >>> x.getfield(np.int32) - array([[3, 3, 3], - [3, 3, 3], - [3, 3, 3]]) - >>> x - array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323], - [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323], - [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]]) - >>> x.setfield(np.eye(3), np.int32) - >>> x - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', - """ - a.setflags(write=None, align=None, uic=None) - - Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively. - - These Boolean-valued flags affect how numpy interprets the memory - area used by `a` (see Notes below). The ALIGNED flag can only - be set to True if the data is actually aligned according to the type. - The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE - can only be set to True if the array owns its own memory, or the - ultimate owner of the memory exposes a writeable buffer interface, - or is a string. (The exception for string is made so that unpickling - can be done without copying memory.) - - Parameters - ---------- - write : bool, optional - Describes whether or not `a` can be written to. - align : bool, optional - Describes whether or not `a` is aligned properly for its type. - uic : bool, optional - Describes whether or not `a` is a copy of another "base" array. - - Notes - ----- - Array flags provide information about how the memory area used - for the array is to be interpreted. There are 6 Boolean flags - in use, only three of which can be changed by the user: - UPDATEIFCOPY, WRITEABLE, and ALIGNED. - - WRITEABLE (W) the data area can be written to; - - ALIGNED (A) the data and strides are aligned appropriately for the hardware - (as determined by the compiler); - - UPDATEIFCOPY (U) this array is a copy of some other array (referenced - by .base). When this array is deallocated, the base array will be - updated with the contents of this array. - - All flags can be accessed using their first (upper case) letter as well - as the full name. - - Examples - -------- - >>> y - array([[3, 1, 7], - [2, 0, 0], - [8, 5, 9]]) - >>> y.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : True - WRITEABLE : True - ALIGNED : True - UPDATEIFCOPY : False - >>> y.setflags(write=0, align=0) - >>> y.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : True - WRITEABLE : False - ALIGNED : False - UPDATEIFCOPY : False - >>> y.setflags(uic=1) - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - ValueError: cannot set UPDATEIFCOPY flag to True - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', - """ - a.sort(axis=-1, kind='quicksort', order=None) - - Sort an array, in-place. - - Parameters - ---------- - axis : int, optional - Axis along which to sort. Default is -1, which means sort along the - last axis. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. Default is 'quicksort'. - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - See Also - -------- - numpy.sort : Return a sorted copy of an array. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. 
- searchsorted : Find elements in sorted array. - partition: Partial sort. - - Notes - ----- - See ``sort`` for notes on the different sorting algorithms. - - Examples - -------- - >>> a = np.array([[1,4], [3,1]]) - >>> a.sort(axis=1) - >>> a - array([[1, 4], - [1, 3]]) - >>> a.sort(axis=0) - >>> a - array([[1, 3], - [1, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) - >>> a.sort(order='y') - >>> a - array([('c', 1), ('a', 2)], - dtype=[('x', '|S1'), ('y', '<i8')]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('partition', - """ - a.partition(kth, axis=-1, kind='introselect', order=None) - - Rearranges the elements in the array in such a way that the value of - the element in the kth position is in the position it would be in a - sorted array. All elements smaller than the kth element are moved - before this element and all equal or greater are moved behind it. The - ordering of the elements in the two partitions is undefined. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - kth : int or sequence of ints - Element index to partition by. The kth element value will be in its - final sorted position and all smaller elements will be moved before - it and all equal or greater elements behind it. The order of all - elements in the partitions is undefined. If provided with a sequence - of kth it will partition all elements indexed by kth of them into - their sorted position at once. - axis : int, optional - Axis along which to sort. Default is -1, which means sort along the - last axis. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect'. - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - See Also - -------- - numpy.partition : Return a partitioned copy of an array. - argpartition : Indirect partition. - sort : Full sort. - - Notes - ----- - See ``np.partition`` for notes on the different algorithms. - - Examples - -------- - >>> a = np.array([3, 4, 2, 1]) - >>> a.partition(3) - >>> a - array([2, 1, 3, 4]) - - >>> a.partition((1, 3)) - >>> a - array([1, 2, 3, 4]) - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze', - """ - a.squeeze(axis=None) - - Remove single-dimensional entries from the shape of `a`. - - Refer to `numpy.squeeze` for full documentation. - - See Also - -------- - numpy.squeeze : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('std', - """ - a.std(axis=None, dtype=None, out=None, ddof=0) - - Returns the standard deviation of the array elements along given axis. - - Refer to `numpy.std` for full documentation. - - See Also - -------- - numpy.std : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('sum', - """ - a.sum(axis=None, dtype=None, out=None) - - Return the sum of the array elements over the given axis. - - Refer to `numpy.sum` for full documentation. - - See Also - -------- - numpy.sum : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes', - """ - a.swapaxes(axis1, axis2) - - Return a view of the array with `axis1` and `axis2` interchanged. - - Refer to `numpy.swapaxes` for full documentation. - - See Also - -------- - numpy.swapaxes : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('take', - """ - a.take(indices, axis=None, out=None, mode='raise') - - Return an array formed from the elements of `a` at the given indices. - - Refer to `numpy.take` for full documentation. - - See Also - -------- - numpy.take : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile', - """ - a.tofile(fid, sep="", format="%s") - - Write array to a file as text or binary (default). - - Data is always written in 'C' order, independent of the order of `a`. - The data produced by this method can be recovered using the function - fromfile(). - - Parameters - ---------- - fid : file or str - An open file object, or a string containing a filename. - sep : str - Separator between array items for text output. - If "" (empty), a binary file is written, equivalent to - ``file.write(a.tobytes())``. - format : str - Format string for text file output. - Each entry in the array is formatted to text by first converting - it to the closest Python type, and then using "format" % item. - - Notes - ----- - This is a convenience function for quick storage of array data. - Information on endianness and precision is lost, so this method is not a - good choice for files intended to archive data or transport data between - machines with different endianness. Some of these problems can be overcome - by outputting the data as text files, at the expense of speed and file - size. - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', - """ - a.tolist() - - Return the array as a (possibly nested) list. 
- - Return a copy of the array data as a (nested) Python list. - Data items are converted to the nearest compatible Python type. - - Parameters - ---------- - none - - Returns - ------- - y : list - The possibly nested list of array elements. - - Notes - ----- - The array may be recreated, ``a = np.array(a.tolist())``. - - Examples - -------- - >>> a = np.array([1, 2]) - >>> a.tolist() - [1, 2] - >>> a = np.array([[1, 2], [3, 4]]) - >>> list(a) - [array([1, 2]), array([3, 4])] - >>> a.tolist() - [[1, 2], [3, 4]] - - """)) - - -tobytesdoc = """ - a.{name}(order='C') - - Construct Python bytes containing the raw data bytes in the array. - - Constructs Python bytes showing a copy of the raw contents of - data memory. The bytes object can be produced in either 'C' or 'Fortran', - or 'Any' order (the default is 'C'-order). 'Any' order means C-order - unless the F_CONTIGUOUS flag in the array is set, in which case it - means 'Fortran' order. - - {deprecated} - - Parameters - ---------- - order : {{'C', 'F', None}}, optional - Order of the data for multidimensional arrays: - C, Fortran, or the same as for the original array. - - Returns - ------- - s : bytes - Python bytes exhibiting a copy of `a`'s raw data. - - Examples - -------- - >>> x = np.array([[0, 1], [2, 3]]) - >>> x.tobytes() - b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00' - >>> x.tobytes('C') == x.tobytes() - True - >>> x.tobytes('F') - b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00' - - """ - -add_newdoc('numpy.core.multiarray', 'ndarray', - ('tostring', tobytesdoc.format(name='tostring', - deprecated= - 'This function is a compatibility ' - 'alias for tobytes. Despite its ' - 'name it returns bytes not ' - 'strings.'))) -add_newdoc('numpy.core.multiarray', 'ndarray', - ('tobytes', tobytesdoc.format(name='tobytes', - deprecated='.. versionadded:: 1.9.0'))) - -add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', - """ - a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) - - Return the sum along diagonals of the array. - - Refer to `numpy.trace` for full documentation. - - See Also - -------- - numpy.trace : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', - """ - a.transpose(*axes) - - Returns a view of the array with axes transposed. - - For a 1-D array, this has no effect. (To change between column and - row vectors, first cast the 1-D array into a matrix object.) - For a 2-D array, this is the usual matrix transpose. - For an n-D array, if axes are given, their order indicates how the - axes are permuted (see Examples). If axes are not provided and - ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then - ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``. - - Parameters - ---------- - axes : None, tuple of ints, or `n` ints - - * None or no argument: reverses the order of the axes. - - * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s - `i`-th axis becomes `a.transpose()`'s `j`-th axis. - - * `n` ints: same as an n-tuple of the same ints (this form is - intended simply as a "convenience" alternative to the tuple form) - - Returns - ------- - out : ndarray - View of `a`, with axes suitably permuted. - - See Also - -------- - ndarray.T : Array property returning the array transposed. 
- - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> a - array([[1, 2], - [3, 4]]) - >>> a.transpose() - array([[1, 3], - [2, 4]]) - >>> a.transpose((1, 0)) - array([[1, 3], - [2, 4]]) - >>> a.transpose(1, 0) - array([[1, 3], - [2, 4]]) - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('var', - """ - a.var(axis=None, dtype=None, out=None, ddof=0) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy.core.multiarray', 'ndarray', ('view', - """ - a.view(dtype=None, type=None) - - New view of array with the same data. - - Parameters - ---------- - dtype : data-type or ndarray sub-class, optional - Data-type descriptor of the returned view, e.g., float32 or int16. The - default, None, results in the view having the same data-type as `a`. - This argument can also be specified as an ndarray sub-class, which - then specifies the type of the returned object (this is equivalent to - setting the ``type`` parameter). - type : Python type, optional - Type of the returned view, e.g., ndarray or matrix. Again, the - default None results in type preservation. - - Notes - ----- - ``a.view()`` is used two different ways: - - ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view - of the array's memory with a different data-type. This can cause a - reinterpretation of the bytes of memory. - - ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just - returns an instance of `ndarray_subclass` that looks at the same array - (same shape, dtype, etc.) This does not cause a reinterpretation of the - memory. - - For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of - bytes per entry than the previous dtype (for example, converting a - regular array to a structured array), then the behavior of the view - cannot be predicted just from the superficial appearance of ``a`` (shown - by ``print(a)``). It also depends on exactly how ``a`` is stored in - memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus - defined as a slice or transpose, etc., the view may give different - results. - - - Examples - -------- - >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) - - Viewing array data using a different type and dtype: - - >>> y = x.view(dtype=np.int16, type=np.matrix) - >>> y - matrix([[513]], dtype=int16) - >>> print type(y) - <class 'numpy.matrixlib.defmatrix.matrix'> - - Creating a view on a structured array so it can be used in calculations - - >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) - >>> xv = x.view(dtype=np.int8).reshape(-1,2) - >>> xv - array([[1, 2], - [3, 4]], dtype=int8) - >>> xv.mean(0) - array([ 2., 3.]) - - Making changes to the view changes the underlying array - - >>> xv[0,1] = 20 - >>> print x - [(1, 20) (3, 4)] - - Using a view to convert an array to a record array: - - >>> z = x.view(np.recarray) - >>> z.a - array([1, 3], dtype=int8) - - Views share data: - - >>> x[0] = (9, 10) - >>> z[0] - (9, 10) - - Views that change the dtype size (bytes per entry) should normally be - avoided on arrays defined by slices, transposes, fortran-ordering, etc.: - - >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16) - >>> y = x[:, 0:2] - >>> y - array([[1, 2], - [4, 5]], dtype=int16) - >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - ValueError: new type not compatible with array. 
- >>> z = y.copy() - >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) - array([[(1, 2)], - [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')]) - - """)) - - -############################################################################## -# -# umath functions -# -############################################################################## - -add_newdoc('numpy.core.umath', 'frompyfunc', - """ - frompyfunc(func, nin, nout) - - Takes an arbitrary Python function and returns a Numpy ufunc. - - Can be used, for example, to add broadcasting to a built-in Python - function (see Examples section). - - Parameters - ---------- - func : Python function object - An arbitrary Python function. - nin : int - The number of input arguments. - nout : int - The number of objects returned by `func`. - - Returns - ------- - out : ufunc - Returns a Numpy universal function (``ufunc``) object. - - Notes - ----- - The returned ufunc always returns PyObject arrays. - - Examples - -------- - Use frompyfunc to add broadcasting to the Python function ``oct``: - - >>> oct_array = np.frompyfunc(oct, 1, 1) - >>> oct_array(np.array((10, 30, 100))) - array([012, 036, 0144], dtype=object) - >>> np.array((oct(10), oct(30), oct(100))) # for comparison - array(['012', '036', '0144'], - dtype='|S4') - - """) - -add_newdoc('numpy.core.umath', 'geterrobj', - """ - geterrobj() - - Return the current object that defines floating-point error handling. - - The error object contains all information that defines the error handling - behavior in Numpy. `geterrobj` is used internally by the other - functions that get and set error handling behavior (`geterr`, `seterr`, - `geterrcall`, `seterrcall`). - - Returns - ------- - errobj : list - The error object, a list containing three elements: - [internal numpy buffer size, error mask, error callback function]. - - The error mask is a single integer that holds the treatment information - on all four floating point errors. The information for each error type - is contained in three bits of the integer. If we print it in base 8, we - can see what treatment is set for "invalid", "under", "over", and - "divide" (in that order). The printed string can be interpreted with - - * 0 : 'ignore' - * 1 : 'warn' - * 2 : 'raise' - * 3 : 'call' - * 4 : 'print' - * 5 : 'log' - - See Also - -------- - seterrobj, seterr, geterr, seterrcall, geterrcall - getbufsize, setbufsize - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> np.geterrobj() # first get the defaults - [10000, 0, None] - - >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) - ... - >>> old_bufsize = np.setbufsize(20000) - >>> old_err = np.seterr(divide='raise') - >>> old_handler = np.seterrcall(err_handler) - >>> np.geterrobj() - [20000, 2, <function err_handler at 0x...>] - - >>> old_err = np.seterr(all='ignore') - >>> np.base_repr(np.geterrobj()[1], 8) - '0' - >>> old_err = np.seterr(divide='warn', over='log', under='call', - ... invalid='print') - >>> np.base_repr(np.geterrobj()[1], 8) - '4351' - - """) - -add_newdoc('numpy.core.umath', 'seterrobj', - """ - seterrobj(errobj) - - Set the object that defines floating-point error handling. - - The error object contains all information that defines the error handling - behavior in Numpy. `seterrobj` is used internally by the other - functions that set error handling behavior (`seterr`, `seterrcall`). - - Parameters - ---------- - errobj : list - The error object, a list containing three elements: - [internal numpy buffer size, error mask, error callback function]. - - The error mask is a single integer that holds the treatment information - on all four floating point errors. The information for each error type - is contained in three bits of the integer. If we print it in base 8, we - can see what treatment is set for "invalid", "under", "over", and - "divide" (in that order). The printed string can be interpreted with - - * 0 : 'ignore' - * 1 : 'warn' - * 2 : 'raise' - * 3 : 'call' - * 4 : 'print' - * 5 : 'log' - - See Also - -------- - geterrobj, seterr, geterr, seterrcall, geterrcall - getbufsize, setbufsize - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. 
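- - A common pattern (an editorial sketch) is to save the current error object and restore it when done: - - >>> saved = np.geterrobj() # remember buffer size, mask and callback - >>> _ = np.seterr(all='ignore') # temporarily change the mask - >>> np.seterrobj(saved) # restore the saved state 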
- - Examples - -------- - >>> old_errobj = np.geterrobj() # first get the defaults - >>> old_errobj - [10000, 0, None] - - >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) - ... - >>> new_errobj = [20000, 12, err_handler] - >>> np.seterrobj(new_errobj) - >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') - '14' - >>> np.geterr() - {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} - >>> np.geterrcall() is err_handler - True - - """) - - -############################################################################## -# -# lib._compiled_base functions -# -############################################################################## - -add_newdoc('numpy.lib._compiled_base', 'digitize', - """ - digitize(x, bins, right=False) - - Return the indices of the bins to which each value in input array belongs. - - Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if - `bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if - `bins` is monotonically decreasing. If values in `x` are beyond the - bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If right - is True, then the right bin is closed so that the index ``i`` is such - that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]`` if `bins` - is monotonically increasing or decreasing, respectively. - - Parameters - ---------- - x : array_like - Input array to be binned. It has to be 1-dimensional. - bins : array_like - Array of bins. It has to be 1-dimensional and monotonic. - right : bool, optional - Indicating whether the intervals include the right or the left bin - edge. Default behavior is (right==False) indicating that the interval - does not include the right edge. The left bin end is open in this - case, i.e., ``bins[i-1] <= x < bins[i]`` is the default behavior for - monotonically increasing bins. - - Returns - ------- - out : ndarray of ints - Output array of indices, of same shape as `x`. - - Raises - ------ - ValueError - If the input is not 1-dimensional, or if `bins` is not monotonic. - TypeError - If the type of the input is complex. - - See Also - -------- - bincount, histogram, unique - - Notes - ----- - If values in `x` are such that they fall outside the bin range, - attempting to index `bins` with the indices that `digitize` returns - will result in an IndexError. - - Examples - -------- - >>> x = np.array([0.2, 6.4, 3.0, 1.6]) - >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) - >>> inds = np.digitize(x, bins) - >>> inds - array([1, 4, 3, 2]) - >>> for n in range(x.size): - ... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]] - ... - 0.0 <= 0.2 < 1.0 - 4.0 <= 6.4 < 10.0 - 2.5 <= 3.0 < 4.0 - 1.0 <= 1.6 < 2.5 - - >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) - >>> bins = np.array([0,5,10,15,20]) - >>> np.digitize(x,bins,right=True) - array([1, 2, 3, 4, 4]) - >>> np.digitize(x,bins,right=False) - array([1, 3, 3, 4, 5]) - """) - -add_newdoc('numpy.lib._compiled_base', 'bincount', - """ - bincount(x, weights=None, minlength=None) - - Count number of occurrences of each value in array of non-negative ints. - - The number of bins (of size 1) is one larger than the largest value in - `x`. If `minlength` is specified, there will be at least this number - of bins in the output array (though it will be longer if necessary, - depending on the contents of `x`). - Each bin gives the number of occurrences of its index value in `x`. 
- If `weights` is specified, the input array is weighted by it, i.e. if a - value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead - of ``out[n] += 1``. - - Parameters - ---------- - x : array_like, 1 dimension, nonnegative ints - Input array. - weights : array_like, optional - Weights, array of the same shape as `x`. - minlength : int, optional - .. versionadded:: 1.6.0 - - A minimum number of bins for the output array. - - Returns - ------- - out : ndarray of ints - The result of binning the input array. - The length of `out` is equal to ``np.amax(x)+1``. - - Raises - ------ - ValueError - If the input is not 1-dimensional, or contains elements with negative - values, or if `minlength` is non-positive. - TypeError - If the type of the input is float or complex. - - See Also - -------- - histogram, digitize, unique - - Examples - -------- - >>> np.bincount(np.arange(5)) - array([1, 1, 1, 1, 1]) - >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) - array([1, 3, 1, 1, 0, 0, 0, 1]) - - >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) - >>> np.bincount(x).size == np.amax(x)+1 - True - - The input array needs to be of integer dtype, otherwise a - TypeError is raised: - - >>> np.bincount(np.arange(5, dtype=np.float)) - Traceback (most recent call last): - File "<stdin>", line 1, in <module> - TypeError: array cannot be safely cast to required type - - A possible use of ``bincount`` is to perform sums over - variable-size chunks of an array, using the ``weights`` keyword. - - >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights - >>> x = np.array([0, 1, 1, 2, 2, 2]) - >>> np.bincount(x, weights=w) - array([ 0.3, 0.7, 1.1]) - - """) - -add_newdoc('numpy.lib._compiled_base', 'ravel_multi_index', - """ - ravel_multi_index(multi_index, dims, mode='raise', order='C') - - Converts a tuple of index arrays into an array of flat - indices, applying boundary modes to the multi-index. - - Parameters - ---------- - multi_index : tuple of array_like - A tuple of integer arrays, one array for each dimension. - dims : tuple of ints - The shape of array into which the indices from ``multi_index`` apply. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices are handled. Can specify - either one mode or a tuple of modes, one mode per index. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - In 'clip' mode, a negative index which would normally - wrap will clip to 0 instead. - order : {'C', 'F'}, optional - Determines whether the multi-index should be viewed as indexing in - C (row-major) order or FORTRAN (column-major) order. - - Returns - ------- - raveled_indices : ndarray - An array of indices into the flattened version of an array - of dimensions ``dims``. - - See Also - -------- - unravel_index - - Notes - ----- - .. versionadded:: 1.6.0 - - Examples - -------- - >>> arr = np.array([[3,6,6],[4,5,1]]) - >>> np.ravel_multi_index(arr, (7,6)) - array([22, 41, 37]) - >>> np.ravel_multi_index(arr, (7,6), order='F') - array([31, 41, 13]) - >>> np.ravel_multi_index(arr, (4,6), mode='clip') - array([22, 23, 19]) - >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) - array([12, 13, 13]) - - >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) - 1621 - """) - -add_newdoc('numpy.lib._compiled_base', 'unravel_index', - """ - unravel_index(indices, dims, order='C') - - Converts a flat index or array of flat indices into a tuple - of coordinate arrays. 
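- - For instance (an editorial sketch), flat index 5 of a C-ordered (2, 3) array sits at row 1, column 2: - - >>> np.unravel_index(5, (2, 3)) - (1, 2) 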
- - Parameters - ---------- - indices : array_like - An integer array whose elements are indices into the flattened - version of an array of dimensions ``dims``. Before version 1.6.0, - this function accepted just one index value. - dims : tuple of ints - The shape of the array to use for unraveling ``indices``. - order : {'C', 'F'}, optional - .. versionadded:: 1.6.0 - - Determines whether the indices should be viewed as indexing in - C (row-major) order or FORTRAN (column-major) order. - - Returns - ------- - unraveled_coords : tuple of ndarray - Each array in the tuple has the same shape as the ``indices`` - array. - - See Also - -------- - ravel_multi_index - - Examples - -------- - >>> np.unravel_index([22, 41, 37], (7,6)) - (array([3, 6, 6]), array([4, 5, 1])) - >>> np.unravel_index([31, 41, 13], (7,6), order='F') - (array([3, 6, 6]), array([4, 5, 1])) - - >>> np.unravel_index(1621, (6,7,8,9)) - (3, 1, 4, 1) - - """) - -add_newdoc('numpy.lib._compiled_base', 'add_docstring', - """ - add_docstring(obj, docstring) - - Add a docstring to a built-in obj if possible. - If the obj already has a docstring, raise a RuntimeError. - If this routine does not know how to add a docstring to the object, - raise a TypeError. - """) - -add_newdoc('numpy.lib._compiled_base', 'add_newdoc_ufunc', - """ - add_newdoc_ufunc(ufunc, new_docstring) - - Replace the docstring for a ufunc with new_docstring. - This method will only work if the current docstring for - the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) - - Parameters - ---------- - ufunc : numpy.ufunc - A ufunc whose current doc is NULL. - new_docstring : string - The new docstring for the ufunc. - - Notes - ----- - This method allocates memory for new_docstring on - the heap. Technically this creates a memory leak, since this - memory will not be reclaimed until the end of the program - even if the ufunc itself is removed. However this will only - be a problem if the user is repeatedly creating ufuncs with - no documentation, adding documentation via add_newdoc_ufunc, - and then throwing away the ufunc. - """) - -add_newdoc('numpy.lib._compiled_base', 'packbits', - """ - packbits(myarray, axis=None) - - Packs the elements of a binary-valued array into bits in a uint8 array. - - The result is padded to full bytes by inserting zero bits at the end. - - Parameters - ---------- - myarray : array_like - An integer type array whose elements should be packed to bits. - axis : int, optional - The dimension over which bit-packing is done. - ``None`` implies packing the flattened array. - - Returns - ------- - packed : ndarray - Array of type uint8 whose elements represent bits corresponding to the - logical (0 or nonzero) value of the input elements. The shape of - `packed` has the same number of dimensions as the input (unless `axis` - is None, in which case the output is 1-D). - - See Also - -------- - unpackbits: Unpacks elements of a uint8 array into a binary-valued output - array. - - Examples - -------- - >>> a = np.array([[[1,0,1], - ... [0,1,0]], - ... [[1,1,0], - ... [0,0,1]]]) - >>> b = np.packbits(a, axis=-1) - >>> b - array([[[160],[64]],[[192],[32]]], dtype=uint8) - - Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, - and 32 = 0010 0000. - - """) - -add_newdoc('numpy.lib._compiled_base', 'unpackbits', - """ - unpackbits(myarray, axis=None) - - Unpacks elements of a uint8 array into a binary-valued output array. 
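- - For instance (an editorial sketch), the byte 2 is ``00000010`` in binary, so it unpacks to a single 1 bit in the next-to-last position: - - >>> np.unpackbits(np.array([2], dtype=np.uint8)) - array([0, 0, 0, 0, 0, 0, 1, 0], dtype=uint8) 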
- - Each element of `myarray` represents a bit-field that should be unpacked - into a binary-valued output array. The shape of the output array is either - 1-D (if `axis` is None) or the same shape as the input array with unpacking - done along the axis specified. - - Parameters - ---------- - myarray : ndarray, uint8 type - Input array. - axis : int, optional - Unpacks along this axis. - - Returns - ------- - unpacked : ndarray, uint8 type - The elements are binary-valued (0 or 1). - - See Also - -------- - packbits : Packs the elements of a binary-valued array into bits in a uint8 - array. - - Examples - -------- - >>> a = np.array([[2], [7], [23]], dtype=np.uint8) - >>> a - array([[ 2], - [ 7], - [23]], dtype=uint8) - >>> b = np.unpackbits(a, axis=1) - >>> b - array([[0, 0, 0, 0, 0, 0, 1, 0], - [0, 0, 0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) - - """) - - -############################################################################## -# -# Documentation for ufunc attributes and methods -# -############################################################################## - - -############################################################################## -# -# ufunc object -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', - """ - Functions that operate element by element on whole arrays. - - To see the documentation for a specific ufunc, use np.info(). For - example, np.info(np.sin). Because ufuncs are written in C - (for speed) and linked into Python with NumPy's ufunc facility, - Python's help() function finds this page whenever help() is called - on a ufunc. - - A detailed explanation of ufuncs can be found in the "ufuncs.rst" - file in the NumPy reference guide. - - Unary ufuncs: - ============= - - op(X, out=None) - Apply op to X elementwise - - Parameters - ---------- - X : array_like - Input array. - out : array_like - An array to store the output. Must be the same shape as `X`. - - Returns - ------- - r : array_like - `r` will have the same shape as `X`; if out is provided, `r` - will be equal to out. - - Binary ufuncs: - ============== - - op(X, Y, out=None) - Apply `op` to `X` and `Y` elementwise. May "broadcast" to make - the shapes of `X` and `Y` congruent. - - The broadcasting rules are: - - * Dimensions of length 1 may be prepended to either array. - * Arrays may be repeated along dimensions of length 1. - - Parameters - ---------- - X : array_like - First input array. - Y : array_like - Second input array. - out : array_like - An array to store the output. Must be the same shape as the - output would have. - - Returns - ------- - r : array_like - The return value; if out is provided, `r` will be equal to out. - - """) - - -############################################################################## -# -# ufunc attributes -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', ('identity', - """ - The identity value. - - Data attribute containing the identity element for the ufunc, if it has one. - If it does not, the attribute value is None. - - Examples - -------- - >>> np.add.identity - 0 - >>> np.multiply.identity - 1 - >>> np.power.identity - 1 - >>> print np.exp.identity - None - """)) - -add_newdoc('numpy.core', 'ufunc', ('nargs', - """ - The number of arguments. - - Data attribute containing the number of arguments the ufunc takes, including - optional ones. 
- - Notes - ----- - Typically this value will be one more than what you might expect because all - ufuncs take the optional "out" argument. - - Examples - -------- - >>> np.add.nargs - 3 - >>> np.multiply.nargs - 3 - >>> np.power.nargs - 3 - >>> np.exp.nargs - 2 - """)) - -add_newdoc('numpy.core', 'ufunc', ('nin', - """ - The number of inputs. - - Data attribute containing the number of arguments the ufunc treats as input. - - Examples - -------- - >>> np.add.nin - 2 - >>> np.multiply.nin - 2 - >>> np.power.nin - 2 - >>> np.exp.nin - 1 - """)) - -add_newdoc('numpy.core', 'ufunc', ('nout', - """ - The number of outputs. - - Data attribute containing the number of arguments the ufunc treats as output. - - Notes - ----- - Since all ufuncs can take output arguments, this will always be (at least) 1. - - Examples - -------- - >>> np.add.nout - 1 - >>> np.multiply.nout - 1 - >>> np.power.nout - 1 - >>> np.exp.nout - 1 - - """)) - -add_newdoc('numpy.core', 'ufunc', ('ntypes', - """ - The number of types. - - The number of numerical NumPy types - of which there are 18 total - on which - the ufunc can operate. - - See Also - -------- - numpy.ufunc.types - - Examples - -------- - >>> np.add.ntypes - 18 - >>> np.multiply.ntypes - 18 - >>> np.power.ntypes - 17 - >>> np.exp.ntypes - 7 - >>> np.remainder.ntypes - 14 - - """)) - -add_newdoc('numpy.core', 'ufunc', ('types', - """ - Returns a list with types grouped input->output. - - Data attribute listing the data-type "Domain-Range" groupings the ufunc can - deliver. The data-types are given using the character codes. - - See Also - -------- - numpy.ufunc.ntypes - - Examples - -------- - >>> np.add.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.multiply.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.power.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', - 'OO->O'] - - >>> np.exp.types - ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] - - >>> np.remainder.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] - - """)) - - -############################################################################## -# -# ufunc methods -# -############################################################################## - -add_newdoc('numpy.core', 'ufunc', ('reduce', - """ - reduce(a, axis=0, dtype=None, out=None, keepdims=False) - - Reduces `a`'s dimension by one, by applying ufunc along one axis. - - Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then - :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = - the result of iterating `j` over :math:`range(N_i)`, cumulatively applying - ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. - For a one-dimensional array, reduce produces results equivalent to: - :: - - r = op.identity # op = ufunc - for i in range(len(A)): - r = op(r, A[i]) - return r - - For example, add.reduce() is equivalent to sum(). - - Parameters - ---------- - a : array_like - The array to act on. - axis : None or int or tuple of ints, optional - Axis or axes along which a reduction is performed. 
-        The default (`axis` = 0) is to perform a reduction over the first
-        dimension of the input array. `axis` may be negative, in
-        which case it counts from the last to the first axis.
-
-        .. versionadded:: 1.7.0
-
-        If this is `None`, a reduction is performed over all the axes.
-        If this is a tuple of ints, a reduction is performed on multiple
-        axes, instead of a single axis or all the axes as before.
-
-        For operations which are either not commutative or not associative,
-        doing a reduction over multiple axes is not well-defined. The
-        ufuncs do not currently raise an exception in this case, but will
-        likely do so in the future.
-    dtype : data-type code, optional
-        The type used to represent the intermediate results. Defaults
-        to the data-type of the output array if this is provided, or
-        the data-type of the input array if no output array is provided.
-    out : ndarray, optional
-        A location into which the result is stored. If not provided, a
-        freshly-allocated array is returned.
-    keepdims : bool, optional
-        If this is set to True, the axes which are reduced are left
-        in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `a`.
-
-        .. versionadded:: 1.7.0
-
-    Returns
-    -------
-    r : ndarray
-        The reduced array. If `out` was supplied, `r` is a reference to it.
-
-    Examples
-    --------
-    >>> np.multiply.reduce([2,3,5])
-    30
-
-    A multi-dimensional array example:
-
-    >>> X = np.arange(8).reshape((2,2,2))
-    >>> X
-    array([[[0, 1],
-            [2, 3]],
-           [[4, 5],
-            [6, 7]]])
-    >>> np.add.reduce(X, 0)
-    array([[ 4,  6],
-           [ 8, 10]])
-    >>> np.add.reduce(X) # confirm: default axis value is 0
-    array([[ 4,  6],
-           [ 8, 10]])
-    >>> np.add.reduce(X, 1)
-    array([[ 2,  4],
-           [10, 12]])
-    >>> np.add.reduce(X, 2)
-    array([[ 1,  5],
-           [ 9, 13]])
-
-    """))
-
-add_newdoc('numpy.core', 'ufunc', ('accumulate',
-    """
-    accumulate(array, axis=0, dtype=None, out=None)
-
-    Accumulate the result of applying the operator to all elements.
-
-    For a one-dimensional array, accumulate produces results equivalent to::
-
-      r = np.empty(len(A))
-      t = op.identity        # op = the ufunc being applied to A's elements
-      for i in range(len(A)):
-          t = op(t, A[i])
-          r[i] = t
-      return r
-
-    For example, add.accumulate() is equivalent to np.cumsum().
-
-    For a multi-dimensional array, accumulate is applied along only one
-    axis (axis zero by default; see Examples below) so repeated use is
-    necessary if one wants to accumulate over multiple axes.
-
-    Parameters
-    ----------
-    array : array_like
-        The array to act on.
-    axis : int, optional
-        The axis along which to apply the accumulation; default is zero.
-    dtype : data-type code, optional
-        The data-type used to represent the intermediate results. Defaults
-        to the data-type of the output array if such is provided, or the
-        data-type of the input array if no output array is provided.
-    out : ndarray, optional
-        A location into which the result is stored. If not provided, a
-        freshly-allocated array is returned.
-
-    Returns
-    -------
-    r : ndarray
-        The accumulated values.  If `out` was supplied, `r` is a reference to
-        `out`.
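The loop shown above can be checked directly. A small sketch, assuming ``np`` is NumPy, comparing ``np.add.accumulate`` against the explicit loop and against ``np.cumsum``::

    import numpy as np

    A = np.array([2, 3, 5])

    # The docstring's loop, written out for np.add.
    r = np.empty(len(A), dtype=A.dtype)
    t = np.add.identity            # 0 for addition
    for i in range(len(A)):
        t = np.add(t, A[i])
        r[i] = t

    print(r)                       # [ 2  5 10]
    print(np.add.accumulate(A))    # identical
    print(np.cumsum(A))            # add.accumulate is cumsum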
- - Examples - -------- - 1-D array examples: - - >>> np.add.accumulate([2, 3, 5]) - array([ 2, 5, 10]) - >>> np.multiply.accumulate([2, 3, 5]) - array([ 2, 6, 30]) - - 2-D array examples: - - >>> I = np.eye(2) - >>> I - array([[ 1., 0.], - [ 0., 1.]]) - - Accumulate along axis 0 (rows), down columns: - - >>> np.add.accumulate(I, 0) - array([[ 1., 0.], - [ 1., 1.]]) - >>> np.add.accumulate(I) # no axis specified = axis zero - array([[ 1., 0.], - [ 1., 1.]]) - - Accumulate along axis 1 (columns), through rows: - - >>> np.add.accumulate(I, 1) - array([[ 1., 1.], - [ 0., 1.]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('reduceat', - """ - reduceat(a, indices, axis=0, dtype=None, out=None) - - Performs a (local) reduce with specified slices over a single axis. - - For i in ``range(len(indices))``, `reduceat` computes - ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th - generalized "row" parallel to `axis` in the final result (i.e., in a - 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if - `axis = 1`, it becomes the i-th column). There are three exceptions to this: - - * when ``i = len(indices) - 1`` (so for the last index), - ``indices[i+1] = a.shape[axis]``. - * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is - simply ``a[indices[i]]``. - * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. - - The shape of the output depends on the size of `indices`, and may be - larger than `a` (this happens if ``len(indices) > a.shape[axis]``). - - Parameters - ---------- - a : array_like - The array to act on. - indices : array_like - Paired indices, comma separated (not colon), specifying slices to - reduce. - axis : int, optional - The axis along which to apply the reduceat. - dtype : data-type code, optional - The type used to represent the intermediate results. Defaults - to the data type of the output array if this is provided, or - the data type of the input array if no output array is provided. - out : ndarray, optional - A location into which the result is stored. If not provided a - freshly-allocated array is returned. - - Returns - ------- - r : ndarray - The reduced values. If `out` was supplied, `r` is a reference to - `out`. - - Notes - ----- - A descriptive example: - - If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as - ``ufunc.reduceat(a, indices)[::2]`` where `indices` is - ``range(len(array) - 1)`` with a zero placed - in every other element: - ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. - - Don't be fooled by this attribute's name: `reduceat(a)` is not - necessarily smaller than `a`. 
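The three exception rules above pin down ``reduceat`` completely; the following sketch (``np`` assumed to be NumPy) rebuilds a 1-D result by hand and checks it against the method::

    import numpy as np

    a = np.arange(8)
    indices = [0, 4, 1, 5, 2, 6, 3, 7]

    # Element i reduces a[indices[i]:indices[i+1]], with the final
    # slice running to the end of the axis, and a bare a[indices[i]]
    # whenever the pair is not increasing.
    expected = []
    for i, start in enumerate(indices):
        stop = indices[i + 1] if i + 1 < len(indices) else len(a)
        expected.append(a[start] if start >= stop
                        else np.add.reduce(a[start:stop]))

    print(np.add.reduceat(a, indices))        # [ 6  4 10  5 14  6 18  7]
    print(np.array(expected))                 # identical
    print(np.add.reduceat(a, indices)[::2])   # running sums: [ 6 10 14 18]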
- - Examples - -------- - To take the running sum of four successive values: - - >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] - array([ 6, 10, 14, 18]) - - A 2-D example: - - >>> x = np.linspace(0, 15, 16).reshape(4,4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) - - :: - - # reduce such that the result has the following five rows: - # [row1 + row2 + row3] - # [row4] - # [row2] - # [row3] - # [row1 + row2 + row3 + row4] - - >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) - array([[ 12., 15., 18., 21.], - [ 12., 13., 14., 15.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 24., 28., 32., 36.]]) - - :: - - # reduce such that result has the following two columns: - # [col1 * col2 * col3, col4] - - >>> np.multiply.reduceat(x, [0, 3], 1) - array([[ 0., 3.], - [ 120., 7.], - [ 720., 11.], - [ 2184., 15.]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('outer', - """ - outer(A, B) - - Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. - - Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of - ``op.outer(A, B)`` is an array of dimension M + N such that: - - .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = - op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) - - For `A` and `B` one-dimensional, this is equivalent to:: - - r = empty(len(A),len(B)) - for i in range(len(A)): - for j in range(len(B)): - r[i,j] = op(A[i], B[j]) # op = ufunc in question - - Parameters - ---------- - A : array_like - First array - B : array_like - Second array - - Returns - ------- - r : ndarray - Output array - - See Also - -------- - numpy.outer - - Examples - -------- - >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) - array([[ 4, 5, 6], - [ 8, 10, 12], - [12, 15, 18]]) - - A multi-dimensional example: - - >>> A = np.array([[1, 2, 3], [4, 5, 6]]) - >>> A.shape - (2, 3) - >>> B = np.array([[1, 2, 3, 4]]) - >>> B.shape - (1, 4) - >>> C = np.multiply.outer(A, B) - >>> C.shape; C - (2, 3, 1, 4) - array([[[[ 1, 2, 3, 4]], - [[ 2, 4, 6, 8]], - [[ 3, 6, 9, 12]]], - [[[ 4, 8, 12, 16]], - [[ 5, 10, 15, 20]], - [[ 6, 12, 18, 24]]]]) - - """)) - -add_newdoc('numpy.core', 'ufunc', ('at', - """ - at(a, indices, b=None) - - Performs unbuffered in place operation on operand 'a' for elements - specified by 'indices'. For addition ufunc, this method is equivalent to - `a[indices] += b`, except that results are accumulated for elements that - are indexed more than once. For example, `a[[0,0]] += 1` will only - increment the first element once because of buffering, whereas - `add.at(a, [0,0], 1)` will increment the first element twice. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - The array to perform in place operation on. - indices : array_like or tuple - Array like index object or slice object for indexing into first - operand. If first operand has multiple dimensions, indices can be a - tuple of array like index objects or slice objects. - b : array_like - Second operand for ufuncs requiring two operands. Operand must be - broadcastable over first operand after indexing or slicing. 
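The buffering caveat described above is the whole reason ``at`` exists. A minimal sketch (``np`` = NumPy) contrasting buffered fancy-index assignment with the unbuffered method::

    import numpy as np

    a = np.zeros(4, dtype=int)

    # Buffered: the repeated index 2 is incremented only once, because
    # a[idx] += 1 reads, adds, then writes back in one pass.
    a[[0, 1, 2, 2]] += 1
    print(a)                        # [1 1 1 0]

    # Unbuffered: every occurrence of an index is applied.
    b = np.zeros(4, dtype=int)
    np.add.at(b, [0, 1, 2, 2], 1)
    print(b)                        # [1 1 2 0]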
-
-    Examples
-    --------
-    Set items 0 and 1 to their negative values:
-
-    >>> a = np.array([1, 2, 3, 4])
-    >>> np.negative.at(a, [0, 1])
-    >>> a
-    array([-1, -2,  3,  4])
-
-    Increment items 0 and 1, and increment item 2 twice:
-
-    >>> a = np.array([1, 2, 3, 4])
-    >>> np.add.at(a, [0, 1, 2, 2], 1)
-    >>> a
-    array([2, 3, 5, 4])
-
-    Add items 0 and 1 in first array to second array,
-    and store results in first array:
-
-    >>> a = np.array([1, 2, 3, 4])
-    >>> b = np.array([1, 2])
-    >>> np.add.at(a, [0, 1], b)
-    >>> a
-    array([2, 4, 3, 4])
-
-    """))
-
-##############################################################################
-#
-# Documentation for dtype attributes and methods
-#
-##############################################################################
-
-##############################################################################
-#
-# dtype object
-#
-##############################################################################
-
-add_newdoc('numpy.core.multiarray', 'dtype',
-    """
-    dtype(obj, align=False, copy=False)
-
-    Create a data type object.
-
-    A numpy array is homogeneous, and contains elements described by a
-    dtype object. A dtype object can be constructed from different
-    combinations of fundamental numeric types.
-
-    Parameters
-    ----------
-    obj
-        Object to be converted to a data type object.
-    align : bool, optional
-        Add padding to the fields to match what a C compiler would output
-        for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
-        or a comma-separated string. If a struct dtype is being created,
-        this also sets a sticky alignment flag ``isalignedstruct``.
-    copy : bool, optional
-        Make a new copy of the data-type object. If ``False``, the result
-        may just be a reference to a built-in data-type object.
-
-    See also
-    --------
-    result_type
-
-    Examples
-    --------
-    Using array-scalar type:
-
-    >>> np.dtype(np.int16)
-    dtype('int16')
-
-    Record, one field name 'f1', containing int16:
-
-    >>> np.dtype([('f1', np.int16)])
-    dtype([('f1', '<i2')])
-
-    Record, one field named 'f1', in itself containing a record with one
-    field:
-
-    >>> np.dtype([('f1', [('f1', np.int16)])])
-    dtype([('f1', [('f1', '<i2')])])
-
-    Record, two fields: the first field contains an unsigned int, the
-    second an int32:
-
-    >>> np.dtype([('f1', np.uint), ('f2', np.int32)])
-    dtype([('f1', '<u4'), ('f2', '<i4')])
-
-    Using array-protocol type strings:
-
-    >>> np.dtype([('a','f8'),('b','S10')])
-    dtype([('a', '<f8'), ('b', '|S10')])
-
-    Using comma-separated field formats.  The shape is (2,3):
-
-    >>> np.dtype("i4, (2,3)f8")
-    dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
-
-    Using tuples.  ``int`` is a fixed type, 3 the field's shape.  ``void``
-    is a flexible type, here of size 10:
-
-    >>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
-    dtype([('hello', '<i4', 3), ('world', '|V10')])
-
-    Subdivide ``int16`` into 2 ``int8``'s, called x and y.  0 and 1 are
-    the offsets in bytes:
-
-    >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
-    dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
-
-    Using dictionaries.  Two fields named 'gender' and 'age':
-
-    >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
-    dtype([('gender', '|S1'), ('age', '|u1')])
-
-    Offsets in bytes, here 0 and 25:
-
-    >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
-    dtype([('surname', '|S25'), ('age', '|u1')])
-
-    """)
-
-##############################################################################
-#
-# dtype attributes
-#
-##############################################################################
-
-add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
-    """
-    The required alignment (bytes) of this data-type according to the compiler.
-
-    More information is available in the C-API section of the manual.
-
-    """))
-
-add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
-    """
-    A character indicating the byte-order of this data-type object.
-
-    One of:
-
-    ===  ==============
-    '='  native
-    '<'  little-endian
-    '>'  big-endian
-    '|'  not applicable
-    ===  ==============
-
-    All built-in data-type objects have byteorder either '=' or '|'.
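As a quick companion to the dtype construction examples above, the sketch below (``np`` assumed to be NumPy; the field names are arbitrary) shows that two of the spellings produce the same dtype and how ``fields`` exposes the layout::

    import numpy as np

    # A list of (name, format) tuples versus a names/formats dictionary.
    dt1 = np.dtype([('gender', 'S1'), ('age', np.uint8)])
    dt2 = np.dtype({'names': ['gender', 'age'], 'formats': ['S1', np.uint8]})
    print(dt1 == dt2)               # True

    # dtype.fields maps each name to (field dtype, byte offset).
    for name in dt1.names:
        field_dtype, offset = dt1.fields[name][:2]
        print(name, field_dtype, offset)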
- - Examples - -------- - - >>> dt = np.dtype('i2') - >>> dt.byteorder - '=' - >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder - '|' - >>> # or ASCII strings - >>> np.dtype('S2').byteorder - '|' - >>> # Even if specific code is given, and it is native - >>> # '=' is the byteorder - >>> import sys - >>> sys_is_le = sys.byteorder == 'little' - >>> native_code = sys_is_le and '<' or '>' - >>> swapped_code = sys_is_le and '>' or '<' - >>> dt = np.dtype(native_code + 'i2') - >>> dt.byteorder - '=' - >>> # Swapped code shows up as itself - >>> dt = np.dtype(swapped_code + 'i2') - >>> dt.byteorder == swapped_code - True - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('char', - """A unique character code for each of the 21 different built-in types.""")) - -add_newdoc('numpy.core.multiarray', 'dtype', ('descr', - """ - Array-interface compliant full description of the data-type. - - The format is that required by the 'descr' key in the - `__array_interface__` attribute. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('fields', - """ - Dictionary of named fields defined for this data type, or ``None``. - - The dictionary is indexed by keys that are the names of the fields. - Each entry in the dictionary is a tuple fully describing the field:: - - (dtype, offset[, title]) - - If present, the optional title can be any object (if it is a string - or unicode then it will also be a key in the fields dictionary, - otherwise it's meta-data). Notice also that the first two elements - of the tuple can be passed directly as arguments to the ``ndarray.getfield`` - and ``ndarray.setfield`` methods. - - See Also - -------- - ndarray.getfield, ndarray.setfield - - Examples - -------- - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> print dt.fields - {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('flags', - """ - Bit-flags describing how this data type is to be interpreted. - - Bit-masks are in `numpy.core.multiarray` as the constants - `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, - `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation - of these flags is in C-API documentation; they are largely useful - for user-defined data-types. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', - """ - Boolean indicating whether this dtype contains any reference-counted - objects in any fields or sub-dtypes. - - Recall that what is actually in the ndarray memory representing - the Python object is the memory address of that object (a pointer). - Special handling may be required, and this attribute is useful for - distinguishing data types that may contain arbitrary Python objects - and data-types that won't. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', - """ - Integer indicating how this dtype relates to the built-in dtypes. - - Read-only. - - = ======================================================================== - 0 if this is a structured array type, with fields - 1 if this is a dtype compiled into numpy (such as ints, floats etc) - 2 if the dtype is for a user-defined numpy type - A user-defined type uses the numpy C-API machinery to extend - numpy to handle a new array type. See - :ref:`user.user-defined-data-types` in the Numpy manual. 
- = ======================================================================== - - Examples - -------- - >>> dt = np.dtype('i2') - >>> dt.isbuiltin - 1 - >>> dt = np.dtype('f8') - >>> dt.isbuiltin - 1 - >>> dt = np.dtype([('field1', 'f8')]) - >>> dt.isbuiltin - 0 - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', - """ - Boolean indicating whether the byte order of this dtype is native - to the platform. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', - """ - Boolean indicating whether the dtype is a struct which maintains - field alignment. This flag is sticky, so when combining multiple - structs together, it is preserved and produces new dtypes which - are also aligned. - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', - """ - The element size of this data-type object. - - For 18 of the 21 types this number is fixed by the data-type. - For the flexible data-types, this number can be anything. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('kind', - """ - A character code (one of 'biufcOSUV') identifying the general kind of data. - - = ====================== - b boolean - i signed integer - u unsigned integer - f floating-point - c complex floating-point - O object - S (byte-)string - U Unicode - V void - = ====================== - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('name', - """ - A bit-width name for this data-type. - - Un-sized flexible data-type objects do not have this attribute. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('names', - """ - Ordered list of field names, or ``None`` if there are no fields. - - The names are ordered according to increasing byte offset. This can be - used, for example, to walk through all of the named fields in offset order. - - Examples - -------- - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt.names - ('name', 'grades') - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('num', - """ - A unique number for each of the 21 different built-in types. - - These are roughly ordered from least-to-most precision. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('shape', - """ - Shape tuple of the sub-array if this data type describes a sub-array, - and ``()`` otherwise. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('str', - """The array-protocol typestring of this data-type object.""")) - -add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', - """ - Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and - None otherwise. - - The *shape* is the fixed shape of the sub-array described by this - data type, and *item_dtype* the data type of the array. - - If a field whose dtype object has this attribute is retrieved, - then the extra dimensions implied by *shape* are tacked on to - the end of the retrieved array. - - """)) - -add_newdoc('numpy.core.multiarray', 'dtype', ('type', - """The type object used to instantiate a scalar of this data-type.""")) - -############################################################################## -# -# dtype methods -# -############################################################################## - -add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', - """ - newbyteorder(new_order='S') - - Return a new dtype with a different byte order. - - Changes are also made in all fields and sub-arrays of the data type. 
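Byte-order changes round-trip cleanly, which makes the 'S' (swap) code convenient. A small sketch, with ``np`` as NumPy (the printed codes assume a little-endian host)::

    import numpy as np

    dt = np.dtype('i4')             # native order, reported as '='
    swapped = dt.newbyteorder('S')  # explicit non-native order
    print(dt.byteorder, swapped.byteorder)       # = >

    # Swapping twice yields a dtype equal to the original.
    print(swapped.newbyteorder('S') == dt)       # True

    # Fields are swapped too: a struct dtype keeps its layout, only
    # the byte order of each field changes.
    rec = np.dtype([('x', '<i2'), ('y', '<f4')]).newbyteorder('S')
    print(rec['x'].byteorder, rec['y'].byteorder)   # > >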
-
-    Parameters
-    ----------
-    new_order : string, optional
-        Byte order to force; a value from the byte order
-        specifications below.  The default value ('S') results in
-        swapping the current byte order.
-        `new_order` codes can be any of::
-
-         * 'S' - swap dtype from current to opposite endian
-         * {'<', 'L'} - little endian
-         * {'>', 'B'} - big endian
-         * {'=', 'N'} - native order
-         * {'|', 'I'} - ignore (no change to byte order)
-
-        The code does a case-insensitive check on the first letter of
-        `new_order` for these alternatives.  For example, any of '>'
-        or 'B' or 'b' or 'brian' are valid to specify big-endian.
-
-    Returns
-    -------
-    new_dtype : dtype
-        New dtype object with the given change to the byte order.
-
-    Notes
-    -----
-    Changes are also made in all fields and sub-arrays of the data type.
-
-    Examples
-    --------
-    >>> import sys
-    >>> sys_is_le = sys.byteorder == 'little'
-    >>> native_code = sys_is_le and '<' or '>'
-    >>> swapped_code = sys_is_le and '>' or '<'
-    >>> native_dt = np.dtype(native_code+'i2')
-    >>> swapped_dt = np.dtype(swapped_code+'i2')
-    >>> native_dt.newbyteorder('S') == swapped_dt
-    True
-    >>> native_dt.newbyteorder() == swapped_dt
-    True
-    >>> native_dt == swapped_dt.newbyteorder('S')
-    True
-    >>> native_dt == swapped_dt.newbyteorder('=')
-    True
-    >>> native_dt == swapped_dt.newbyteorder('N')
-    True
-    >>> native_dt == native_dt.newbyteorder('|')
-    True
-    >>> np.dtype('<i2') == native_dt.newbyteorder('<')
-    True
-    >>> np.dtype('<i2') == native_dt.newbyteorder('L')
-    True
-    >>> np.dtype('>i2') == native_dt.newbyteorder('>')
-    True
-    >>> np.dtype('>i2') == native_dt.newbyteorder('B')
-    True
-
-    """))
-
-
-##############################################################################
-#
-# Datetime-related Methods
-#
-##############################################################################
-
-add_newdoc('numpy.core.multiarray', 'busdaycalendar',
-    """
-    busdaycalendar(weekmask='1111100', holidays=None)
-
-    A business day calendar object that efficiently stores information
-    defining valid days for the busday family of functions.
-
-    The default valid days are Monday through Friday ("business days").
-    A busdaycalendar object can be specified with any set of weekly
-    valid days, plus an optional set of "holiday" dates that will
-    always be invalid.
-
-    Once a busdaycalendar object is created, the weekmask and holidays
-    cannot be modified.
-
-    .. versionadded:: 1.7.0
-
-    Parameters
-    ----------
-    weekmask : str or array_like of bool, optional
-        A seven-element array indicating which of Monday through Sunday are
-        valid days.  May be specified as a length-seven list or array, like
-        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
-        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
-        weekdays, optionally separated by white space.  Valid abbreviations
-        are: Mon Tue Wed Thu Fri Sat Sun
-    holidays : array_like of datetime64[D], optional
-        An array of dates to consider as invalid dates, no matter which
-        weekday they fall upon.  Holiday dates may be specified in any
-        order, and NaT (not-a-time) dates are ignored.  This list is
-        saved in a normalized form that is suited for fast calculations
-        of valid days.
-
-    Returns
-    -------
-    out : busdaycalendar
-        A business day calendar object containing the specified
-        weekmask and holidays values.
-
-    See Also
-    --------
-    is_busday : Returns a boolean array indicating valid days.
-    busday_offset : Applies an offset counted in valid days.
-    busday_count : Counts how many valid days are in a half-open date range.
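One calendar object can drive the whole busday family, as a short sketch shows (``np`` assumed to be NumPy; the holiday dates are arbitrary examples)::

    import numpy as np

    # Two holidays; weekends are excluded by the default weekmask.
    cal = np.busdaycalendar(holidays=['2011-07-01', '2011-07-04'])
    print(cal.weekmask)    # [ True  True  True  True  True False False]

    # Friday 2011-07-01 is a holiday, Saturday 2011-07-02 is a weekend,
    # Tuesday 2011-07-05 is a plain business day.
    print(np.is_busday(['2011-07-01', '2011-07-02', '2011-07-05'],
                       busdaycal=cal))                       # [False False  True]

    # 21 July weekdays minus the 2 weekday holidays.
    print(np.busday_count('2011-07', '2011-08', busdaycal=cal))   # 19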
- - Attributes - ---------- - Note: once a busdaycalendar object is created, you cannot modify the - weekmask or holidays. The attributes return copies of internal data. - weekmask : (copy) seven-element array of bool - holidays : (copy) sorted array of datetime64[D] - - Examples - -------- - >>> # Some important days in July - ... bdd = np.busdaycalendar( - ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) - >>> # Default is Monday to Friday weekdays - ... bdd.weekmask - array([ True, True, True, True, True, False, False], dtype='bool') - >>> # Any holidays already on the weekend are removed - ... bdd.holidays - array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') - """) - -add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask', - """A copy of the seven-element boolean mask indicating valid days.""")) - -add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays', - """A copy of the holiday array indicating additional invalid days.""")) - -add_newdoc('numpy.core.multiarray', 'is_busday', - """ - is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) - - Calculates which of the given dates are valid days, and which are not. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - dates : array_like of datetime64[D] - The array of dates to process. - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates. They may be - specified in any order, and NaT (not-a-time) dates are ignored. - This list is saved in a normalized form that is suited for - fast calculations of valid days. - busdaycal : busdaycalendar, optional - A `busdaycalendar` object which specifies the valid days. If this - parameter is provided, neither weekmask nor holidays may be - provided. - out : array of bool, optional - If provided, this array is filled with the result. - - Returns - ------- - out : array of bool - An array with the same shape as ``dates``, containing True for - each valid day, and False for each invalid day. - - See Also - -------- - busdaycalendar: An object that specifies a custom set of valid days. - busday_offset : Applies an offset counted in valid days. - busday_count : Counts how many valid days are in a half-open date range. - - Examples - -------- - >>> # The weekdays are Friday, Saturday, and Monday - ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], - ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) - array([False, False, True], dtype='bool') - """) - -add_newdoc('numpy.core.multiarray', 'busday_offset', - """ - busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) - - First adjusts the date to fall on a valid day according to - the ``roll`` rule, then applies offsets to the given dates - counted in valid days. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - dates : array_like of datetime64[D] - The array of dates to process. - offsets : array_like of int - The array of offsets, which is broadcast with ``dates``. 
- roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional - How to treat dates that do not fall on a valid day. The default - is 'raise'. - - * 'raise' means to raise an exception for an invalid day. - * 'nat' means to return a NaT (not-a-time) for an invalid day. - * 'forward' and 'following' mean to take the first valid day - later in time. - * 'backward' and 'preceding' mean to take the first valid day - earlier in time. - * 'modifiedfollowing' means to take the first valid day - later in time unless it is across a Month boundary, in which - case to take the first valid day earlier in time. - * 'modifiedpreceding' means to take the first valid day - earlier in time unless it is across a Month boundary, in which - case to take the first valid day later in time. - weekmask : str or array_like of bool, optional - A seven-element array indicating which of Monday through Sunday are - valid days. May be specified as a length-seven list or array, like - [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string - like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for - weekdays, optionally separated by white space. Valid abbreviations - are: Mon Tue Wed Thu Fri Sat Sun - holidays : array_like of datetime64[D], optional - An array of dates to consider as invalid dates. They may be - specified in any order, and NaT (not-a-time) dates are ignored. - This list is saved in a normalized form that is suited for - fast calculations of valid days. - busdaycal : busdaycalendar, optional - A `busdaycalendar` object which specifies the valid days. If this - parameter is provided, neither weekmask nor holidays may be - provided. - out : array of datetime64[D], optional - If provided, this array is filled with the result. - - Returns - ------- - out : array of datetime64[D] - An array with a shape from broadcasting ``dates`` and ``offsets`` - together, containing the dates with offsets applied. - - See Also - -------- - busdaycalendar: An object that specifies a custom set of valid days. - is_busday : Returns a boolean array indicating valid days. - busday_count : Counts how many valid days are in a half-open date range. - - Examples - -------- - >>> # First business day in October 2011 (not accounting for holidays) - ... np.busday_offset('2011-10', 0, roll='forward') - numpy.datetime64('2011-10-03','D') - >>> # Last business day in February 2012 (not accounting for holidays) - ... np.busday_offset('2012-03', -1, roll='forward') - numpy.datetime64('2012-02-29','D') - >>> # Third Wednesday in January 2011 - ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') - numpy.datetime64('2011-01-19','D') - >>> # 2012 Mother's Day in Canada and the U.S. - ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') - numpy.datetime64('2012-05-13','D') - - >>> # First business day on or after a date - ... np.busday_offset('2011-03-20', 0, roll='forward') - numpy.datetime64('2011-03-21','D') - >>> np.busday_offset('2011-03-22', 0, roll='forward') - numpy.datetime64('2011-03-22','D') - >>> # First business day after a date - ... 
np.busday_offset('2011-03-20', 1, roll='backward')
-    numpy.datetime64('2011-03-21','D')
-    >>> np.busday_offset('2011-03-22', 1, roll='backward')
-    numpy.datetime64('2011-03-23','D')
-    """)
-
-add_newdoc('numpy.core.multiarray', 'busday_count',
-    """
-    busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
-
-    Counts the number of valid days between `begindates` and
-    `enddates`, not including the day of `enddates`.
-
-    If ``enddates`` specifies a date value that is earlier than the
-    corresponding ``begindates`` date value, the count will be negative.
-
-    .. versionadded:: 1.7.0
-
-    Parameters
-    ----------
-    begindates : array_like of datetime64[D]
-        The array of the first dates for counting.
-    enddates : array_like of datetime64[D]
-        The array of the end dates for counting, which are excluded
-        from the count themselves.
-    weekmask : str or array_like of bool, optional
-        A seven-element array indicating which of Monday through Sunday are
-        valid days.  May be specified as a length-seven list or array, like
-        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
-        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
-        weekdays, optionally separated by white space.  Valid abbreviations
-        are: Mon Tue Wed Thu Fri Sat Sun
-    holidays : array_like of datetime64[D], optional
-        An array of dates to consider as invalid dates.  They may be
-        specified in any order, and NaT (not-a-time) dates are ignored.
-        This list is saved in a normalized form that is suited for
-        fast calculations of valid days.
-    busdaycal : busdaycalendar, optional
-        A `busdaycalendar` object which specifies the valid days.  If this
-        parameter is provided, neither weekmask nor holidays may be
-        provided.
-    out : array of int, optional
-        If provided, this array is filled with the result.
-
-    Returns
-    -------
-    out : array of int
-        An array with a shape from broadcasting ``begindates`` and ``enddates``
-        together, containing the number of valid days between
-        the begin and end dates.
-
-    See Also
-    --------
-    busdaycalendar: An object that specifies a custom set of valid days.
-    is_busday : Returns a boolean array indicating valid days.
-    busday_offset : Applies an offset counted in valid days.
-
-    Examples
-    --------
-    >>> # Number of weekdays in January 2011
-    ... np.busday_count('2011-01', '2011-02')
-    21
-    >>> # Number of weekdays in 2011
-    ... np.busday_count('2011', '2012')
-    260
-    >>> # Number of Saturdays in 2011
-    ... np.busday_count('2011', '2012', weekmask='Sat')
-    53
-    """)
-
-##############################################################################
-#
-# nd_grid instances
-#
-##############################################################################
-
-add_newdoc('numpy.lib.index_tricks', 'mgrid',
-    """
-    `nd_grid` instance which returns a dense multi-dimensional "meshgrid".
-
-    An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
-    (or fleshed out) mesh-grid when indexed, so that each returned argument
-    has the same shape.  The dimensions and number of the output arrays are
-    equal to the number of indexing dimensions.  If the step length is not a
-    complex number, then the stop is not inclusive.
-
-    However, if the step length is a **complex number** (e.g. 5j), then
-    the integer part of its magnitude is interpreted as specifying the
-    number of points to create between the start and stop values, where
-    the stop value **is inclusive**.
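The complex-step convention described above can be checked against the simpler helpers; a minimal sketch, with ``np`` as NumPy::

    import numpy as np

    # A complex step 5j asks for 5 points with the stop included,
    # matching np.linspace with its default endpoint=True.
    print(np.mgrid[-1:1:5j])          # [-1.  -0.5  0.   0.5  1. ]
    print(np.linspace(-1, 1, 5))      # identical

    # A real step excludes the stop, matching np.arange.
    print(np.mgrid[0:1:0.25])         # [ 0.    0.25  0.5   0.75]
    print(np.arange(0, 1, 0.25))      # identical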
-
-    Returns
-    -------
-    mesh-grid `ndarrays` all of the same dimensions
-
-    See Also
-    --------
-    numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
-    ogrid : like mgrid but returns open (not fleshed out) mesh grids
-    r_ : array concatenator
-
-    Examples
-    --------
-    >>> np.mgrid[0:5,0:5]
-    array([[[0, 0, 0, 0, 0],
-            [1, 1, 1, 1, 1],
-            [2, 2, 2, 2, 2],
-            [3, 3, 3, 3, 3],
-            [4, 4, 4, 4, 4]],
-           [[0, 1, 2, 3, 4],
-            [0, 1, 2, 3, 4],
-            [0, 1, 2, 3, 4],
-            [0, 1, 2, 3, 4],
-            [0, 1, 2, 3, 4]]])
-    >>> np.mgrid[-1:1:5j]
-    array([-1. , -0.5,  0. ,  0.5,  1. ])
-
-    """)
-
-add_newdoc('numpy.lib.index_tricks', 'ogrid',
-    """
-    `nd_grid` instance which returns an open multi-dimensional "meshgrid".
-
-    An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
-    (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
-    of each returned array is greater than 1.  The dimension and number of the
-    output arrays are equal to the number of indexing dimensions.  If the step
-    length is not a complex number, then the stop is not inclusive.
-
-    However, if the step length is a **complex number** (e.g. 5j), then
-    the integer part of its magnitude is interpreted as specifying the
-    number of points to create between the start and stop values, where
-    the stop value **is inclusive**.
-
-    Returns
-    -------
-    mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
-
-    See Also
-    --------
-    np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
-    mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
-    r_ : array concatenator
-
-    Examples
-    --------
-    >>> from numpy import ogrid
-    >>> ogrid[-1:1:5j]
-    array([-1. , -0.5,  0. ,  0.5,  1. ])
-    >>> ogrid[0:5,0:5]
-    [array([[0],
-            [1],
-            [2],
-            [3],
-            [4]]), array([[0, 1, 2, 3, 4]])]
-
-    """)
-
-
-##############################################################################
-#
-# Documentation for `generic` attributes and methods
-#
-##############################################################################
-
-add_newdoc('numpy.core.numerictypes', 'generic',
-    """
-    Base class for numpy scalar types.
-
-    Class from which most (all?) numpy scalar types are derived.  For
-    consistency, exposes the same API as `ndarray`, despite many
-    consequent attributes being either "get-only," or completely irrelevant.
-    This is the class from which it is strongly suggested users should derive
-    custom scalar types.
-
-    """)
-
-# Attributes
-
-add_newdoc('numpy.core.numerictypes', 'generic', ('T',
-    """
-    Not implemented (virtual attribute)
-
-    Class generic exists solely to derive numpy scalars from, and possesses,
-    albeit unimplemented, all the attributes of the ndarray class so as to
-    provide a uniform API.
-
-    See Also
-    --------
-    The corresponding attribute of the derived class of interest.
-
-    """))
-
-add_newdoc('numpy.core.numerictypes', 'generic', ('base',
-    """
-    Not implemented (virtual attribute)
-
-    Class generic exists solely to derive numpy scalars from, and possesses,
-    albeit unimplemented, all the attributes of the ndarray class so as to
-    provide a uniform API.
-
-    See Also
-    --------
-    The corresponding attribute of the derived class of interest.
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('data', - """Pointer to start of data.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', - """Get array data-descriptor.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flags', - """The integer value of flags.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flat', - """A 1-D view of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('imag', - """The imaginary part of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', - """The length of one element in bytes.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', - """The length of the scalar in bytes.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', - """The number of array dimensions.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('real', - """The real part of the scalar.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('shape', - """Tuple of array dimensions.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('size', - """The number of elements in the gentype.""")) - -add_newdoc('numpy.core.numerictypes', 'generic', ('strides', - """Tuple of bytes steps in each dimension.""")) - -# Methods - -add_newdoc('numpy.core.numerictypes', 'generic', ('all', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('any', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('astype', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('choose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('clip', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('compress', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('copy', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dump', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('fill', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('item', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('max', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('mean', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('min', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', - """ - newbyteorder(new_order='S') - - Return a new `dtype` with a different byte order. - - Changes are also made in all fields and sub-arrays of the data type. - - The `new_order` code can be any from the following: - - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * 'S' - swap dtype from current to opposite endian - * {'|', 'I'} - ignore (no change to byte order) - - Parameters - ---------- - new_order : str, optional - Byte order to force; a value from the byte order specifications - above. The default value ('S') results in swapping the current - byte order. The code does a case-insensitive check on the first - letter of `new_order` for the alternatives above. For example, - any of 'B' or 'b' or 'biggish' are valid to specify big-endian. - - - Returns - ------- - new_dtype : dtype - New `dtype` object with the given change to the byte order. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('prod', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('put', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. 
- - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('resize', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('round', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class so as to - provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sort', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('std', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('sum', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('take', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('trace', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('var', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. 
- - """)) - -add_newdoc('numpy.core.numerictypes', 'generic', ('view', - """ - Not implemented (virtual attribute) - - Class generic exists solely to derive numpy scalars from, and possesses, - albeit unimplemented, all the attributes of the ndarray class - so as to provide a uniform API. - - See Also - -------- - The corresponding attribute of the derived class of interest. - - """)) - - -############################################################################## -# -# Documentation for other scalar classes -# -############################################################################## - -add_newdoc('numpy.core.numerictypes', 'bool_', - """Numpy's Boolean type. Character code: ``?``. Alias: bool8""") - -add_newdoc('numpy.core.numerictypes', 'complex64', - """ - Complex number type composed of two 32 bit floats. Character code: 'F'. - - """) - -add_newdoc('numpy.core.numerictypes', 'complex128', - """ - Complex number type composed of two 64 bit floats. Character code: 'D'. - Python complex compatible. - - """) - -add_newdoc('numpy.core.numerictypes', 'complex256', - """ - Complex number type composed of two 128-bit floats. Character code: 'G'. - - """) - -add_newdoc('numpy.core.numerictypes', 'float32', - """ - 32-bit floating-point number. Character code 'f'. C float compatible. - - """) - -add_newdoc('numpy.core.numerictypes', 'float64', - """ - 64-bit floating-point number. Character code 'd'. Python float compatible. - - """) - -add_newdoc('numpy.core.numerictypes', 'float96', - """ - """) - -add_newdoc('numpy.core.numerictypes', 'float128', - """ - 128-bit floating-point number. Character code: 'g'. C long float - compatible. - - """) - -add_newdoc('numpy.core.numerictypes', 'int8', - """8-bit integer. Character code ``b``. C char compatible.""") - -add_newdoc('numpy.core.numerictypes', 'int16', - """16-bit integer. Character code ``h``. C short compatible.""") - -add_newdoc('numpy.core.numerictypes', 'int32', - """32-bit integer. Character code 'i'. C int compatible.""") - -add_newdoc('numpy.core.numerictypes', 'int64', - """64-bit integer. Character code 'l'. Python int compatible.""") - -add_newdoc('numpy.core.numerictypes', 'object_', - """Any Python object. Character code: 'O'.""") diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/__init__.py deleted file mode 100644 index 5b371f5c064ba..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Compatibility module. - -This module contains duplicated code from Python itself or 3rd party -extensions, which may be included for the following reasons: - - * compatibility - * we may only need a small subset of the copied library/module - -""" -from __future__ import division, absolute_import, print_function - -from . import _inspect -from . import py3k -from ._inspect import getargspec, formatargspec -from .py3k import * - -__all__ = [] -__all__.extend(_inspect.__all__) -__all__.extend(py3k.__all__) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/_inspect.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/_inspect.py deleted file mode 100644 index 6a499e727b6ee..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/_inspect.py +++ /dev/null @@ -1,221 +0,0 @@ -"""Subset of inspect module from upstream python - -We use this instead of upstream because upstream inspect is slow to import, and -significanly contributes to numpy import times. 
Importing this copy has almost -no overhead. - -""" -from __future__ import division, absolute_import, print_function - -import types - -__all__ = ['getargspec', 'formatargspec'] - -# ----------------------------------------------------------- type-checking -def ismethod(object): - """Return true if the object is an instance method. - - Instance method objects provide these attributes: - __doc__ documentation string - __name__ name with which this method was defined - im_class class object in which this method belongs - im_func function object containing implementation of method - im_self instance to which this method is bound, or None""" - return isinstance(object, types.MethodType) - -def isfunction(object): - """Return true if the object is a user-defined function. - - Function objects provide these attributes: - __doc__ documentation string - __name__ name with which this function was defined - func_code code object containing compiled function bytecode - func_defaults tuple of any default values for arguments - func_doc (same as __doc__) - func_globals global namespace in which this function was defined - func_name (same as __name__)""" - return isinstance(object, types.FunctionType) - -def iscode(object): - """Return true if the object is a code object. - - Code objects provide these attributes: - co_argcount number of arguments (not including * or ** args) - co_code string of raw compiled bytecode - co_consts tuple of constants used in the bytecode - co_filename name of file in which this code object was created - co_firstlineno number of first line in Python source code - co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg - co_lnotab encoded mapping of line numbers to bytecode indices - co_name name with which this code object was defined - co_names tuple of names of local variables - co_nlocals number of local variables - co_stacksize virtual machine stack space required - co_varnames tuple of names of arguments and local variables""" - return isinstance(object, types.CodeType) - -# ------------------------------------------------ argument list extraction -# These constants are from Python's compile.h. -CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 - -def getargs(co): - """Get information about the arguments accepted by a code object. - - Three things are returned: (args, varargs, varkw), where 'args' is - a list of argument names (possibly containing nested lists), and - 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" - - if not iscode(co): - raise TypeError('arg is not a code object') - - code = co.co_code - nargs = co.co_argcount - names = co.co_varnames - args = list(names[:nargs]) - step = 0 - - # The following acrobatics are for anonymous (tuple) arguments. - for i in range(nargs): - if args[i][:1] in ['', '.']: - stack, remain, count = [], [], [] - while step < len(code): - op = ord(code[step]) - step = step + 1 - if op >= dis.HAVE_ARGUMENT: - opname = dis.opname[op] - value = ord(code[step]) + ord(code[step+1])*256 - step = step + 2 - if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']: - remain.append(value) - count.append(value) - elif opname == 'STORE_FAST': - stack.append(names[value]) - - # Special case for sublists of length 1: def foo((bar)) - # doesn't generate the UNPACK_TUPLE bytecode, so if - # `remain` is empty here, we have such a sublist. 
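# NB: this bytecode walk references dis.HAVE_ARGUMENT and dis.opname,
# but this trimmed copy imports only types, so the anonymous-tuple
# branch would raise NameError if ever reached; Python 3 removed tuple
# parameters (PEP 3113), leaving the branch effectively dead here.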
- if not remain: - stack[0] = [stack[0]] - break - else: - remain[-1] = remain[-1] - 1 - while remain[-1] == 0: - remain.pop() - size = count.pop() - stack[-size:] = [stack[-size:]] - if not remain: break - remain[-1] = remain[-1] - 1 - if not remain: break - args[i] = stack[0] - - varargs = None - if co.co_flags & CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - return args, varargs, varkw - -def getargspec(func): - """Get the names and default values of a function's arguments. - - A tuple of four things is returned: (args, varargs, varkw, defaults). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'defaults' is an n-tuple of the default values of the last n arguments. - """ - - if ismethod(func): - func = func.__func__ - if not isfunction(func): - raise TypeError('arg is not a Python function') - args, varargs, varkw = getargs(func.__code__) - return args, varargs, varkw, func.__defaults__ - -def getargvalues(frame): - """Get information about arguments passed into a particular frame. - - A tuple of four things is returned: (args, varargs, varkw, locals). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'locals' is the locals dictionary of the given frame.""" - args, varargs, varkw = getargs(frame.f_code) - return args, varargs, varkw, frame.f_locals - -def joinseq(seq): - if len(seq) == 1: - return '(' + seq[0] + ',)' - else: - return '(' + ', '.join(seq) + ')' - -def strseq(object, convert, join=joinseq): - """Recursively walk a sequence, stringifying each element.""" - if type(object) in [list, tuple]: - return join([strseq(_o, convert, join) for _o in object]) - else: - return convert(object) - -def formatargspec(args, varargs=None, varkw=None, defaults=None, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargspec. - - The first four arguments are (args, varargs, varkw, defaults). The - other four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments.""" - specs = [] - if defaults: - firstdefault = len(args) - len(defaults) - for i in range(len(args)): - spec = strseq(args[i], formatarg, join) - if defaults and i >= firstdefault: - spec = spec + formatvalue(defaults[i - firstdefault]) - specs.append(spec) - if varargs is not None: - specs.append(formatvarargs(varargs)) - if varkw is not None: - specs.append(formatvarkw(varkw)) - return '(' + ', '.join(specs) + ')' - -def formatargvalues(args, varargs, varkw, locals, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargvalues. - - The first four arguments are (args, varargs, varkw, locals). The - next four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. 
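# A sketch of the two names this module exports via __all__, using a
# throwaway example function _g:
def _g(x, y=2, *rest):
    return None
print(getargspec(_g))                  # (['x', 'y'], 'rest', None, (2,))
print(formatargspec(*getargspec(_g)))  # (x, y=2, *rest)
# NB: formatargvalues below still calls string.join() without importing
# string, so it would fail if used on Python 3; it is not in __all__.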
The ninth - argument is an optional function to format the sequence of arguments.""" - def convert(name, locals=locals, - formatarg=formatarg, formatvalue=formatvalue): - return formatarg(name) + formatvalue(locals[name]) - specs = [] - for i in range(len(args)): - specs.append(strseq(args[i], convert, join)) - if varargs: - specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) - if varkw: - specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) - return '(' + string.join(specs, ', ') + ')' - -if __name__ == '__main__': - import inspect - def foo(x, y, z=None): - return None - - print(inspect.getargs(foo.__code__)) - print(getargs(foo.__code__)) - - print(inspect.getargspec(foo)) - print(getargspec(foo)) - - print(inspect.formatargspec(*inspect.getargspec(foo))) - print(formatargspec(*getargspec(foo))) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/py3k.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/py3k.py deleted file mode 100644 index 4607d95023322..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/py3k.py +++ /dev/null @@ -1,89 +0,0 @@ -""" -Python 3 compatibility tools. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', - 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', - 'integer_types'] - -import sys - -if sys.version_info[0] >= 3: - import io - - long = int - integer_types = (int,) - basestring = str - unicode = str - bytes = bytes - - def asunicode(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - - def asbytes(s): - if isinstance(s, bytes): - return s - return str(s).encode('latin1') - - def asstr(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - - def isfileobj(f): - return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) - - def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') - - def sixu(s): - return s - - strchar = 'U' - - -else: - bytes = str - long = long - basestring = basestring - unicode = unicode - integer_types = (int, long) - asbytes = str - asstr = str - strchar = 'S' - - - def isfileobj(f): - return isinstance(f, file) - - def asunicode(s): - if isinstance(s, unicode): - return s - return str(s).decode('ascii') - - def open_latin1(filename, mode='r'): - return open(filename, mode=mode) - - def sixu(s): - return unicode(s, 'unicode_escape') - - -def getexception(): - return sys.exc_info()[1] - -def asbytes_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asbytes_nested(y) for y in x] - else: - return asbytes(x) - -def asunicode_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asunicode_nested(y) for y in x] - else: - return asunicode(x) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/setup.py deleted file mode 100644 index c163bcaf973c3..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/compat/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('compat', parent_package, top_path) - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - 
setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/__init__.py deleted file mode 100644 index 0b8d5bb17786a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/__init__.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from .info import __doc__ -from numpy.version import version as __version__ - -from . import multiarray -from . import umath -from . import _internal # for freeze programs -from . import numerictypes as nt -multiarray.set_typeDict(nt.sctypeDict) -from . import numeric -from .numeric import * -from . import fromnumeric -from .fromnumeric import * -from . import defchararray as char -from . import records as rec -from .records import * -from .memmap import * -from .defchararray import chararray -from . import scalarmath -from . import function_base -from .function_base import * -from . import machar -from .machar import * -from . import getlimits -from .getlimits import * -from . import shape_base -from .shape_base import * -del nt - -from .fromnumeric import amax as max, amin as min, \ - round_ as round -from .numeric import absolute as abs - -__all__ = ['char', 'rec', 'memmap'] -__all__ += numeric.__all__ -__all__ += fromnumeric.__all__ -__all__ += rec.__all__ -__all__ += ['chararray'] -__all__ += function_base.__all__ -__all__ += machar.__all__ -__all__ += getlimits.__all__ -__all__ += shape_base.__all__ - - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench - -# Make it possible so that ufuncs can be pickled -# Here are the loading and unloading functions -# The name numpy.core._ufunc_reconstruct must be -# available for unpickling to work. -def _ufunc_reconstruct(module, name): - # The `fromlist` kwarg is required to ensure that `mod` points to the - # inner-most module rather than the parent package when module name is - # nested. This makes it possible to pickle non-toplevel ufuncs such as - # scipy.special.expit for instance. - mod = __import__(module, fromlist=[name]) - return getattr(mod, name) - -def _ufunc_reduce(func): - from pickle import whichmodule - name = func.__name__ - return _ufunc_reconstruct, (whichmodule(func, name), name) - - -import sys -if sys.version_info[0] >= 3: - import copyreg -else: - import copy_reg as copyreg - -copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct) -# Unclutter namespace (must keep _ufunc_reconstruct for unpickling) -del copyreg -del sys -del _ufunc_reduce diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_dummy.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_dummy.py deleted file mode 100644 index aaa56df579d40..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_dummy.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, '_dummy.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_internal.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_internal.py deleted file mode 100644 index d32f593904ae7..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_internal.py +++ /dev/null @@ -1,570 +0,0 @@ -""" -A place for code to be called from core C-code. - -Some things are more easily handled Python. 
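# _commastring below is what backs comma-separated dtype specs; a small
# sketch through the public constructor:
import numpy as np
dt = np.dtype('i4, 2f8')          # two fields, auto-named f0 and f1
print(dt.names)                   # ('f0', 'f1')
print(dt.fields['f1'][0].shape)   # (2,) -- the '2' repeat count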
- -""" -from __future__ import division, absolute_import, print_function - -import re -import sys -import warnings - -from numpy.compat import asbytes, bytes - -if (sys.byteorder == 'little'): - _nbo = asbytes('<') -else: - _nbo = asbytes('>') - -def _makenames_list(adict, align): - from .multiarray import dtype - allfields = [] - fnames = list(adict.keys()) - for fname in fnames: - obj = adict[fname] - n = len(obj) - if not isinstance(obj, tuple) or n not in [2, 3]: - raise ValueError("entry not a 2- or 3- tuple") - if (n > 2) and (obj[2] == fname): - continue - num = int(obj[1]) - if (num < 0): - raise ValueError("invalid offset.") - format = dtype(obj[0], align=align) - if (format.itemsize == 0): - raise ValueError("all itemsizes must be fixed.") - if (n > 2): - title = obj[2] - else: - title = None - allfields.append((fname, format, num, title)) - # sort by offsets - allfields.sort(key=lambda x: x[2]) - names = [x[0] for x in allfields] - formats = [x[1] for x in allfields] - offsets = [x[2] for x in allfields] - titles = [x[3] for x in allfields] - - return names, formats, offsets, titles - -# Called in PyArray_DescrConverter function when -# a dictionary without "names" and "formats" -# fields is used as a data-type descriptor. -def _usefields(adict, align): - from .multiarray import dtype - try: - names = adict[-1] - except KeyError: - names = None - if names is None: - names, formats, offsets, titles = _makenames_list(adict, align) - else: - formats = [] - offsets = [] - titles = [] - for name in names: - res = adict[name] - formats.append(res[0]) - offsets.append(res[1]) - if (len(res) > 2): - titles.append(res[2]) - else: - titles.append(None) - - return dtype({"names" : names, - "formats" : formats, - "offsets" : offsets, - "titles" : titles}, align) - - -# construct an array_protocol descriptor list -# from the fields attribute of a descriptor -# This calls itself recursively but should eventually hit -# a descriptor that has no fields and then return -# a simple typestring - -def _array_descr(descriptor): - fields = descriptor.fields - if fields is None: - subdtype = descriptor.subdtype - if subdtype is None: - if descriptor.metadata is None: - return descriptor.str - else: - new = descriptor.metadata.copy() - if new: - return (descriptor.str, new) - else: - return descriptor.str - else: - return (_array_descr(subdtype[0]), subdtype[1]) - - - names = descriptor.names - ordered_fields = [fields[x] + (x,) for x in names] - result = [] - offset = 0 - for field in ordered_fields: - if field[1] > offset: - num = field[1] - offset - result.append(('', '|V%d' % num)) - offset += num - if len(field) > 3: - name = (field[2], field[3]) - else: - name = field[2] - if field[0].subdtype: - tup = (name, _array_descr(field[0].subdtype[0]), - field[0].subdtype[1]) - else: - tup = (name, _array_descr(field[0])) - offset += field[0].itemsize - result.append(tup) - - return result - -# Build a new array from the information in a pickle. -# Note that the name numpy.core._internal._reconstruct is embedded in -# pickles of ndarrays made with NumPy before release 1.0 -# so don't remove the name here, or you'll -# break backward compatibilty. -def _reconstruct(subtype, shape, dtype): - from .multiarray import ndarray - return ndarray.__new__(subtype, shape, dtype) - - -# format_re was originally from numarray by J. Todd Miller - -format_re = re.compile(asbytes( - r'(?P[<>|=]?)' - r'(?P *[(]?[ ,0-9L]*[)]? 
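# For reference, format_re is built from four named groups that
# _commastring below unpacks via mo.groups(); written out in full:
#     r'(?P<order1>[<>|=]?)'
#     r'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
#     r'(?P<order2>[<>|=]?)'
#     r'(?P<dtype>[A-Za-z0-9.]*(?:\[[a-zA-Z0-9,.]+\])?)'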
*)' - r'(?P[<>|=]?)' - r'(?P[A-Za-z0-9.]*(?:\[[a-zA-Z0-9,.]+\])?)')) -sep_re = re.compile(asbytes(r'\s*,\s*')) -space_re = re.compile(asbytes(r'\s+$')) - -# astr is a string (perhaps comma separated) - -_convorder = {asbytes('='): _nbo} - -def _commastring(astr): - startindex = 0 - result = [] - while startindex < len(astr): - mo = format_re.match(astr, pos=startindex) - try: - (order1, repeats, order2, dtype) = mo.groups() - except (TypeError, AttributeError): - raise ValueError('format number %d of "%s" is not recognized' % - (len(result)+1, astr)) - startindex = mo.end() - # Separator or ending padding - if startindex < len(astr): - if space_re.match(astr, pos=startindex): - startindex = len(astr) - else: - mo = sep_re.match(astr, pos=startindex) - if not mo: - raise ValueError( - 'format number %d of "%s" is not recognized' % - (len(result)+1, astr)) - startindex = mo.end() - - if order2 == asbytes(''): - order = order1 - elif order1 == asbytes(''): - order = order2 - else: - order1 = _convorder.get(order1, order1) - order2 = _convorder.get(order2, order2) - if (order1 != order2): - raise ValueError('inconsistent byte-order specification %s and %s' % (order1, order2)) - order = order1 - - if order in [asbytes('|'), asbytes('='), _nbo]: - order = asbytes('') - dtype = order + dtype - if (repeats == asbytes('')): - newitem = dtype - else: - newitem = (dtype, eval(repeats)) - result.append(newitem) - - return result - -def _getintp_ctype(): - from .multiarray import dtype - val = _getintp_ctype.cache - if val is not None: - return val - char = dtype('p').char - import ctypes - if (char == 'i'): - val = ctypes.c_int - elif char == 'l': - val = ctypes.c_long - elif char == 'q': - val = ctypes.c_longlong - else: - val = ctypes.c_long - _getintp_ctype.cache = val - return val -_getintp_ctype.cache = None - -# Used for .ctypes attribute of ndarray - -class _missing_ctypes(object): - def cast(self, num, obj): - return num - - def c_void_p(self, num): - return num - -class _ctypes(object): - def __init__(self, array, ptr=None): - try: - import ctypes - self._ctypes = ctypes - except ImportError: - self._ctypes = _missing_ctypes() - self._arr = array - self._data = ptr - if self._arr.ndim == 0: - self._zerod = True - else: - self._zerod = False - - def data_as(self, obj): - return self._ctypes.cast(self._data, obj) - - def shape_as(self, obj): - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.shape) - - def strides_as(self, obj): - if self._zerod: - return None - return (obj*self._arr.ndim)(*self._arr.strides) - - def get_data(self): - return self._data - - def get_shape(self): - if self._zerod: - return None - return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape) - - def get_strides(self): - if self._zerod: - return None - return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides) - - def get_as_parameter(self): - return self._ctypes.c_void_p(self._data) - - data = property(get_data, None, doc="c-types data") - shape = property(get_shape, None, doc="c-types shape") - strides = property(get_strides, None, doc="c-types strides") - _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_") - - -# Given a datatype and an order object -# return a new names tuple -# with the order indicated -def _newnames(datatype, order): - oldnames = datatype.names - nameslist = list(oldnames) - if isinstance(order, str): - order = [order] - if isinstance(order, (list, tuple)): - for name in order: - try: - nameslist.remove(name) - except ValueError: - raise 
ValueError("unknown field name: %s" % (name,)) - return tuple(list(order) + nameslist) - raise ValueError("unsupported order value: %s" % (order,)) - -# Given an array with fields and a sequence of field names -# construct a new array with just those fields copied over -def _index_fields(ary, fields): - from .multiarray import empty, dtype, array - dt = ary.dtype - - names = [name for name in fields if name in dt.names] - formats = [dt.fields[name][0] for name in fields if name in dt.names] - offsets = [dt.fields[name][1] for name in fields if name in dt.names] - - view_dtype = {'names':names, 'formats':formats, 'offsets':offsets, 'itemsize':dt.itemsize} - view = ary.view(dtype=view_dtype) - - # Return a copy for now until behavior is fully deprecated - # in favor of returning view - copy_dtype = {'names':view_dtype['names'], 'formats':view_dtype['formats']} - return array(view, dtype=copy_dtype, copy=True) - -# Given a string containing a PEP 3118 format specifier, -# construct a Numpy dtype - -_pep3118_native_map = { - '?': '?', - 'b': 'b', - 'B': 'B', - 'h': 'h', - 'H': 'H', - 'i': 'i', - 'I': 'I', - 'l': 'l', - 'L': 'L', - 'q': 'q', - 'Q': 'Q', - 'e': 'e', - 'f': 'f', - 'd': 'd', - 'g': 'g', - 'Zf': 'F', - 'Zd': 'D', - 'Zg': 'G', - 's': 'S', - 'w': 'U', - 'O': 'O', - 'x': 'V', # padding -} -_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) - -_pep3118_standard_map = { - '?': '?', - 'b': 'b', - 'B': 'B', - 'h': 'i2', - 'H': 'u2', - 'i': 'i4', - 'I': 'u4', - 'l': 'i4', - 'L': 'u4', - 'q': 'i8', - 'Q': 'u8', - 'e': 'f2', - 'f': 'f', - 'd': 'd', - 'Zf': 'F', - 'Zd': 'D', - 's': 'S', - 'w': 'U', - 'O': 'O', - 'x': 'V', # padding -} -_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) - -def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False): - from numpy.core.multiarray import dtype - - fields = {} - offset = 0 - explicit_name = False - this_explicit_name = False - common_alignment = 1 - is_padding = False - last_offset = 0 - - dummy_name_index = [0] - def next_dummy_name(): - dummy_name_index[0] += 1 - def get_dummy_name(): - while True: - name = 'f%d' % dummy_name_index[0] - if name not in fields: - return name - next_dummy_name() - - # Parse spec - while spec: - value = None - - # End of structure, bail out to upper level - if spec[0] == '}': - spec = spec[1:] - break - - # Sub-arrays (1) - shape = None - if spec[0] == '(': - j = spec.index(')') - shape = tuple(map(int, spec[1:j].split(','))) - spec = spec[j+1:] - - # Byte order - if spec[0] in ('@', '=', '<', '>', '^', '!'): - byteorder = spec[0] - if byteorder == '!': - byteorder = '>' - spec = spec[1:] - - # Byte order characters also control native vs. 
standard type sizes - if byteorder in ('@', '^'): - type_map = _pep3118_native_map - type_map_chars = _pep3118_native_typechars - else: - type_map = _pep3118_standard_map - type_map_chars = _pep3118_standard_typechars - - # Item sizes - itemsize = 1 - if spec[0].isdigit(): - j = 1 - for j in range(1, len(spec)): - if not spec[j].isdigit(): - break - itemsize = int(spec[:j]) - spec = spec[j:] - - # Data types - is_padding = False - - if spec[:2] == 'T{': - value, spec, align, next_byteorder = _dtype_from_pep3118( - spec[2:], byteorder=byteorder, is_subdtype=True) - elif spec[0] in type_map_chars: - next_byteorder = byteorder - if spec[0] == 'Z': - j = 2 - else: - j = 1 - typechar = spec[:j] - spec = spec[j:] - is_padding = (typechar == 'x') - dtypechar = type_map[typechar] - if dtypechar in 'USV': - dtypechar += '%d' % itemsize - itemsize = 1 - numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder) - value = dtype(numpy_byteorder + dtypechar) - align = value.alignment - else: - raise ValueError("Unknown PEP 3118 data type specifier %r" % spec) - - # - # Native alignment may require padding - # - # Here we assume that the presence of a '@' character implicitly implies - # that the start of the array is *already* aligned. - # - extra_offset = 0 - if byteorder == '@': - start_padding = (-offset) % align - intra_padding = (-value.itemsize) % align - - offset += start_padding - - if intra_padding != 0: - if itemsize > 1 or (shape is not None and _prod(shape) > 1): - # Inject internal padding to the end of the sub-item - value = _add_trailing_padding(value, intra_padding) - else: - # We can postpone the injection of internal padding, - # as the item appears at most once - extra_offset += intra_padding - - # Update common alignment - common_alignment = (align*common_alignment - / _gcd(align, common_alignment)) - - # Convert itemsize to sub-array - if itemsize != 1: - value = dtype((value, (itemsize,))) - - # Sub-arrays (2) - if shape is not None: - value = dtype((value, shape)) - - # Field name - this_explicit_name = False - if spec and spec.startswith(':'): - i = spec[1:].index(':') + 1 - name = spec[1:i] - spec = spec[i+1:] - explicit_name = True - this_explicit_name = True - else: - name = get_dummy_name() - - if not is_padding or this_explicit_name: - if name in fields: - raise RuntimeError("Duplicate field name '%s' in PEP3118 format" - % name) - fields[name] = (value, offset) - last_offset = offset - if not this_explicit_name: - next_dummy_name() - - byteorder = next_byteorder - - offset += value.itemsize - offset += extra_offset - - # Check if this was a simple 1-item type - if len(fields) == 1 and not explicit_name and fields['f0'][1] == 0 \ - and not is_subdtype: - ret = fields['f0'][0] - else: - ret = dtype(fields) - - # Trailing padding must be explicitly added - padding = offset - ret.itemsize - if byteorder == '@': - padding += (-offset) % common_alignment - if is_padding and not this_explicit_name: - ret = _add_trailing_padding(ret, padding) - - # Finished - if is_subdtype: - return ret, spec, common_alignment, byteorder - else: - return ret - -def _add_trailing_padding(value, padding): - """Inject the specified number of padding bytes at the end of a dtype""" - from numpy.core.multiarray import dtype - - if value.fields is None: - vfields = {'f0': (value, 0)} - else: - vfields = dict(value.fields) - - if value.names and value.names[-1] == '' and \ - value[''].char == 'V': - # A trailing padding field is already present - vfields[''] = ('V%d' % (vfields[''][0].itemsize + 
padding), - vfields[''][1]) - value = dtype(vfields) - else: - # Get a free name for the padding field - j = 0 - while True: - name = 'pad%d' % j - if name not in vfields: - vfields[name] = ('V%d' % padding, value.itemsize) - break - j += 1 - - value = dtype(vfields) - if '' not in vfields: - # Strip out the name of the padding field - names = list(value.names) - names[-1] = '' - value.names = tuple(names) - return value - -def _prod(a): - p = 1 - for x in a: - p *= x - return p - -def _gcd(a, b): - """Calculate the greatest common divisor of a and b""" - while b: - a, b = b, a%b - return a diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_methods.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_methods.py deleted file mode 100644 index 00716e1b4e095..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/_methods.py +++ /dev/null @@ -1,134 +0,0 @@ -""" -Array methods which are called by both the C-code for the method -and the Python code for the NumPy-namespace function - -""" -from __future__ import division, absolute_import, print_function - -import warnings - -from numpy.core import multiarray as mu -from numpy.core import umath as um -from numpy.core.numeric import asanyarray -from numpy.core import numerictypes as nt - -# save those O(100) nanoseconds! -umr_maximum = um.maximum.reduce -umr_minimum = um.minimum.reduce -umr_sum = um.add.reduce -umr_prod = um.multiply.reduce -umr_any = um.logical_or.reduce -umr_all = um.logical_and.reduce - -# avoid keyword arguments to speed up parsing, saves about 15%-20% for very -# small reductions -def _amax(a, axis=None, out=None, keepdims=False): - return umr_maximum(a, axis, None, out, keepdims) - -def _amin(a, axis=None, out=None, keepdims=False): - return umr_minimum(a, axis, None, out, keepdims) - -def _sum(a, axis=None, dtype=None, out=None, keepdims=False): - return umr_sum(a, axis, dtype, out, keepdims) - -def _prod(a, axis=None, dtype=None, out=None, keepdims=False): - return umr_prod(a, axis, dtype, out, keepdims) - -def _any(a, axis=None, dtype=None, out=None, keepdims=False): - return umr_any(a, axis, dtype, out, keepdims) - -def _all(a, axis=None, dtype=None, out=None, keepdims=False): - return umr_all(a, axis, dtype, out, keepdims) - -def _count_reduce_items(arr, axis): - if axis is None: - axis = tuple(range(arr.ndim)) - if not isinstance(axis, tuple): - axis = (axis,) - items = 1 - for ax in axis: - items *= arr.shape[ax] - return items - -def _mean(a, axis=None, dtype=None, out=None, keepdims=False): - arr = asanyarray(a) - - rcount = _count_reduce_items(arr, axis) - # Make this warning show up first - if rcount == 0: - warnings.warn("Mean of empty slice.", RuntimeWarning) - - - # Cast bool, unsigned int, and int to float64 by default - if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): - dtype = mu.dtype('f8') - - ret = umr_sum(arr, axis, dtype, out, keepdims) - if isinstance(ret, mu.ndarray): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) - elif hasattr(ret, 'dtype'): - ret = ret.dtype.type(ret / rcount) - else: - ret = ret / rcount - - return ret - -def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - arr = asanyarray(a) - - rcount = _count_reduce_items(arr, axis) - # Make this warning show up on top. 
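# ddof (delta degrees of freedom) shifts the divisor from N to N - ddof,
# so ddof=1 gives the unbiased sample variance; a divisor <= 0 is
# meaningless, hence the warning when ddof >= rcount: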
- if ddof >= rcount: - warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) - - # Cast bool, unsigned int, and int to float64 by default - if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): - dtype = mu.dtype('f8') - - # Compute the mean. - # Note that if dtype is not of inexact type then arraymean will - # not be either. - arrmean = umr_sum(arr, axis, dtype, keepdims=True) - if isinstance(arrmean, mu.ndarray): - arrmean = um.true_divide( - arrmean, rcount, out=arrmean, casting='unsafe', subok=False) - else: - arrmean = arrmean.dtype.type(arrmean / rcount) - - # Compute sum of squared deviations from mean - # Note that x may not be inexact and that we need it to be an array, - # not a scalar. - x = asanyarray(arr - arrmean) - if issubclass(arr.dtype.type, nt.complexfloating): - x = um.multiply(x, um.conjugate(x), out=x).real - else: - x = um.multiply(x, x, out=x) - ret = umr_sum(x, axis, dtype, out, keepdims) - - # Compute degrees of freedom and make sure it is not negative. - rcount = max([rcount - ddof, 0]) - - # divide by degrees of freedom - if isinstance(ret, mu.ndarray): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) - elif hasattr(ret, 'dtype'): - ret = ret.dtype.type(ret / rcount) - else: - ret = ret / rcount - - return ret - -def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - - if isinstance(ret, mu.ndarray): - ret = um.sqrt(ret, out=ret) - elif hasattr(ret, 'dtype'): - ret = ret.dtype.type(um.sqrt(ret)) - else: - ret = um.sqrt(ret) - - return ret diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/arrayprint.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/arrayprint.py deleted file mode 100644 index db491e6f5d74c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/arrayprint.py +++ /dev/null @@ -1,752 +0,0 @@ -"""Array printing function - -$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ["array2string", "set_printoptions", "get_printoptions"] -__docformat__ = 'restructuredtext' - -# -# Written by Konrad Hinsen -# last revision: 1996-3-13 -# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) -# and by Perry Greenfield 2000-4-1 for numarray -# and by Travis Oliphant 2005-8-22 for numpy - -import sys -from functools import reduce -from . import numerictypes as _nt -from .umath import maximum, minimum, absolute, not_equal, isnan, isinf -from .multiarray import format_longfloat, datetime_as_string, datetime_data -from .fromnumeric import ravel - -if sys.version_info[0] >= 3: - _MAXINT = sys.maxsize - _MININT = -sys.maxsize - 1 -else: - _MAXINT = sys.maxint - _MININT = -sys.maxint - 1 - -def product(x, y): return x*y - -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization - -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'nan' -_inf_str = 'inf' -_formatter = None # formatting function for array elements - - -def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None, - formatter=None): - """ - Set printing options. - - These options determine the way floating point numbers, arrays and - other NumPy objects are displayed. 
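# The _mean/_var helpers above cast bool and integer input to float64
# before reducing; this is visible through the public API:
import numpy as np
a = np.arange(10, dtype=np.int32)
print(a.mean())                        # 4.5, accumulated in float64
print(a.mean(dtype=np.float32).dtype)  # float32 once a dtype is forced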
- - Parameters - ---------- - precision : int, optional - Number of digits of precision for floating point output (default 8). - threshold : int, optional - Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems : int, optional - Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth : int, optional - The number of characters per line for the purpose of inserting - line breaks (default 75). - suppress : bool, optional - Whether or not suppress printing of small floating point values - using scientific notation (default False). - nanstr : str, optional - String representation of floating point not-a-number (default nan). - infstr : str, optional - String representation of floating point infinity (default inf). - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - See Also - -------- - get_printoptions, set_string_function, array2string - - Notes - ----- - `formatter` is always reset with a call to `set_printoptions`. - - Examples - -------- - Floating point precision can be set: - - >>> np.set_printoptions(precision=4) - >>> print np.array([1.123456789]) - [ 1.1235] - - Long arrays can be summarised: - - >>> np.set_printoptions(threshold=5) - >>> print np.arange(10) - [0 1 2 ..., 7 8 9] - - Small results can be suppressed: - - >>> eps = np.finfo(float).eps - >>> x = np.arange(4.) - >>> x**2 - (x + eps)**2 - array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) - >>> np.set_printoptions(suppress=True) - >>> x**2 - (x + eps)**2 - array([-0., -0., 0., 0.]) - - A custom formatter can be used to display array elements as desired: - - >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) - >>> x = np.arange(3) - >>> x - array([int: 0, int: -1, int: -2]) - >>> np.set_printoptions() # formatter gets reset - >>> x - array([0, 1, 2]) - - To put back the default options, you can use: - - >>> np.set_printoptions(edgeitems=3,infstr='inf', - ... linewidth=75, nanstr='nan', precision=8, - ... 
suppress=False, threshold=1000, formatter=None) - """ - - global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \ - _line_width, _float_output_suppress_small, _nan_str, _inf_str, \ - _formatter - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - _formatter = formatter - -def get_printoptions(): - """ - Return the current print options. - - Returns - ------- - print_opts : dict - Dictionary of current print options with keys - - - precision : int - - threshold : int - - edgeitems : int - - linewidth : int - - suppress : bool - - nanstr : str - - infstr : str - - formatter : dict of callables - - For a full description of these options, see `set_printoptions`. - - See Also - -------- - set_printoptions, set_string_function - - """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str, - formatter=_formatter) - return d - -def _leading_trailing(a): - from . import numeric as _nc - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems), 0, -1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: - return ' True' - else: - return 'False' - - -def repr_format(x): - return repr(x) - -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix="", formatter=None): - - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if formatter is None: - formatter = _formatter - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(a) - - formatdict = {'bool' : _boolFormatter, - 'int' : IntegerFormat(data), - 'float' : FloatFormat(data, precision, suppress_small), - 'longfloat' : LongFloatFormat(precision), - 'complexfloat' : ComplexFormat(data, precision, - suppress_small), - 'longcomplexfloat' : LongComplexFormat(precision), - 'datetime' : DatetimeFormat(data), - 'timedelta' : TimedeltaFormat(data), - 'numpystr' : repr_format, - 'str' : str} - - if formatter is not None: - fkeys = [k for k in formatter.keys() if formatter[k] is not None] - if 'all' in fkeys: - for key in formatdict.keys(): - formatdict[key] = formatter['all'] - if 'int_kind' in fkeys: - for key in ['int']: - formatdict[key] = formatter['int_kind'] - if 'float_kind' in fkeys: - for key in ['float', 'longfloat']: - formatdict[key] = formatter['float_kind'] - if 'complex_kind' in fkeys: - for key in ['complexfloat', 'longcomplexfloat']: - formatdict[key] = formatter['complex_kind'] - if 'str_kind' in fkeys: - for key in ['numpystr', 'str']: - formatdict[key] = formatter['str_kind'] - 
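# Individual type keys are applied last in the loop below, so a specific
# entry wins over a group key: e.g. formatter={'all': str, 'float': f}
# ends up using f (a hypothetical callable) for float arrays: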
for key in formatdict.keys(): - if key in fkeys: - formatdict[key] = formatter[key] - - try: - format_function = a._format - msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \ - "will be removed in 2.1. Use the `formatter` kw instead." - import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - # find the right formatting function for the array - dtypeobj = a.dtype.type - if issubclass(dtypeobj, _nt.bool_): - format_function = formatdict['bool'] - elif issubclass(dtypeobj, _nt.integer): - if issubclass(dtypeobj, _nt.timedelta64): - format_function = formatdict['timedelta'] - else: - format_function = formatdict['int'] - elif issubclass(dtypeobj, _nt.floating): - if issubclass(dtypeobj, _nt.longfloat): - format_function = formatdict['longfloat'] - else: - format_function = formatdict['float'] - elif issubclass(dtypeobj, _nt.complexfloating): - if issubclass(dtypeobj, _nt.clongfloat): - format_function = formatdict['longcomplexfloat'] - else: - format_function = formatdict['complexfloat'] - elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): - format_function = formatdict['numpystr'] - elif issubclass(dtypeobj, _nt.datetime64): - format_function = formatdict['datetime'] - else: - format_function = formatdict['numpystr'] - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - lst = _formatArray(a, format_function, len(a.shape), max_line_width, - next_line_prefix, separator, - _summaryEdgeItems, summary_insert)[:-1] - return lst - -def _convert_arrays(obj): - from . import numeric as _nc - newtup = [] - for k in obj: - if isinstance(k, _nc.ndarray): - k = k.tolist() - elif isinstance(k, tuple): - k = _convert_arrays(k) - newtup.append(k) - return tuple(newtup) - - -def array2string(a, max_line_width=None, precision=None, - suppress_small=None, separator=' ', prefix="", - style=repr, formatter=None): - """ - Return a string representation of an array. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters splits the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing - precision (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero. A number is "very small" if it - is smaller than the current printing precision. - separator : str, optional - Inserted between elements. - prefix : str, optional - An array is typically printed as:: - - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function, optional - A function that accepts an ndarray and returns a string. Used only - when the shape of `a` is equal to ``()``, i.e. for 0-D arrays. - formatter : dict of callables, optional - If not None, the keys should indicate the type(s) that the respective - formatting function applies to. Callables should return a string. - Types that are not specified (by their corresponding keys) are handled - by the default formatters. 
Individual types for which a formatter - can be set are:: - - - 'bool' - - 'int' - - 'timedelta' : a `numpy.timedelta64` - - 'datetime' : a `numpy.datetime64` - - 'float' - - 'longfloat' : 128-bit floats - - 'complexfloat' - - 'longcomplexfloat' : composed of two 128-bit floats - - 'numpy_str' : types `numpy.string_` and `numpy.unicode_` - - 'str' : all other strings - - Other keys that can be used to set a group of types at once are:: - - - 'all' : sets all types - - 'int_kind' : sets 'int' - - 'float_kind' : sets 'float' and 'longfloat' - - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - - 'str_kind' : sets 'str' and 'numpystr' - - Returns - ------- - array_str : str - String representation of the array. - - Raises - ------ - TypeError - if a callable in `formatter` does not return a string. - - See Also - -------- - array_str, array_repr, set_printoptions, get_printoptions - - Notes - ----- - If a formatter is specified for a certain type, the `precision` keyword is - ignored for that type. - - Examples - -------- - >>> x = np.array([1e-16,1,2,3]) - >>> print np.array2string(x, precision=2, separator=',', - ... suppress_small=True) - [ 0., 1., 2., 3.] - - >>> x = np.arange(3.) - >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) - '[0.00 1.00 2.00]' - - >>> x = np.arange(3) - >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0L 0x1L 0x2L]' - - """ - - if a.shape == (): - x = a.item() - try: - lst = a._format(x) - msg = "The `_format` attribute is deprecated in Numpy " \ - "2.0 and will be removed in 2.1. Use the " \ - "`formatter` kw instead." - import warnings - warnings.warn(msg, DeprecationWarning) - except AttributeError: - if isinstance(x, tuple): - x = _convert_arrays(x) - lst = style(x) - elif reduce(product, a.shape) == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, max_line_width, precision, suppress_small, - separator, prefix, formatter=formatter) - return lst - -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: - s += line.rstrip() + "\n" - line = next_line_prefix - line += word - return s, line - - -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): - """formatArray is designed for two modes of operation: - - 1. Full output - - 2. 
Summarized output - - """ - if rank == 0: - obj = a.item() - if isinstance(obj, tuple): - obj = _convert_arrays(obj) - return str(obj) - - if summary_insert and 2*edge_items < len(a): - leading_items, trailing_items, summary_insert1 = \ - edge_items, edge_items, summary_insert - else: - leading_items, trailing_items, summary_insert1 = 0, len(a), "" - - if rank == 1: - s = "" - line = next_line_prefix - for i in range(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) - - for i in range(trailing_items, 1, -1): - word = format_function(a[-i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - word = format_function(a[-1]) - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - s += line + "]\n" - s = '[' + s[len(next_line_prefix):] - else: - s = '[' - sep = separator.rstrip() - for i in range(leading_items): - if i > 0: - s += next_line_prefix - s += _formatArray(a[i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) - - if summary_insert1: - s += next_line_prefix + summary_insert1 + "\n" - - for i in range(trailing_items, 1, -1): - if leading_items or i != trailing_items: - s += next_line_prefix - s += _formatArray(a[-i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) - if leading_items or trailing_items > 1: - s += next_line_prefix - s += _formatArray(a[-1], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert).rstrip()+']\n' - return s - -class FloatFormat(object): - def __init__(self, data, precision, suppress_small, sign=False): - self.precision = precision - self.suppress_small = suppress_small - self.sign = sign - self.exp_format = False - self.large_exponent = False - self.max_str_len = 0 - try: - self.fillFormat(data) - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. - pass - - def fillFormat(self, data): - from . import numeric as _nc - - with _nc.errstate(all='ignore'): - special = isnan(data) | isinf(data) - valid = not_equal(data, 0) & ~special - non_zero = absolute(data.compress(valid)) - if len(non_zero) == 0: - max_val = 0. - min_val = 0. 
- else: - max_val = maximum.reduce(non_zero) - min_val = minimum.reduce(non_zero) - if max_val >= 1.e8: - self.exp_format = True - if not self.suppress_small and (min_val < 0.0001 - or max_val/min_val > 1000.): - self.exp_format = True - - if self.exp_format: - self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100 - self.max_str_len = 8 + self.precision - if self.large_exponent: - self.max_str_len += 1 - if self.sign: - format = '%+' - else: - format = '%' - format = format + '%d.%de' % (self.max_str_len, self.precision) - else: - format = '%%.%df' % (self.precision,) - if len(non_zero): - precision = max([_digits(x, self.precision, format) - for x in non_zero]) - else: - precision = 0 - precision = min(self.precision, precision) - self.max_str_len = len(str(int(max_val))) + precision + 2 - if _nc.any(special): - self.max_str_len = max(self.max_str_len, - len(_nan_str), - len(_inf_str)+1) - if self.sign: - format = '%#+' - else: - format = '%#' - format = format + '%d.%df' % (self.max_str_len, precision) - - self.special_fmt = '%%%ds' % (self.max_str_len,) - self.format = format - - def __call__(self, x, strip_zeros=True): - from . import numeric as _nc - - with _nc.errstate(invalid='ignore'): - if isnan(x): - if self.sign: - return self.special_fmt % ('+' + _nan_str,) - else: - return self.special_fmt % (_nan_str,) - elif isinf(x): - if x > 0: - if self.sign: - return self.special_fmt % ('+' + _inf_str,) - else: - return self.special_fmt % (_inf_str,) - else: - return self.special_fmt % ('-' + _inf_str,) - - s = self.format % x - if self.large_exponent: - # 3-digit exponent - expsign = s[-3] - if expsign == '+' or expsign == '-': - s = s[1:-2] + '0' + s[-2:] - elif self.exp_format: - # 2-digit exponent - if s[-3] == '0': - s = ' ' + s[:-3] + s[-2:] - elif strip_zeros: - z = s.rstrip('0') - s = z + ' '*(len(s)-len(z)) - return s - - -def _digits(x, precision, format): - s = format % x - z = s.rstrip('0') - return precision - len(s) + len(z) - - -class IntegerFormat(object): - def __init__(self, data): - try: - max_str_len = max(len(str(maximum.reduce(data))), - len(str(minimum.reduce(data)))) - self.format = '%' + str(max_str_len) + 'd' - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. 
- pass - except ValueError: - # this occurs when everything is NA - pass - - def __call__(self, x): - if _MININT < x < _MAXINT: - return self.format % x - else: - return "%s" % x - -class LongFloatFormat(object): - # XXX Have to add something to determine the width to use a la FloatFormat - # Right now, things won't line up properly - def __init__(self, precision, sign=False): - self.precision = precision - self.sign = sign - - def __call__(self, x): - if isnan(x): - if self.sign: - return '+' + _nan_str - else: - return ' ' + _nan_str - elif isinf(x): - if x > 0: - if self.sign: - return '+' + _inf_str - else: - return ' ' + _inf_str - else: - return '-' + _inf_str - elif x >= 0: - if self.sign: - return '+' + format_longfloat(x, self.precision) - else: - return ' ' + format_longfloat(x, self.precision) - else: - return format_longfloat(x, self.precision) - - -class LongComplexFormat(object): - def __init__(self, precision): - self.real_format = LongFloatFormat(precision) - self.imag_format = LongFloatFormat(precision, sign=True) - - def __call__(self, x): - r = self.real_format(x.real) - i = self.imag_format(x.imag) - return r + i + 'j' - - -class ComplexFormat(object): - def __init__(self, x, precision, suppress_small): - self.real_format = FloatFormat(x.real, precision, suppress_small) - self.imag_format = FloatFormat(x.imag, precision, suppress_small, - sign=True) - - def __call__(self, x): - r = self.real_format(x.real, strip_zeros=False) - i = self.imag_format(x.imag, strip_zeros=False) - if not self.imag_format.exp_format: - z = i.rstrip('0') - i = z + 'j' + ' '*(len(i)-len(z)) - else: - i = i + 'j' - return r + i - -class DatetimeFormat(object): - def __init__(self, x, unit=None, - timezone=None, casting='same_kind'): - # Get the unit from the dtype - if unit is None: - if x.dtype.kind == 'M': - unit = datetime_data(x.dtype)[0] - else: - unit = 's' - - # If timezone is default, make it 'local' or 'UTC' based on the unit - if timezone is None: - # Date units -> UTC, time units -> local - if unit in ('Y', 'M', 'W', 'D'): - self.timezone = 'UTC' - else: - self.timezone = 'local' - else: - self.timezone = timezone - self.unit = unit - self.casting = casting - - def __call__(self, x): - return "'%s'" % datetime_as_string(x, - unit=self.unit, - timezone=self.timezone, - casting=self.casting) - -class TimedeltaFormat(object): - def __init__(self, data): - if data.dtype.kind == 'm': - v = data.view('i8') - max_str_len = max(len(str(maximum.reduce(v))), - len(str(minimum.reduce(v)))) - self.format = '%' + str(max_str_len) + 'd' - - def __call__(self, x): - return self.format % x.astype('i8') diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/cversions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/cversions.py deleted file mode 100644 index 7995dd9931e7e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/cversions.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Simple script to compute the api hash of the current API. - -The API has is defined by numpy_api_order and ufunc_api_order. 
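# DatetimeFormat above defaults to UTC for date units ('Y', 'M', 'W',
# 'D') and to local time otherwise; a sketch of the visible effect in
# this vintage of NumPy:
import numpy as np
print(np.array(['2014-10-03'], dtype='datetime64[D]'))
# -> ['2014-10-03']  (date unit: no timezone offset is appended; with a
#    time unit such as 's', the local UTC offset would be shown)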
- -""" -from __future__ import division, absolute_import, print_function - -from os.path import dirname - -from code_generators.genapi import fullapi_hash -from code_generators.numpy_api import full_api - -if __name__ == '__main__': - curdir = dirname(__file__) - print(fullapi_hash(full_api)) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/defchararray.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/defchararray.py deleted file mode 100644 index 121e323147bd4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/defchararray.py +++ /dev/null @@ -1,2687 +0,0 @@ -""" -This module contains a set of functions for vectorized string -operations and methods. - -.. note:: - The `chararray` class exists for backwards compatibility with - Numarray, it is not recommended for new development. Starting from numpy - 1.4, if one needs arrays of strings, it is recommended to use arrays of - `dtype` `object_`, `string_` or `unicode_`, and use the free functions - in the `numpy.char` module for fast vectorized string operations. - -Some methods will only be available if the corresponding string method is -available in your version of Python. - -The preferred alias for `defchararray` is `numpy.char`. - -""" -from __future__ import division, absolute_import, print_function - -import sys -from .numerictypes import string_, unicode_, integer, object_, bool_, character -from .numeric import ndarray, compare_chararrays -from .numeric import array as narray -from numpy.core.multiarray import _vec_string -from numpy.compat import asbytes, long -import numpy - -__all__ = ['chararray', - 'equal', 'not_equal', 'greater_equal', 'less_equal', 'greater', 'less', - 'str_len', 'add', 'multiply', 'mod', 'capitalize', 'center', 'count', - 'decode', 'encode', 'endswith', 'expandtabs', 'find', 'format', - 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', - 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', - 'partition', 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', - 'rsplit', 'rstrip', 'split', 'splitlines', 'startswith', 'strip', - 'swapcase', 'title', 'translate', 'upper', 'zfill', - 'isnumeric', 'isdecimal', - 'array', 'asarray'] - -_globalvar = 0 -if sys.version_info[0] >= 3: - _unicode = str - _bytes = bytes -else: - _unicode = unicode - _bytes = str -_len = len - -def _use_unicode(*args): - """ - Helper function for determining the output type of some string - operations. - - For an operation on two ndarrays, if at least one is unicode, the - result should be unicode. - """ - for x in args: - if (isinstance(x, _unicode) - or issubclass(numpy.asarray(x).dtype.type, unicode_)): - return unicode_ - return string_ - -def _to_string_or_unicode_array(result): - """ - Helper function to cast a result back into a string or unicode array - if an object array must be used as an intermediary. - """ - return numpy.asarray(result.tolist()) - -def _clean_args(*args): - """ - Helper function for delegating arguments to Python string - functions. - - Many of the Python string operations that have optional arguments - do not use 'None' to indicate a default value. In these cases, - we need to remove all `None` arguments, and those following them. - """ - newargs = [] - for chk in args: - if chk is None: - break - newargs.append(chk) - return newargs - -def _get_num_chars(a): - """ - Helper function that returns the number of characters per field in - a string or unicode array. This is to abstract out the fact that - for a unicode array this is itemsize / 4. 
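# As noted above, unicode fields store UCS-4, so characters-per-field is
# itemsize // 4; a quick check:
import numpy as np
a = np.array(['abc'], dtype='U3')
print(a.dtype.itemsize)         # 12 bytes
print(a.dtype.itemsize // 4)    # 3 characters per element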
- """ - if issubclass(a.dtype.type, unicode_): - return a.itemsize // 4 - return a.itemsize - - -def equal(x1, x2): - """ - Return (x1 == x2) element-wise. - - Unlike `numpy.equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - not_equal, greater_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '==', True) - -def not_equal(x1, x2): - """ - Return (x1 != x2) element-wise. - - Unlike `numpy.not_equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, greater_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '!=', True) - -def greater_equal(x1, x2): - """ - Return (x1 >= x2) element-wise. - - Unlike `numpy.greater_equal`, this comparison is performed by - first stripping whitespace characters from the end of the string. - This behavior is provided for backward-compatibility with - numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, less_equal, greater, less - """ - return compare_chararrays(x1, x2, '>=', True) - -def less_equal(x1, x2): - """ - Return (x1 <= x2) element-wise. - - Unlike `numpy.less_equal`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, greater, less - """ - return compare_chararrays(x1, x2, '<=', True) - -def greater(x1, x2): - """ - Return (x1 > x2) element-wise. - - Unlike `numpy.greater`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. - - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, less_equal, less - """ - return compare_chararrays(x1, x2, '>', True) - -def less(x1, x2): - """ - Return (x1 < x2) element-wise. - - Unlike `numpy.greater`, this comparison is performed by first - stripping whitespace characters from the end of the string. This - behavior is provided for backward-compatibility with numarray. - - Parameters - ---------- - x1, x2 : array_like of str or unicode - Input arrays of the same shape. 
- - Returns - ------- - out : {ndarray, bool} - Output array of bools, or a single bool if x1 and x2 are scalars. - - See Also - -------- - equal, not_equal, greater_equal, less_equal, greater - """ - return compare_chararrays(x1, x2, '<', True) - -def str_len(a): - """ - Return len(a) element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of integers - - See also - -------- - __builtin__.len - """ - return _vec_string(a, integer, '__len__') - -def add(x1, x2): - """ - Return element-wise string concatenation for two arrays of str or unicode. - - Arrays `x1` and `x2` must have the same shape. - - Parameters - ---------- - x1 : array_like of str or unicode - Input array. - x2 : array_like of str or unicode - Input array. - - Returns - ------- - add : ndarray - Output array of `string_` or `unicode_`, depending on input types - of the same shape as `x1` and `x2`. - - """ - arr1 = numpy.asarray(x1) - arr2 = numpy.asarray(x2) - out_size = _get_num_chars(arr1) + _get_num_chars(arr2) - dtype = _use_unicode(arr1, arr2) - return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,)) - -def multiply(a, i): - """ - Return (a * i), that is string multiple concatenation, - element-wise. - - Values in `i` of less than 0 are treated as 0 (which yields an - empty string). - - Parameters - ---------- - a : array_like of str or unicode - - i : array_like of ints - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - """ - a_arr = numpy.asarray(a) - i_arr = numpy.asarray(i) - if not issubclass(i_arr.dtype.type, integer): - raise ValueError("Can only multiply by integers") - out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0) - return _vec_string( - a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,)) - -def mod(a, values): - """ - Return (a % i), that is pre-Python 2.6 string formatting - (iterpolation), element-wise for a pair of array_likes of str - or unicode. - - Parameters - ---------- - a : array_like of str or unicode - - values : array_like of values - These values will be element-wise interpolated into the string. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - See also - -------- - str.__mod__ - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, '__mod__', (values,))) - -def capitalize(a): - """ - Return a copy of `a` with only the first character of each element - capitalized. - - Calls `str.capitalize` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - Input array of strings to capitalize. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input - types - - See also - -------- - str.capitalize - - Examples - -------- - >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c - array(['a1b2', '1b2a', 'b2a1', '2a1b'], - dtype='|S4') - >>> np.char.capitalize(c) - array(['A1b2', '1b2a', 'B2a1', '2a1b'], - dtype='|S4') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'capitalize') - - -def center(a, width, fillchar=' '): - """ - Return a copy of `a` with its elements centered in a string of - length `width`. - - Calls `str.center` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The padding character to use (default is space). 
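(Editor's note: a hypothetical session for the `add` and `multiply` helpers defined above; inputs invented, output itemsizes following the out_size arithmetic in their bodies:

    >>> np.char.add(['hello '], ['world'])
    array(['hello world'],
          dtype='|S11')
    >>> np.char.multiply(['ab'], 3)
    array(['ababab'],
          dtype='|S6')
)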
- - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input - types - - See also - -------- - str.center - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar)) - - -def count(a, sub, start=0, end=None): - """ - Returns an array with the number of non-overlapping occurrences of - substring `sub` in the range [`start`, `end`]. - - Calls `str.count` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - The substring to search for. - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as slice - notation to specify the range in which to count. - - Returns - ------- - out : ndarray - Output array of ints. - - See also - -------- - str.count - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - >>> np.char.count(c, 'A') - array([3, 1, 1]) - >>> np.char.count(c, 'aA') - array([3, 1, 0]) - >>> np.char.count(c, 'A', start=1, end=4) - array([2, 1, 1]) - >>> np.char.count(c, 'A', start=1, end=3) - array([1, 0, 0]) - - """ - return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end)) - - -def decode(a, encoding=None, errors=None): - """ - Calls `str.decode` element-wise. - - The set of available codecs comes from the Python standard library, - and may be extended at runtime. For more information, see the - :mod:`codecs` module. - - Parameters - ---------- - a : array_like of str or unicode - - encoding : str, optional - The name of an encoding - - errors : str, optional - Specifies how to handle encoding errors - - Returns - ------- - out : ndarray - - See also - -------- - str.decode - - Notes - ----- - The type of the result will depend on the encoding specified. - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - >>> np.char.encode(c, encoding='cp037') - array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@', - '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'], - dtype='|S7') - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'decode', _clean_args(encoding, errors))) - - -def encode(a, encoding=None, errors=None): - """ - Calls `str.encode` element-wise. - - The set of available codecs comes from the Python standard library, - and may be extended at runtime. For more information, see the codecs - module. - - Parameters - ---------- - a : array_like of str or unicode - - encoding : str, optional - The name of an encoding - - errors : str, optional - Specifies how to handle encoding errors - - Returns - ------- - out : ndarray - - See also - -------- - str.encode - - Notes - ----- - The type of the result will depend on the encoding specified. - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'encode', _clean_args(encoding, errors))) - - -def endswith(a, suffix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `a` ends with `suffix`, otherwise `False`. - - Calls `str.endswith` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - suffix : str - - start, end : int, optional - With optional `start`, test beginning at that position. 
With - optional `end`, stop comparing at that position. - - Returns - ------- - out : ndarray - Outputs an array of bools. - - See also - -------- - str.endswith - - Examples - -------- - >>> s = np.array(['foo', 'bar']) - >>> s[0] = 'foo' - >>> s[1] = 'bar' - >>> s - array(['foo', 'bar'], - dtype='|S3') - >>> np.char.endswith(s, 'ar') - array([False, True], dtype=bool) - >>> np.char.endswith(s, 'a', start=1, end=2) - array([False, True], dtype=bool) - - """ - return _vec_string( - a, bool_, 'endswith', [suffix, start] + _clean_args(end)) - - -def expandtabs(a, tabsize=8): - """ - Return a copy of each string element where all tab characters are - replaced by one or more spaces. - - Calls `str.expandtabs` element-wise. - - Return a copy of each string element where all tab characters are - replaced by one or more spaces, depending on the current column - and the given `tabsize`. The column number is reset to zero after - each newline occurring in the string. This doesn't understand other - non-printing characters or escape sequences. - - Parameters - ---------- - a : array_like of str or unicode - Input array - tabsize : int, optional - Replace tabs with `tabsize` number of spaces. If not given defaults - to 8 spaces. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.expandtabs - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'expandtabs', (tabsize,))) - - -def find(a, sub, start=0, end=None): - """ - For each element, return the lowest index in the string where - substring `sub` is found. - - Calls `str.find` element-wise. - - For each element, return the lowest index in the string where - substring `sub` is found, such that `sub` is contained in the - range [`start`, `end`]. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as in - slice notation. - - Returns - ------- - out : ndarray or int - Output array of ints. Returns -1 if `sub` is not found. - - See also - -------- - str.find - - """ - return _vec_string( - a, integer, 'find', [sub, start] + _clean_args(end)) - - -def index(a, sub, start=0, end=None): - """ - Like `find`, but raises `ValueError` when the substring is not found. - - Calls `str.index` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - sub : str or unicode - - start, end : int, optional - - Returns - ------- - out : ndarray - Output array of ints. Returns -1 if `sub` is not found. - - See also - -------- - find, str.find - - """ - return _vec_string( - a, integer, 'index', [sub, start] + _clean_args(end)) - -def isalnum(a): - """ - Returns true for each element if all characters in the string are - alphanumeric and there is at least one character, false otherwise. - - Calls `str.isalnum` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.isalnum - """ - return _vec_string(a, bool_, 'isalnum') - -def isalpha(a): - """ - Returns true for each element if all characters in the string are - alphabetic and there is at least one character, false otherwise. - - Calls `str.isalpha` element-wise. - - For 8-bit strings, this method is locale-dependent. 
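(Editor's note: a hypothetical session for `find` above, showing the slice-notation `start` argument; inputs invented for illustration:

    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> np.char.find(c, 'A')
    array([1, 3, 3])
    >>> np.char.find(c, 'A', start=4)
    array([ 5, -1, -1])
)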
- - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isalpha - """ - return _vec_string(a, bool_, 'isalpha') - -def isdigit(a): - """ - Returns true for each element if all characters in the string are - digits and there is at least one character, false otherwise. - - Calls `str.isdigit` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isdigit - """ - return _vec_string(a, bool_, 'isdigit') - -def islower(a): - """ - Returns true for each element if all cased characters in the - string are lowercase and there is at least one cased character, - false otherwise. - - Calls `str.islower` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.islower - """ - return _vec_string(a, bool_, 'islower') - -def isspace(a): - """ - Returns true for each element if there are only whitespace - characters in the string and there is at least one character, - false otherwise. - - Calls `str.isspace` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isspace - """ - return _vec_string(a, bool_, 'isspace') - -def istitle(a): - """ - Returns true for each element if the element is a titlecased - string and there is at least one character, false otherwise. - - Call `str.istitle` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.istitle - """ - return _vec_string(a, bool_, 'istitle') - -def isupper(a): - """ - Returns true for each element if all cased characters in the - string are uppercase and there is at least one character, false - otherwise. - - Call `str.isupper` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of bools - - See also - -------- - str.isupper - """ - return _vec_string(a, bool_, 'isupper') - -def join(sep, seq): - """ - Return a string which is the concatenation of the strings in the - sequence `seq`. - - Calls `str.join` element-wise. - - Parameters - ---------- - sep : array_like of str or unicode - seq : array_like of str or unicode - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input types - - See also - -------- - str.join - """ - return _to_string_or_unicode_array( - _vec_string(sep, object_, 'join', (seq,))) - - -def ljust(a, width, fillchar=' '): - """ - Return an array with the elements of `a` left-justified in a - string of length `width`. - - Calls `str.ljust` element-wise. 
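(Editor's note: a hypothetical session for `join` above; the separator is joined between the characters of each element, inputs invented:

    >>> np.char.join('-', ['abc', 'de'])
    array(['a-b-c', 'd-e'],
          dtype='|S5')
)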
- - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The character to use for padding - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.ljust - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar)) - - -def lower(a): - """ - Return an array with the elements converted to lowercase. - - Call `str.lower` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like, {str, unicode} - Input array. - - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type - - See also - -------- - str.lower - - Examples - -------- - >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c - array(['A1B C', '1BCA', 'BCA1'], - dtype='|S5') - >>> np.char.lower(c) - array(['a1b c', '1bca', 'bca1'], - dtype='|S5') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'lower') - - -def lstrip(a, chars=None): - """ - For each element in `a`, return a copy with the leading characters - removed. - - Calls `str.lstrip` element-wise. - - Parameters - ---------- - a : array-like, {str, unicode} - Input array. - - chars : {str, unicode}, optional - The `chars` argument is a string specifying the set of - characters to be removed. If omitted or None, the `chars` - argument defaults to removing whitespace. The `chars` argument - is not a prefix; rather, all combinations of its values are - stripped. - - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type - - See also - -------- - str.lstrip - - Examples - -------- - >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) - >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - - The 'a' variable is unstripped from c[1] because whitespace leading. - - >>> np.char.lstrip(c, 'a') - array(['AaAaA', ' aA ', 'bBABba'], - dtype='|S7') - - - >>> np.char.lstrip(c, 'A') # leaves c unchanged - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') - >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all() - ... # XXX: is this a regression? this line now returns False - ... # np.char.lstrip(c,'') does not modify c at all. - True - >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all() - True - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,)) - - -def partition(a, sep): - """ - Partition each element in `a` around `sep`. - - Calls `str.partition` element-wise. - - For each element in `a`, split the element as the first - occurrence of `sep`, and return 3 strings containing the part - before the separator, the separator itself, and the part after - the separator. If the separator is not found, return 3 strings - containing the string itself, followed by two empty strings. - - Parameters - ---------- - a : array_like, {str, unicode} - Input array - sep : {str, unicode} - Separator to split each string element in `a`. - - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type. - The output array will have an extra dimension with 3 - elements per input element. 
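(Editor's note: to make the extra trailing dimension of `partition` concrete, a hypothetical session with an invented input:

    >>> np.char.partition(np.array(['aAaAaA']), 'A')
    array([['a', 'A', 'aAaA']],
          dtype='|S4')

Each input element becomes a (before, separator, after) triple along the new last axis.)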
- - See also - -------- - str.partition - - """ - return _to_string_or_unicode_array( - _vec_string(a, object_, 'partition', (sep,))) - - -def replace(a, old, new, count=None): - """ - For each element in `a`, return a copy of the string with all - occurrences of substring `old` replaced by `new`. - - Calls `str.replace` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - old, new : str or unicode - - count : int, optional - If the optional argument `count` is given, only the first - `count` occurrences are replaced. - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.replace - - """ - return _to_string_or_unicode_array( - _vec_string( - a, object_, 'replace', [old, new] +_clean_args(count))) - - -def rfind(a, sub, start=0, end=None): - """ - For each element in `a`, return the highest index in the string - where substring `sub` is found, such that `sub` is contained - within [`start`, `end`]. - - Calls `str.rfind` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - sub : str or unicode - - start, end : int, optional - Optional arguments `start` and `end` are interpreted as in - slice notation. - - Returns - ------- - out : ndarray - Output array of ints. Return -1 on failure. - - See also - -------- - str.rfind - - """ - return _vec_string( - a, integer, 'rfind', [sub, start] + _clean_args(end)) - - -def rindex(a, sub, start=0, end=None): - """ - Like `rfind`, but raises `ValueError` when the substring `sub` is - not found. - - Calls `str.rindex` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - sub : str or unicode - - start, end : int, optional - - Returns - ------- - out : ndarray - Output array of ints. - - See also - -------- - rfind, str.rindex - - """ - return _vec_string( - a, integer, 'rindex', [sub, start] + _clean_args(end)) - - -def rjust(a, width, fillchar=' '): - """ - Return an array with the elements of `a` right-justified in a - string of length `width`. - - Calls `str.rjust` element-wise. - - Parameters - ---------- - a : array_like of str or unicode - - width : int - The length of the resulting strings - fillchar : str or unicode, optional - The character to use for padding - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.rjust - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - if numpy.issubdtype(a_arr.dtype, numpy.string_): - fillchar = asbytes(fillchar) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar)) - - -def rpartition(a, sep): - """ - Partition (split) each element around the right-most separator. - - Calls `str.rpartition` element-wise. - - For each element in `a`, split the element as the last - occurrence of `sep`, and return 3 strings containing the part - before the separator, the separator itself, and the part after - the separator. If the separator is not found, return 3 strings - containing the string itself, followed by two empty strings. - - Parameters - ---------- - a : array_like of str or unicode - Input array - sep : str or unicode - Right-most separator to split each element in array. - - Returns - ------- - out : ndarray - Output array of string or unicode, depending on input - type. The output array will have an extra dimension with - 3 elements per input element. 
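(Editor's note: a hypothetical session for `replace` above, including the optional `count` limit; inputs invented:

    >>> c = np.array(['aAaAaA'])
    >>> np.char.replace(c, 'A', '_')
    array(['a_a_a_'],
          dtype='|S6')
    >>> np.char.replace(c, 'A', '_', count=1)
    array(['a_aAaA'],
          dtype='|S6')
)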
-
-    See also
-    --------
-    str.rpartition
-
-    """
-    return _to_string_or_unicode_array(
-        _vec_string(a, object_, 'rpartition', (sep,)))
-
-
-def rsplit(a, sep=None, maxsplit=None):
-    """
-    For each element in `a`, return a list of the words in the
-    string, using `sep` as the delimiter string.
-
-    Calls `str.rsplit` element-wise.
-
-    Except for splitting from the right, `rsplit`
-    behaves like `split`.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sep : str or unicode, optional
-        If `sep` is not specified or `None`, any whitespace string
-        is a separator.
-    maxsplit : int, optional
-        If `maxsplit` is given, at most `maxsplit` splits are done,
-        the rightmost ones.
-
-    Returns
-    -------
-    out : ndarray
-        Array of list objects
-
-    See also
-    --------
-    str.rsplit, split
-
-    """
-    # This will return an array of lists of different sizes, so we
-    # leave it as an object array
-    return _vec_string(
-        a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
-
-
-def rstrip(a, chars=None):
-    """
-    For each element in `a`, return a copy with the trailing
-    characters removed.
-
-    Calls `str.rstrip` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    chars : str or unicode, optional
-        The `chars` argument is a string specifying the set of
-        characters to be removed. If omitted or None, the `chars`
-        argument defaults to removing whitespace. The `chars` argument
-        is not a suffix; rather, all combinations of its values are
-        stripped.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.rstrip
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
-    array(['aAaAaA', 'abBABba'],
-          dtype='|S7')
-    >>> np.char.rstrip(c, 'a')
-    array(['aAaAaA', 'abBABb'],
-          dtype='|S7')
-    >>> np.char.rstrip(c, 'A')
-    array(['aAaAa', 'abBABba'],
-          dtype='|S7')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
-
-
-def split(a, sep=None, maxsplit=None):
-    """
-    For each element in `a`, return a list of the words in the
-    string, using `sep` as the delimiter string.
-
-    Calls `str.split` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    sep : str or unicode, optional
-        If `sep` is not specified or `None`, any whitespace string is a
-        separator.
-
-    maxsplit : int, optional
-        If `maxsplit` is given, at most `maxsplit` splits are done.
-
-    Returns
-    -------
-    out : ndarray
-        Array of list objects
-
-    See also
-    --------
-    str.split, rsplit
-
-    """
-    # This will return an array of lists of different sizes, so we
-    # leave it as an object array
-    return _vec_string(
-        a, object_, 'split', [sep] + _clean_args(maxsplit))
-
-
-def splitlines(a, keepends=None):
-    """
-    For each element in `a`, return a list of the lines in the
-    element, breaking at line boundaries.
-
-    Calls `str.splitlines` element-wise.
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    keepends : bool, optional
-        Line breaks are not included in the resulting list unless
-        keepends is given and true.
-
-    Returns
-    -------
-    out : ndarray
-        Array of list objects
-
-    See also
-    --------
-    str.splitlines
-
-    """
-    return _vec_string(
-        a, object_, 'splitlines', _clean_args(keepends))
-
-
-def startswith(a, prefix, start=0, end=None):
-    """
-    Returns a boolean array which is `True` where the string element
-    in `a` starts with `prefix`, otherwise `False`.
-
-    Calls `str.startswith` element-wise.
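(Editor's note: since elements may split into different numbers of words, `split` above returns an object array of Python lists; a hypothetical session with invented inputs:

    >>> np.char.split(np.array(['a b', 'c d e']))
    array([['a', 'b'], ['c', 'd', 'e']], dtype=object)
)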
-
-    Parameters
-    ----------
-    a : array_like of str or unicode
-
-    prefix : str
-
-    start, end : int, optional
-        With optional `start`, test beginning at that position. With
-        optional `end`, stop comparing at that position.
-
-    Returns
-    -------
-    out : ndarray
-        Array of booleans
-
-    See also
-    --------
-    str.startswith
-
-    """
-    return _vec_string(
-        a, bool_, 'startswith', [prefix, start] + _clean_args(end))
-
-
-def strip(a, chars=None):
-    """
-    For each element in `a`, return a copy with the leading and
-    trailing characters removed.
-
-    Calls `str.strip` element-wise.
-
-    Parameters
-    ----------
-    a : array-like of str or unicode
-
-    chars : str or unicode, optional
-        The `chars` argument is a string specifying the set of
-        characters to be removed. If omitted or None, the `chars`
-        argument defaults to removing whitespace. The `chars` argument
-        is not a prefix or suffix; rather, all combinations of its
-        values are stripped.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.strip
-
-    Examples
-    --------
-    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
-    >>> c
-    array(['aAaAaA', '  aA  ', 'abBABba'],
-          dtype='|S7')
-    >>> np.char.strip(c)
-    array(['aAaAaA', 'aA', 'abBABba'],
-          dtype='|S7')
-    >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
-    array(['AaAaA', '  aA  ', 'bBABb'],
-          dtype='|S7')
-    >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
-    array(['aAaAa', '  aA  ', 'abBABba'],
-          dtype='|S7')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
-
-
-def swapcase(a):
-    """
-    Return element-wise a copy of the string with
-    uppercase characters converted to lowercase and vice versa.
-
-    Calls `str.swapcase` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray, {str, unicode}
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.swapcase
-
-    Examples
-    --------
-    >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
-    array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
-          dtype='|S5')
-    >>> np.char.swapcase(c)
-    array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
-          dtype='|S5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'swapcase')
-
-
-def title(a):
-    """
-    Return element-wise title cased version of string or unicode.
-
-    Title case words start with uppercase characters; all remaining cased
-    characters are lowercase.
-
-    Calls `str.title` element-wise.
-
-    For 8-bit strings, this method is locale-dependent.
-
-    Parameters
-    ----------
-    a : array_like, {str, unicode}
-        Input array.
-
-    Returns
-    -------
-    out : ndarray
-        Output array of str or unicode, depending on input type
-
-    See also
-    --------
-    str.title
-
-    Examples
-    --------
-    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
-    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
-          dtype='|S5')
-    >>> np.char.title(c)
-    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
-          dtype='|S5')
-
-    """
-    a_arr = numpy.asarray(a)
-    return _vec_string(a_arr, a_arr.dtype, 'title')
-
-
-def translate(a, table, deletechars=None):
-    """
-    For each element in `a`, return a copy of the string where all
-    characters occurring in the optional argument `deletechars` are
-    removed, and the remaining characters have been mapped through the
-    given translation table.
- - Calls `str.translate` element-wise. - - Parameters - ---------- - a : array-like of str or unicode - - table : str of length 256 - - deletechars : str - - Returns - ------- - out : ndarray - Output array of str or unicode, depending on input type - - See also - -------- - str.translate - - """ - a_arr = numpy.asarray(a) - if issubclass(a_arr.dtype.type, unicode_): - return _vec_string( - a_arr, a_arr.dtype, 'translate', (table,)) - else: - return _vec_string( - a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars)) - - -def upper(a): - """ - Return an array with the elements converted to uppercase. - - Calls `str.upper` element-wise. - - For 8-bit strings, this method is locale-dependent. - - Parameters - ---------- - a : array_like, {str, unicode} - Input array. - - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type - - See also - -------- - str.upper - - Examples - -------- - >>> c = np.array(['a1b c', '1bca', 'bca1']); c - array(['a1b c', '1bca', 'bca1'], - dtype='|S5') - >>> np.char.upper(c) - array(['A1B C', '1BCA', 'BCA1'], - dtype='|S5') - - """ - a_arr = numpy.asarray(a) - return _vec_string(a_arr, a_arr.dtype, 'upper') - - -def zfill(a, width): - """ - Return the numeric string left-filled with zeros - - Calls `str.zfill` element-wise. - - Parameters - ---------- - a : array_like, {str, unicode} - Input array. - width : int - Width of string to left-fill elements in `a`. - - Returns - ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type - - See also - -------- - str.zfill - - """ - a_arr = numpy.asarray(a) - width_arr = numpy.asarray(width) - size = long(numpy.max(width_arr.flat)) - return _vec_string( - a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,)) - - -def isnumeric(a): - """ - For each element, return True if there are only numeric - characters in the element. - - Calls `unicode.isnumeric` element-wise. - - Numeric characters include digit characters, and all characters - that have the Unicode numeric value property, e.g. ``U+2155, - VULGAR FRACTION ONE FIFTH``. - - Parameters - ---------- - a : array_like, unicode - Input array. - - Returns - ------- - out : ndarray, bool - Array of booleans of same shape as `a`. - - See also - -------- - unicode.isnumeric - - """ - if _use_unicode(a) != unicode_: - raise TypeError("isnumeric is only available for Unicode strings and arrays") - return _vec_string(a, bool_, 'isnumeric') - - -def isdecimal(a): - """ - For each element, return True if there are only decimal - characters in the element. - - Calls `unicode.isdecimal` element-wise. - - Decimal characters include digit characters, and all characters - that that can be used to form decimal-radix numbers, - e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``. - - Parameters - ---------- - a : array_like, unicode - Input array. - - Returns - ------- - out : ndarray, bool - Array of booleans identical in shape to `a`. - - See also - -------- - unicode.isdecimal - - """ - if _use_unicode(a) != unicode_: - raise TypeError("isnumeric is only available for Unicode strings and arrays") - return _vec_string(a, bool_, 'isdecimal') - - -class chararray(ndarray): - """ - chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0, - strides=None, order=None) - - Provides a convenient view on arrays of string and unicode values. - - .. note:: - The `chararray` class exists for backwards compatibility with - Numarray, it is not recommended for new development. 
Starting from numpy - 1.4, if one needs arrays of strings, it is recommended to use arrays of - `dtype` `object_`, `string_` or `unicode_`, and use the free functions - in the `numpy.char` module for fast vectorized string operations. - - Versus a regular Numpy array of type `str` or `unicode`, this - class adds the following functionality: - - 1) values automatically have whitespace removed from the end - when indexed - - 2) comparison operators automatically remove whitespace from the - end when comparing values - - 3) vectorized string operations are provided as methods - (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``) - - chararrays should be created using `numpy.char.array` or - `numpy.char.asarray`, rather than this constructor directly. - - This constructor creates the array, using `buffer` (with `offset` - and `strides`) if it is not ``None``. If `buffer` is ``None``, then - constructs a new array with `strides` in "C order", unless both - ``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides` - is in "Fortran order". - - Methods - ------- - astype - argsort - copy - count - decode - dump - dumps - encode - endswith - expandtabs - fill - find - flatten - getfield - index - isalnum - isalpha - isdecimal - isdigit - islower - isnumeric - isspace - istitle - isupper - item - join - ljust - lower - lstrip - nonzero - put - ravel - repeat - replace - reshape - resize - rfind - rindex - rjust - rsplit - rstrip - searchsorted - setfield - setflags - sort - split - splitlines - squeeze - startswith - strip - swapaxes - swapcase - take - title - tofile - tolist - tostring - translate - transpose - upper - view - zfill - - Parameters - ---------- - shape : tuple - Shape of the array. - itemsize : int, optional - Length of each array element, in number of characters. Default is 1. - unicode : bool, optional - Are the array elements of type unicode (True) or string (False). - Default is False. - buffer : int, optional - Memory address of the start of the array data. Default is None, - in which case a new array is created. - offset : int, optional - Fixed stride displacement from the beginning of an axis? - Default is 0. Needs to be >=0. - strides : array_like of ints, optional - Strides for the array (see `ndarray.strides` for full description). - Default is None. - order : {'C', 'F'}, optional - The order in which the array data is stored in memory: 'C' -> - "row major" order (the default), 'F' -> "column major" - (Fortran) order. - - Examples - -------- - >>> charar = np.chararray((3, 3)) - >>> charar[:] = 'a' - >>> charar - chararray([['a', 'a', 'a'], - ['a', 'a', 'a'], - ['a', 'a', 'a']], - dtype='|S1') - - >>> charar = np.chararray(charar.shape, itemsize=5) - >>> charar[:] = 'abc' - >>> charar - chararray([['abc', 'abc', 'abc'], - ['abc', 'abc', 'abc'], - ['abc', 'abc', 'abc']], - dtype='|S5') - - """ - def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, - offset=0, strides=None, order='C'): - global _globalvar - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - # force itemsize to be a Python long, since using Numpy integer - # types results in itemsize.itemsize being used as the size of - # strings in the new array. 
- itemsize = long(itemsize) - - if sys.version_info[0] >= 3 and isinstance(buffer, _unicode): - # On Py3, unicode objects do not have the buffer interface - filler = buffer - buffer = None - else: - filler = None - - _globalvar = 1 - if buffer is None: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - order=order) - else: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), - buffer=buffer, - offset=offset, strides=strides, - order=order) - if filler is not None: - self[...] = filler - _globalvar = 0 - return self - - def __array_finalize__(self, obj): - # The b is a special case because it is used for reconstructing. - if not _globalvar and self.dtype.char not in 'SUbc': - raise ValueError("Can only create a chararray from string data.") - - def __getitem__(self, obj): - val = ndarray.__getitem__(self, obj) - if issubclass(val.dtype.type, character) and not _len(val) == 0: - temp = val.rstrip() - if _len(temp) == 0: - val = '' - else: - val = temp - return val - - # IMPLEMENTATION NOTE: Most of the methods of this class are - # direct delegations to the free functions in this module. - # However, those that return an array of strings should instead - # return a chararray, so some extra wrapping is required. - - def __eq__(self, other): - """ - Return (self == other) element-wise. - - See also - -------- - equal - """ - return equal(self, other) - - def __ne__(self, other): - """ - Return (self != other) element-wise. - - See also - -------- - not_equal - """ - return not_equal(self, other) - - def __ge__(self, other): - """ - Return (self >= other) element-wise. - - See also - -------- - greater_equal - """ - return greater_equal(self, other) - - def __le__(self, other): - """ - Return (self <= other) element-wise. - - See also - -------- - less_equal - """ - return less_equal(self, other) - - def __gt__(self, other): - """ - Return (self > other) element-wise. - - See also - -------- - greater - """ - return greater(self, other) - - def __lt__(self, other): - """ - Return (self < other) element-wise. - - See also - -------- - less - """ - return less(self, other) - - def __add__(self, other): - """ - Return (self + other), that is string concatenation, - element-wise for a pair of array_likes of str or unicode. - - See also - -------- - add - """ - return asarray(add(self, other)) - - def __radd__(self, other): - """ - Return (other + self), that is string concatenation, - element-wise for a pair of array_likes of `string_` or `unicode_`. - - See also - -------- - add - """ - return asarray(add(numpy.asarray(other), self)) - - def __mul__(self, i): - """ - Return (self * i), that is string multiple concatenation, - element-wise. - - See also - -------- - multiply - """ - return asarray(multiply(self, i)) - - def __rmul__(self, i): - """ - Return (self * i), that is string multiple concatenation, - element-wise. - - See also - -------- - multiply - """ - return asarray(multiply(self, i)) - - def __mod__(self, i): - """ - Return (self % i), that is pre-Python 2.6 string formatting - (iterpolation), element-wise for a pair of array_likes of `string_` - or `unicode_`. - - See also - -------- - mod - """ - return asarray(mod(self, i)) - - def __rmod__(self, other): - return NotImplemented - - def argsort(self, axis=-1, kind='quicksort', order=None): - """ - Return the indices that sort the array lexicographically. - - For full documentation see `numpy.argsort`, for which this method is - in fact merely a "thin wrapper." 
- - Examples - -------- - >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') - >>> c = c.view(np.chararray); c - chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], - dtype='|S5') - >>> c[c.argsort()] - chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], - dtype='|S5') - - """ - return self.__array__().argsort(axis, kind, order) - argsort.__doc__ = ndarray.argsort.__doc__ - - def capitalize(self): - """ - Return a copy of `self` with only the first character of each element - capitalized. - - See also - -------- - char.capitalize - - """ - return asarray(capitalize(self)) - - def center(self, width, fillchar=' '): - """ - Return a copy of `self` with its elements centered in a - string of length `width`. - - See also - -------- - center - """ - return asarray(center(self, width, fillchar)) - - def count(self, sub, start=0, end=None): - """ - Returns an array with the number of non-overlapping occurrences of - substring `sub` in the range [`start`, `end`]. - - See also - -------- - char.count - - """ - return count(self, sub, start, end) - - - def decode(self, encoding=None, errors=None): - """ - Calls `str.decode` element-wise. - - See also - -------- - char.decode - - """ - return decode(self, encoding, errors) - - def encode(self, encoding=None, errors=None): - """ - Calls `str.encode` element-wise. - - See also - -------- - char.encode - - """ - return encode(self, encoding, errors) - - def endswith(self, suffix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `self` ends with `suffix`, otherwise `False`. - - See also - -------- - char.endswith - - """ - return endswith(self, suffix, start, end) - - def expandtabs(self, tabsize=8): - """ - Return a copy of each string element where all tab characters are - replaced by one or more spaces. - - See also - -------- - char.expandtabs - - """ - return asarray(expandtabs(self, tabsize)) - - def find(self, sub, start=0, end=None): - """ - For each element, return the lowest index in the string where - substring `sub` is found. - - See also - -------- - char.find - - """ - return find(self, sub, start, end) - - def index(self, sub, start=0, end=None): - """ - Like `find`, but raises `ValueError` when the substring is not found. - - See also - -------- - char.index - - """ - return index(self, sub, start, end) - - def isalnum(self): - """ - Returns true for each element if all characters in the string - are alphanumeric and there is at least one character, false - otherwise. - - See also - -------- - char.isalnum - - """ - return isalnum(self) - - def isalpha(self): - """ - Returns true for each element if all characters in the string - are alphabetic and there is at least one character, false - otherwise. - - See also - -------- - char.isalpha - - """ - return isalpha(self) - - def isdigit(self): - """ - Returns true for each element if all characters in the string are - digits and there is at least one character, false otherwise. - - See also - -------- - char.isdigit - - """ - return isdigit(self) - - def islower(self): - """ - Returns true for each element if all cased characters in the - string are lowercase and there is at least one cased character, - false otherwise. - - See also - -------- - char.islower - - """ - return islower(self) - - def isspace(self): - """ - Returns true for each element if there are only whitespace - characters in the string and there is at least one character, - false otherwise. 
- - See also - -------- - char.isspace - - """ - return isspace(self) - - def istitle(self): - """ - Returns true for each element if the element is a titlecased - string and there is at least one character, false otherwise. - - See also - -------- - char.istitle - - """ - return istitle(self) - - def isupper(self): - """ - Returns true for each element if all cased characters in the - string are uppercase and there is at least one character, false - otherwise. - - See also - -------- - char.isupper - - """ - return isupper(self) - - def join(self, seq): - """ - Return a string which is the concatenation of the strings in the - sequence `seq`. - - See also - -------- - char.join - - """ - return join(self, seq) - - def ljust(self, width, fillchar=' '): - """ - Return an array with the elements of `self` left-justified in a - string of length `width`. - - See also - -------- - char.ljust - - """ - return asarray(ljust(self, width, fillchar)) - - def lower(self): - """ - Return an array with the elements of `self` converted to - lowercase. - - See also - -------- - char.lower - - """ - return asarray(lower(self)) - - def lstrip(self, chars=None): - """ - For each element in `self`, return a copy with the leading characters - removed. - - See also - -------- - char.lstrip - - """ - return asarray(lstrip(self, chars)) - - def partition(self, sep): - """ - Partition each element in `self` around `sep`. - - See also - -------- - partition - """ - return asarray(partition(self, sep)) - - def replace(self, old, new, count=None): - """ - For each element in `self`, return a copy of the string with all - occurrences of substring `old` replaced by `new`. - - See also - -------- - char.replace - - """ - return asarray(replace(self, old, new, count)) - - def rfind(self, sub, start=0, end=None): - """ - For each element in `self`, return the highest index in the string - where substring `sub` is found, such that `sub` is contained - within [`start`, `end`]. - - See also - -------- - char.rfind - - """ - return rfind(self, sub, start, end) - - def rindex(self, sub, start=0, end=None): - """ - Like `rfind`, but raises `ValueError` when the substring `sub` is - not found. - - See also - -------- - char.rindex - - """ - return rindex(self, sub, start, end) - - def rjust(self, width, fillchar=' '): - """ - Return an array with the elements of `self` - right-justified in a string of length `width`. - - See also - -------- - char.rjust - - """ - return asarray(rjust(self, width, fillchar)) - - def rpartition(self, sep): - """ - Partition each element in `self` around `sep`. - - See also - -------- - rpartition - """ - return asarray(rpartition(self, sep)) - - def rsplit(self, sep=None, maxsplit=None): - """ - For each element in `self`, return a list of the words in - the string, using `sep` as the delimiter string. - - See also - -------- - char.rsplit - - """ - return rsplit(self, sep, maxsplit) - - def rstrip(self, chars=None): - """ - For each element in `self`, return a copy with the trailing - characters removed. - - See also - -------- - char.rstrip - - """ - return asarray(rstrip(self, chars)) - - def split(self, sep=None, maxsplit=None): - """ - For each element in `self`, return a list of the words in the - string, using `sep` as the delimiter string. - - See also - -------- - char.split - - """ - return split(self, sep, maxsplit) - - def splitlines(self, keepends=None): - """ - For each element in `self`, return a list of the lines in the - element, breaking at line boundaries. 
- - See also - -------- - char.splitlines - - """ - return splitlines(self, keepends) - - def startswith(self, prefix, start=0, end=None): - """ - Returns a boolean array which is `True` where the string element - in `self` starts with `prefix`, otherwise `False`. - - See also - -------- - char.startswith - - """ - return startswith(self, prefix, start, end) - - def strip(self, chars=None): - """ - For each element in `self`, return a copy with the leading and - trailing characters removed. - - See also - -------- - char.strip - - """ - return asarray(strip(self, chars)) - - def swapcase(self): - """ - For each element in `self`, return a copy of the string with - uppercase characters converted to lowercase and vice versa. - - See also - -------- - char.swapcase - - """ - return asarray(swapcase(self)) - - def title(self): - """ - For each element in `self`, return a titlecased version of the - string: words start with uppercase characters, all remaining cased - characters are lowercase. - - See also - -------- - char.title - - """ - return asarray(title(self)) - - def translate(self, table, deletechars=None): - """ - For each element in `self`, return a copy of the string where - all characters occurring in the optional argument - `deletechars` are removed, and the remaining characters have - been mapped through the given translation table. - - See also - -------- - char.translate - - """ - return asarray(translate(self, table, deletechars)) - - def upper(self): - """ - Return an array with the elements of `self` converted to - uppercase. - - See also - -------- - char.upper - - """ - return asarray(upper(self)) - - def zfill(self, width): - """ - Return the numeric string left-filled with zeros in a string of - length `width`. - - See also - -------- - char.zfill - - """ - return asarray(zfill(self, width)) - - def isnumeric(self): - """ - For each element in `self`, return True if there are only - numeric characters in the element. - - See also - -------- - char.isnumeric - - """ - return isnumeric(self) - - def isdecimal(self): - """ - For each element in `self`, return True if there are only - decimal characters in the element. - - See also - -------- - char.isdecimal - - """ - return isdecimal(self) - - -def array(obj, itemsize=None, copy=True, unicode=None, order=None): - """ - Create a `chararray`. - - .. note:: - This class is provided for numarray backward-compatibility. - New code (not concerned with numarray compatibility) should use - arrays of type `string_` or `unicode_` and use the free functions - in :mod:`numpy.char ` for fast - vectorized string operations instead. - - Versus a regular Numpy array of type `str` or `unicode`, this - class adds the following functionality: - - 1) values automatically have whitespace removed from the end - when indexed - - 2) comparison operators automatically remove whitespace from the - end when comparing values - - 3) vectorized string operations are provided as methods - (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``) - - Parameters - ---------- - obj : array of str or unicode-like - - itemsize : int, optional - `itemsize` is the number of characters per scalar in the - resulting array. If `itemsize` is None, and `obj` is an - object array or a Python list, the `itemsize` will be - automatically determined. If `itemsize` is provided and `obj` - is of type str or unicode, then the `obj` string will be - chunked into `itemsize` pieces. - - copy : bool, optional - If true (default), then the object is copied. 
Otherwise, a copy - will only be made if __array__ returns a copy, if obj is a - nested sequence, or if a copy is needed to satisfy any of the other - requirements (`itemsize`, unicode, `order`, etc.). - - unicode : bool, optional - When true, the resulting `chararray` can contain Unicode - characters, when false only 8-bit characters. If unicode is - `None` and `obj` is one of the following: - - - a `chararray`, - - an ndarray of type `str` or `unicode` - - a Python str or unicode object, - - then the unicode setting of the output array will be - automatically determined. - - order : {'C', 'F', 'A'}, optional - Specify the order of the array. If order is 'C' (default), then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'F', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). If order is 'A', then the returned array may - be in any order (either C-, Fortran-contiguous, or even - discontiguous). - """ - if isinstance(obj, (_bytes, _unicode)): - if unicode is None: - if isinstance(obj, _unicode): - unicode = True - else: - unicode = False - - if itemsize is None: - itemsize = _len(obj) - shape = _len(obj) // itemsize - - if unicode: - if sys.maxunicode == 0xffff: - # On a narrow Python build, the buffer for Unicode - # strings is UCS2, which doesn't match the buffer for - # Numpy Unicode types, which is ALWAYS UCS4. - # Therefore, we need to convert the buffer. On Python - # 2.6 and later, we can use the utf_32 codec. Earlier - # versions don't have that codec, so we convert to a - # numerical array that matches the input buffer, and - # then use Numpy to convert it to UCS4. All of this - # should happen in native endianness. - if sys.hexversion >= 0x2060000: - obj = obj.encode('utf_32') - else: - if isinstance(obj, str): - ascii = numpy.frombuffer(obj, 'u1') - ucs4 = numpy.array(ascii, 'u4') - obj = ucs4.data - else: - ucs2 = numpy.frombuffer(obj, 'u2') - ucs4 = numpy.array(ucs2, 'u4') - obj = ucs4.data - else: - obj = _unicode(obj) - else: - # Let the default Unicode -> string encoding (if any) take - # precedence. - obj = _bytes(obj) - - return chararray(shape, itemsize=itemsize, unicode=unicode, - buffer=obj, order=order) - - if isinstance(obj, (list, tuple)): - obj = numpy.asarray(obj) - - if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character): - # If we just have a vanilla chararray, create a chararray - # view around it. - if not isinstance(obj, chararray): - obj = obj.view(chararray) - - if itemsize is None: - itemsize = obj.itemsize - # itemsize is in 8-bit chars, so for Unicode, we need - # to divide by the size of a single Unicode character, - # which for Numpy is always 4 - if issubclass(obj.dtype.type, unicode_): - itemsize //= 4 - - if unicode is None: - if issubclass(obj.dtype.type, unicode_): - unicode = True - else: - unicode = False - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - if order is not None: - obj = numpy.asarray(obj, order=order) - if (copy - or (itemsize != obj.itemsize) - or (not unicode and isinstance(obj, unicode_)) - or (unicode and isinstance(obj, string_))): - obj = obj.astype((dtype, long(itemsize))) - return obj - - if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object): - if itemsize is None: - # Since no itemsize was specified, convert the input array to - # a list so the ndarray constructor will automatically - # determine the itemsize for us. 
- obj = obj.tolist() - # Fall through to the default case - - if unicode: - dtype = unicode_ - else: - dtype = string_ - - if itemsize is None: - val = narray(obj, dtype=dtype, order=order, subok=True) - else: - val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True) - return val.view(chararray) - - -def asarray(obj, itemsize=None, unicode=None, order=None): - """ - Convert the input to a `chararray`, copying the data only if - necessary. - - Versus a regular Numpy array of type `str` or `unicode`, this - class adds the following functionality: - - 1) values automatically have whitespace removed from the end - when indexed - - 2) comparison operators automatically remove whitespace from the - end when comparing values - - 3) vectorized string operations are provided as methods - (e.g. `str.endswith`) and infix operators (e.g. +, *, %) - - Parameters - ---------- - obj : array of str or unicode-like - - itemsize : int, optional - `itemsize` is the number of characters per scalar in the - resulting array. If `itemsize` is None, and `obj` is an - object array or a Python list, the `itemsize` will be - automatically determined. If `itemsize` is provided and `obj` - is of type str or unicode, then the `obj` string will be - chunked into `itemsize` pieces. - - unicode : bool, optional - When true, the resulting `chararray` can contain Unicode - characters, when false only 8-bit characters. If unicode is - `None` and `obj` is one of the following: - - - a `chararray`, - - an ndarray of type `str` or 'unicode` - - a Python str or unicode object, - - then the unicode setting of the output array will be - automatically determined. - - order : {'C', 'F'}, optional - Specify the order of the array. If order is 'C' (default), then the - array will be in C-contiguous order (last-index varies the - fastest). If order is 'F', then the returned array - will be in Fortran-contiguous order (first-index varies the - fastest). - """ - return array(obj, itemsize, copy=False, - unicode=unicode, order=order) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/fromnumeric.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/fromnumeric.py deleted file mode 100644 index 49fd57e29c34a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/fromnumeric.py +++ /dev/null @@ -1,2930 +0,0 @@ -"""Module containing non-deprecated functions borrowed from Numeric. - -""" -from __future__ import division, absolute_import, print_function - -import types -import warnings - -from .. import VisibleDeprecationWarning -from . import multiarray as mu -from . import umath as um -from . import numerictypes as nt -from .numeric import asarray, array, asanyarray, concatenate -from . 
import _methods - -_dt_ = nt.sctype2char - - -# functions that are methods -__all__ = [ - 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', - 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', - 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', - 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', - 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_', - 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', - 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', - ] - - -try: - _gentype = types.GeneratorType -except AttributeError: - _gentype = type(None) - -# save away Python sum -_sum_ = sum - -# functions that are now methods -def _wrapit(obj, method, *args, **kwds): - try: - wrap = obj.__array_wrap__ - except AttributeError: - wrap = None - result = getattr(asarray(obj), method)(*args, **kwds) - if wrap: - if not isinstance(result, mu.ndarray): - result = asarray(result) - result = wrap(result) - return result - - -def take(a, indices, axis=None, out=None, mode='raise'): - """ - Take elements from an array along an axis. - - This function does the same thing as "fancy" indexing (indexing arrays - using arrays); however, it can be easier to use if you need elements - along a given axis. - - Parameters - ---------- - a : array_like - The source array. - indices : array_like - The indices of the values to extract. - - .. versionadded:: 1.8.0 - - Also allow scalars for indices. - axis : int, optional - The axis over which to select values. By default, the flattened - input array is used. - out : ndarray, optional - If provided, the result will be placed in this array. It should - be of the appropriate shape and dtype. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. - - Returns - ------- - subarray : ndarray - The returned array has the same type as `a`. - - See Also - -------- - compress : Take elements using a boolean mask - ndarray.take : equivalent method - - Examples - -------- - >>> a = [4, 3, 5, 7, 6, 8] - >>> indices = [0, 1, 4] - >>> np.take(a, indices) - array([4, 3, 6]) - - In this example if `a` is an ndarray, "fancy" indexing can be used. - - >>> a = np.array(a) - >>> a[indices] - array([4, 3, 6]) - - If `indices` is not one dimensional, the output also has these dimensions. - - >>> np.take(a, [[0, 1], [2, 3]]) - array([[4, 3], - [5, 7]]) - """ - try: - take = a.take - except AttributeError: - return _wrapit(a, 'take', indices, axis, out, mode) - return take(indices, axis, out, mode) - - -# not deprecated --- copy if necessary, view otherwise -def reshape(a, newshape, order='C'): - """ - Gives a new shape to an array without changing its data. - - Parameters - ---------- - a : array_like - Array to be reshaped. - newshape : int or tuple of ints - The new shape should be compatible with the original shape. If - an integer, then the result will be a 1-D array of that length. - One shape dimension can be -1. In this case, the value is inferred - from the length of the array and remaining dimensions. - order : {'C', 'F', 'A'}, optional - Read the elements of `a` using this index order, and place the elements - into the reshaped array using this index order. 
'C' means to - read / write the elements using C-like index order, with the last axis index - changing fastest, back to the first axis index changing slowest. 'F' - means to read / write the elements using Fortran-like index order, with - the first index changing fastest, and the last index changing slowest. - Note that the 'C' and 'F' options take no account of the memory layout - of the underlying array, and only refer to the order of indexing. 'A' - means to read / write the elements in Fortran-like index order if `a` is - Fortran *contiguous* in memory, C-like order otherwise. - - Returns - ------- - reshaped_array : ndarray - This will be a new view object if possible; otherwise, it will - be a copy. Note there is no guarantee of the *memory layout* (C- or - Fortran- contiguous) of the returned array. - - See Also - -------- - ndarray.reshape : Equivalent method. - - Notes - ----- - It is not always possible to change the shape of an array without - copying the data. If you want an error to be raised if the data is copied, - you should assign the new shape to the shape attribute of the array:: - - >>> a = np.zeros((10, 2)) - # A transpose makes the array non-contiguous - >>> b = a.T - # Taking a view makes it possible to modify the shape without modifying the - # initial object. - >>> c = b.view() - >>> c.shape = (20) - AttributeError: incompatible shape for a non-contiguous array - - The `order` keyword gives the index ordering both for *fetching* the values - from `a`, and then *placing* the values into the output array. For example, - let's say you have an array: - - >>> a = np.arange(6).reshape((3, 2)) - >>> a - array([[0, 1], - [2, 3], - [4, 5]]) - - You can think of reshaping as first raveling the array (using the given - index order), then inserting the elements from the raveled array into the - new array using the same kind of index ordering as was used for the - raveling. - - >>> np.reshape(a, (2, 3)) # C-like index ordering - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering - array([[0, 4, 3], - [2, 1, 5]]) - >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') - array([[0, 4, 3], - [2, 1, 5]]) - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> np.reshape(a, 6) - array([1, 2, 3, 4, 5, 6]) - >>> np.reshape(a, 6, order='F') - array([1, 4, 2, 5, 3, 6]) - - >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 - array([[1, 2], - [3, 4], - [5, 6]]) - """ - try: - reshape = a.reshape - except AttributeError: - return _wrapit(a, 'reshape', newshape, order=order) - return reshape(newshape, order=order) - - -def choose(a, choices, out=None, mode='raise'): - """ - Construct an array from an index array and a set of arrays to choose from. - - First of all, if confused or uncertain, definitely look at the Examples - - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): - - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. - - But this omits some subtleties.
Here is a fully general summary: - - Given an "index" array (`a`) of integers and a sequence of `n` arrays - (`choices`), `a` and each choice array are first broadcast, as necessary, - to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = - 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` - for each `i`. Then, a new array with shape ``Ba.shape`` is created as - follows: - - * if ``mode=raise`` (the default), then, first of all, each element of - `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that - `i` (in that range) is the value at the `(j0, j1, ..., jm)` position - in `Ba` - then the value at the same position in the new array is the - value in `Bchoices[i]` at that same position; - - * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) - integer; modular arithmetic is used to map integers outside the range - `[0, n-1]` back into that range; and then the new array is constructed - as above; - - * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) - integer; negative integers are mapped to 0; values greater than `n-1` - are mapped to `n-1`; and then the new array is constructed as above. - - Parameters - ---------- - a : int array - This array must contain integers in `[0, n-1]`, where `n` is the number - of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any - integers are permissible. - choices : sequence of arrays - Choice arrays. `a` and all of the choices must be broadcastable to the - same shape. If `choices` is itself an array (not recommended), then - its outermost dimension (i.e., the one corresponding to - ``choices.shape[0]``) is taken as defining the "sequence". - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. - mode : {'raise' (default), 'wrap', 'clip'}, optional - Specifies how indices outside `[0, n-1]` will be treated: - - * 'raise' : an exception is raised - * 'wrap' : value becomes value mod `n` - * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 - - Returns - ------- - merged_array : array - The merged result. - - Raises - ------ - ValueError: shape mismatch - If `a` and each choice array are not all broadcastable to the same - shape. - - See Also - -------- - ndarray.choose : equivalent method - - Notes - ----- - To reduce the chance of misinterpretation, even though the following - "abuse" is nominally supported, `choices` should neither be, nor be - thought of as, a single array, i.e., the outermost sequence-like container - should be either a list or a tuple. - - Examples - -------- - - >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], - ... [20, 21, 22, 23], [30, 31, 32, 33]] - >>> np.choose([2, 3, 1, 0], choices - ... # the first element of the result will be the first element of the - ... # third (2+1) "array" in choices, namely, 20; the second element - ... # will be the second element of the fourth (3+1) choice array, i.e., - ... # 31, etc. - ... 
) - array([20, 31, 12, 3]) - >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) - array([20, 31, 12, 3]) - >>> # because there are 4 choice arrays - >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) - array([20, 1, 12, 3]) - >>> # i.e., 0 - - A couple examples illustrating how choose broadcasts: - - >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] - >>> choices = [-10, 10] - >>> np.choose(a, choices) - array([[ 10, -10, 10], - [-10, 10, -10], - [ 10, -10, 10]]) - - >>> # With thanks to Anne Archibald - >>> a = np.array([0, 1]).reshape((2,1,1)) - >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) - >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) - >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 - array([[[ 1, 1, 1, 1, 1], - [ 2, 2, 2, 2, 2], - [ 3, 3, 3, 3, 3]], - [[-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5], - [-1, -2, -3, -4, -5]]]) - - """ - try: - choose = a.choose - except AttributeError: - return _wrapit(a, 'choose', choices, out=out, mode=mode) - return choose(choices, out=out, mode=mode) - - -def repeat(a, repeats, axis=None): - """ - Repeat elements of an array. - - Parameters - ---------- - a : array_like - Input array. - repeats : {int, array of ints} - The number of repetitions for each element. `repeats` is broadcasted - to fit the shape of the given axis. - axis : int, optional - The axis along which to repeat values. By default, use the - flattened input array, and return a flat output array. - - Returns - ------- - repeated_array : ndarray - Output array which has the same shape as `a`, except along - the given axis. - - See Also - -------- - tile : Tile an array. - - Examples - -------- - >>> x = np.array([[1,2],[3,4]]) - >>> np.repeat(x, 2) - array([1, 1, 2, 2, 3, 3, 4, 4]) - >>> np.repeat(x, 3, axis=1) - array([[1, 1, 1, 2, 2, 2], - [3, 3, 3, 4, 4, 4]]) - >>> np.repeat(x, [1, 2], axis=0) - array([[1, 2], - [3, 4], - [3, 4]]) - - """ - try: - repeat = a.repeat - except AttributeError: - return _wrapit(a, 'repeat', repeats, axis) - return repeat(repeats, axis) - - -def put(a, ind, v, mode='raise'): - """ - Replaces specified elements of an array with given values. - - The indexing works on the flattened target array. `put` is roughly - equivalent to: - - :: - - a.flat[ind] = v - - Parameters - ---------- - a : ndarray - Target array. - ind : array_like - Target indices, interpreted as integers. - v : array_like - Values to place in `a` at target indices. If `v` is shorter than - `ind` it will be repeated as necessary. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' -- raise an error (default) - * 'wrap' -- wrap around - * 'clip' -- clip to the range - - 'clip' mode means that all indices that are too large are replaced - by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. - - See Also - -------- - putmask, place - - Examples - -------- - >>> a = np.arange(5) - >>> np.put(a, [0, 2], [-44, -55]) - >>> a - array([-44, 1, -55, 3, 4]) - - >>> a = np.arange(5) - >>> np.put(a, 22, -5, mode='clip') - >>> a - array([ 0, 1, 2, 3, -5]) - - """ - return a.put(ind, v, mode) - - -def swapaxes(a, axis1, axis2): - """ - Interchange two axes of an array. - - Parameters - ---------- - a : array_like - Input array. - axis1 : int - First axis. - axis2 : int - Second axis. 
- - Returns - ------- - a_swapped : ndarray - If `a` is an ndarray, then a view of `a` is returned; otherwise - a new array is created. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> np.swapaxes(x,0,1) - array([[1], - [2], - [3]]) - - >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) - >>> x - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - - >>> np.swapaxes(x,0,2) - array([[[0, 4], - [2, 6]], - [[1, 5], - [3, 7]]]) - - """ - try: - swapaxes = a.swapaxes - except AttributeError: - return _wrapit(a, 'swapaxes', axis1, axis2) - return swapaxes(axis1, axis2) - - -def transpose(a, axes=None): - """ - Permute the dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. - axes : list of ints, optional - By default, reverse the dimensions, otherwise permute the axes - according to the values given. - - Returns - ------- - p : ndarray - `a` with its axes permuted. A view is returned whenever - possible. - - See Also - -------- - rollaxis - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.transpose(x) - array([[0, 2], - [1, 3]]) - - >>> x = np.ones((1, 2, 3)) - >>> np.transpose(x, (1, 0, 2)).shape - (2, 1, 3) - - """ - try: - transpose = a.transpose - except AttributeError: - return _wrapit(a, 'transpose', axes) - return transpose(axes) - - -def partition(a, kth, axis=-1, kind='introselect', order=None): - """ - Return a partitioned copy of an array. - - Creates a copy of the array with its elements rearranged in such a way that - the value of the element in kth position is in the position it would be in - a sorted array. All elements smaller than the kth element are moved before - this element and all equal or greater are moved behind it. The ordering of - the elements in the two partitions is undefined. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to be sorted. - kth : int or sequence of ints - Element index to partition by. The kth value of the element will be in - its final sorted position and all smaller elements will be moved before - it and all equal or greater elements behind it. - The order of all elements in the partitions is undefined. - If provided with a sequence of kth values, it will partition all elements - indexed by them into their sorted positions at once. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect'. - order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on. This list does not need to - include all of the fields. - - Returns - ------- - partitioned_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.partition : Method to sort an array in-place. - argpartition : Indirect partition. - sort : Full sorting - - Notes - ----- - The various selection algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative order.
The - available algorithms have the following properties: - - ================= ======= ============= ============ ======= - kind speed worst case work space stable - ================= ======= ============= ============ ======= - 'introselect' 1 O(n) 0 no - ================= ======= ============= ============ ======= - - All the partition algorithms make temporary copies of the data when - partitioning along any but the last axis. Consequently, partitioning - along the last axis is faster and uses less space than partitioning - along any other axis. - - The sort order for complex numbers is lexicographic. If both the real - and imaginary parts are non-nan then the order is determined by the - real parts except when they are equal, in which case the order is - determined by the imaginary parts. - - Examples - -------- - >>> a = np.array([3, 4, 2, 1]) - >>> np.partition(a, 3) - array([2, 1, 3, 4]) - - >>> np.partition(a, (1, 3)) - array([1, 2, 3, 4]) - - """ - if axis is None: - a = asanyarray(a).flatten() - axis = 0 - else: - a = asanyarray(a).copy(order="K") - a.partition(kth, axis=axis, kind=kind, order=order) - return a - - -def argpartition(a, kth, axis=-1, kind='introselect', order=None): - """ - Perform an indirect partition along the given axis using the algorithm - specified by the `kind` keyword. It returns an array of indices of the - same shape as `a` that index data along the given axis in partitioned - order. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array to sort. - kth : int or sequence of ints - Element index to partition by. The kth element will be in its final - sorted position and all smaller elements will be moved before it and - all larger elements behind it. - The order of all elements in the partitions is undefined. - If provided with a sequence of kth values, it will partition all of them into - their sorted positions at once. - axis : int or None, optional - Axis along which to sort. The default is -1 (the last axis). If None, - the flattened array is used. - kind : {'introselect'}, optional - Selection algorithm. Default is 'introselect'. - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - Returns - ------- - index_array : ndarray, int - Array of indices that partition `a` along the specified axis. - In other words, ``a[index_array]`` yields a sorted `a`. - - See Also - -------- - partition : Describes partition algorithms used. - ndarray.partition : Inplace partition. - argsort : Full indirect sort - - Notes - ----- - See `partition` for notes on the different selection algorithms. - - Examples - -------- - One dimensional array: - - >>> x = np.array([3, 4, 2, 1]) - >>> x[np.argpartition(x, 3)] - array([2, 1, 3, 4]) - >>> x[np.argpartition(x, (1, 3))] - array([1, 2, 3, 4]) - - """ - return a.argpartition(kth, axis, kind=kind, order=order) - - -def sort(a, axis=-1, kind='quicksort', order=None): - """ - Return a sorted copy of an array. - - Parameters - ---------- - a : array_like - Array to be sorted. - axis : int or None, optional - Axis along which to sort. If None, the array is flattened before - sorting. The default is -1, which sorts along the last axis. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. Default is 'quicksort'. - order : list, optional - When `a` is a structured array, this argument specifies which fields - to compare first, second, and so on.
This list does not need to - include all of the fields. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - ndarray.sort : Method to sort an array in-place. - argsort : Indirect sort. - lexsort : Indirect stable sort on multiple keys. - searchsorted : Find elements in a sorted array. - partition : Partial sort. - - Notes - ----- - The various sorting algorithms are characterized by their average speed, - worst case performance, work space size, and whether they are stable. A - stable sort keeps items with the same key in the same relative - order. The three available algorithms have the following - properties: - - =========== ======= ============= ============ ======= - kind speed worst case work space stable - =========== ======= ============= ============ ======= - 'quicksort' 1 O(n^2) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes - 'heapsort' 3 O(n*log(n)) 0 no - =========== ======= ============= ============ ======= - - All the sort algorithms make temporary copies of the data when - sorting along any but the last axis. Consequently, sorting along - the last axis is faster and uses less space than sorting along - any other axis. - - The sort order for complex numbers is lexicographic. If both the real - and imaginary parts are non-nan then the order is determined by the - real parts except when they are equal, in which case the order is - determined by the imaginary parts. - - Previous to numpy 1.4.0 sorting real and complex arrays containing nan - values led to undefined behaviour. In numpy versions >= 1.4.0 nan - values are sorted to the end. The extended sort order is: - - * Real: [R, nan] - * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] - - where R is a non-nan real value. Complex values with the same nan - placements are sorted according to the non-nan part if it exists. - Non-nan values are sorted as before. - - Examples - -------- - >>> a = np.array([[1,4],[3,1]]) - >>> np.sort(a) # sort along the last axis - array([[1, 4], - [1, 3]]) - >>> np.sort(a, axis=None) # sort the flattened array - array([1, 1, 3, 4]) - >>> np.sort(a, axis=0) # sort along the first axis - array([[1, 1], - [3, 4]]) - - Use the `order` keyword to specify a field to use when sorting a - structured array: - - >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] - >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), - ... 
('Galahad', 1.7, 38)] - >>> a = np.array(values, dtype=dtype) # create a structured array - >>> np.sort(a, order='height') # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), - ('Lancelot', 1.8999999999999999, 38)], - dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) - - Sort by age, then height if ages are equal: - - >>> np.sort(a, order=['age', 'height']) # doctest: +SKIP - array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), - ('Arthur', 1.8, 41)], - dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) - - """ - if axis is None: - a = asanyarray(a).flatten() - axis = 0 - else: - a = asanyarray(a).copy(order="K") - a.sort(axis=axis, kind=kind, order=order) - return a - - -def argsort(a, axis=-1, kind='quicksort', order=None): - """ - Returns the indices that would sort an array. - - Perform an indirect sort along the given axis using the algorithm - specified by the `kind` keyword. It returns an array of indices of the - same shape as `a` that index data along the given axis in sorted order. - - Parameters - ---------- - a : array_like - Array to sort. - axis : int or None, optional - Axis along which to sort. The default is -1 (the last axis). If None, - the flattened array is used. - kind : {'quicksort', 'mergesort', 'heapsort'}, optional - Sorting algorithm. - order : list, optional - When `a` is an array with fields defined, this argument specifies - which fields to compare first, second, etc. Not all fields need be - specified. - - Returns - ------- - index_array : ndarray, int - Array of indices that sort `a` along the specified axis. - In other words, ``a[index_array]`` yields a sorted `a`. - - See Also - -------- - sort : Describes sorting algorithms used. - lexsort : Indirect stable sort with multiple keys. - ndarray.sort : Inplace sort. - argpartition : Indirect partial sort. - - Notes - ----- - See `sort` for notes on the different sorting algorithms. - - As of NumPy 1.4.0 `argsort` works with real/complex arrays containing - nan values. The enhanced sort order is documented in `sort`. - - Examples - -------- - One dimensional array: - - >>> x = np.array([3, 1, 2]) - >>> np.argsort(x) - array([1, 2, 0]) - - Two-dimensional array: - - >>> x = np.array([[0, 3], [2, 2]]) - >>> x - array([[0, 3], - [2, 2]]) - - >>> np.argsort(x, axis=0) - array([[0, 1], - [1, 0]]) - - >>> np.argsort(x, axis=1) - array([[0, 1], - [0, 1]]) - - Sorting with keys: - - >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')]) - >>> x - array([(1, 0), (0, 1)], - dtype=[('x', '<i4'), ('y', '<i4')]) - - >>> np.argsort(x, order=('x','y')) - array([1, 0]) - - >>> np.argsort(x, order=('y','x')) - array([0, 1]) - - """ - try: - argsort = a.argsort - except AttributeError: - return _wrapit(a, 'argsort', axis, kind, order) - return argsort(axis, kind, order) - - -def argmax(a, axis=None): - """ - Indices of the maximum values along an axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - By default, the index is into the flattened array, otherwise - along the specified axis. - - Returns - ------- - index_array : ndarray of ints - Array of indices into the array. It has the same shape as `a.shape` - with the dimension along `axis` removed. - - See Also - -------- - ndarray.argmax, argmin - amax : The maximum value along a given axis. - unravel_index : Convert a flat index into an index tuple. - - Notes - ----- - In case of multiple occurrences of the maximum values, the indices - corresponding to the first occurrence are returned. - - Examples - -------- - >>> a = np.arange(6).reshape(2,3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.argmax(a) - 5 - >>> np.argmax(a, axis=0) - array([1, 1, 1]) - >>> np.argmax(a, axis=1) - array([2, 2]) - - >>> b = np.arange(6) - >>> b[1] = 5 - >>> b - array([0, 5, 2, 3, 4, 5]) - >>> np.argmax(b) # Only the first occurrence is returned. - 1 - - """ - try: - argmax = a.argmax - except AttributeError: - return _wrapit(a, 'argmax', axis) - return argmax(axis) - - -def argmin(a, axis=None): - """ - Return the indices of the minimum values along an axis. - - See Also - -------- - argmax : Similar function. Please refer to `numpy.argmax` for detailed - documentation. - - """ - try: - argmin = a.argmin - except AttributeError: - return _wrapit(a, 'argmin', axis) - return argmin(axis) - - -def searchsorted(a, v, side='left', sorter=None): - """ - Find indices where elements should be inserted to maintain order. - - Find the indices into a sorted array `a` such that, if the - corresponding elements in `v` were inserted before the indices, the - order of `a` would be preserved. - - Parameters - ---------- - a : 1-D array_like - Input array. If `sorter` is None, then it must be sorted in - ascending order, otherwise `sorter` must be an array of indices - that sort it. - v : array_like - Values to insert into `a`. - side : {'left', 'right'}, optional - If 'left', the index of the first suitable location found is given. - If 'right', return the last such index. If there is no suitable - index, return either 0 or N (where N is the length of `a`). - sorter : 1-D array_like, optional - .. versionadded:: 1.7.0 - Optional array of integer indices that sort array a into ascending - order.
They are typically the result of argsort. - - Returns - ------- - indices : array of ints - Array of insertion points with the same shape as `v`. - - See Also - -------- - sort : Return a sorted copy of an array. - histogram : Produce histogram from 1-D data. - - Notes - ----- - Binary search is used to find the required insertion points. - - As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing - `nan` values. The enhanced sort order is documented in `sort`. - - Examples - -------- - >>> np.searchsorted([1,2,3,4,5], 3) - 2 - >>> np.searchsorted([1,2,3,4,5], 3, side='right') - 3 - >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) - array([0, 5, 1, 2]) - - """ - try: - searchsorted = a.searchsorted - except AttributeError: - return _wrapit(a, 'searchsorted', v, side, sorter) - return searchsorted(v, side, sorter) - - -def resize(a, new_shape): - """ - Return a new array with the specified shape. - - If the new array is larger than the original array, then the new - array is filled with repeated copies of `a`. Note that this behavior - is different from a.resize(new_shape) which fills with zeros instead - of repeated copies of `a`. - - Parameters - ---------- - a : array_like - Array to be resized. - - new_shape : int or tuple of int - Shape of resized array. - - Returns - ------- - reshaped_array : ndarray - The new array is formed from the data in the old array, repeated - if necessary to fill out the required number of elements. The - data are repeated in the order that they are stored in memory. - - See Also - -------- - ndarray.resize : resize an array in-place. - - Examples - -------- - >>> a=np.array([[0,1],[2,3]]) - >>> np.resize(a,(1,4)) - array([[0, 1, 2, 3]]) - >>> np.resize(a,(2,4)) - array([[0, 1, 2, 3], - [0, 1, 2, 3]]) - - """ - if isinstance(new_shape, (int, nt.integer)): - new_shape = (new_shape,) - a = ravel(a) - Na = len(a) - if not Na: return mu.zeros(new_shape, a.dtype.char) - total_size = um.multiply.reduce(new_shape) - n_copies = int(total_size / Na) - extra = total_size % Na - - if total_size == 0: - return a[:0] - - if extra != 0: - n_copies = n_copies+1 - extra = Na-extra - - a = concatenate( (a,)*n_copies) - if extra > 0: - a = a[:-extra] - - return reshape(a, new_shape) - - -def squeeze(a, axis=None): - """ - Remove single-dimensional entries from the shape of an array. - - Parameters - ---------- - a : array_like - Input data. - axis : None or int or tuple of ints, optional - .. versionadded:: 1.7.0 - - Selects a subset of the single-dimensional entries in the - shape. If an axis is selected with shape entry greater than - one, an error is raised. - - Returns - ------- - squeezed : ndarray - The input array, but with all or a subset of the - dimensions of length 1 removed. This is always `a` itself - or a view into `a`. - - Examples - -------- - >>> x = np.array([[[0], [1], [2]]]) - >>> x.shape - (1, 3, 1) - >>> np.squeeze(x).shape - (3,) - >>> np.squeeze(x, axis=(2,)).shape - (1, 3) - - """ - try: - squeeze = a.squeeze - except AttributeError: - return _wrapit(a, 'squeeze') - try: - # First try to use the new axis= parameter - return squeeze(axis=axis) - except TypeError: - # For backwards compatibility - return squeeze() - - -def diagonal(a, offset=0, axis1=0, axis2=1): - """ - Return specified diagonals. - - If `a` is 2-D, returns the diagonal of `a` with the given offset, - i.e., the collection of elements of the form ``a[i, i+offset]``. 
If - `a` has more than two dimensions, then the axes specified by `axis1` - and `axis2` are used to determine the 2-D sub-array whose diagonal is - returned. The shape of the resulting array can be determined by - removing `axis1` and `axis2` and appending an index to the right equal - to the size of the resulting diagonals. - - In versions of NumPy prior to 1.7, this function always returned a new, - independent array containing a copy of the values in the diagonal. - - In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, - but depending on this fact is deprecated. Writing to the resulting - array continues to work as it used to, but a FutureWarning is issued. - - In NumPy 1.9 it returns a read-only view on the original array. - Attempting to write to the resulting array will produce an error. - - In NumPy 1.10, it will return a read/write view. Writing to the returned - array will alter your original array. - - If you don't write to the array returned by this function, then you can - just ignore all of the above. - - If you depend on the current behavior, then we suggest copying the - returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead of - just ``np.diagonal(a)``. This will work with both past and future versions - of NumPy. - - Parameters - ---------- - a : array_like - Array from which the diagonals are taken. - offset : int, optional - Offset of the diagonal from the main diagonal. Can be positive or - negative. Defaults to main diagonal (0). - axis1 : int, optional - Axis to be used as the first axis of the 2-D sub-arrays from which - the diagonals should be taken. Defaults to first axis (0). - axis2 : int, optional - Axis to be used as the second axis of the 2-D sub-arrays from - which the diagonals should be taken. Defaults to second axis (1). - - Returns - ------- - array_of_diagonals : ndarray - If `a` is 2-D, a 1-D array containing the diagonal is returned. - If the dimension of `a` is larger, then an array of diagonals is - returned, "packed" from left-most dimension to right-most (e.g., - if `a` is 3-D, then the diagonals are "packed" along rows). - - Raises - ------ - ValueError - If the dimension of `a` is less than 2. - - See Also - -------- - diag : MATLAB work-a-like for 1-D and 2-D arrays. - diagflat : Create diagonal arrays. - trace : Sum along diagonals. - - Examples - -------- - >>> a = np.arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> a.diagonal() - array([0, 3]) - >>> a.diagonal(1) - array([1]) - - A 3-D example: - - >>> a = np.arange(8).reshape(2,2,2); a - array([[[0, 1], - [2, 3]], - [[4, 5], - [6, 7]]]) - >>> a.diagonal(0, # Main diagonals of two arrays created by skipping - ... 0, # across the outer(left)-most axis last and - ... 1) # the "middle" (row) axis first. - array([[0, 6], - [1, 7]]) - - The sub-arrays whose main diagonals we just obtained; note that each - corresponds to fixing the right-most (column) axis, and that the - diagonals are "packed" in rows. - - >>> a[:,:,0] # main diagonal is [0 6] - array([[0, 2], - [4, 6]]) - >>> a[:,:,1] # main diagonal is [1 7] - array([[1, 3], - [5, 7]]) - - """ - return asarray(a).diagonal(offset, axis1, axis2) - - -def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """ - Return the sum along diagonals of the array. - - If `a` is 2-D, the sum along its diagonal with the given offset - is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
- - If `a` has more than two dimensions, then the axes specified by axis1 and - axis2 are used to determine the 2-D sub-arrays whose traces are returned. - The shape of the resulting array is the same as that of `a` with `axis1` - and `axis2` removed. - - Parameters - ---------- - a : array_like - Input array, from which the diagonals are taken. - offset : int, optional - Offset of the diagonal from the main diagonal. Can be both positive - and negative. Defaults to 0. - axis1, axis2 : int, optional - Axes to be used as the first and second axis of the 2-D sub-arrays - from which the diagonals should be taken. Defaults are the first two - axes of `a`. - dtype : dtype, optional - Determines the data-type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and `a` is - of integer type of precision less than the default integer - precision, then the default integer precision is used. Otherwise, - the precision is the same as that of `a`. - out : ndarray, optional - Array into which the output is placed. Its type is preserved and - it must be of the right shape to hold the output. - - Returns - ------- - sum_along_diagonals : ndarray - If `a` is 2-D, the sum along the diagonal is returned. If `a` has - larger dimensions, then an array of sums along diagonals is returned. - - See Also - -------- - diag, diagonal, diagflat - - Examples - -------- - >>> np.trace(np.eye(3)) - 3.0 - >>> a = np.arange(8).reshape((2,2,2)) - >>> np.trace(a) - array([6, 8]) - - >>> a = np.arange(24).reshape((2,2,2,3)) - >>> np.trace(a).shape - (2, 3) - - """ - return asarray(a).trace(offset, axis1, axis2, dtype, out) - -def ravel(a, order='C'): - """ - Return a flattened array. - - A 1-D array, containing the elements of the input, is returned. A copy is - made only if needed. - - Parameters - ---------- - a : array_like - Input array. The elements in `a` are read in the order specified by - `order`, and packed as a 1-D array. - order : {'C','F', 'A', 'K'}, optional - The elements of `a` are read using this index order. 'C' means to - index the elements in C-like order, with the last axis index changing - fastest, back to the first axis index changing slowest. 'F' means to - index the elements in Fortran-like index order, with the first index - changing fastest, and the last index changing slowest. Note that the 'C' - and 'F' options take no account of the memory layout of the underlying - array, and only refer to the order of axis indexing. 'A' means to read - the elements in Fortran-like index order if `a` is Fortran *contiguous* - in memory, C-like order otherwise. 'K' means to read the elements in - the order they occur in memory, except for reversing the data when - strides are negative. By default, 'C' index order is used. - - Returns - ------- - 1d_array : ndarray - Output of the same dtype as `a`, and of shape ``(a.size,)``. - - See Also - -------- - ndarray.flat : 1-D iterator over an array. - ndarray.flatten : 1-D array copy of the elements of an array - in row-major order. - - Notes - ----- - In C-like (row-major) order, in two dimensions, the row index varies the - slowest, and the column index the quickest. This can be generalized to - multiple dimensions, where row-major order implies that the index along the - first axis varies slowest, and the index along the last quickest. The - opposite holds for Fortran-like, or column-major, index ordering. - - Examples - -------- - It is equivalent to ``reshape(-1, order=order)``. 
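Since a copy is made only when it is needed, a C-contiguous input typically comes back as a view, while a transposed input forces a copy; a minimal sketch of that distinction, using `np.may_share_memory` purely as an illustration: - - >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.may_share_memory(x, np.ravel(x)) # contiguous: a view suffices - True - >>> np.may_share_memory(x, np.ravel(x.T)) # transposed: a copy is made - False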
- - >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> print(np.ravel(x)) - [1 2 3 4 5 6] - - >>> print(x.reshape(-1)) - [1 2 3 4 5 6] - - >>> print(np.ravel(x, order='F')) - [1 4 2 5 3 6] - - When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: - - >>> print(np.ravel(x.T)) - [1 4 2 5 3 6] - >>> print(np.ravel(x.T, order='A')) - [1 2 3 4 5 6] - - When ``order`` is 'K', it will preserve orderings that are neither 'C' - nor 'F', but won't reverse axes: - - >>> a = np.arange(3)[::-1]; a - array([2, 1, 0]) - >>> a.ravel(order='C') - array([2, 1, 0]) - >>> a.ravel(order='K') - array([2, 1, 0]) - - >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a - array([[[ 0, 2, 4], - [ 1, 3, 5]], - [[ 6, 8, 10], - [ 7, 9, 11]]]) - >>> a.ravel(order='C') - array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) - >>> a.ravel(order='K') - array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - - """ - return asarray(a).ravel(order) - - -def nonzero(a): - """ - Return the indices of the elements that are non-zero. - - Returns a tuple of arrays, one for each dimension of `a`, containing - the indices of the non-zero elements in that dimension. The - corresponding non-zero values can be obtained with:: - - a[nonzero(a)] - - To group the indices by element, rather than dimension, use:: - - transpose(nonzero(a)) - - The result of this is always a 2-D array, with a row for - each non-zero element. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - tuple_of_arrays : tuple - Indices of elements that are non-zero. - - See Also - -------- - flatnonzero : - Return indices that are non-zero in the flattened version of the input - array. - ndarray.nonzero : - Equivalent ndarray method. - count_nonzero : - Counts the number of non-zero elements in the input array. - - Examples - -------- - >>> x = np.eye(3) - >>> x - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - >>> np.nonzero(x) - (array([0, 1, 2]), array([0, 1, 2])) - - >>> x[np.nonzero(x)] - array([ 1., 1., 1.]) - >>> np.transpose(np.nonzero(x)) - array([[0, 0], - [1, 1], - [2, 2]]) - - A common use for ``nonzero`` is to find the indices of an array where - a condition is True. Given an array `a`, the condition `a` > 3 is a - boolean array and since False is interpreted as 0, np.nonzero(a > 3) - yields the indices of `a` where the condition is true. - - >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]]) - >>> a > 3 - array([[False, False, False], - [ True, True, True], - [ True, True, True]], dtype=bool) - >>> np.nonzero(a > 3) - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - The ``nonzero`` method of the boolean array can also be called. - - >>> (a > 3).nonzero() - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - """ - try: - nonzero = a.nonzero - except AttributeError: - res = _wrapit(a, 'nonzero') - else: - res = nonzero() - return res - - -def shape(a): - """ - Return the shape of an array. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - shape : tuple of ints - The elements of the shape tuple give the lengths of the - corresponding array dimensions. - - See Also - -------- - alen - ndarray.shape : Equivalent array method.
- - Examples - -------- - >>> np.shape(np.eye(3)) - (3, 3) - >>> np.shape([[1, 2]]) - (1, 2) - >>> np.shape([0]) - (1,) - >>> np.shape(0) - () - - >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - >>> np.shape(a) - (2,) - >>> a.shape - (2,) - - """ - try: - result = a.shape - except AttributeError: - result = asarray(a).shape - return result - - -def compress(condition, a, axis=None, out=None): - """ - Return selected slices of an array along given axis. - - When working along a given axis, a slice along that axis is returned in - `output` for each index where `condition` evaluates to True. When - working on a 1-D array, `compress` is equivalent to `extract`. - - Parameters - ---------- - condition : 1-D array of bools - Array that selects which entries to return. If len(condition) - is less than the size of `a` along the given axis, then output is - truncated to the length of the condition array. - a : array_like - Array from which to extract a part. - axis : int, optional - Axis along which to take slices. If None (default), work on the - flattened array. - out : ndarray, optional - Output array. Its type is preserved and it must be of the right - shape to hold the output. - - Returns - ------- - compressed_array : ndarray - A copy of `a` without the slices along axis for which `condition` - is false. - - See Also - -------- - take, choose, diag, diagonal, select - ndarray.compress : Equivalent method in ndarray - np.extract: Equivalent method when working on 1-D arrays - numpy.doc.ufuncs : Section "Output arguments" - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4], [5, 6]]) - >>> a - array([[1, 2], - [3, 4], - [5, 6]]) - >>> np.compress([0, 1], a, axis=0) - array([[3, 4]]) - >>> np.compress([False, True, True], a, axis=0) - array([[3, 4], - [5, 6]]) - >>> np.compress([False, True], a, axis=1) - array([[2], - [4], - [6]]) - - Working on the flattened array does not return slices along an axis but - selects elements. - - >>> np.compress([False, True], a) - array([2]) - - """ - try: - compress = a.compress - except AttributeError: - return _wrapit(a, 'compress', condition, axis, out) - return compress(condition, axis, out) - - -def clip(a, a_min, a_max, out=None): - """ - Clip (limit) the values in an array. - - Given an interval, values outside the interval are clipped to - the interval edges. For example, if an interval of ``[0, 1]`` - is specified, values smaller than 0 become 0, and values larger - than 1 become 1. - - Parameters - ---------- - a : array_like - Array containing elements to clip. - a_min : scalar or array_like - Minimum value. - a_max : scalar or array_like - Maximum value. If `a_min` or `a_max` are array_like, then they will - be broadcasted to the shape of `a`. - out : ndarray, optional - The results will be placed in this array. It may be the input - array for in-place clipping. `out` must be of the right shape - to hold the output. Its type is preserved. - - Returns - ------- - clipped_array : ndarray - An array with the elements of `a`, but where values - < `a_min` are replaced with `a_min`, and those > `a_max` - with `a_max`. 
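The returned values can be sketched as a composition of the element-wise extrema (assuming ``a_min <= a_max``): - - >>> a = np.arange(10) - >>> np.all(np.clip(a, 3, 6) == np.minimum(np.maximum(a, 3), 6)) - True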
- - See Also - -------- - numpy.doc.ufuncs : Section "Output arguments" - - Examples - -------- - >>> a = np.arange(10) - >>> np.clip(a, 1, 8) - array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.clip(a, 3, 6, out=a) - array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) - >>> a = np.arange(10) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8) - array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) - - """ - try: - clip = a.clip - except AttributeError: - return _wrapit(a, 'clip', a_min, a_max, out) - return clip(a_min, a_max, out) - - -def sum(a, axis=None, dtype=None, out=None, keepdims=False): - """ - Sum of array elements over a given axis. - - Parameters - ---------- - a : array_like - Elements to sum. - axis : None or int or tuple of ints, optional - Axis or axes along which a sum is performed. - The default (`axis` = `None`) is to perform a sum over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a sum is performed on multiple - axes, instead of a single axis or all the axes as before. - dtype : dtype, optional - The type of the returned array and of the accumulator in which - the elements are summed. By default, the dtype of `a` is used. - An exception is when `a` has an integer type with less precision - than the default platform integer. In that case, the default - platform integer is used instead. - out : ndarray, optional - Array into which the output is placed. By default, a new array is - created. If `out` is given, it must be of the appropriate shape - (the shape of `a` with `axis` removed, i.e., - ``numpy.delete(a.shape, axis)``). Its type is preserved. See - `doc.ufuncs` (Section "Output arguments") for more details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - sum_along_axis : ndarray - An array with the same shape as `a`, with the specified - axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar - is returned. If an output array is specified, a reference to - `out` is returned. - - See Also - -------- - ndarray.sum : Equivalent method. - - cumsum : Cumulative sum of array elements. - - trapz : Integration of array values using the composite trapezoidal rule. - - mean, average - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> np.sum([0.5, 1.5]) - 2.0 - >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) - 1 - >>> np.sum([[0, 1], [0, 5]]) - 6 - >>> np.sum([[0, 1], [0, 5]], axis=0) - array([0, 6]) - >>> np.sum([[0, 1], [0, 5]], axis=1) - array([1, 5]) - - If the accumulator is too small, overflow occurs: - - >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) - -128 - - """ - if isinstance(a, _gentype): - res = _sum_(a) - if out is not None: - out[...] = res - return out - return res - elif type(a) is not mu.ndarray: - try: - sum = a.sum - except AttributeError: - return _methods._sum(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - # NOTE: Dropping the keepdims parameter here...
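# (presumably so that a duck-typed ``sum`` method that does not accept - # ``keepdims`` still works; compare the try/except TypeError fallbacks - # used in ``sometrue`` and ``alltrue`` below)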
- return sum(axis=axis, dtype=dtype, out=out) - else: - return _methods._sum(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def product (a, axis=None, dtype=None, out=None, keepdims=False): - """ - Return the product of array elements over a given axis. - - See Also - -------- - prod : equivalent function; see for details. - - """ - return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - -def sometrue(a, axis=None, out=None, keepdims=False): - """ - Check whether some values are true. - - Refer to `any` for full documentation. - - See Also - -------- - any : equivalent function - - """ - arr = asanyarray(a) - - try: - return arr.any(axis=axis, out=out, keepdims=keepdims) - except TypeError: - return arr.any(axis=axis, out=out) - -def alltrue (a, axis=None, out=None, keepdims=False): - """ - Check if all elements of input array are true. - - See Also - -------- - numpy.all : Equivalent function; see for details. - - """ - arr = asanyarray(a) - - try: - return arr.all(axis=axis, out=out, keepdims=keepdims) - except TypeError: - return arr.all(axis=axis, out=out) - -def any(a, axis=None, out=None, keepdims=False): - """ - Test whether any array element along a given axis evaluates to True. - - Returns a single boolean unless `axis` is not ``None``. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : None or int or tuple of ints, optional - Axis or axes along which a logical OR reduction is performed. - The default (`axis` = `None`) is to perform a logical OR over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output and its type is preserved - (e.g., if it is of type float, then it will remain so, returning - 1.0 for True and 0.0 for False, regardless of the type of `a`). - See `doc.ufuncs` (Section "Output arguments") for details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - any : bool or ndarray - A new boolean or `ndarray` is returned unless `out` is specified, - in which case a reference to `out` is returned. - - See Also - -------- - ndarray.any : equivalent method - - all : Test whether all elements along a given axis evaluate to True. - - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity evaluate - to `True` because these are not equal to zero.
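A rough sketch of the reduction being described, with the implicit conversion to boolean written out explicitly: - - >>> np.logical_or.reduce(np.asarray([-1, 0, np.nan]) != 0) - True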
- - Examples - -------- - >>> np.any([[True, False], [True, True]]) - True - - >>> np.any([[True, False], [False, False]], axis=0) - array([ True, False], dtype=bool) - - >>> np.any([-1, 0, 5]) - True - - >>> np.any(np.nan) - True - - >>> o=np.array([False]) - >>> z=np.any([-1, 4, 5], out=o) - >>> z, o - (array([ True], dtype=bool), array([ True], dtype=bool)) - >>> # Check now that z is a reference to o - >>> z is o - True - >>> id(z), id(o) # identity of z and o # doctest: +SKIP - (191614240, 191614240) - - """ - arr = asanyarray(a) - - try: - return arr.any(axis=axis, out=out, keepdims=keepdims) - except TypeError: - return arr.any(axis=axis, out=out) - -def all(a, axis=None, out=None, keepdims=False): - """ - Test whether all array elements along a given axis evaluate to True. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : None or int or tuple of ints, optional - Axis or axes along which a logical AND reduction is performed. - The default (`axis` = `None`) is to perform a logical AND over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple - axes, instead of a single axis or all the axes as before. - out : ndarray, optional - Alternate output array in which to place the result. - It must have the same shape as the expected output and its - type is preserved (e.g., if ``dtype(out)`` is float, the result - will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section - "Output arguments") for more details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - all : ndarray, bool - A new boolean or array is returned unless `out` is specified, - in which case a reference to `out` is returned. - - See Also - -------- - ndarray.all : equivalent method - - any : Test whether any element along a given axis evaluates to True. - - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity - evaluate to `True` because these are not equal to zero. - - Examples - -------- - >>> np.all([[True,False],[True,True]]) - False - - >>> np.all([[True,False],[True,True]], axis=0) - array([ True, False], dtype=bool) - - >>> np.all([-1, 4, 5]) - True - - >>> np.all([1.0, np.nan]) - True - - >>> o=np.array([False]) - >>> z=np.all([-1, 4, 5], out=o) - >>> id(z), id(o), z # doctest: +SKIP - (28293632, 28293632, array([ True], dtype=bool)) - - """ - arr = asanyarray(a) - - try: - return arr.all(axis=axis, out=out, keepdims=keepdims) - except TypeError: - return arr.all(axis=axis, out=out) - -def cumsum (a, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of the elements along a given axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative sum is computed. The default - (None) is to compute the cumsum over the flattened array. - dtype : dtype, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. 
- out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. See `doc.ufuncs` - (Section "Output arguments") for more details. - - Returns - ------- - cumsum_along_axis : ndarray. - A new array holding the result is returned unless `out` is - specified, in which case a reference to `out` is returned. The - result has the same size as `a`, and the same shape as `a` if - `axis` is not None or `a` is a 1-d array. - - - See Also - -------- - sum : Sum array elements. - - trapz : Integration of array values using the composite trapezoidal rule. - - diff : Calculate the n-th order discrete difference along given axis. - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> a = np.array([[1,2,3], [4,5,6]]) - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.cumsum(a) - array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) - array([ 1., 3., 6., 10., 15., 21.]) - - >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns - array([[1, 2, 3], - [5, 7, 9]]) - >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows - array([[ 1, 3, 6], - [ 4, 9, 15]]) - - """ - try: - cumsum = a.cumsum - except AttributeError: - return _wrapit(a, 'cumsum', axis, dtype, out) - return cumsum(axis, dtype, out) - - -def cumproduct(a, axis=None, dtype=None, out=None): - """ - Return the cumulative product over the given axis. - - - See Also - -------- - cumprod : equivalent function; see for details. - - """ - try: - cumprod = a.cumprod - except AttributeError: - return _wrapit(a, 'cumprod', axis, dtype, out) - return cumprod(axis, dtype, out) - - -def ptp(a, axis=None, out=None): - """ - Range of values (maximum - minimum) along an axis. - - The name of the function comes from the acronym for 'peak to peak'. - - Parameters - ---------- - a : array_like - Input values. - axis : int, optional - Axis along which to find the peaks. By default, flatten the - array. - out : array_like - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type of the output values will be cast if necessary. - - Returns - ------- - ptp : ndarray - A new array holding the result, unless `out` was - specified, in which case a reference to `out` is returned. - - Examples - -------- - >>> x = np.arange(4).reshape((2,2)) - >>> x - array([[0, 1], - [2, 3]]) - - >>> np.ptp(x, axis=0) - array([2, 2]) - - >>> np.ptp(x, axis=1) - array([1, 1]) - - """ - try: - ptp = a.ptp - except AttributeError: - return _wrapit(a, 'ptp', axis, out) - return ptp(axis, out) - - -def amax(a, axis=None, out=None, keepdims=False): - """ - Return the maximum of an array or maximum along an axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default, flattened input is used. - out : ndarray, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - See `doc.ufuncs` (Section "Output arguments") for more details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. 
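A minimal sketch of the `keepdims` behaviour described above: - - >>> a = np.arange(6).reshape(2, 3) - >>> np.amax(a, axis=1, keepdims=True) - array([[2], - [5]]) - >>> (a - np.amax(a, axis=1, keepdims=True)).shape # broadcasts against `a` - (2, 3)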
- - Returns - ------- - amax : ndarray or scalar - Maximum of `a`. If `axis` is None, the result is a scalar value. - If `axis` is given, the result is an array of dimension - ``a.ndim - 1``. - - See Also - -------- - amin : - The minimum value of an array along a given axis, propagating any NaNs. - nanmax : - The maximum value of an array along a given axis, ignoring any NaNs. - maximum : - Element-wise maximum of two arrays, propagating any NaNs. - fmax : - Element-wise maximum of two arrays, ignoring any NaNs. - argmax : - Return the indices of the maximum values. - - nanmin, minimum, fmin - - Notes - ----- - NaN values are propagated, that is if at least one item is NaN, the - corresponding max value will be NaN as well. To ignore NaN values - (MATLAB behavior), please use nanmax. - - Don't use `amax` for element-wise comparison of 2 arrays; when - ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than - ``amax(a, axis=0)``. - - Examples - -------- - >>> a = np.arange(4).reshape((2,2)) - >>> a - array([[0, 1], - [2, 3]]) - >>> np.amax(a) # Maximum of the flattened array - 3 - >>> np.amax(a, axis=0) # Maxima along the first axis - array([2, 3]) - >>> np.amax(a, axis=1) # Maxima along the second axis - array([1, 3]) - - >>> b = np.arange(5, dtype=np.float) - >>> b[2] = np.NaN - >>> np.amax(b) - nan - >>> np.nanmax(b) - 4.0 - - """ - if type(a) is not mu.ndarray: - try: - amax = a.max - except AttributeError: - return _methods._amax(a, axis=axis, - out=out, keepdims=keepdims) - # NOTE: Dropping the keepdims parameter - return amax(axis=axis, out=out) - else: - return _methods._amax(a, axis=axis, - out=out, keepdims=keepdims) - -def amin(a, axis=None, out=None, keepdims=False): - """ - Return the minimum of an array or minimum along an axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default, flattened input is used. - out : ndarray, optional - Alternative output array in which to place the result. Must - be of the same shape and buffer length as the expected output. - See `doc.ufuncs` (Section "Output arguments") for more details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - amin : ndarray or scalar - Minimum of `a`. If `axis` is None, the result is a scalar value. - If `axis` is given, the result is an array of dimension - ``a.ndim - 1``. - - See Also - -------- - amax : - The maximum value of an array along a given axis, propagating any NaNs. - nanmin : - The minimum value of an array along a given axis, ignoring any NaNs. - minimum : - Element-wise minimum of two arrays, propagating any NaNs. - fmin : - Element-wise minimum of two arrays, ignoring any NaNs. - argmin : - Return the indices of the minimum values. - - nanmax, maximum, fmax - - Notes - ----- - NaN values are propagated, that is if at least one item is NaN, the - corresponding min value will be NaN as well. To ignore NaN values - (MATLAB behavior), please use nanmin. - - Don't use `amin` for element-wise comparison of 2 arrays; when - ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than - ``amin(a, axis=0)``. 
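A sketch of that comparison; the two spellings agree on the result, only the speed differs: - - >>> a = np.arange(6).reshape(2, 3) - >>> np.all(np.minimum(a[0], a[1]) == np.amin(a, axis=0)) - True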
- - Examples - -------- - >>> a = np.arange(4).reshape((2,2)) - >>> a - array([[0, 1], - [2, 3]]) - >>> np.amin(a) # Minimum of the flattened array - 0 - >>> np.amin(a, axis=0) # Minima along the first axis - array([0, 1]) - >>> np.amin(a, axis=1) # Minima along the second axis - array([0, 2]) - - >>> b = np.arange(5, dtype=np.float) - >>> b[2] = np.NaN - >>> np.amin(b) - nan - >>> np.nanmin(b) - 0.0 - - """ - if type(a) is not mu.ndarray: - try: - amin = a.min - except AttributeError: - return _methods._amin(a, axis=axis, - out=out, keepdims=keepdims) - # NOTE: Dropping the keepdims parameter - return amin(axis=axis, out=out) - else: - return _methods._amin(a, axis=axis, - out=out, keepdims=keepdims) - -def alen(a): - """ - Return the length of the first dimension of the input array. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - alen : int - Length of the first dimension of `a`. - - See Also - -------- - shape, size - - Examples - -------- - >>> a = np.zeros((7,4,5)) - >>> a.shape[0] - 7 - >>> np.alen(a) - 7 - - """ - try: - return len(a) - except TypeError: - return len(array(a, ndmin=1)) - - -def prod(a, axis=None, dtype=None, out=None, keepdims=False): - """ - Return the product of array elements over a given axis. - - Parameters - ---------- - a : array_like - Input data. - axis : None or int or tuple of ints, optional - Axis or axes along which a product is performed. - The default (`axis` = `None`) is to perform a product over all - the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a product is performed on multiple - axes, instead of a single axis or all the axes as before. - dtype : data-type, optional - The data-type of the returned array, as well as of the accumulator - in which the elements are multiplied. By default, if `a` is of - integer type, `dtype` is the default platform integer. (Note: if - the type of `a` is unsigned, then so is `dtype`.) Otherwise, - the dtype is the same as that of `a`. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the - output values will be cast if necessary. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - product_along_axis : ndarray, see `dtype` parameter above. - An array shaped as `a` but with the specified axis removed. - Returns a reference to `out` if specified. - - See Also - -------- - ndarray.prod : equivalent method - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow.
That means that, on a 32-bit platform: - - >>> x = np.array([536870910, 536870910, 536870910, 536870910]) - >>> np.prod(x) #random - 16 - - Examples - -------- - By default, calculate the product of all elements: - - >>> np.prod([1.,2.]) - 2.0 - - Even when the input array is two-dimensional: - - >>> np.prod([[1.,2.],[3.,4.]]) - 24.0 - - But we can also specify the axis over which to multiply: - - >>> np.prod([[1.,2.],[3.,4.]], axis=1) - array([ 2., 12.]) - - If the type of `x` is unsigned, then the output type is - the unsigned platform integer: - - >>> x = np.array([1, 2, 3], dtype=np.uint8) - >>> np.prod(x).dtype == np.uint - True - - If `x` is of a signed integer type, then the output type - is the default platform integer: - - >>> x = np.array([1, 2, 3], dtype=np.int8) - >>> np.prod(x).dtype == np.int - True - - """ - if type(a) is not mu.ndarray: - try: - prod = a.prod - except AttributeError: - return _methods._prod(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - return prod(axis=axis, dtype=dtype, out=out) - else: - return _methods._prod(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def cumprod(a, axis=None, dtype=None, out=None): - """ - Return the cumulative product of elements along a given axis. - - Parameters - ---------- - a : array_like - Input array. - axis : int, optional - Axis along which the cumulative product is computed. By default - the input is flattened. - dtype : dtype, optional - Type of the returned array, as well as of the accumulator in which - the elements are multiplied. If *dtype* is not specified, it - defaults to the dtype of `a`, unless `a` has an integer dtype with - a precision less than that of the default platform integer. In - that case, the default platform integer is used instead. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type of the resulting values will be cast if necessary. - - Returns - ------- - cumprod : ndarray - A new array holding the result is returned unless `out` is - specified, in which case a reference to out is returned. - - See Also - -------- - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> a = np.array([1,2,3]) - >>> np.cumprod(a) # intermediate results 1, 1*2 - ... # total product 1*2*3 = 6 - array([1, 2, 6]) - >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output - array([ 1., 2., 6., 24., 120., 720.]) - - The cumulative product for each column (i.e., over the rows) of `a`: - - >>> np.cumprod(a, axis=0) - array([[ 1, 2, 3], - [ 4, 10, 18]]) - - The cumulative product for each row (i.e. over the columns) of `a`: - - >>> np.cumprod(a,axis=1) - array([[ 1, 2, 6], - [ 4, 20, 120]]) - - """ - try: - cumprod = a.cumprod - except AttributeError: - return _wrapit(a, 'cumprod', axis, dtype, out) - return cumprod(axis, dtype, out) - - -def ndim(a): - """ - Return the number of dimensions of an array. - - Parameters - ---------- - a : array_like - Input array. If it is not already an ndarray, a conversion is - attempted. - - Returns - ------- - number_of_dimensions : int - The number of dimensions in `a`. Scalars are zero-dimensional. 
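Most of the functions in this module share the dispatch idiom visible in `prod` and `cumprod` above: call the array's own method when it exists, otherwise fall back to `_wrapit`, which coerces the input to an ndarray and re-wraps the result. A simplified sketch of that fallback (the real `_wrapit` is defined earlier in this module and handles a few more corner cases):

    import numpy as np

    def _wrapit_sketch(obj, method, *args, **kwds):
        # Coerce the array-like to an ndarray and invoke the named method.
        result = getattr(np.asarray(obj), method)(*args, **kwds)
        # Let the original object re-wrap the result when it knows how,
        # so ndarray subclasses and array-likes keep their own type.
        wrap = getattr(obj, '__array_wrap__', None)
        return wrap(result) if wrap is not None else result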
- - See Also - -------- - ndarray.ndim : equivalent method - shape : dimensions of array - ndarray.shape : dimensions of array - - Examples - -------- - >>> np.ndim([[1,2,3],[4,5,6]]) - 2 - >>> np.ndim(np.array([[1,2,3],[4,5,6]])) - 2 - >>> np.ndim(1) - 0 - - """ - try: - return a.ndim - except AttributeError: - return asarray(a).ndim - - -def rank(a): - """ - Return the number of dimensions of an array. - - If `a` is not already an array, a conversion is attempted. - Scalars are zero dimensional. - - .. note:: - This function is deprecated in NumPy 1.9 to avoid confusion with - `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function - should be used instead. - - Parameters - ---------- - a : array_like - Array whose number of dimensions is desired. If `a` is not an array, - a conversion is attempted. - - Returns - ------- - number_of_dimensions : int - The number of dimensions in the array. - - See Also - -------- - ndim : equivalent function - ndarray.ndim : equivalent property - shape : dimensions of array - ndarray.shape : dimensions of array - - Notes - ----- - In the old Numeric package, `rank` was the term used for the number of - dimensions, but in Numpy `ndim` is used instead. - - Examples - -------- - >>> np.rank([1,2,3]) - 1 - >>> np.rank(np.array([[1,2,3],[4,5,6]])) - 2 - >>> np.rank(1) - 0 - - """ - warnings.warn( - "`rank` is deprecated; use the `ndim` attribute or function instead. " - "To find the rank of a matrix see `numpy.linalg.matrix_rank`.", - VisibleDeprecationWarning) - try: - return a.ndim - except AttributeError: - return asarray(a).ndim - - -def size(a, axis=None): - """ - Return the number of elements along a given axis. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which the elements are counted. By default, give - the total number of elements. - - Returns - ------- - element_count : int - Number of elements along the specified axis. - - See Also - -------- - shape : dimensions of array - ndarray.shape : dimensions of array - ndarray.size : number of elements in array - - Examples - -------- - >>> a = np.array([[1,2,3],[4,5,6]]) - >>> np.size(a) - 6 - >>> np.size(a,1) - 3 - >>> np.size(a,0) - 2 - - """ - if axis is None: - try: - return a.size - except AttributeError: - return asarray(a).size - else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] - - -def around(a, decimals=0, out=None): - """ - Evenly round to the given number of decimals. - - Parameters - ---------- - a : array_like - Input data. - decimals : int, optional - Number of decimal places to round to (default: 0). If - decimals is negative, it specifies the number of positions to - the left of the decimal point. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the output - values will be cast if necessary. See `doc.ufuncs` (Section - "Output arguments") for details. - - Returns - ------- - rounded_array : ndarray - An array of the same type as `a`, containing the rounded values. - Unless `out` was specified, a new array is created. A reference to - the result is returned. - - The real and imaginary parts of complex numbers are rounded - separately. The result of rounding a float is a float. 
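A compact sketch of those two guarantees (assuming a NumPy interpreter of this era; the values follow from round-half-to-even):

    import numpy as np

    # Real and imaginary parts are rounded independently, halves to even:
    z = np.around(0.5 + 1.5j)   # 0.5 -> 0.0 and 1.5 -> 2.0
    assert z == 2.0j
    # Rounding a float yields a float, never an integer:
    assert np.around(2.5) == 2.0 and np.around(2.5).dtype.kind == 'f'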
- - See Also - -------- - ndarray.round : equivalent method - - ceil, fix, floor, rint, trunc - - - Notes - ----- - For values exactly halfway between rounded decimal values, Numpy - rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, - -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due - to the inexact representation of decimal fractions in the IEEE - floating point standard [1]_ and errors introduced when scaling - by powers of ten. - - References - ---------- - .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, - http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF - .. [2] "How Futile are Mindless Assessments of - Roundoff in Floating-Point Computation?", William Kahan, - http://www.cs.berkeley.edu/~wkahan/Mindless.pdf - - Examples - -------- - >>> np.around([0.37, 1.64]) - array([ 0., 2.]) - >>> np.around([0.37, 1.64], decimals=1) - array([ 0.4, 1.6]) - >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value - array([ 0., 2., 2., 4., 4.]) - >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned - array([ 1, 2, 3, 11]) - >>> np.around([1,2,3,11], decimals=-1) - array([ 0, 0, 0, 10]) - - """ - try: - round = a.round - except AttributeError: - return _wrapit(a, 'round', decimals, out) - return round(decimals, out) - - -def round_(a, decimals=0, out=None): - """ - Round an array to the given number of decimals. - - Refer to `around` for full documentation. - - See Also - -------- - around : equivalent function - - """ - try: - round = a.round - except AttributeError: - return _wrapit(a, 'round', decimals, out) - return round(decimals, out) - - -def mean(a, axis=None, dtype=None, out=None, keepdims=False): - """ - Compute the arithmetic mean along the specified axis. - - Returns the average of the array elements. The average is taken over - the flattened array by default, otherwise over the specified axis. - `float64` intermediate and return values are used for integer inputs. - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the means are computed. The default is to compute - the mean of the flattened array. - dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default - is `float64`; for floating point inputs, it is the same as the - input dtype. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. - See `doc.ufuncs` for details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - m : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. - - See Also - -------- - average : Weighted average - std, var, nanmean, nanstd, nanvar - - Notes - ----- - The arithmetic mean is the sum of the elements along the axis divided - by the number of elements. - - Note that for floating-point input, the mean is computed using the - same precision the input has. Depending on the input data, this can - cause the results to be inaccurate, especially for `float32` (see - example below). 
Specifying a higher-precision accumulator using the - `dtype` keyword can alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.mean(a) - 2.5 - >>> np.mean(a, axis=0) - array([ 2., 3.]) - >>> np.mean(a, axis=1) - array([ 1.5, 3.5]) - - In single precision, `mean` can be inaccurate: - - >>> a = np.zeros((2, 512*512), dtype=np.float32) - >>> a[0, :] = 1.0 - >>> a[1, :] = 0.1 - >>> np.mean(a) - 0.546875 - - Computing the mean in float64 is more accurate: - - >>> np.mean(a, dtype=np.float64) - 0.55000000074505806 - - """ - if type(a) is not mu.ndarray: - try: - mean = a.mean - return mean(axis=axis, dtype=dtype, out=out) - except AttributeError: - pass - - return _methods._mean(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - -def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """ - Compute the standard deviation along the specified axis. - - Returns the standard deviation, a measure of the spread of a distribution, - of the array elements. The standard deviation is computed for the - flattened array by default, otherwise over the specified axis. - - Parameters - ---------- - a : array_like - Calculate the standard deviation of these values. - axis : int, optional - Axis along which the standard deviation is computed. The default is - to compute the standard deviation of the flattened array. - dtype : dtype, optional - Type to use in computing the standard deviation. For arrays of - integer type the default is float64, for arrays of float types it is - the same as the array type. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type (of the calculated - values) will be cast if necessary. - ddof : int, optional - Means Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of elements. - By default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - standard_deviation : ndarray, see dtype parameter above. - If `out` is None, return a new array containing the standard deviation, - otherwise return a reference to the output array. - - See Also - -------- - var, mean, nanmean, nanstd, nanvar - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - The standard deviation is the square root of the average of the squared - deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``. - - The average squared deviation is normally calculated as - ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, - the divisor ``N - ddof`` is used instead. In standard statistical - practice, ``ddof=1`` provides an unbiased estimator of the variance - of the infinite population. ``ddof=0`` provides a maximum likelihood - estimate of the variance for normally distributed variables. The - standard deviation computed in this function is the square root of - the estimated variance, so even with ``ddof=1``, it will not be an - unbiased estimate of the standard deviation per se. - - Note that, for complex numbers, `std` takes the absolute - value before squaring, so that the result is always real and nonnegative. - - For floating-point input, the *std* is computed using the same - precision the input has. 
Depending on the input data, this can cause - the results to be inaccurate, especially for float32 (see example below). - Specifying a higher-accuracy accumulator using the `dtype` keyword can - alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> np.std(a) - 1.1180339887498949 - >>> np.std(a, axis=0) - array([ 1., 1.]) - >>> np.std(a, axis=1) - array([ 0.5, 0.5]) - - In single precision, std() can be inaccurate: - - >>> a = np.zeros((2,512*512), dtype=np.float32) - >>> a[0,:] = 1.0 - >>> a[1,:] = 0.1 - >>> np.std(a) - 0.45172946707416706 - - Computing the standard deviation in float64 is more accurate: - - >>> np.std(a, dtype=np.float64) - 0.44999999925552653 - - """ - if type(a) is not mu.ndarray: - try: - std = a.std - return std(axis=axis, dtype=dtype, out=out, ddof=ddof) - except AttributeError: - pass - - return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - -def var(a, axis=None, dtype=None, out=None, ddof=0, - keepdims=False): - """ - Compute the variance along the specified axis. - - Returns the variance of the array elements, a measure of the spread of a - distribution. The variance is computed for the flattened array by - default, otherwise over the specified axis. - - Parameters - ---------- - a : array_like - Array containing numbers whose variance is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the variance is computed. The default is to compute - the variance of the flattened array. - dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float64`; for arrays of float types it is the same as - the array type. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output, but the type is cast if - necessary. - ddof : int, optional - "Delta Degrees of Freedom": the divisor used in the calculation is - ``N - ddof``, where ``N`` represents the number of elements. By - default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - variance : ndarray, see dtype parameter above - If ``out=None``, returns a new array containing the variance; - otherwise, a reference to the output array is returned. - - See Also - -------- - std, mean, nanmean, nanstd, nanvar - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - The variance is the average of the squared deviations from the mean, - i.e., ``var = mean(abs(x - x.mean())**2)``. - - The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. - If, however, `ddof` is specified, the divisor ``N - ddof`` is used - instead. In standard statistical practice, ``ddof=1`` provides an - unbiased estimator of the variance of a hypothetical infinite population. - ``ddof=0`` provides a maximum likelihood estimate of the variance for - normally distributed variables. - - Note that for complex numbers, the absolute value is taken before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the variance is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32` (see example - below).
Specifying a higher-accuracy accumulator using the ``dtype`` - keyword can alleviate this issue. - - Examples - -------- - >>> a = np.array([[1,2],[3,4]]) - >>> np.var(a) - 1.25 - >>> np.var(a, axis=0) - array([ 1., 1.]) - >>> np.var(a, axis=1) - array([ 0.25, 0.25]) - - In single precision, var() can be inaccurate: - - >>> a = np.zeros((2,512*512), dtype=np.float32) - >>> a[0,:] = 1.0 - >>> a[1,:] = 0.1 - >>> np.var(a) - 0.20405951142311096 - - Computing the variance in float64 is more accurate: - - >>> np.var(a, dtype=np.float64) - 0.20249999932997387 - >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 - 0.20250000000000001 - - """ - if type(a) is not mu.ndarray: - try: - var = a.var - return var(axis=axis, dtype=dtype, out=out, ddof=ddof) - except AttributeError: - pass - - return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/function_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/function_base.py deleted file mode 100644 index 0bf93390e062a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/function_base.py +++ /dev/null @@ -1,188 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__all__ = ['logspace', 'linspace'] - -from . import numeric as _nx -from .numeric import array, result_type - - -def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None): - """ - Return evenly spaced numbers over a specified interval. - - Returns `num` evenly spaced samples, calculated over the - interval [`start`, `stop` ]. - - The endpoint of the interval can optionally be excluded. - - Parameters - ---------- - start : scalar - The starting value of the sequence. - stop : scalar - The end value of the sequence, unless `endpoint` is set to False. - In that case, the sequence consists of all but the last of ``num + 1`` - evenly spaced samples, so that `stop` is excluded. Note that the step - size changes when `endpoint` is False. - num : int, optional - Number of samples to generate. Default is 50. - endpoint : bool, optional - If True, `stop` is the last sample. Otherwise, it is not included. - Default is True. - retstep : bool, optional - If True, return (`samples`, `step`), where `step` is the spacing - between samples. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - - .. versionadded:: 1.9.0 - - Returns - ------- - samples : ndarray - There are `num` equally spaced samples in the closed interval - ``[start, stop]`` or the half-open interval ``[start, stop)`` - (depending on whether `endpoint` is True or False). - step : float (only if `retstep` is True) - Size of spacing between samples. - - - See Also - -------- - arange : Similar to `linspace`, but uses a step size (instead of the - number of samples). - logspace : Samples uniformly distributed in log space. - - Examples - -------- - >>> np.linspace(2.0, 3.0, num=5) - array([ 2. , 2.25, 2.5 , 2.75, 3. ]) - >>> np.linspace(2.0, 3.0, num=5, endpoint=False) - array([ 2. , 2.2, 2.4, 2.6, 2.8]) - >>> np.linspace(2.0, 3.0, num=5, retstep=True) - (array([ 2. , 2.25, 2.5 , 2.75, 3. 
]), 0.25) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 8 - >>> y = np.zeros(N) - >>> x1 = np.linspace(0, 10, N, endpoint=True) - >>> x2 = np.linspace(0, 10, N, endpoint=False) - >>> plt.plot(x1, y, 'o') - [] - >>> plt.plot(x2, y + 0.5, 'o') - [] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - num = int(num) - - # Convert float/complex array scalars to float, gh-3504 - start = start * 1. - stop = stop * 1. - - if dtype is None: - dtype = result_type(start, stop, float(num)) - - if num <= 0: - return array([], dtype) - if endpoint: - if num == 1: - return array([start], dtype=dtype) - step = (stop-start)/float((num-1)) - y = _nx.arange(0, num, dtype=dtype) * step + start - y[-1] = stop - else: - step = (stop-start)/float(num) - y = _nx.arange(0, num, dtype=dtype) * step + start - if retstep: - return y.astype(dtype), step - else: - return y.astype(dtype) - - -def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None): - """ - Return numbers spaced evenly on a log scale. - - In linear space, the sequence starts at ``base ** start`` - (`base` to the power of `start`) and ends with ``base ** stop`` - (see `endpoint` below). - - Parameters - ---------- - start : float - ``base ** start`` is the starting value of the sequence. - stop : float - ``base ** stop`` is the final value of the sequence, unless `endpoint` - is False. In that case, ``num + 1`` values are spaced over the - interval in log-space, of which all but the last (a sequence of - length ``num``) are returned. - num : integer, optional - Number of samples to generate. Default is 50. - endpoint : boolean, optional - If true, `stop` is the last sample. Otherwise, it is not included. - Default is True. - base : float, optional - The base of the log space. The step size between the elements in - ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. - Default is 10.0. - dtype : dtype - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. - - Returns - ------- - samples : ndarray - `num` samples, equally spaced on a log scale. - - See Also - -------- - arange : Similar to linspace, with the step size specified instead of the - number of samples. Note that, when used with a float endpoint, the - endpoint may or may not be included. - linspace : Similar to logspace, but with the samples uniformly distributed - in linear space, instead of log space. - - Notes - ----- - Logspace is equivalent to the code - - >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) - ... # doctest: +SKIP - >>> power(base, y).astype(dtype) - ... # doctest: +SKIP - - Examples - -------- - >>> np.logspace(2.0, 3.0, num=4) - array([ 100. , 215.443469 , 464.15888336, 1000. ]) - >>> np.logspace(2.0, 3.0, num=4, endpoint=False) - array([ 100. , 177.827941 , 316.22776602, 562.34132519]) - >>> np.logspace(2.0, 3.0, num=4, base=2.0) - array([ 4. , 5.0396842 , 6.34960421, 8. 
]) - - Graphical illustration: - - >>> import matplotlib.pyplot as plt - >>> N = 10 - >>> x1 = np.logspace(0.1, 1, N, endpoint=True) - >>> x2 = np.logspace(0.1, 1, N, endpoint=False) - >>> y = np.zeros(N) - >>> plt.plot(x1, y, 'o') - [] - >>> plt.plot(x2, y + 0.5, 'o') - [] - >>> plt.ylim([-0.5, 1]) - (-0.5, 1) - >>> plt.show() - - """ - y = linspace(start, stop, num=num, endpoint=endpoint) - if dtype is None: - return _nx.power(base, y) - return _nx.power(base, y).astype(dtype) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/generate_numpy_api.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/generate_numpy_api.py deleted file mode 100644 index 415cbf7fcd00c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/generate_numpy_api.py +++ /dev/null @@ -1,259 +0,0 @@ -from __future__ import division, print_function - -import os -import genapi - -from genapi import \ - TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi - -import numpy_api - -# use annotated api when running under cpychecker -h_template = r""" -#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#else -NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#endif - -%s - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -%s - -#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int -_import_array(void) -{ - int st; - PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyArray_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); - return -1; - } - - /* Perform runtime check of C API version */ - if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "ABI version %%x but this version of numpy is %%x", \ - (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); - return -1; - } - if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "API version %%x but this version of numpy is %%x", \ - (int) 
NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); - return -1; - } - - /* - * Perform runtime check of endianness and check it matches the one set by - * the headers (npy_endian.h) as a safeguard - */ - st = PyArray_GetEndianness(); - if (st == NPY_CPU_UNKNOWN_ENDIAN) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); - return -1; - } -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN - if (st != NPY_CPU_BIG) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "big endian, but detected different endianness at runtime"); - return -1; - } -#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN - if (st != NPY_CPU_LITTLE) { - PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ - "little endian, but detected different endianness at runtime"); - return -1; - } -#endif - - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_ARRAY_RETVAL NULL -#else -#define NUMPY_IMPORT_ARRAY_RETVAL -#endif - -#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } - -#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } - -#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } - -#endif - -#endif -""" - - -c_template = r""" -/* These pointers will be stored in the C-object for use in other - extension modules -*/ - -void *PyArray_API[] = { -%s -}; -""" - -c_api_header = """ -=========== -Numpy C-API -=========== -""" - -def generate_api(output_dir, force=False): - basename = 'multiarray_api' - - h_file = os.path.join(output_dir, '__%s.h' % basename) - c_file = os.path.join(output_dir, '__%s.c' % basename) - d_file = os.path.join(output_dir, '%s.txt' % basename) - targets = (h_file, c_file, d_file) - - sources = numpy_api.multiarray_api - - if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])): - return targets - else: - do_generate_api(targets, sources) - - return targets - -def do_generate_api(targets, sources): - header_file = targets[0] - c_file = targets[1] - doc_file = targets[2] - - global_vars = sources[0] - scalar_bool_values = sources[1] - types_api = sources[2] - multiarray_funcs = sources[3] - - multiarray_api = sources[:] - - module_list = [] - extension_list = [] - init_list = [] - - # Check multiarray api indexes - multiarray_api_index = genapi.merge_api_dicts(multiarray_api) - genapi.check_api_dict(multiarray_api_index) - - numpyapi_list = genapi.get_api_functions('NUMPY_API', - multiarray_funcs) - ordered_funcs_api = genapi.order_dict(multiarray_funcs) - - # Create dict name -> *Api instance - api_name = 'PyArray_API' - multiarray_api_dict = {} - for f in numpyapi_list: - name = f.name - index = multiarray_funcs[name][0] - annotations = multiarray_funcs[name][1:] - multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations, - f.return_type, - f.args, api_name) - - for name, val in global_vars.items(): - index, type = val - multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name) - - for name, val in scalar_bool_values.items(): - index = val[0] - multiarray_api_dict[name] = BoolValuesApi(name, index, api_name) - - for name, val in types_api.items(): - index = val[0] - multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) - - if len(multiarray_api_dict) 
!= len(multiarray_api_index): - raise AssertionError("Multiarray API size mismatch %d %d" % - (len(multiarray_api_dict), len(multiarray_api_index))) - - extension_list = [] - for name, index in genapi.order_dict(multiarray_api_index): - api_item = multiarray_api_dict[name] - extension_list.append(api_item.define_from_array_api_string()) - init_list.append(api_item.array_api_define()) - module_list.append(api_item.internal_define()) - - # Write to header - fid = open(header_file, 'w') - s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) - fid.write(s) - fid.close() - - # Write to c-code - fid = open(c_file, 'w') - s = c_template % ',\n'.join(init_list) - fid.write(s) - fid.close() - - # write to documentation - fid = open(doc_file, 'w') - fid.write(c_api_header) - for func in numpyapi_list: - fid.write(func.to_ReST()) - fid.write('\n\n') - fid.close() - - return targets diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/getlimits.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/getlimits.py deleted file mode 100644 index 165ea68604933..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/getlimits.py +++ /dev/null @@ -1,306 +0,0 @@ -"""Machine limits for Float32 and Float64 and (long double) if available... - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['finfo', 'iinfo'] - -from .machar import MachAr -from . import numeric -from . import numerictypes as ntypes -from .numeric import array - -def _frz(a): - """fix rank-0 --> rank-1""" - if a.ndim == 0: a.shape = (1,) - return a - -_convert_to_float = { - ntypes.csingle: ntypes.single, - ntypes.complex_: ntypes.float_, - ntypes.clongfloat: ntypes.longfloat - } - -class finfo(object): - """ - finfo(dtype) - - Machine limits for floating point types. - - Attributes - ---------- - eps : float - The smallest representable positive number such that - ``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating - point type. - epsneg : floating point number of the appropriate type - The smallest representable positive number such that - ``1.0 - epsneg != 1.0``. - iexp : int - The number of bits in the exponent portion of the floating point - representation. - machar : MachAr - The object which calculated these parameters and holds more - detailed information. - machep : int - The exponent that yields `eps`. - max : floating point number of the appropriate type - The largest representable number. - maxexp : int - The smallest positive power of the base (2) that causes overflow. - min : floating point number of the appropriate type - The smallest representable number, typically ``-max``. - minexp : int - The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. - negep : int - The exponent that yields `epsneg`. - nexp : int - The number of bits in the exponent including its sign and bias. - nmant : int - The number of bits in the mantissa. - precision : int - The approximate number of decimal digits to which this kind of - float is precise. - resolution : floating point number of the appropriate type - The approximate decimal resolution of this type, i.e., - ``10**-precision``. - tiny : float - The smallest positive usable number. Type of `tiny` is an - appropriate floating point type. - - Parameters - ---------- - dtype : float, dtype, or instance - Kind of floating point data-type about which to get information. - - See Also - -------- - MachAr : The implementation of the tests that produce this information. 
- iinfo : The equivalent for integer data types. - - Notes - ----- - For developers of NumPy: do not instantiate this at the module level. - The initial calculation of these parameters is expensive and negatively - impacts import times. These objects are cached, so calling ``finfo()`` - repeatedly inside your functions is not a problem. - - """ - - _finfo_cache = {} - - def __new__(cls, dtype): - try: - dtype = numeric.dtype(dtype) - except TypeError: - # In case a float instance was given - dtype = numeric.dtype(type(dtype)) - - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - dtypes = [dtype] - newdtype = numeric.obj2sctype(dtype) - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - if not issubclass(dtype, numeric.inexact): - raise ValueError("data type %r not inexact" % (dtype)) - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - if not issubclass(dtype, numeric.floating): - newdtype = _convert_to_float[dtype] - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - return obj - obj = object.__new__(cls)._init(dtype) - for dt in dtypes: - cls._finfo_cache[dt] = obj - return obj - - def _init(self, dtype): - self.dtype = numeric.dtype(dtype) - if dtype is ntypes.double: - itype = ntypes.int64 - fmt = '%24.16e' - precname = 'double' - elif dtype is ntypes.single: - itype = ntypes.int32 - fmt = '%15.7e' - precname = 'single' - elif dtype is ntypes.longdouble: - itype = ntypes.longlong - fmt = '%s' - precname = 'long double' - elif dtype is ntypes.half: - itype = ntypes.int16 - fmt = '%12.5e' - precname = 'half' - else: - raise ValueError(repr(dtype)) - - machar = MachAr(lambda v:array([v], dtype), - lambda v:_frz(v.astype(itype))[0], - lambda v:array(_frz(v)[0], dtype), - lambda v: fmt % array(_frz(v)[0], dtype), - 'numpy %s precision floating point number' % precname) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['tiny', 'resolution', 'epsneg']: - setattr(self, word, getattr(machar, word).flat[0]) - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self.machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - return self - - def __str__(self): - return '''\ -Machine parameters for %(dtype)s ---------------------------------------------------------------------- -precision=%(precision)3s resolution= %(_str_resolution)s -machep=%(machep)6s eps= %(_str_eps)s -negep =%(negep)6s epsneg= %(_str_epsneg)s -minexp=%(minexp)6s tiny= %(_str_tiny)s -maxexp=%(maxexp)6s max= %(_str_max)s -nexp =%(nexp)6s min= -max ---------------------------------------------------------------------- -''' % self.__dict__ - - def __repr__(self): - c = self.__class__.__name__ - d = self.__dict__.copy() - d['klass'] = c - return ("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," \ - + " max=%(_str_max)s, dtype=%(dtype)s)") \ - % d - - -class iinfo(object): - """ - iinfo(type) - - Machine limits for integer types. - - Attributes - ---------- - min : int - The smallest integer expressible by the type. - max : int - The largest integer expressible by the type. 
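Together, `finfo` and `iinfo` expose the machine limits for floating-point and integer types; since `finfo` carries no worked example in this version, a brief illustrative one (the values are the standard IEEE single-precision limits):

    >>> np.finfo(np.float32).eps     # smallest x with 1.0 + x != 1.0
    1.1920929e-07
    >>> np.finfo(np.float32).max
    3.4028235e+38
    >>> np.finfo(np.float32).tiny    # smallest positive normalized value
    1.1754944e-38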
- - Parameters - ---------- - type : integer type, dtype, or instance - The kind of integer data type to get information about. - - See Also - -------- - finfo : The equivalent for floating point data types. - - Examples - -------- - With types: - - >>> ii16 = np.iinfo(np.int16) - >>> ii16.min - -32768 - >>> ii16.max - 32767 - >>> ii32 = np.iinfo(np.int32) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - With instances: - - >>> ii32 = np.iinfo(np.int32(10)) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - """ - - _min_vals = {} - _max_vals = {} - - def __init__(self, int_type): - try: - self.dtype = numeric.dtype(int_type) - except TypeError: - self.dtype = numeric.dtype(type(int_type)) - self.kind = self.dtype.kind - self.bits = self.dtype.itemsize * 8 - self.key = "%s%d" % (self.kind, self.bits) - if not self.kind in 'iu': - raise ValueError("Invalid integer data type.") - - def min(self): - """Minimum value of given dtype.""" - if self.kind == 'u': - return 0 - else: - try: - val = iinfo._min_vals[self.key] - except KeyError: - val = int(-(1 << (self.bits-1))) - iinfo._min_vals[self.key] = val - return val - - min = property(min) - - def max(self): - """Maximum value of given dtype.""" - try: - val = iinfo._max_vals[self.key] - except KeyError: - if self.kind == 'u': - val = int((1 << self.bits) - 1) - else: - val = int((1 << (self.bits-1)) - 1) - iinfo._max_vals[self.key] = val - return val - - max = property(max) - - def __str__(self): - """String representation.""" - return '''\ -Machine parameters for %(dtype)s ---------------------------------------------------------------------- -min = %(min)s -max = %(max)s ---------------------------------------------------------------------- -''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max} - - def __repr__(self): - return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, - self.min, self.max, self.dtype) - -if __name__ == '__main__': - f = finfo(ntypes.single) - print('single epsilon:', f.eps) - print('single tiny:', f.tiny) - f = finfo(ntypes.float) - print('float epsilon:', f.eps) - print('float tiny:', f.tiny) - f = finfo(ntypes.longfloat) - print('longfloat epsilon:', f.eps) - print('longfloat tiny:', f.tiny) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__multiarray_api.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__multiarray_api.h deleted file mode 100644 index b95762c4a43cb..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,1721 +0,0 @@ - -#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#else -NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#endif - -NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ - (void); -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type; -#else - NPY_NO_EXPORT PyTypeObject PyBigArray_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyArray_Type; -#else - NPY_NO_EXPORT PyTypeObject PyArray_Type; 
-#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type; -#else - NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; -#else - NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; -#else - NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; -#else - NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; -#else - NPY_NO_EXPORT int NPY_NUMUSERTYPES; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#else -NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; -#endif - -#ifdef 
NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; -#endif - -NPY_NO_EXPORT int PyArray_SetNumericOps \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \ - (void); -NPY_NO_EXPORT int PyArray_INCREF \ - (PyArrayObject *); -NPY_NO_EXPORT int PyArray_XDECREF \ - (PyArrayObject *); -NPY_NO_EXPORT void PyArray_SetStringFunction \ - (PyObject *, int); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ - (int); -NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ - (int); -NPY_NO_EXPORT char * PyArray_Zero \ - (PyArrayObject *); -NPY_NO_EXPORT char * PyArray_One \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_CastToType \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT int PyArray_CastTo \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CastAnyTo \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CanCastSafely \ - (int, int); 
-NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_ObjectType \ - (PyObject *, int); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ - (PyObject *, PyArray_Descr *); -NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ - (PyObject *, int *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ - (PyObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ - (PyObject *); -NPY_NO_EXPORT npy_intp PyArray_Size \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Scalar \ - (void *, PyArray_Descr *, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ - (PyObject *, PyArray_Descr *); -NPY_NO_EXPORT void PyArray_ScalarAsCtype \ - (PyObject *, void *); -NPY_NO_EXPORT int PyArray_CastScalarToCtype \ - (PyObject *, void *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_CastScalarDirect \ - (PyObject *, PyArray_Descr *, void *, int); -NPY_NO_EXPORT PyObject * PyArray_ScalarFromObject \ - (PyObject *); -NPY_NO_EXPORT PyArray_VectorUnaryFunc * PyArray_GetCastFunc \ - (PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_FromDims \ - (int, int *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \ - (int, int *, PyArray_Descr *, char *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ - (PyObject *, PyArray_Descr *, int, int, int, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ - (PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromFile \ - (FILE *, PyArray_Descr *, npy_intp, char *); -NPY_NO_EXPORT PyObject * PyArray_FromString \ - (char *, npy_intp, PyArray_Descr *, npy_intp, char *); -NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ - (PyObject *, PyArray_Descr *, npy_intp, npy_intp); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ - (PyObject *, PyArray_Descr *, npy_intp); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_GetField \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) int PyArray_SetField \ - (PyArrayObject *, PyArray_Descr *, int, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Byteswap \ - (PyArrayObject *, npy_bool); -NPY_NO_EXPORT PyObject * PyArray_Resize \ - (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER); -NPY_NO_EXPORT int PyArray_MoveInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyAnyInto \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT int PyArray_CopyObject \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_NewCopy \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT PyObject * PyArray_ToList \ - (PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_ToString \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT int PyArray_ToFile \ - (PyArrayObject *, FILE *, char *, char *); -NPY_NO_EXPORT int PyArray_Dump \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Dumps \ - (PyObject *, int); -NPY_NO_EXPORT int PyArray_ValidType \ - (int); -NPY_NO_EXPORT void PyArray_UpdateFlags \ - (PyArrayObject *, int); -NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_New \ - (PyTypeObject *, int, npy_intp *, int, npy_intp *, void *, int, 
int, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(1) NPY_GCC_NONNULL(2) PyObject * PyArray_NewFromDescr \ - (PyTypeObject *, PyArray_Descr *, int, npy_intp *, npy_intp *, void *, int, PyObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ - (PyArray_Descr *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ - (int); -NPY_NO_EXPORT double PyArray_GetPriority \ - (PyObject *, double); -NPY_NO_EXPORT PyObject * PyArray_IterNew \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_MultiIterNew \ - (int, ...); -NPY_NO_EXPORT int PyArray_PyIntAsInt \ - (PyObject *); -NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ - (PyObject *); -NPY_NO_EXPORT int PyArray_Broadcast \ - (PyArrayMultiIterObject *); -NPY_NO_EXPORT void PyArray_FillObjectArray \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT int PyArray_FillWithScalar \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ - (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *); -NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ - (PyArray_Descr *, char); -NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ - (PyObject *, int *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ - (PyObject *, PyArray_Descr *, int, int, int, PyObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ - (PyArrayObject *, PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_FromInterface \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ - (PyObject *, PyArray_Descr *, PyObject *); -NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ - (int, PyArrayObject **); -NPY_NO_EXPORT int PyArray_CanCoerceScalar \ - (int, int, NPY_SCALARKIND); -NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \ - (PyObject *); -NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ - (PyTypeObject *, PyTypeObject *); -NPY_NO_EXPORT int PyArray_CompareUCS4 \ - (npy_ucs4 *, npy_ucs4 *, size_t); -NPY_NO_EXPORT int PyArray_RemoveSmallest \ - (PyArrayMultiIterObject *); -NPY_NO_EXPORT int PyArray_ElementStrides \ - (PyObject *); -NPY_NO_EXPORT void PyArray_Item_INCREF \ - (char *, PyArray_Descr *); -NPY_NO_EXPORT void PyArray_Item_XDECREF \ - (char *, PyArray_Descr *); -NPY_NO_EXPORT PyObject * PyArray_FieldNames \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Transpose \ - (PyArrayObject *, PyArray_Dims *); -NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ - (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); -NPY_NO_EXPORT PyObject * PyArray_PutTo \ - (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); -NPY_NO_EXPORT PyObject * PyArray_PutMask \ - (PyArrayObject *, PyObject*, PyObject*); -NPY_NO_EXPORT PyObject * PyArray_Repeat \ - (PyArrayObject *, PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Choose \ - (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); -NPY_NO_EXPORT int PyArray_Sort \ - (PyArrayObject *, int, NPY_SORTKIND); -NPY_NO_EXPORT PyObject * PyArray_ArgSort \ - (PyArrayObject *, int, NPY_SORTKIND); -NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ - (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_ArgMax \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_ArgMin \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Reshape \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Newshape \ - (PyArrayObject *, PyArray_Dims *, NPY_ORDER); 
-NPY_NO_EXPORT PyObject * PyArray_Squeeze \ - (PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ - (PyArrayObject *, PyArray_Descr *, PyTypeObject *); -NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ - (PyArrayObject *, int, int); -NPY_NO_EXPORT PyObject * PyArray_Max \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Min \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Ptp \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Mean \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Trace \ - (PyArrayObject *, int, int, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Diagonal \ - (PyArrayObject *, int, int, int); -NPY_NO_EXPORT PyObject * PyArray_Clip \ - (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Conjugate \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Nonzero \ - (PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Std \ - (PyArrayObject *, int, int, PyArrayObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Sum \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_CumSum \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Prod \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_CumProd \ - (PyArrayObject *, int, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_All \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Any \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Compress \ - (PyArrayObject *, PyObject *, int, PyArrayObject *); -NPY_NO_EXPORT PyObject * PyArray_Flatten \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT PyObject * PyArray_Ravel \ - (PyArrayObject *, NPY_ORDER); -NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ - (npy_intp *, int); -NPY_NO_EXPORT int PyArray_MultiplyIntList \ - (int *, int); -NPY_NO_EXPORT void * PyArray_GetPtr \ - (PyArrayObject *, npy_intp*); -NPY_NO_EXPORT int PyArray_CompareLists \ - (npy_intp *, npy_intp *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ - (PyObject **, void *, npy_intp *, int, PyArray_Descr*); -NPY_NO_EXPORT int PyArray_As1D \ - (PyObject **, char **, int *, int); -NPY_NO_EXPORT int PyArray_As2D \ - (PyObject **, char ***, int *, int *, int); -NPY_NO_EXPORT int PyArray_Free \ - (PyObject *, void *); -NPY_NO_EXPORT int PyArray_Converter \ - (PyObject *, PyObject **); -NPY_NO_EXPORT int PyArray_IntpFromSequence \ - (PyObject *, npy_intp *, int); -NPY_NO_EXPORT PyObject * PyArray_Concatenate \ - (PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ - (PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ - (PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \ - (PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Correlate \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT int PyArray_TypestrConvert \ - (int, int); -NPY_NO_EXPORT int PyArray_DescrConverter \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_DescrConverter2 \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_IntpConverter \ - (PyObject *, PyArray_Dims *); -NPY_NO_EXPORT int PyArray_BufferConverter \ - (PyObject *, PyArray_Chunk *); -NPY_NO_EXPORT int PyArray_AxisConverter \ - (PyObject *, int *); -NPY_NO_EXPORT int PyArray_BoolConverter \ - (PyObject *, npy_bool *); -NPY_NO_EXPORT int 
PyArray_ByteorderConverter \ - (PyObject *, char *); -NPY_NO_EXPORT int PyArray_OrderConverter \ - (PyObject *, NPY_ORDER *); -NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ - (int, npy_intp *, PyArray_Descr *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ - (int, npy_intp *, PyArray_Descr *, int); -NPY_NO_EXPORT PyObject * PyArray_Where \ - (PyObject *, PyObject *, PyObject *); -NPY_NO_EXPORT PyObject * PyArray_Arange \ - (double, double, double, int); -NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ - (PyObject *, PyObject *, PyObject *, PyArray_Descr *); -NPY_NO_EXPORT int PyArray_SortkindConverter \ - (PyObject *, NPY_SORTKIND *); -NPY_NO_EXPORT PyObject * PyArray_LexSort \ - (PyObject *, int); -NPY_NO_EXPORT PyObject * PyArray_Round \ - (PyArrayObject *, int, PyArrayObject *); -NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ - (int, int); -NPY_NO_EXPORT int PyArray_RegisterDataType \ - (PyArray_Descr *); -NPY_NO_EXPORT int PyArray_RegisterCastFunc \ - (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); -NPY_NO_EXPORT int PyArray_RegisterCanCast \ - (PyArray_Descr *, int, NPY_SCALARKIND); -NPY_NO_EXPORT void PyArray_InitArrFuncs \ - (PyArray_ArrFuncs *); -NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ - (int, npy_intp *); -NPY_NO_EXPORT int PyArray_TypeNumFromName \ - (char *); -NPY_NO_EXPORT int PyArray_ClipmodeConverter \ - (PyObject *, NPY_CLIPMODE *); -NPY_NO_EXPORT int PyArray_OutputConverter \ - (PyObject *, PyArrayObject **); -NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ - (PyObject *, npy_intp *, int); -NPY_NO_EXPORT void _PyArray_SigintHandler \ - (int); -NPY_NO_EXPORT void* _PyArray_GetSigintBuf \ - (void); -NPY_NO_EXPORT int PyArray_DescrAlignConverter \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ - (PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyArray_SearchsideConverter \ - (PyObject *, void *); -NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ - (PyArrayObject *, int *, int); -NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ - (npy_intp *, int); -NPY_NO_EXPORT int PyArray_CompareString \ - (char *, char *, size_t); -NPY_NO_EXPORT PyObject * PyArray_MultiIterFromObjects \ - (PyObject **, int, int, ...); -NPY_NO_EXPORT int PyArray_GetEndianness \ - (void); -NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ - (void); -NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ - (PyObject *, PyObject *, int); -NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ - (PyArrayIterObject *, npy_intp *, int, PyArrayObject*); -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; -#else - NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; -#else - NPY_NO_EXPORT PyTypeObject NpyIter_Type; -#endif - -NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \ 
- (PyObject *); -NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \ - (npy_datetime, NPY_DATETIMEUNIT, npy_datetimestruct *); -NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \ - (npy_timedelta, NPY_DATETIMEUNIT, npy_timedeltastruct *); -NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \ - (NPY_DATETIMEUNIT, npy_datetimestruct *); -NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \ - (NPY_DATETIMEUNIT, npy_timedeltastruct *); -NPY_NO_EXPORT NpyIter * NpyIter_New \ - (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); -NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ - (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); -NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ - (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); -NPY_NO_EXPORT NpyIter * NpyIter_Copy \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_Deallocate \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_Reset \ - (NpyIter *, char **); -NPY_NO_EXPORT int NpyIter_ResetBasePointers \ - (NpyIter *, char **, char **); -NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ - (NpyIter *, npy_intp, npy_intp, char **); -NPY_NO_EXPORT int NpyIter_GetNDim \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GetNOp \ - (NpyIter *); -NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ - (NpyIter *, char **); -NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ - (NpyIter *); -NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ - (NpyIter *, npy_intp *, npy_intp *); -NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GotoIterIndex \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GetShape \ - (NpyIter *, npy_intp *); -NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ - (NpyIter *, char **); -NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ - (NpyIter *, npy_intp *); -NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ - (NpyIter *); -NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ - (NpyIter *); -NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_GotoIndex \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ - (NpyIter *); -NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ - (NpyIter *); -NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ - (NpyIter *); -NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ - (NpyIter *, npy_intp); -NPY_NO_EXPORT void NpyIter_GetReadFlags \ - (NpyIter *, char *); -NPY_NO_EXPORT void NpyIter_GetWriteFlags \ - (NpyIter *, char *); -NPY_NO_EXPORT void NpyIter_DebugPrint \ - (NpyIter *); -NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ - (NpyIter *); -NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ - (NpyIter *, npy_intp *); -NPY_NO_EXPORT int NpyIter_RemoveAxis \ - (NpyIter *, int); -NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ - (NpyIter *, int); -NPY_NO_EXPORT npy_bool 
NpyIter_RequiresBuffering \ - (NpyIter *); -NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ - (NpyIter *); -NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ - (NpyIter *, npy_intp, npy_intp *); -NPY_NO_EXPORT int PyArray_CastingConverter \ - (PyObject *, NPY_CASTING *); -NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ - (PyArrayObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ - (PyArray_Descr *, PyArray_Descr *); -NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ - (PyArrayObject *); -NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ - (npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **); -NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ - (PyArrayObject *, PyArray_Descr *, NPY_CASTING); -NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ - (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); -NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ - (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) NPY_GCC_NONNULL(1) PyObject * PyArray_NewLikeArray \ - (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); -NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \ - (PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *); -NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ - (PyObject *, NPY_CLIPMODE *, int); -NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ - (PyObject *, PyObject *, PyArrayObject*); -NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ - (NpyIter *, int); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ - (int, npy_intp *, npy_stride_sort_item *); -NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ - (PyArrayObject *, npy_bool *); -NPY_NO_EXPORT void PyArray_DebugPrint \ - (PyArrayObject *); -NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ - (PyArrayObject *, const char *); -NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ - (PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT void * PyDataMem_NEW \ - (size_t); -NPY_NO_EXPORT void PyDataMem_FREE \ - (void *); -NPY_NO_EXPORT void * PyDataMem_RENEW \ - (void *, size_t); -NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \ - (PyDataMem_EventHookFunc *, void *, void **); -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; -#else - NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; -#endif - -NPY_NO_EXPORT void PyArray_MapIterSwapAxes \ - (PyArrayMapIterObject *, PyArrayObject **, int); -NPY_NO_EXPORT PyObject * PyArray_MapIterArray \ - (PyArrayObject *, PyObject *); -NPY_NO_EXPORT void PyArray_MapIterNext \ - (PyArrayMapIterObject *); -NPY_NO_EXPORT int PyArray_Partition \ - (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); -NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ - (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); -NPY_NO_EXPORT int PyArray_SelectkindConverter \ - (PyObject *, NPY_SELECTKIND *); -NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ - (size_t, size_t); - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -#define PyArray_GetNDArrayCVersion \ - (*(unsigned int (*)(void)) \ - PyArray_API[0]) -#define PyBigArray_Type 
(*(PyTypeObject *)PyArray_API[1]) -#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) -#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) -#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) -#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) -#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) -#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) -#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) -#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) -#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) -#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) -#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) -#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) -#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) -#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) -#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) -#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) -#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) -#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) -#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) -#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) -#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) -#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) -#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) -#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) -#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) -#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) -#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) -#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) -#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) -#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) -#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) -#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) -#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) -#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) -#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) -#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) -#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) -#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) -#define PyArray_SetNumericOps \ - (*(int (*)(PyObject *)) \ - PyArray_API[40]) -#define PyArray_GetNumericOps \ - (*(PyObject * (*)(void)) \ - PyArray_API[41]) -#define PyArray_INCREF \ - (*(int (*)(PyArrayObject *)) \ - PyArray_API[42]) -#define PyArray_XDECREF \ - (*(int (*)(PyArrayObject *)) \ - PyArray_API[43]) -#define PyArray_SetStringFunction \ - (*(void (*)(PyObject *, int)) \ - PyArray_API[44]) -#define PyArray_DescrFromType \ - (*(PyArray_Descr * (*)(int)) \ - PyArray_API[45]) -#define PyArray_TypeObjectFromType \ - (*(PyObject * (*)(int)) \ - PyArray_API[46]) -#define PyArray_Zero \ - (*(char * (*)(PyArrayObject *)) \ - PyArray_API[47]) -#define PyArray_One \ - (*(char * (*)(PyArrayObject *)) \ - PyArray_API[48]) -#define PyArray_CastToType \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[49]) -#define PyArray_CastTo \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[50]) -#define PyArray_CastAnyTo \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[51]) 
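
/*
 * Editor's note -- the defines above are the consumer side of the
 * generated header: when _MULTIARRAYMODULE is not defined, every API
 * symbol becomes a macro that casts one slot of the PyArray_API table
 * (a void** recovered from the _ARRAY_API capsule at import time) to
 * the right function-pointer type. A stripped-down sketch of the same
 * pattern follows, with hypothetical mylib_* names, not NumPy's:
 */
#include <stddef.h>

/* Provider side: the real function and the exported slot table.
 * (Casting a function pointer to void* is what NumPy itself does here,
 * though it is only guaranteed by POSIX, not strict ISO C.) */
static int add(int a, int b) { return a + b; }
void *MyLib_API[] = { (void *)add };

/* Consumer side: the macro hides both the cast and the slot index. */
#define mylib_add \
    (*(int (*)(int, int))MyLib_API[0])

int use_it(void) {
    return mylib_add(2, 3);   /* calls through slot 0 of the table */
}

/*
 * The indirection keeps the ABI stable: consumers compile against slot
 * indices and signatures only; the addresses are filled in at runtime
 * when the capsule is imported.
 */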
-#define PyArray_CanCastSafely \ - (*(int (*)(int, int)) \ - PyArray_API[52]) -#define PyArray_CanCastTo \ - (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[53]) -#define PyArray_ObjectType \ - (*(int (*)(PyObject *, int)) \ - PyArray_API[54]) -#define PyArray_DescrFromObject \ - (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ - PyArray_API[55]) -#define PyArray_ConvertToCommonType \ - (*(PyArrayObject ** (*)(PyObject *, int *)) \ - PyArray_API[56]) -#define PyArray_DescrFromScalar \ - (*(PyArray_Descr * (*)(PyObject *)) \ - PyArray_API[57]) -#define PyArray_DescrFromTypeObject \ - (*(PyArray_Descr * (*)(PyObject *)) \ - PyArray_API[58]) -#define PyArray_Size \ - (*(npy_intp (*)(PyObject *)) \ - PyArray_API[59]) -#define PyArray_Scalar \ - (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ - PyArray_API[60]) -#define PyArray_FromScalar \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ - PyArray_API[61]) -#define PyArray_ScalarAsCtype \ - (*(void (*)(PyObject *, void *)) \ - PyArray_API[62]) -#define PyArray_CastScalarToCtype \ - (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ - PyArray_API[63]) -#define PyArray_CastScalarDirect \ - (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ - PyArray_API[64]) -#define PyArray_ScalarFromObject \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[65]) -#define PyArray_GetCastFunc \ - (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \ - PyArray_API[66]) -#define PyArray_FromDims \ - (*(PyObject * (*)(int, int *, int)) \ - PyArray_API[67]) -#define PyArray_FromDimsAndDataAndDescr \ - (*(PyObject * (*)(int, int *, PyArray_Descr *, char *)) \ - PyArray_API[68]) -#define PyArray_FromAny \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ - PyArray_API[69]) -#define PyArray_EnsureArray \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[70]) -#define PyArray_EnsureAnyArray \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[71]) -#define PyArray_FromFile \ - (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ - PyArray_API[72]) -#define PyArray_FromString \ - (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ - PyArray_API[73]) -#define PyArray_FromBuffer \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ - PyArray_API[74]) -#define PyArray_FromIter \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ - PyArray_API[75]) -#define PyArray_Return \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[76]) -#define PyArray_GetField \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[77]) -#define PyArray_SetField \ - (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ - PyArray_API[78]) -#define PyArray_Byteswap \ - (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ - PyArray_API[79]) -#define PyArray_Resize \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER)) \ - PyArray_API[80]) -#define PyArray_MoveInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[81]) -#define PyArray_CopyInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[82]) -#define PyArray_CopyAnyInto \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[83]) -#define PyArray_CopyObject \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[84]) -#define PyArray_NewCopy \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[85]) -#define PyArray_ToList \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[86]) -#define 
PyArray_ToString \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[87]) -#define PyArray_ToFile \ - (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ - PyArray_API[88]) -#define PyArray_Dump \ - (*(int (*)(PyObject *, PyObject *, int)) \ - PyArray_API[89]) -#define PyArray_Dumps \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[90]) -#define PyArray_ValidType \ - (*(int (*)(int)) \ - PyArray_API[91]) -#define PyArray_UpdateFlags \ - (*(void (*)(PyArrayObject *, int)) \ - PyArray_API[92]) -#define PyArray_New \ - (*(PyObject * (*)(PyTypeObject *, int, npy_intp *, int, npy_intp *, void *, int, int, PyObject *)) \ - PyArray_API[93]) -#define PyArray_NewFromDescr \ - (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp *, npy_intp *, void *, int, PyObject *)) \ - PyArray_API[94]) -#define PyArray_DescrNew \ - (*(PyArray_Descr * (*)(PyArray_Descr *)) \ - PyArray_API[95]) -#define PyArray_DescrNewFromType \ - (*(PyArray_Descr * (*)(int)) \ - PyArray_API[96]) -#define PyArray_GetPriority \ - (*(double (*)(PyObject *, double)) \ - PyArray_API[97]) -#define PyArray_IterNew \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[98]) -#define PyArray_MultiIterNew \ - (*(PyObject * (*)(int, ...)) \ - PyArray_API[99]) -#define PyArray_PyIntAsInt \ - (*(int (*)(PyObject *)) \ - PyArray_API[100]) -#define PyArray_PyIntAsIntp \ - (*(npy_intp (*)(PyObject *)) \ - PyArray_API[101]) -#define PyArray_Broadcast \ - (*(int (*)(PyArrayMultiIterObject *)) \ - PyArray_API[102]) -#define PyArray_FillObjectArray \ - (*(void (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[103]) -#define PyArray_FillWithScalar \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[104]) -#define PyArray_CheckStrides \ - (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)) \ - PyArray_API[105]) -#define PyArray_DescrNewByteorder \ - (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ - PyArray_API[106]) -#define PyArray_IterAllButAxis \ - (*(PyObject * (*)(PyObject *, int *)) \ - PyArray_API[107]) -#define PyArray_CheckFromAny \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ - PyArray_API[108]) -#define PyArray_FromArray \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ - PyArray_API[109]) -#define PyArray_FromInterface \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[110]) -#define PyArray_FromStructInterface \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[111]) -#define PyArray_FromArrayAttr \ - (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ - PyArray_API[112]) -#define PyArray_ScalarKind \ - (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ - PyArray_API[113]) -#define PyArray_CanCoerceScalar \ - (*(int (*)(int, int, NPY_SCALARKIND)) \ - PyArray_API[114]) -#define PyArray_NewFlagsObject \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[115]) -#define PyArray_CanCastScalar \ - (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ - PyArray_API[116]) -#define PyArray_CompareUCS4 \ - (*(int (*)(npy_ucs4 *, npy_ucs4 *, size_t)) \ - PyArray_API[117]) -#define PyArray_RemoveSmallest \ - (*(int (*)(PyArrayMultiIterObject *)) \ - PyArray_API[118]) -#define PyArray_ElementStrides \ - (*(int (*)(PyObject *)) \ - PyArray_API[119]) -#define PyArray_Item_INCREF \ - (*(void (*)(char *, PyArray_Descr *)) \ - PyArray_API[120]) -#define PyArray_Item_XDECREF \ - (*(void (*)(char *, PyArray_Descr *)) \ - PyArray_API[121]) -#define PyArray_FieldNames \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[122]) -#define PyArray_Transpose 
\ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ - PyArray_API[123]) -#define PyArray_TakeFrom \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ - PyArray_API[124]) -#define PyArray_PutTo \ - (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ - PyArray_API[125]) -#define PyArray_PutMask \ - (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ - PyArray_API[126]) -#define PyArray_Repeat \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ - PyArray_API[127]) -#define PyArray_Choose \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ - PyArray_API[128]) -#define PyArray_Sort \ - (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ - PyArray_API[129]) -#define PyArray_ArgSort \ - (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ - PyArray_API[130]) -#define PyArray_SearchSorted \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ - PyArray_API[131]) -#define PyArray_ArgMax \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[132]) -#define PyArray_ArgMin \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[133]) -#define PyArray_Reshape \ - (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[134]) -#define PyArray_Newshape \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ - PyArray_API[135]) -#define PyArray_Squeeze \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[136]) -#define PyArray_View \ - (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ - PyArray_API[137]) -#define PyArray_SwapAxes \ - (*(PyObject * (*)(PyArrayObject *, int, int)) \ - PyArray_API[138]) -#define PyArray_Max \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[139]) -#define PyArray_Min \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[140]) -#define PyArray_Ptp \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[141]) -#define PyArray_Mean \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[142]) -#define PyArray_Trace \ - (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ - PyArray_API[143]) -#define PyArray_Diagonal \ - (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ - PyArray_API[144]) -#define PyArray_Clip \ - (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ - PyArray_API[145]) -#define PyArray_Conjugate \ - (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[146]) -#define PyArray_Nonzero \ - (*(PyObject * (*)(PyArrayObject *)) \ - PyArray_API[147]) -#define PyArray_Std \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ - PyArray_API[148]) -#define PyArray_Sum \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[149]) -#define PyArray_CumSum \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[150]) -#define PyArray_Prod \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[151]) -#define PyArray_CumProd \ - (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ - PyArray_API[152]) -#define PyArray_All \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[153]) -#define PyArray_Any \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[154]) -#define PyArray_Compress \ - (*(PyObject * (*)(PyArrayObject *, 
PyObject *, int, PyArrayObject *)) \ - PyArray_API[155]) -#define PyArray_Flatten \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[156]) -#define PyArray_Ravel \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ - PyArray_API[157]) -#define PyArray_MultiplyList \ - (*(npy_intp (*)(npy_intp *, int)) \ - PyArray_API[158]) -#define PyArray_MultiplyIntList \ - (*(int (*)(int *, int)) \ - PyArray_API[159]) -#define PyArray_GetPtr \ - (*(void * (*)(PyArrayObject *, npy_intp*)) \ - PyArray_API[160]) -#define PyArray_CompareLists \ - (*(int (*)(npy_intp *, npy_intp *, int)) \ - PyArray_API[161]) -#define PyArray_AsCArray \ - (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ - PyArray_API[162]) -#define PyArray_As1D \ - (*(int (*)(PyObject **, char **, int *, int)) \ - PyArray_API[163]) -#define PyArray_As2D \ - (*(int (*)(PyObject **, char ***, int *, int *, int)) \ - PyArray_API[164]) -#define PyArray_Free \ - (*(int (*)(PyObject *, void *)) \ - PyArray_API[165]) -#define PyArray_Converter \ - (*(int (*)(PyObject *, PyObject **)) \ - PyArray_API[166]) -#define PyArray_IntpFromSequence \ - (*(int (*)(PyObject *, npy_intp *, int)) \ - PyArray_API[167]) -#define PyArray_Concatenate \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[168]) -#define PyArray_InnerProduct \ - (*(PyObject * (*)(PyObject *, PyObject *)) \ - PyArray_API[169]) -#define PyArray_MatrixProduct \ - (*(PyObject * (*)(PyObject *, PyObject *)) \ - PyArray_API[170]) -#define PyArray_CopyAndTranspose \ - (*(PyObject * (*)(PyObject *)) \ - PyArray_API[171]) -#define PyArray_Correlate \ - (*(PyObject * (*)(PyObject *, PyObject *, int)) \ - PyArray_API[172]) -#define PyArray_TypestrConvert \ - (*(int (*)(int, int)) \ - PyArray_API[173]) -#define PyArray_DescrConverter \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[174]) -#define PyArray_DescrConverter2 \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[175]) -#define PyArray_IntpConverter \ - (*(int (*)(PyObject *, PyArray_Dims *)) \ - PyArray_API[176]) -#define PyArray_BufferConverter \ - (*(int (*)(PyObject *, PyArray_Chunk *)) \ - PyArray_API[177]) -#define PyArray_AxisConverter \ - (*(int (*)(PyObject *, int *)) \ - PyArray_API[178]) -#define PyArray_BoolConverter \ - (*(int (*)(PyObject *, npy_bool *)) \ - PyArray_API[179]) -#define PyArray_ByteorderConverter \ - (*(int (*)(PyObject *, char *)) \ - PyArray_API[180]) -#define PyArray_OrderConverter \ - (*(int (*)(PyObject *, NPY_ORDER *)) \ - PyArray_API[181]) -#define PyArray_EquivTypes \ - (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[182]) -#define PyArray_Zeros \ - (*(PyObject * (*)(int, npy_intp *, PyArray_Descr *, int)) \ - PyArray_API[183]) -#define PyArray_Empty \ - (*(PyObject * (*)(int, npy_intp *, PyArray_Descr *, int)) \ - PyArray_API[184]) -#define PyArray_Where \ - (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ - PyArray_API[185]) -#define PyArray_Arange \ - (*(PyObject * (*)(double, double, double, int)) \ - PyArray_API[186]) -#define PyArray_ArangeObj \ - (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ - PyArray_API[187]) -#define PyArray_SortkindConverter \ - (*(int (*)(PyObject *, NPY_SORTKIND *)) \ - PyArray_API[188]) -#define PyArray_LexSort \ - (*(PyObject * (*)(PyObject *, int)) \ - PyArray_API[189]) -#define PyArray_Round \ - (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ - PyArray_API[190]) -#define PyArray_EquivTypenums \ - (*(unsigned char (*)(int, int)) \ - 
PyArray_API[191]) -#define PyArray_RegisterDataType \ - (*(int (*)(PyArray_Descr *)) \ - PyArray_API[192]) -#define PyArray_RegisterCastFunc \ - (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ - PyArray_API[193]) -#define PyArray_RegisterCanCast \ - (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ - PyArray_API[194]) -#define PyArray_InitArrFuncs \ - (*(void (*)(PyArray_ArrFuncs *)) \ - PyArray_API[195]) -#define PyArray_IntTupleFromIntp \ - (*(PyObject * (*)(int, npy_intp *)) \ - PyArray_API[196]) -#define PyArray_TypeNumFromName \ - (*(int (*)(char *)) \ - PyArray_API[197]) -#define PyArray_ClipmodeConverter \ - (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ - PyArray_API[198]) -#define PyArray_OutputConverter \ - (*(int (*)(PyObject *, PyArrayObject **)) \ - PyArray_API[199]) -#define PyArray_BroadcastToShape \ - (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ - PyArray_API[200]) -#define _PyArray_SigintHandler \ - (*(void (*)(int)) \ - PyArray_API[201]) -#define _PyArray_GetSigintBuf \ - (*(void* (*)(void)) \ - PyArray_API[202]) -#define PyArray_DescrAlignConverter \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[203]) -#define PyArray_DescrAlignConverter2 \ - (*(int (*)(PyObject *, PyArray_Descr **)) \ - PyArray_API[204]) -#define PyArray_SearchsideConverter \ - (*(int (*)(PyObject *, void *)) \ - PyArray_API[205]) -#define PyArray_CheckAxis \ - (*(PyObject * (*)(PyArrayObject *, int *, int)) \ - PyArray_API[206]) -#define PyArray_OverflowMultiplyList \ - (*(npy_intp (*)(npy_intp *, int)) \ - PyArray_API[207]) -#define PyArray_CompareString \ - (*(int (*)(char *, char *, size_t)) \ - PyArray_API[208]) -#define PyArray_MultiIterFromObjects \ - (*(PyObject * (*)(PyObject **, int, int, ...)) \ - PyArray_API[209]) -#define PyArray_GetEndianness \ - (*(int (*)(void)) \ - PyArray_API[210]) -#define PyArray_GetNDArrayCFeatureVersion \ - (*(unsigned int (*)(void)) \ - PyArray_API[211]) -#define PyArray_Correlate2 \ - (*(PyObject * (*)(PyObject *, PyObject *, int)) \ - PyArray_API[212]) -#define PyArray_NeighborhoodIterNew \ - (*(PyObject* (*)(PyArrayIterObject *, npy_intp *, int, PyArrayObject*)) \ - PyArray_API[213]) -#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) -#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) -#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) -#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) -#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) -#define PyArray_SetDatetimeParseFunction \ - (*(void (*)(PyObject *)) \ - PyArray_API[219]) -#define PyArray_DatetimeToDatetimeStruct \ - (*(void (*)(npy_datetime, NPY_DATETIMEUNIT, npy_datetimestruct *)) \ - PyArray_API[220]) -#define PyArray_TimedeltaToTimedeltaStruct \ - (*(void (*)(npy_timedelta, NPY_DATETIMEUNIT, npy_timedeltastruct *)) \ - PyArray_API[221]) -#define PyArray_DatetimeStructToDatetime \ - (*(npy_datetime (*)(NPY_DATETIMEUNIT, npy_datetimestruct *)) \ - PyArray_API[222]) -#define PyArray_TimedeltaStructToTimedelta \ - (*(npy_datetime (*)(NPY_DATETIMEUNIT, npy_timedeltastruct *)) \ - PyArray_API[223]) -#define NpyIter_New \ - (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ - PyArray_API[224]) -#define NpyIter_MultiNew \ - (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ - PyArray_API[225]) -#define NpyIter_AdvancedNew \ - (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 
*, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ - PyArray_API[226]) -#define NpyIter_Copy \ - (*(NpyIter * (*)(NpyIter *)) \ - PyArray_API[227]) -#define NpyIter_Deallocate \ - (*(int (*)(NpyIter *)) \ - PyArray_API[228]) -#define NpyIter_HasDelayedBufAlloc \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[229]) -#define NpyIter_HasExternalLoop \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[230]) -#define NpyIter_EnableExternalLoop \ - (*(int (*)(NpyIter *)) \ - PyArray_API[231]) -#define NpyIter_GetInnerStrideArray \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[232]) -#define NpyIter_GetInnerLoopSizePtr \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[233]) -#define NpyIter_Reset \ - (*(int (*)(NpyIter *, char **)) \ - PyArray_API[234]) -#define NpyIter_ResetBasePointers \ - (*(int (*)(NpyIter *, char **, char **)) \ - PyArray_API[235]) -#define NpyIter_ResetToIterIndexRange \ - (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ - PyArray_API[236]) -#define NpyIter_GetNDim \ - (*(int (*)(NpyIter *)) \ - PyArray_API[237]) -#define NpyIter_GetNOp \ - (*(int (*)(NpyIter *)) \ - PyArray_API[238]) -#define NpyIter_GetIterNext \ - (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ - PyArray_API[239]) -#define NpyIter_GetIterSize \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[240]) -#define NpyIter_GetIterIndexRange \ - (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ - PyArray_API[241]) -#define NpyIter_GetIterIndex \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[242]) -#define NpyIter_GotoIterIndex \ - (*(int (*)(NpyIter *, npy_intp)) \ - PyArray_API[243]) -#define NpyIter_HasMultiIndex \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[244]) -#define NpyIter_GetShape \ - (*(int (*)(NpyIter *, npy_intp *)) \ - PyArray_API[245]) -#define NpyIter_GetGetMultiIndex \ - (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ - PyArray_API[246]) -#define NpyIter_GotoMultiIndex \ - (*(int (*)(NpyIter *, npy_intp *)) \ - PyArray_API[247]) -#define NpyIter_RemoveMultiIndex \ - (*(int (*)(NpyIter *)) \ - PyArray_API[248]) -#define NpyIter_HasIndex \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[249]) -#define NpyIter_IsBuffered \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[250]) -#define NpyIter_IsGrowInner \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[251]) -#define NpyIter_GetBufferSize \ - (*(npy_intp (*)(NpyIter *)) \ - PyArray_API[252]) -#define NpyIter_GetIndexPtr \ - (*(npy_intp * (*)(NpyIter *)) \ - PyArray_API[253]) -#define NpyIter_GotoIndex \ - (*(int (*)(NpyIter *, npy_intp)) \ - PyArray_API[254]) -#define NpyIter_GetDataPtrArray \ - (*(char ** (*)(NpyIter *)) \ - PyArray_API[255]) -#define NpyIter_GetDescrArray \ - (*(PyArray_Descr ** (*)(NpyIter *)) \ - PyArray_API[256]) -#define NpyIter_GetOperandArray \ - (*(PyArrayObject ** (*)(NpyIter *)) \ - PyArray_API[257]) -#define NpyIter_GetIterView \ - (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ - PyArray_API[258]) -#define NpyIter_GetReadFlags \ - (*(void (*)(NpyIter *, char *)) \ - PyArray_API[259]) -#define NpyIter_GetWriteFlags \ - (*(void (*)(NpyIter *, char *)) \ - PyArray_API[260]) -#define NpyIter_DebugPrint \ - (*(void (*)(NpyIter *)) \ - PyArray_API[261]) -#define NpyIter_IterationNeedsAPI \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[262]) -#define NpyIter_GetInnerFixedStrideArray \ - (*(void (*)(NpyIter *, npy_intp *)) \ - PyArray_API[263]) -#define NpyIter_RemoveAxis \ - (*(int (*)(NpyIter *, int)) \ - PyArray_API[264]) -#define NpyIter_GetAxisStrideArray \ - (*(npy_intp * (*)(NpyIter *, int)) \ - 
PyArray_API[265]) -#define NpyIter_RequiresBuffering \ - (*(npy_bool (*)(NpyIter *)) \ - PyArray_API[266]) -#define NpyIter_GetInitialDataPtrArray \ - (*(char ** (*)(NpyIter *)) \ - PyArray_API[267]) -#define NpyIter_CreateCompatibleStrides \ - (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ - PyArray_API[268]) -#define PyArray_CastingConverter \ - (*(int (*)(PyObject *, NPY_CASTING *)) \ - PyArray_API[269]) -#define PyArray_CountNonzero \ - (*(npy_intp (*)(PyArrayObject *)) \ - PyArray_API[270]) -#define PyArray_PromoteTypes \ - (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ - PyArray_API[271]) -#define PyArray_MinScalarType \ - (*(PyArray_Descr * (*)(PyArrayObject *)) \ - PyArray_API[272]) -#define PyArray_ResultType \ - (*(PyArray_Descr * (*)(npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **)) \ - PyArray_API[273]) -#define PyArray_CanCastArrayTo \ - (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ - PyArray_API[274]) -#define PyArray_CanCastTypeTo \ - (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ - PyArray_API[275]) -#define PyArray_EinsteinSum \ - (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ - PyArray_API[276]) -#define PyArray_NewLikeArray \ - (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ - PyArray_API[277]) -#define PyArray_GetArrayParamsFromObject \ - (*(int (*)(PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *)) \ - PyArray_API[278]) -#define PyArray_ConvertClipmodeSequence \ - (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ - PyArray_API[279]) -#define PyArray_MatrixProduct2 \ - (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ - PyArray_API[280]) -#define NpyIter_IsFirstVisit \ - (*(npy_bool (*)(NpyIter *, int)) \ - PyArray_API[281]) -#define PyArray_SetBaseObject \ - (*(int (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[282]) -#define PyArray_CreateSortedStridePerm \ - (*(void (*)(int, npy_intp *, npy_stride_sort_item *)) \ - PyArray_API[283]) -#define PyArray_RemoveAxesInPlace \ - (*(void (*)(PyArrayObject *, npy_bool *)) \ - PyArray_API[284]) -#define PyArray_DebugPrint \ - (*(void (*)(PyArrayObject *)) \ - PyArray_API[285]) -#define PyArray_FailUnlessWriteable \ - (*(int (*)(PyArrayObject *, const char *)) \ - PyArray_API[286]) -#define PyArray_SetUpdateIfCopyBase \ - (*(int (*)(PyArrayObject *, PyArrayObject *)) \ - PyArray_API[287]) -#define PyDataMem_NEW \ - (*(void * (*)(size_t)) \ - PyArray_API[288]) -#define PyDataMem_FREE \ - (*(void (*)(void *)) \ - PyArray_API[289]) -#define PyDataMem_RENEW \ - (*(void * (*)(void *, size_t)) \ - PyArray_API[290]) -#define PyDataMem_SetEventHook \ - (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \ - PyArray_API[291]) -#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) -#define PyArray_MapIterSwapAxes \ - (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \ - PyArray_API[293]) -#define PyArray_MapIterArray \ - (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ - PyArray_API[294]) -#define PyArray_MapIterNext \ - (*(void (*)(PyArrayMapIterObject *)) \ - PyArray_API[295]) -#define PyArray_Partition \ - (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ - PyArray_API[296]) -#define PyArray_ArgPartition \ - (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ - PyArray_API[297]) -#define PyArray_SelectkindConverter \ - 
        (*(int (*)(PyObject *, NPY_SELECTKIND *)) \
-        PyArray_API[298])
-#define PyDataMem_NEW_ZEROED \
-        (*(void * (*)(size_t, size_t)) \
-        PyArray_API[299])
-
-#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
-static int
-_import_array(void)
-{
-  int st;
-  PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
-  PyObject *c_api = NULL;
-
-  if (numpy == NULL) {
-      PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
-      return -1;
-  }
-  c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
-  Py_DECREF(numpy);
-  if (c_api == NULL) {
-      PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
-      return -1;
-  }
-
-#if PY_VERSION_HEX >= 0x03000000
-  if (!PyCapsule_CheckExact(c_api)) {
-      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
-      Py_DECREF(c_api);
-      return -1;
-  }
-  PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
-#else
-  if (!PyCObject_Check(c_api)) {
-      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
-      Py_DECREF(c_api);
-      return -1;
-  }
-  PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
-#endif
-  Py_DECREF(c_api);
-  if (PyArray_API == NULL) {
-      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
-      return -1;
-  }
-
-  /* Perform runtime check of C API version */
-  if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
-      PyErr_Format(PyExc_RuntimeError, "module compiled against "\
-             "ABI version %x but this version of numpy is %x", \
-             (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
-      return -1;
-  }
-  if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
-      PyErr_Format(PyExc_RuntimeError, "module compiled against "\
-             "API version %x but this version of numpy is %x", \
-             (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
-      return -1;
-  }
-
-  /*
-   * Perform runtime check of endianness and check it matches the one set by
-   * the headers (npy_endian.h) as a safeguard
-   */
-  st = PyArray_GetEndianness();
-  if (st == NPY_CPU_UNKNOWN_ENDIAN) {
-      PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
-      return -1;
-  }
-#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
-  if (st != NPY_CPU_BIG) {
-      PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
-             "big endian, but detected different endianness at runtime");
-      return -1;
-  }
-#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
-  if (st != NPY_CPU_LITTLE) {
-      PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
-             "little endian, but detected different endianness at runtime");
-      return -1;
-  }
-#endif
-
-  return 0;
-}
-
-#if PY_VERSION_HEX >= 0x03000000
-#define NUMPY_IMPORT_ARRAY_RETVAL NULL
-#else
-#define NUMPY_IMPORT_ARRAY_RETVAL
-#endif
-
-#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
-
-#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
-
-#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
-
-#endif
-
-#endif
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__ufunc_api.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__ufunc_api.h
deleted file mode 100644
index e1fd1cda05ad5..0000000000000
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/__ufunc_api.h
+++ /dev/null
@@ -1,328 +0,0 @@
-
-#ifdef 
_UMATHMODULE - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION -extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#else -NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#endif - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#else - NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#endif - -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int); -NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ - (PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *); -NPY_NO_EXPORT int PyUFunc_GenericFunction \ - (PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **); -NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_O_O \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT int PyUFunc_GetPyValues \ - (char *, int *, int *, PyObject **); -NPY_NO_EXPORT int PyUFunc_checkfperr \ - (int, PyObject *, int *); -NPY_NO_EXPORT void PyUFunc_clearfperr \ - (void); -NPY_NO_EXPORT int PyUFunc_getfperr \ - (void); -NPY_NO_EXPORT int PyUFunc_handlefperr \ - (int, PyObject *, int, int *); -NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ - (PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *); -NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *); -NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \ - (void **, size_t); -NPY_NO_EXPORT void PyUFunc_e_e \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT void 
PyUFunc_ee_e_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *); -NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ - (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); -NPY_NO_EXPORT int PyUFunc_ValidateCasting \ - (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **); -NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ - (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); - -#else - -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) -extern void **PyUFunc_API; -#else -#if defined(PY_UFUNC_UNIQUE_SYMBOL) -void **PyUFunc_API; -#else -static void **PyUFunc_API=NULL; -#endif -#endif - -#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) -#define PyUFunc_FromFuncAndData \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \ - PyUFunc_API[1]) -#define PyUFunc_RegisterLoopForType \ - (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *)) \ - PyUFunc_API[2]) -#define PyUFunc_GenericFunction \ - (*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \ - PyUFunc_API[3]) -#define PyUFunc_f_f_As_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[4]) -#define PyUFunc_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[5]) -#define PyUFunc_f_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[6]) -#define PyUFunc_g_g \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[7]) -#define PyUFunc_F_F_As_D_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[8]) -#define PyUFunc_F_F \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[9]) -#define PyUFunc_D_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[10]) -#define PyUFunc_G_G \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[11]) -#define PyUFunc_O_O \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[12]) -#define PyUFunc_ff_f_As_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[13]) -#define PyUFunc_ff_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[14]) -#define PyUFunc_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[15]) -#define PyUFunc_gg_g \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[16]) -#define PyUFunc_FF_F_As_DD_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[17]) -#define PyUFunc_DD_D \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[18]) -#define PyUFunc_FF_F \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[19]) -#define PyUFunc_GG_G \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[20]) -#define PyUFunc_OO_O \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[21]) -#define PyUFunc_O_O_method \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[22]) -#define PyUFunc_OO_O_method \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[23]) -#define PyUFunc_On_Om \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[24]) -#define PyUFunc_GetPyValues \ - (*(int (*)(char *, int *, int *, PyObject **)) \ - PyUFunc_API[25]) -#define PyUFunc_checkfperr \ - (*(int (*)(int, PyObject 
*, int *)) \ - PyUFunc_API[26]) -#define PyUFunc_clearfperr \ - (*(void (*)(void)) \ - PyUFunc_API[27]) -#define PyUFunc_getfperr \ - (*(int (*)(void)) \ - PyUFunc_API[28]) -#define PyUFunc_handlefperr \ - (*(int (*)(int, PyObject *, int, int *)) \ - PyUFunc_API[29]) -#define PyUFunc_ReplaceLoopBySignature \ - (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)) \ - PyUFunc_API[30]) -#define PyUFunc_FromFuncAndDataAndSignature \ - (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \ - PyUFunc_API[31]) -#define PyUFunc_SetUsesArraysAsData \ - (*(int (*)(void **, size_t)) \ - PyUFunc_API[32]) -#define PyUFunc_e_e \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[33]) -#define PyUFunc_e_e_As_f_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[34]) -#define PyUFunc_e_e_As_d_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[35]) -#define PyUFunc_ee_e \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[36]) -#define PyUFunc_ee_e_As_ff_f \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[37]) -#define PyUFunc_ee_e_As_dd_d \ - (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \ - PyUFunc_API[38]) -#define PyUFunc_DefaultTypeResolver \ - (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ - PyUFunc_API[39]) -#define PyUFunc_ValidateCasting \ - (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \ - PyUFunc_API[40]) -#define PyUFunc_RegisterLoopForDescr \ - (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ - PyUFunc_API[41]) - -static int -_import_umath(void) -{ - PyObject *numpy = PyImport_ImportModule("numpy.core.umath"); - PyObject *c_api = NULL; - - if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import"); - return -1; - } - c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); - Py_DECREF(numpy); - if (c_api == NULL) { - PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); - return -1; - } - -#if PY_VERSION_HEX >= 0x03000000 - if (!PyCapsule_CheckExact(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); -#else - if (!PyCObject_Check(c_api)) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object"); - Py_DECREF(c_api); - return -1; - } - PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); -#endif - Py_DECREF(c_api); - if (PyUFunc_API == NULL) { - PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); - return -1; - } - return 0; -} - -#if PY_VERSION_HEX >= 0x03000000 -#define NUMPY_IMPORT_UMATH_RETVAL NULL -#else -#define NUMPY_IMPORT_UMATH_RETVAL -#endif - -#define import_umath() \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return NUMPY_IMPORT_UMATH_RETVAL;\ - }\ - } while(0) - -#define import_umath1(ret) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError,\ - "numpy.core.umath failed to import");\ - return ret;\ - }\ - } while(0) - -#define import_umath2(ret, msg) \ - do {\ - UFUNC_NOFPE\ - if (_import_umath() < 0) {\ - PyErr_Print();\ - PyErr_SetString(PyExc_ImportError, msg);\ - 
return ret;\
-    }\
-  } while(0)
-
-#define import_ufunc() \
-  do {\
-    UFUNC_NOFPE\
-    if (_import_umath() < 0) {\
-      PyErr_Print();\
-      PyErr_SetString(PyExc_ImportError,\
-            "numpy.core.umath failed to import");\
-    }\
-  } while(0)
-
-#endif
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_neighborhood_iterator_imp.h
deleted file mode 100644
index e8860cbc73bba..0000000000000
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_neighborhood_iterator_imp.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP
-#error You should not include this header directly
-#endif
-/*
- * Private API (here for inline)
- */
-static NPY_INLINE int
-_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
-
-/*
- * Update to next item of the iterator
- *
- * Note: this simply increment the coordinates vector, last dimension
- * incremented first , i.e, for dimension 3
- * ...
- * -1, -1, -1
- * -1, -1, 0
- * -1, -1, 1
- * ....
- * -1, 0, -1
- * -1, 0, 0
- * ....
- * 0, -1, -1
- * 0, -1, 0
- * ....
- */
-#define _UPDATE_COORD_ITER(c) \
-    wb = iter->coordinates[c] < iter->bounds[c][1]; \
-    if (wb) { \
-        iter->coordinates[c] += 1; \
-        return 0; \
-    } \
-    else { \
-        iter->coordinates[c] = iter->bounds[c][0]; \
-    }
-
-static NPY_INLINE int
-_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
-{
-    npy_intp i, wb;
-
-    for (i = iter->nd - 1; i >= 0; --i) {
-        _UPDATE_COORD_ITER(i)
-    }
-
-    return 0;
-}
-
-/*
- * Version optimized for 2d arrays, manual loop unrolling
- */
-static NPY_INLINE int
-_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
-{
-    npy_intp wb;
-
-    _UPDATE_COORD_ITER(1)
-    _UPDATE_COORD_ITER(0)
-
-    return 0;
-}
-#undef _UPDATE_COORD_ITER
-
-/*
- * Advance to the next neighbour
- */
-static NPY_INLINE int
-PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
-{
-    _PyArrayNeighborhoodIter_IncrCoord (iter);
-    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
-
-    return 0;
-}
-
-/*
- * Reset functions
- */
-static NPY_INLINE int
-PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
-{
-    npy_intp i;
-
-    for (i = 0; i < iter->nd; ++i) {
-        iter->coordinates[i] = iter->bounds[i][0];
-    }
-    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
-
-    return 0;
-}
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_numpyconfig.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_numpyconfig.h
deleted file mode 100644
index 79ccc290418ff..0000000000000
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/_numpyconfig.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#define NPY_HAVE_ENDIAN_H 1
-#define NPY_SIZEOF_SHORT SIZEOF_SHORT
-#define NPY_SIZEOF_INT SIZEOF_INT
-#define NPY_SIZEOF_LONG SIZEOF_LONG
-#define NPY_SIZEOF_FLOAT 4
-#define NPY_SIZEOF_COMPLEX_FLOAT 8
-#define NPY_SIZEOF_DOUBLE 8
-#define NPY_SIZEOF_COMPLEX_DOUBLE 16
-#define NPY_SIZEOF_LONGDOUBLE 16
-#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
-#define NPY_SIZEOF_PY_INTPTR_T 8
-#define NPY_SIZEOF_OFF_T 8
-#define NPY_SIZEOF_PY_LONG_LONG 8
-#define NPY_SIZEOF_LONGLONG 8
-#define NPY_NO_SMP 0
-#define NPY_HAVE_DECL_ISNAN
-#define NPY_HAVE_DECL_ISINF
-#define NPY_HAVE_DECL_ISFINITE
-#define NPY_HAVE_DECL_SIGNBIT
-#define NPY_USE_C99_COMPLEX 1
-#define NPY_HAVE_COMPLEX_DOUBLE 1
-#define NPY_HAVE_COMPLEX_FLOAT 1
-#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
-#define NPY_ENABLE_SEPARATE_COMPILATION 1 -#define NPY_USE_C99_FORMATS 1 -#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) -#define NPY_ABI_VERSION 0x01000009 -#define NPY_API_VERSION 0x00000009 - -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS 1 -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayobject.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayobject.h deleted file mode 100644 index 4f46d6b1ac91d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayobject.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef Py_ARRAYOBJECT_H -#define Py_ARRAYOBJECT_H - -#include "ndarrayobject.h" -#include "npy_interrupt.h" - -#ifdef NPY_NO_PREFIX -#include "noprefix.h" -#endif - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayscalars.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayscalars.h deleted file mode 100644 index 64450e7132132..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/arrayscalars.h +++ /dev/null @@ -1,175 +0,0 @@ -#ifndef _NPY_ARRAYSCALARS_H_ -#define _NPY_ARRAYSCALARS_H_ - -#ifndef _MULTIARRAYMODULE -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; -#endif - - -typedef struct { - PyObject_HEAD - signed char obval; -} PyByteScalarObject; - - -typedef struct { - PyObject_HEAD - short obval; -} PyShortScalarObject; - - -typedef struct { - PyObject_HEAD - int obval; -} PyIntScalarObject; - - -typedef struct { - PyObject_HEAD - long obval; -} PyLongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longlong obval; -} PyLongLongScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned char obval; -} PyUByteScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned short obval; -} PyUShortScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned int obval; -} PyUIntScalarObject; - - -typedef struct { - PyObject_HEAD - unsigned long obval; -} PyULongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_ulonglong obval; -} PyULongLongScalarObject; - - -typedef struct { - PyObject_HEAD - npy_half obval; -} PyHalfScalarObject; - - -typedef struct { - PyObject_HEAD - float obval; -} PyFloatScalarObject; - - -typedef struct { - PyObject_HEAD - double obval; -} PyDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_longdouble obval; -} PyLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cfloat obval; -} PyCFloatScalarObject; - - -typedef struct { - PyObject_HEAD - npy_cdouble obval; -} PyCDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - npy_clongdouble obval; -} PyCLongDoubleScalarObject; - - -typedef struct { - PyObject_HEAD - PyObject * obval; -} PyObjectScalarObject; - -typedef struct { - PyObject_HEAD - npy_datetime obval; - PyArray_DatetimeMetaData obmeta; -} PyDatetimeScalarObject; - -typedef struct { - PyObject_HEAD - npy_timedelta obval; - PyArray_DatetimeMetaData obmeta; -} PyTimedeltaScalarObject; - - -typedef struct { - PyObject_HEAD - char obval; -} PyScalarObject; - -#define PyStringScalarObject PyStringObject -#define PyUnicodeScalarObject PyUnicodeObject - -typedef struct { - PyObject_VAR_HEAD - char *obval; - PyArray_Descr *descr; - int flags; - PyObject *base; -} PyVoidScalarObject; - -/* Macros - PyScalarObject - PyArrType_Type - are defined in ndarrayobject.h -*/ - -#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) -#define PyArrayScalar_True ((PyObject 
*)(&(_PyArrayScalar_BoolValues[1]))) -#define PyArrayScalar_FromLong(i) \ - ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) -#define PyArrayScalar_RETURN_FALSE \ - return Py_INCREF(PyArrayScalar_False), \ - PyArrayScalar_False -#define PyArrayScalar_RETURN_TRUE \ - return Py_INCREF(PyArrayScalar_True), \ - PyArrayScalar_True - -#define PyArrayScalar_New(cls) \ - Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) -#define PyArrayScalar_VAL(obj, cls) \ - ((Py##cls##ScalarObject *)obj)->obval -#define PyArrayScalar_ASSIGN(obj, cls, val) \ - PyArrayScalar_VAL(obj, cls) = val - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/halffloat.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/halffloat.h deleted file mode 100644 index 944f0ea34b482..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/halffloat.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef __NPY_HALFFLOAT_H__ -#define __NPY_HALFFLOAT_H__ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Half-precision routines - */ - -/* Conversions */ -float npy_half_to_float(npy_half h); -double npy_half_to_double(npy_half h); -npy_half npy_float_to_half(float f); -npy_half npy_double_to_half(double d); -/* Comparisons */ -int npy_half_eq(npy_half h1, npy_half h2); -int npy_half_ne(npy_half h1, npy_half h2); -int npy_half_le(npy_half h1, npy_half h2); -int npy_half_lt(npy_half h1, npy_half h2); -int npy_half_ge(npy_half h1, npy_half h2); -int npy_half_gt(npy_half h1, npy_half h2); -/* faster *_nonan variants for when you know h1 and h2 are not NaN */ -int npy_half_eq_nonan(npy_half h1, npy_half h2); -int npy_half_lt_nonan(npy_half h1, npy_half h2); -int npy_half_le_nonan(npy_half h1, npy_half h2); -/* Miscellaneous functions */ -int npy_half_iszero(npy_half h); -int npy_half_isnan(npy_half h); -int npy_half_isinf(npy_half h); -int npy_half_isfinite(npy_half h); -int npy_half_signbit(npy_half h); -npy_half npy_half_copysign(npy_half x, npy_half y); -npy_half npy_half_spacing(npy_half h); -npy_half npy_half_nextafter(npy_half x, npy_half y); - -/* - * Half-precision constants - */ - -#define NPY_HALF_ZERO (0x0000u) -#define NPY_HALF_PZERO (0x0000u) -#define NPY_HALF_NZERO (0x8000u) -#define NPY_HALF_ONE (0x3c00u) -#define NPY_HALF_NEGONE (0xbc00u) -#define NPY_HALF_PINF (0x7c00u) -#define NPY_HALF_NINF (0xfc00u) -#define NPY_HALF_NAN (0x7e00u) - -#define NPY_MAX_HALF (0x7bffu) - -/* - * Bit-level conversions - */ - -npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); -npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); -npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); -npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/multiarray_api.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/multiarray_api.txt deleted file mode 100644 index 599ac5cb19221..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/multiarray_api.txt +++ /dev/null @@ -1,2442 +0,0 @@ - -=========== -Numpy C-API -=========== -:: - - unsigned int - PyArray_GetNDArrayCVersion(void ) - - -Included at the very first so not auto-grabbed and thus not labeled. 
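A minimal illustration (not part of the deleted file): the version query above is the heart of what ``import_array()`` checks when an extension module loads. This sketch assumes ``Python.h`` and ``numpy/arrayobject.h`` are included and the API table has already been imported::

    /* Sketch: refuse to load if the numpy found at runtime does not
       match the numpy these headers were compiled against. */
    static int
    check_numpy_abi(void)
    {
        if (PyArray_GetNDArrayCVersion() != NPY_VERSION) {
            PyErr_SetString(PyExc_ImportError,
                            "module compiled against a different numpy ABI");
            return -1;
        }
        return 0;
    }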
- -:: - - int - PyArray_SetNumericOps(PyObject *dict) - -Set internal structure with number functions that all arrays will use - -:: - - PyObject * - PyArray_GetNumericOps(void ) - -Get dictionary showing number functions that all arrays will use - -:: - - int - PyArray_INCREF(PyArrayObject *mp) - -For object arrays, increment all internal references. - -:: - - int - PyArray_XDECREF(PyArrayObject *mp) - -Decrement all internal references for object arrays. -(or arrays with object fields) - -:: - - void - PyArray_SetStringFunction(PyObject *op, int repr) - -Set the array print function to be a Python function. - -:: - - PyArray_Descr * - PyArray_DescrFromType(int type) - -Get the PyArray_Descr structure for a type. - -:: - - PyObject * - PyArray_TypeObjectFromType(int type) - -Get a typeobject from a type-number -- can return NULL. - -New reference - -:: - - char * - PyArray_Zero(PyArrayObject *arr) - -Get pointer to zero of correct type for array. - -:: - - char * - PyArray_One(PyArrayObject *arr) - -Get pointer to one of correct type for array - -:: - - PyObject * - PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int - is_f_order) - -For backward compatibility - -Cast an array using typecode structure. -steals reference to dtype --- cannot be NULL - -This function always makes a copy of arr, even if the dtype -doesn't change. - -:: - - int - PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp) - -Cast to an already created array. - -:: - - int - PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) - -Cast to an already created array. Arrays don't have to be "broadcastable" -Only requirement is they have the same number of elements. - -:: - - int - PyArray_CanCastSafely(int fromtype, int totype) - -Check the type coercion rules. - -:: - - npy_bool - PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) - -leaves reference count alone --- cannot be NULL - -PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting' -parameter. - -:: - - int - PyArray_ObjectType(PyObject *op, int minimum_type) - -Return the typecode of the array a Python object would be converted to - -Returns the type number the result should have, or NPY_NOTYPE on error. - -:: - - PyArray_Descr * - PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) - -new reference -- accepts NULL for mintype - -:: - - PyArrayObject ** - PyArray_ConvertToCommonType(PyObject *op, int *retn) - - -:: - - PyArray_Descr * - PyArray_DescrFromScalar(PyObject *sc) - -Return descr object from array scalar. - -New reference - -:: - - PyArray_Descr * - PyArray_DescrFromTypeObject(PyObject *type) - - -:: - - npy_intp - PyArray_Size(PyObject *op) - -Compute the size of an array (in number of items) - -:: - - PyObject * - PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) - -Get scalar-equivalent to a region of memory described by a descriptor. - -:: - - PyObject * - PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) - -Get 0-dim array from scalar - -0-dim array from array-scalar object -always contains a copy of the data -unless outcode is NULL, it is of void type and the referrer does -not own it either. 
- -steals reference to outcode - -:: - - void - PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) - -Convert to c-type - -no error checking is performed -- ctypeptr must be same type as scalar -in case of flexible type, the data is not copied -into ctypeptr which is expected to be a pointer to pointer - -:: - - int - PyArray_CastScalarToCtype(PyObject *scalar, void - *ctypeptr, PyArray_Descr *outcode) - -Cast Scalar to c-type - -The output buffer must be large-enough to receive the value -Even for flexible types which is different from ScalarAsCtype -where only a reference for flexible types is returned - -This may not work right on narrow builds for NumPy unicode scalars. - -:: - - int - PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr - *indescr, void *ctypeptr, int outtype) - -Cast Scalar to c-type - -:: - - PyObject * - PyArray_ScalarFromObject(PyObject *object) - -Get an Array Scalar From a Python Object - -Returns NULL if unsuccessful but error is only set if another error occurred. -Currently only Numeric-like object supported. - -:: - - PyArray_VectorUnaryFunc * - PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) - -Get a cast function to cast from the input descriptor to the -output type_number (must be a registered data-type). -Returns NULL if un-successful. - -:: - - PyObject * - PyArray_FromDims(int nd, int *d, int type) - -Construct an empty array from dimensions and typenum - -:: - - PyObject * - PyArray_FromDimsAndDataAndDescr(int nd, int *d, PyArray_Descr - *descr, char *data) - -Like FromDimsAndData but uses the Descr structure instead of typecode -as input. - -:: - - PyObject * - PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int - min_depth, int max_depth, int flags, PyObject - *context) - -Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags -Steals a reference to newtype --- which can be NULL - -:: - - PyObject * - PyArray_EnsureArray(PyObject *op) - -This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, ENSUREARRAY) -that special cases Arrays and PyArray_Scalars up front -It *steals a reference* to the object -It also guarantees that the result is PyArray_Type -Because it decrefs op if any conversion needs to take place -so it can be used like PyArray_EnsureArray(some_function(...)) - -:: - - PyObject * - PyArray_EnsureAnyArray(PyObject *op) - - -:: - - PyObject * - PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char - *sep) - - -Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an -array corresponding to the data encoded in that file. - -If the dtype is NULL, the default array type is used (double). -If non-null, the reference is stolen. - -The number of elements to read is given as ``num``; if it is < 0, then -then as many as possible are read. - -If ``sep`` is NULL or empty, then binary data is assumed, else -text data, with ``sep`` as the separator between elements. Whitespace in -the separator matches any length of whitespace in the text, and a match -for whitespace around the separator is added. - -For memory-mapped files, use the buffer interface. No more data than -necessary is read by this routine. - -:: - - PyObject * - PyArray_FromString(char *data, npy_intp slen, PyArray_Descr - *dtype, npy_intp num, char *sep) - - -Given a pointer to a string ``data``, a string length ``slen``, and -a ``PyArray_Descr``, return an array corresponding to the data -encoded in that string. - -If the dtype is NULL, the default array type is used (double). 
-If non-null, the reference is stolen. - -If ``slen`` is < 0, then the end of string is used for text data. -It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs -would be the norm). - -The number of elements to read is given as ``num``; if it is < 0, then -then as many as possible are read. - -If ``sep`` is NULL or empty, then binary data is assumed, else -text data, with ``sep`` as the separator between elements. Whitespace in -the separator matches any length of whitespace in the text, and a match -for whitespace around the separator is added. - -:: - - PyObject * - PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, npy_intp - count, npy_intp offset) - - -:: - - PyObject * - PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) - - -steals a reference to dtype (which cannot be NULL) - -:: - - PyObject * - PyArray_Return(PyArrayObject *mp) - - -Return either an array or the appropriate Python object if the array -is 0d and matches a Python type. -steals reference to mp - -:: - - PyObject * - PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int - offset) - -Get a subset of bytes from each element of the array -steals reference to typed, must not be NULL - -:: - - int - PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int - offset, PyObject *val) - -Set a subset of bytes from each element of the array -steals reference to dtype, must not be NULL - -:: - - PyObject * - PyArray_Byteswap(PyArrayObject *self, npy_bool inplace) - - -:: - - PyObject * - PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int - refcheck, NPY_ORDER order) - -Resize (reallocate data). Only works if nothing else is referencing this -array and it is contiguous. If refcheck is 0, then the reference count is -not checked and assumed to be 1. You still must own this data and have no -weak-references and no base object. - -:: - - int - PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src) - -Move the memory of one array into another, allowing for overlapping data. - -Returns 0 on success, negative on failure. - -:: - - int - PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src) - -Copy an Array into another array. -Broadcast to the destination shape if necessary. - -Returns 0 on success, -1 on failure. - -:: - - int - PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src) - -Copy an Array into another array -- memory must not overlap -Does not require src and dest to have "broadcastable" shapes -(only the same number of elements). - -TODO: For NumPy 2.0, this could accept an order parameter which -only allows NPY_CORDER and NPY_FORDER. Could also rename -this to CopyAsFlat to make the name more intuitive. - -Returns 0 on success, -1 on error. - -:: - - int - PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) - - -:: - - PyObject * - PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order) - -Copy an array. - -:: - - PyObject * - PyArray_ToList(PyArrayObject *self) - -To List - -:: - - PyObject * - PyArray_ToString(PyArrayObject *self, NPY_ORDER order) - - -:: - - int - PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) - -To File - -:: - - int - PyArray_Dump(PyObject *self, PyObject *file, int protocol) - - -:: - - PyObject * - PyArray_Dumps(PyObject *self, int protocol) - - -:: - - int - PyArray_ValidType(int type) - -Is the typenum valid? - -:: - - void - PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) - -Update Several Flags at once. 
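A hedged sketch of the typical use of ``PyArray_UpdateFlags`` above: after hand-editing an array's dimensions or strides (``swap_first_two_axes_inplace`` is a hypothetical helper; real code would normally call ``PyArray_SwapAxes`` instead), the cached contiguity and alignment flags are stale and must be recomputed::

    static void
    swap_first_two_axes_inplace(PyArrayObject *arr)
    {
        npy_intp *dims = PyArray_DIMS(arr);
        npy_intp *strides = PyArray_STRIDES(arr);
        npy_intp t;

        t = dims[0];    dims[0] = dims[1];       dims[1] = t;
        t = strides[0]; strides[0] = strides[1]; strides[1] = t;

        /* dims/strides changed, so the C/F-contiguity and alignment
           flags may be wrong; have NumPy recompute them. */
        PyArray_UpdateFlags(arr, NPY_ARRAY_UPDATE_ALL);
    }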
- -:: - - PyObject * - PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int - type_num, npy_intp *strides, void *data, int itemsize, int - flags, PyObject *obj) - -Generic new array creation routine. - -:: - - PyObject * - PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int - nd, npy_intp *dims, npy_intp *strides, void - *data, int flags, PyObject *obj) - -Generic new array creation routine. - -steals a reference to descr (even on failure) - -:: - - PyArray_Descr * - PyArray_DescrNew(PyArray_Descr *base) - -base cannot be NULL - -:: - - PyArray_Descr * - PyArray_DescrNewFromType(int type_num) - - -:: - - double - PyArray_GetPriority(PyObject *obj, double default_) - -Get Priority from object - -:: - - PyObject * - PyArray_IterNew(PyObject *obj) - -Get Iterator. - -:: - - PyObject * - PyArray_MultiIterNew(int n, ... ) - -Get MultiIterator, - -:: - - int - PyArray_PyIntAsInt(PyObject *o) - - -:: - - npy_intp - PyArray_PyIntAsIntp(PyObject *o) - - -:: - - int - PyArray_Broadcast(PyArrayMultiIterObject *mit) - - -:: - - void - PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) - -Assumes contiguous - -:: - - int - PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) - - -:: - - npy_bool - PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp - offset, npy_intp *dims, npy_intp *newstrides) - - -:: - - PyArray_Descr * - PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) - - -returns a copy of the PyArray_Descr structure with the byteorder -altered: -no arguments: The byteorder is swapped (in all subfields as well) -single argument: The byteorder is forced to the given state -(in all subfields as well) - -Valid states: ('big', '>') or ('little' or '<') -('native', or '=') - -If a descr structure with | is encountered it's own -byte-order is not changed but any fields are: - - -Deep bytorder change of a data-type descriptor -Leaves reference count of self unchanged --- does not DECREF self *** - -:: - - PyObject * - PyArray_IterAllButAxis(PyObject *obj, int *inaxis) - -Get Iterator that iterates over all but one axis (don't use this with -PyArray_ITER_GOTO1D). The axis will be over-written if negative -with the axis having the smallest stride. - -:: - - PyObject * - PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int - min_depth, int max_depth, int requires, PyObject - *context) - -steals a reference to descr -- accepts NULL - -:: - - PyObject * - PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int - flags) - -steals reference to newtype --- acc. NULL - -:: - - PyObject * - PyArray_FromInterface(PyObject *origin) - - -:: - - PyObject * - PyArray_FromStructInterface(PyObject *input) - - -:: - - PyObject * - PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject - *context) - - -:: - - NPY_SCALARKIND - PyArray_ScalarKind(int typenum, PyArrayObject **arr) - -ScalarKind - -Returns the scalar kind of a type number, with an -optional tweak based on the scalar value itself. -If no scalar is provided, it returns INTPOS_SCALAR -for both signed and unsigned integers, otherwise -it checks the sign of any signed integer to choose -INTNEG_SCALAR when appropriate. - -:: - - int - PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND - scalar) - - -Determines whether the data type 'thistype', with -scalar kind 'scalar', can be coerced into 'neededtype'. 
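To make the ``PyArray_DescrNewByteorder`` entry above concrete, a minimal sketch (``make_big_endian_f64`` is a hypothetical helper; assumes the usual NumPy headers)::

    /* Sketch: derive a big-endian float64 descriptor from the native one. */
    static PyArray_Descr *
    make_big_endian_f64(void)
    {
        PyArray_Descr *native = PyArray_DescrFromType(NPY_FLOAT64);
        PyArray_Descr *big;

        if (native == NULL) {
            return NULL;
        }
        big = PyArray_DescrNewByteorder(native, NPY_BIG);
        Py_DECREF(native);
        return big;    /* new reference, or NULL with an exception set */
    }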
- -:: - - PyObject * - PyArray_NewFlagsObject(PyObject *obj) - - -Get New ArrayFlagsObject - -:: - - npy_bool - PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) - -See if array scalars can be cast. - -TODO: For NumPy 2.0, add a NPY_CASTING parameter. - -:: - - int - PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) - - -:: - - int - PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) - -Adjusts previously broadcasted iterators so that the axis with -the smallest sum of iterator strides is not iterated over. -Returns dimension which is smallest in the range [0,multi->nd). -A -1 is returned if multi->nd == 0. - -don't use with PyArray_ITER_GOTO1D because factors are not adjusted - -:: - - int - PyArray_ElementStrides(PyObject *obj) - - -:: - - void - PyArray_Item_INCREF(char *data, PyArray_Descr *descr) - - -:: - - void - PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) - - -:: - - PyObject * - PyArray_FieldNames(PyObject *fields) - -Return the tuple of ordered field names from a dictionary. - -:: - - PyObject * - PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute) - -Return Transpose. - -:: - - PyObject * - PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int - axis, PyArrayObject *out, NPY_CLIPMODE clipmode) - -Take - -:: - - PyObject * - PyArray_PutTo(PyArrayObject *self, PyObject*values0, PyObject - *indices0, NPY_CLIPMODE clipmode) - -Put values into an array - -:: - - PyObject * - PyArray_PutMask(PyArrayObject *self, PyObject*values0, PyObject*mask0) - -Put values into an array according to a mask. - -:: - - PyObject * - PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) - -Repeat the array. - -:: - - PyObject * - PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject - *out, NPY_CLIPMODE clipmode) - - -:: - - int - PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) - -Sort an array in-place - -:: - - PyObject * - PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) - -ArgSort an array - -:: - - PyObject * - PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE - side, PyObject *perm) - - -Search the sorted array op1 for the location of the items in op2. The -result is an array of indexes, one for each element in op2, such that if -the item were to be inserted in op1 just before that index the array -would still be in sorted order. - -Parameters ----------- -op1 : PyArrayObject * -Array to be searched, must be 1-D. -op2 : PyObject * -Array of items whose insertion indexes in op1 are wanted -side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT} -If NPY_SEARCHLEFT, return first valid insertion indexes -If NPY_SEARCHRIGHT, return last valid insertion indexes -perm : PyObject * -Permutation array that sorts op1 (optional) - -Returns -------- -ret : PyObject * -New reference to npy_intp array containing indexes where items in op2 -could be validly inserted into op1. NULL on error. - -Notes ------ -Binary search is used to find the indexes. 
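A hedged usage sketch of ``PyArray_SearchSorted`` as documented above (``left_insertion_points`` is a hypothetical wrapper; the caller is assumed to have validated both arrays)::

    static PyObject *
    left_insertion_points(PyArrayObject *haystack, PyArrayObject *needles)
    {
        /* haystack: 1-D and sorted; needles: values to locate.
           Returns a new reference to an npy_intp index array, or NULL. */
        return PyArray_SearchSorted(haystack, (PyObject *)needles,
                                    NPY_SEARCHLEFT, NULL);
    }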
- -:: - - PyObject * - PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) - -ArgMax - -:: - - PyObject * - PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out) - -ArgMin - -:: - - PyObject * - PyArray_Reshape(PyArrayObject *self, PyObject *shape) - -Reshape - -:: - - PyObject * - PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER - order) - -New shape for an array - -:: - - PyObject * - PyArray_Squeeze(PyArrayObject *self) - - -return a new view of the array object with all of its unit-length -dimensions squeezed out if needed, otherwise -return the same array. - -:: - - PyObject * - PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject - *pytype) - -View -steals a reference to type -- accepts NULL - -:: - - PyObject * - PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) - -SwapAxes - -:: - - PyObject * - PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) - -Max - -:: - - PyObject * - PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) - -Min - -:: - - PyObject * - PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) - -Ptp - -:: - - PyObject * - PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Mean - -:: - - PyObject * - PyArray_Trace(PyArrayObject *self, int offset, int axis1, int - axis2, int rtype, PyArrayObject *out) - -Trace - -:: - - PyObject * - PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int - axis2) - -Diagonal - -In NumPy versions prior to 1.7, this function always returned a copy of -the diagonal array. In 1.7, the code has been updated to compute a view -onto 'self', but it still copies this array before returning, as well as -setting the internal WARN_ON_WRITE flag. In a future version, it will -simply return a view onto self. - -:: - - PyObject * - PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject - *max, PyArrayObject *out) - -Clip - -:: - - PyObject * - PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) - -Conjugate - -:: - - PyObject * - PyArray_Nonzero(PyArrayObject *self) - -Nonzero - -TODO: In NumPy 2.0, should make the iteration order a parameter. 
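As a small illustration of the ``PyArray_Nonzero`` entry above (a sketch; ``nonzero_first_axis`` is hypothetical): the call returns a tuple holding one index array per dimension, mirroring ``np.nonzero()`` at the Python level::

    static PyObject *
    nonzero_first_axis(PyArrayObject *arr)
    {
        PyObject *nz = PyArray_Nonzero(arr);   /* new ref: tuple of arrays */
        PyObject *first;

        if (nz == NULL) {
            return NULL;
        }
        first = PyTuple_GET_ITEM(nz, 0);       /* borrowed reference */
        Py_INCREF(first);
        Py_DECREF(nz);
        return first;                          /* indices along axis 0 */
    }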
- -:: - - PyObject * - PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out, int variance) - -Set variance to 1 to by-pass square-root calculation and return variance -Std - -:: - - PyObject * - PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Sum - -:: - - PyObject * - PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -CumSum - -:: - - PyObject * - PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject - *out) - -Prod - -:: - - PyObject * - PyArray_CumProd(PyArrayObject *self, int axis, int - rtype, PyArrayObject *out) - -CumProd - -:: - - PyObject * - PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) - -All - -:: - - PyObject * - PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) - -Any - -:: - - PyObject * - PyArray_Compress(PyArrayObject *self, PyObject *condition, int - axis, PyArrayObject *out) - -Compress - -:: - - PyObject * - PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) - -Flatten - -:: - - PyObject * - PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order) - -Ravel -Returns a contiguous array - -:: - - npy_intp - PyArray_MultiplyList(npy_intp *l1, int n) - -Multiply a List - -:: - - int - PyArray_MultiplyIntList(int *l1, int n) - -Multiply a List of ints - -:: - - void * - PyArray_GetPtr(PyArrayObject *obj, npy_intp*ind) - -Produce a pointer into array - -:: - - int - PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n) - -Compare Lists - -:: - - int - PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int - nd, PyArray_Descr*typedescr) - -Simulate a C-array -steals a reference to typedescr -- can be NULL - -:: - - int - PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) - -Convert to a 1D C-array - -:: - - int - PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int - typecode) - -Convert to a 2D C-array - -:: - - int - PyArray_Free(PyObject *op, void *ptr) - -Free pointers created if As2D is called - -:: - - int - PyArray_Converter(PyObject *object, PyObject **address) - - -Useful to pass as converter function for O& processing in PyArgs_ParseTuple. - -This conversion function can be used with the "O&" argument for -PyArg_ParseTuple. It will immediately return an object of array type -or will convert to a NPY_ARRAY_CARRAY any other object. - -If you use PyArray_Converter, you must DECREF the array when finished -as you get a new reference to it. - -:: - - int - PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) - -PyArray_IntpFromSequence -Returns the number of integers converted or -1 if an error occurred. -vals must be large enough to hold maxvals - -:: - - PyObject * - PyArray_Concatenate(PyObject *op, int axis) - -Concatenate - -Concatenate an arbitrary Python sequence into an array. -op is a python object supporting the sequence interface. -Its elements will be concatenated together to form a single -multidimensional array. If axis is NPY_MAXDIMS or bigger, then -each sequence object will be flattened before concatenation - -:: - - PyObject * - PyArray_InnerProduct(PyObject *op1, PyObject *op2) - -Numeric.innerproduct(a,v) - -:: - - PyObject * - PyArray_MatrixProduct(PyObject *op1, PyObject *op2) - -Numeric.matrixproduct(a,v) -just like inner product but does the swapaxes stuff on the fly - -:: - - PyObject * - PyArray_CopyAndTranspose(PyObject *op) - -Copy and Transpose - -Could deprecate this function, as there isn't a speed benefit over -calling Transpose and then Copy. 
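The ``O&`` convention described under ``PyArray_Converter`` above looks roughly like this in practice (a sketch; ``my_sum`` is a hypothetical module function, and the reduction choices are illustrative)::

    static PyObject *
    my_sum(PyObject *self, PyObject *args)
    {
        PyArrayObject *arr = NULL;
        PyObject *res;

        if (!PyArg_ParseTuple(args, "O&", PyArray_Converter, &arr)) {
            return NULL;
        }
        /* axis=NPY_MAXDIMS sums over all axes; NPY_NOTYPE keeps the
           default result type; no output array is supplied. */
        res = PyArray_Sum(arr, NPY_MAXDIMS, NPY_NOTYPE, NULL);
        Py_DECREF(arr);    /* the converter handed back a new reference */
        return res;
    }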
- -:: - - PyObject * - PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) - -Numeric.correlate(a1,a2,mode) - -:: - - int - PyArray_TypestrConvert(int itemsize, int gentype) - -Typestr converter - -:: - - int - PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) - -Get typenum from an object -- None goes to NPY_DEFAULT_TYPE -This function takes a Python object representing a type and converts it -to a the correct PyArray_Descr * structure to describe the type. - -Many objects can be used to represent a data-type which in NumPy is -quite a flexible concept. - -This is the central code that converts Python objects to -Type-descriptor objects that are used throughout numpy. - -Returns a new reference in *at, but the returned should not be -modified as it may be one of the canonical immutable objects or -a reference to the input obj. - -:: - - int - PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) - -Get typenum from an object -- None goes to NULL - -:: - - int - PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) - -Get intp chunk from sequence - -This function takes a Python sequence object and allocates and -fills in an intp array with the converted values. - -Remember to free the pointer seq.ptr when done using -PyDimMem_FREE(seq.ptr)** - -:: - - int - PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) - -Get buffer chunk from object - -this function takes a Python object which exposes the (single-segment) -buffer interface and returns a pointer to the data segment - -You should increment the reference count by one of buf->base -if you will hang on to a reference - -You only get a borrowed reference to the object. Do not free the -memory... - -:: - - int - PyArray_AxisConverter(PyObject *obj, int *axis) - -Get axis from an object (possibly None) -- a converter function, - -See also PyArray_ConvertMultiAxis, which also handles a tuple of axes. - -:: - - int - PyArray_BoolConverter(PyObject *object, npy_bool *val) - -Convert an object to true / false - -:: - - int - PyArray_ByteorderConverter(PyObject *obj, char *endian) - -Convert object to endian - -:: - - int - PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) - -Convert an object to FORTRAN / C / ANY / KEEP - -:: - - unsigned char - PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) - - -This function returns true if the two typecodes are -equivalent (same basic kind and same itemsize). - -:: - - PyObject * - PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int - is_f_order) - -Zeros - -steal a reference -accepts NULL type - -:: - - PyObject * - PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int - is_f_order) - -Empty - -accepts NULL type -steals referenct to type - -:: - - PyObject * - PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) - -Where - -:: - - PyObject * - PyArray_Arange(double start, double stop, double step, int type_num) - -Arange, - -:: - - PyObject * - PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject - *step, PyArray_Descr *dtype) - - -ArangeObj, - -this doesn't change the references - -:: - - int - PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) - -Convert object to sort kind - -:: - - PyObject * - PyArray_LexSort(PyObject *sort_keys, int axis) - -LexSort an array providing indices that will sort a collection of arrays -lexicographically. 
The first key is sorted on first, followed by the second key --- requires that arg"merge"sort is available for each sort_key - -Returns an index array that shows the indexes for the lexicographic sort along -the given axis. - -:: - - PyObject * - PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) - -Round - -:: - - unsigned char - PyArray_EquivTypenums(int typenum1, int typenum2) - - -:: - - int - PyArray_RegisterDataType(PyArray_Descr *descr) - -Register Data type -Does not change the reference count of descr - -:: - - int - PyArray_RegisterCastFunc(PyArray_Descr *descr, int - totype, PyArray_VectorUnaryFunc *castfunc) - -Register Casting Function -Replaces any function currently stored. - -:: - - int - PyArray_RegisterCanCast(PyArray_Descr *descr, int - totype, NPY_SCALARKIND scalar) - -Register a type number indicating that a descriptor can be cast -to it safely - -:: - - void - PyArray_InitArrFuncs(PyArray_ArrFuncs *f) - -Initialize arrfuncs to NULL - -:: - - PyObject * - PyArray_IntTupleFromIntp(int len, npy_intp *vals) - -PyArray_IntTupleFromIntp - -:: - - int - PyArray_TypeNumFromName(char *str) - - -:: - - int - PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) - -Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP - -:: - - int - PyArray_OutputConverter(PyObject *object, PyArrayObject **address) - -Useful to pass as converter function for O& processing in -PyArgs_ParseTuple for output arrays - -:: - - PyObject * - PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd) - -Get Iterator broadcast to a particular shape - -:: - - void - _PyArray_SigintHandler(int signum) - - -:: - - void* - _PyArray_GetSigintBuf(void ) - - -:: - - int - PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) - - -Get type-descriptor from an object forcing alignment if possible -None goes to DEFAULT type. - -any object with the .fields attribute and/or .itemsize attribute (if the -.fields attribute does not give the total size -- i.e. a partial record -naming). If itemsize is given it must be >= size computed from fields - -The .fields attribute must return a convertible dictionary if present. -Result inherits from NPY_VOID. - -:: - - int - PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) - - -Get type-descriptor from an object forcing alignment if possible -None goes to NULL. - -:: - - int - PyArray_SearchsideConverter(PyObject *obj, void *addr) - -Convert object to searchsorted side - -:: - - PyObject * - PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags) - -PyArray_CheckAxis - -check that axis is valid -convert 0-d arrays to 1-d arrays - -:: - - npy_intp - PyArray_OverflowMultiplyList(npy_intp *l1, int n) - -Multiply a List of Non-negative numbers with over-flow detection. - -:: - - int - PyArray_CompareString(char *s1, char *s2, size_t len) - - -:: - - PyObject * - PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ... ) - -Get MultiIterator from array of Python objects and any additional - -PyObject **mps -- array of PyObjects -int n - number of PyObjects in the array -int nadd - number of additional arrays to include in the iterator. - -Returns a multi-iterator object. 
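Whichever constructor produced it, the multi-iterator is consumed the same way. A hedged sketch (``walk_broadcast_pair`` is hypothetical, and it assumes both operands are already float64, since the multi-iterator performs no casting)::

    static int
    walk_broadcast_pair(PyObject *a, PyObject *b)
    {
        PyArrayMultiIterObject *mit =
            (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);

        if (mit == NULL) {
            return -1;
        }
        while (PyArray_MultiIter_NOTDONE(mit)) {
            double x = *(double *)PyArray_MultiIter_DATA(mit, 0);
            double y = *(double *)PyArray_MultiIter_DATA(mit, 1);
            /* ... consume the broadcast pair (x, y) ... */
            (void)x; (void)y;
            PyArray_MultiIter_NEXT(mit);
        }
        Py_DECREF(mit);
        return 0;
    }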
- -:: - - int - PyArray_GetEndianness(void ) - - -:: - - unsigned int - PyArray_GetNDArrayCFeatureVersion(void ) - -Returns the built-in (at compilation time) C API version - -:: - - PyObject * - PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) - -correlate(a1,a2,mode) - -This function computes the usual correlation (correlate(a1, a2) != -correlate(a2, a1), and conjugate the second argument for complex inputs - -:: - - PyObject* - PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp - *bounds, int mode, PyArrayObject*fill) - -A Neighborhood Iterator object. - -:: - - void - PyArray_SetDatetimeParseFunction(PyObject *op) - -This function is scheduled to be removed - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - void - PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT - fr, npy_datetimestruct *result) - -Fill the datetime struct from the value and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - void - PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT - fr, npy_timedeltastruct *result) - -Fill the timedelta struct from the timedelta value and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - npy_datetime - PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT - fr, npy_datetimestruct *d) - -Create a datetime value from a filled datetime struct and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - npy_datetime - PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT - fr, npy_timedeltastruct *d) - -Create a timdelta value from a filled timedelta struct and resolution unit. - -TO BE REMOVED - NOT USED INTERNALLY. - -:: - - NpyIter * - NpyIter_New(PyArrayObject *op, npy_uint32 flags, NPY_ORDER - order, NPY_CASTING casting, PyArray_Descr*dtype) - -Allocate a new iterator for one array object. - -:: - - NpyIter * - NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32 - flags, NPY_ORDER order, NPY_CASTING - casting, npy_uint32 *op_flags, PyArray_Descr - **op_request_dtypes) - -Allocate a new iterator for more than one array object, using -standard NumPy broadcasting rules and the default buffer size. - -:: - - NpyIter * - NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 - flags, NPY_ORDER order, NPY_CASTING - casting, npy_uint32 *op_flags, PyArray_Descr - **op_request_dtypes, int oa_ndim, int - **op_axes, npy_intp *itershape, npy_intp - buffersize) - -Allocate a new iterator for multiple array objects, and advanced -options for controlling the broadcasting, shape, and buffer size. - -:: - - NpyIter * - NpyIter_Copy(NpyIter *iter) - -Makes a copy of the iterator - -:: - - int - NpyIter_Deallocate(NpyIter *iter) - -Deallocate an iterator - -:: - - npy_bool - NpyIter_HasDelayedBufAlloc(NpyIter *iter) - -Whether the buffer allocation is being delayed - -:: - - npy_bool - NpyIter_HasExternalLoop(NpyIter *iter) - -Whether the iterator handles the inner loop - -:: - - int - NpyIter_EnableExternalLoop(NpyIter *iter) - -Removes the inner loop handling (so HasExternalLoop returns true) - -:: - - npy_intp * - NpyIter_GetInnerStrideArray(NpyIter *iter) - -Get the array of strides for the inner loop (when HasExternalLoop is true) - -This function may be safely called without holding the Python GIL. - -:: - - npy_intp * - NpyIter_GetInnerLoopSizePtr(NpyIter *iter) - -Get a pointer to the size of the inner loop (when HasExternalLoop is true) - -This function may be safely called without holding the Python GIL. 
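Taken together, ``NpyIter_GetIterNext``, ``NpyIter_GetDataPtrArray``, ``NpyIter_GetInnerStrideArray`` and ``NpyIter_GetInnerLoopSizePtr`` form the standard external-loop pattern. A single-operand sketch (``visit_elements`` is hypothetical; the empty-array fast path is omitted for brevity)::

    static int
    visit_elements(PyArrayObject *arr)
    {
        NpyIter *iter;
        NpyIter_IterNextFunc *iternext;
        char **dataptr;
        npy_intp *strideptr, *sizeptr;

        iter = NpyIter_New(arr,
                           NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP,
                           NPY_KEEPORDER, NPY_NO_CASTING, NULL);
        if (iter == NULL) {
            return -1;
        }
        iternext = NpyIter_GetIterNext(iter, NULL);
        if (iternext == NULL) {
            NpyIter_Deallocate(iter);
            return -1;
        }
        dataptr = NpyIter_GetDataPtrArray(iter);
        strideptr = NpyIter_GetInnerStrideArray(iter);
        sizeptr = NpyIter_GetInnerLoopSizePtr(iter);

        do {
            char *data = *dataptr;
            npy_intp stride = *strideptr;
            npy_intp count = *sizeptr;

            while (count--) {
                /* ... process one element at 'data' ... */
                data += stride;
            }
        } while (iternext(iter));

        NpyIter_Deallocate(iter);
        return 0;
    }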
- -:: - - int - NpyIter_Reset(NpyIter *iter, char **errmsg) - -Resets the iterator to its initial state - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char - **errmsg) - -Resets the iterator to its initial state, with new base data pointers. -This function requires great caution. - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_ResetToIterIndexRange(NpyIter *iter, npy_intp istart, npy_intp - iend, char **errmsg) - -Resets the iterator to a new iterator index range - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_GetNDim(NpyIter *iter) - -Gets the number of dimensions being iterated - -:: - - int - NpyIter_GetNOp(NpyIter *iter) - -Gets the number of operands being iterated - -:: - - NpyIter_IterNextFunc * - NpyIter_GetIterNext(NpyIter *iter, char **errmsg) - -Compute the specialized iteration function for an iterator - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - npy_intp - NpyIter_GetIterSize(NpyIter *iter) - -Gets the number of elements being iterated - -:: - - void - NpyIter_GetIterIndexRange(NpyIter *iter, npy_intp *istart, npy_intp - *iend) - -Gets the range of iteration indices being iterated - -:: - - npy_intp - NpyIter_GetIterIndex(NpyIter *iter) - -Gets the current iteration index - -:: - - int - NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex) - -Sets the iterator position to the specified iterindex, -which matches the iteration order of the iterator. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - npy_bool - NpyIter_HasMultiIndex(NpyIter *iter) - -Whether the iterator is tracking a multi-index - -:: - - int - NpyIter_GetShape(NpyIter *iter, npy_intp *outshape) - -Gets the broadcast shape if a multi-index is being tracked by the iterator, -otherwise gets the shape of the iteration as Fortran-order -(fastest-changing index first). - -The reason Fortran-order is returned when a multi-index -is not enabled is that this is providing a direct view into how -the iterator traverses the n-dimensional space. The iterator organizes -its memory from fastest index to slowest index, and when -a multi-index is enabled, it uses a permutation to recover the original -order. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - NpyIter_GetMultiIndexFunc * - NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg) - -Compute a specialized get_multi_index function for the iterator - -If errmsg is non-NULL, it should point to a variable which will -receive the error message, and no Python exception will be set. -This is so that the function can be called from code not holding -the GIL. - -:: - - int - NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp *multi_index) - -Sets the iterator to the specified multi-index, which must have the -correct number of entries for 'ndim'. 
It is only valid -when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation -fails if the multi-index is out of bounds. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - int - NpyIter_RemoveMultiIndex(NpyIter *iter) - -Removes multi-index support from an iterator. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - npy_bool - NpyIter_HasIndex(NpyIter *iter) - -Whether the iterator is tracking an index - -:: - - npy_bool - NpyIter_IsBuffered(NpyIter *iter) - -Whether the iterator is buffered - -:: - - npy_bool - NpyIter_IsGrowInner(NpyIter *iter) - -Whether the inner loop can grow if buffering is unneeded - -:: - - npy_intp - NpyIter_GetBufferSize(NpyIter *iter) - -Gets the size of the buffer, or 0 if buffering is not enabled - -:: - - npy_intp * - NpyIter_GetIndexPtr(NpyIter *iter) - -Get a pointer to the index, if it is being tracked - -:: - - int - NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index) - -If the iterator is tracking an index, sets the iterator -to the specified index. - -Returns NPY_SUCCEED on success, NPY_FAIL on failure. - -:: - - char ** - NpyIter_GetDataPtrArray(NpyIter *iter) - -Get the array of data pointers (1 per object being iterated) - -This function may be safely called without holding the Python GIL. - -:: - - PyArray_Descr ** - NpyIter_GetDescrArray(NpyIter *iter) - -Get the array of data type pointers (1 per object being iterated) - -:: - - PyArrayObject ** - NpyIter_GetOperandArray(NpyIter *iter) - -Get the array of objects being iterated - -:: - - PyArrayObject * - NpyIter_GetIterView(NpyIter *iter, npy_intp i) - -Returns a view to the i-th object with the iterator's internal axes - -:: - - void - NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags) - -Gets an array of read flags (1 per object being iterated) - -:: - - void - NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags) - -Gets an array of write flags (1 per object being iterated) - -:: - - void - NpyIter_DebugPrint(NpyIter *iter) - -For debugging - -:: - - npy_bool - NpyIter_IterationNeedsAPI(NpyIter *iter) - -Whether the iteration loop, and in particular the iternext() -function, needs API access. If this is true, the GIL must -be retained while iterating. - -:: - - void - NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides) - -Get an array of strides which are fixed. Any strides which may -change during iteration receive the value NPY_MAX_INTP. Once -the iterator is ready to iterate, call this to get the strides -which will always be fixed in the inner loop, then choose optimized -inner loop functions which take advantage of those fixed strides. - -This function may be safely called without holding the Python GIL. - -:: - - int - NpyIter_RemoveAxis(NpyIter *iter, int axis) - -Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX -was set for iterator creation, and does not work if buffering is -enabled. This function also resets the iterator to its initial state. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - npy_intp * - NpyIter_GetAxisStrideArray(NpyIter *iter, int axis) - -Gets the array of strides for the specified axis. -If the iterator is tracking a multi-index, gets the strides -for the axis specified, otherwise gets the strides for -the iteration axis as Fortran order (fastest-changing axis first). - -Returns NULL if an error occurs. - -:: - - npy_bool - NpyIter_RequiresBuffering(NpyIter *iter) - -Whether the iteration could be done with no buffering. 
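Building on the external-loop sketch shown earlier, ``NpyIter_IterationNeedsAPI`` (documented above) is what gates releasing the GIL around the hot loop; a hedged fragment::

    /* Sketch: 'iter' and 'iternext' as in the earlier example. */
    if (NpyIter_IterationNeedsAPI(iter)) {
        do {
            /* ... loop body, GIL held ... */
        } while (iternext(iter));
    }
    else {
        Py_BEGIN_ALLOW_THREADS
        do {
            /* ... same loop body, GIL released ... */
        } while (iternext(iter));
        Py_END_ALLOW_THREADS
    }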
- -:: - - char ** - NpyIter_GetInitialDataPtrArray(NpyIter *iter) - -Get the array of data pointers (1 per object being iterated), -directly into the arrays (never pointing to a buffer), for starting -unbuffered iteration. This always returns the addresses for the -iterator position as reset to iterator index 0. - -These pointers are different from the pointers accepted by -NpyIter_ResetBasePointers, because the direction along some -axes may have been reversed, requiring base offsets. - -This function may be safely called without holding the Python GIL. - -:: - - int - NpyIter_CreateCompatibleStrides(NpyIter *iter, npy_intp - itemsize, npy_intp *outstrides) - -Builds a set of strides which are the same as the strides of an -output array created using the NPY_ITER_ALLOCATE flag, where NULL -was passed for op_axes. This is for data packed contiguously, -but not necessarily in C or Fortran order. This should be used -together with NpyIter_GetShape and NpyIter_GetNDim. - -A use case for this function is to match the shape and layout of -the iterator and tack on one or more dimensions. For example, -in order to generate a vector per input value for a numerical gradient, -you pass in ndim*itemsize for itemsize, then add another dimension to -the end with size ndim and stride itemsize. To do the Hessian matrix, -you do the same thing but add two dimensions, or take advantage of -the symmetry and pack it into 1 dimension with a particular encoding. - -This function may only be called if the iterator is tracking a multi-index -and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from -being iterated in reverse order. - -If an array is created with this method, simply adding 'itemsize' -for each iteration will traverse the new array matching the -iterator. - -Returns NPY_SUCCEED or NPY_FAIL. - -:: - - int - PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) - -Convert any Python object, *obj*, to an NPY_CASTING enum. - -:: - - npy_intp - PyArray_CountNonzero(PyArrayObject *self) - -Counts the number of non-zero elements in the array. - -Returns -1 on error. - -:: - - PyArray_Descr * - PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2) - -Produces the smallest size and lowest kind type to which both -input types can be cast. - -:: - - PyArray_Descr * - PyArray_MinScalarType(PyArrayObject *arr) - -If arr is a scalar (has 0 dimensions) with a built-in number data type, -finds the smallest type size/kind which can still represent its data. -Otherwise, returns the array's data type. - - -:: - - PyArray_Descr * - PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, npy_intp - ndtypes, PyArray_Descr **dtypes) - -Produces the result type of a bunch of inputs, using the UFunc -type promotion rules. Use this function when you have a set of -input arrays, and need to determine an output array dtype. - -If all the inputs are scalars (have 0 dimensions) or the maximum "kind" -of the scalars is greater than the maximum "kind" of the arrays, does -a regular type promotion. - -Otherwise, does a type promotion on the MinScalarType -of all the inputs. Data types passed directly are treated as array -types. - - -:: - - npy_bool - PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr - *to, NPY_CASTING casting) - -Returns 1 if the array object may be cast to the given data type using -the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in -that it handles scalar arrays (0 dimensions) specially, by checking -their value. 
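A small sketch of ``PyArray_CanCastArrayTo`` above (``fits_in_float32`` is hypothetical; this assumes, as with ``PyArray_CanCastTo``, that the descriptor's reference count is left alone)::

    static int
    fits_in_float32(PyArrayObject *arr)
    {
        PyArray_Descr *f32 = PyArray_DescrFromType(NPY_FLOAT32);
        npy_bool ok;

        if (f32 == NULL) {
            return -1;
        }
        /* Value-checks 0-d arrays, per the description above. */
        ok = PyArray_CanCastArrayTo(arr, f32, NPY_SAFE_CASTING);
        Py_DECREF(f32);
        return ok ? 1 : 0;
    }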
- -:: - - npy_bool - PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr - *to, NPY_CASTING casting) - -Returns true if data of type 'from' may be cast to data of type -'to' according to the rule 'casting'. - -:: - - PyArrayObject * - PyArray_EinsteinSum(char *subscripts, npy_intp nop, PyArrayObject - **op_in, PyArray_Descr *dtype, NPY_ORDER - order, NPY_CASTING casting, PyArrayObject *out) - -This function provides summation of array elements according to -the Einstein summation convention. For example: -- trace(a) -> einsum("ii", a) -- transpose(a) -> einsum("ji", a) -- multiply(a,b) -> einsum(",", a, b) -- inner(a,b) -> einsum("i,i", a, b) -- outer(a,b) -> einsum("i,j", a, b) -- matvec(a,b) -> einsum("ij,j", a, b) -- matmat(a,b) -> einsum("ij,jk", a, b) - -subscripts: The string of subscripts for einstein summation. -nop: The number of operands -op_in: The array of operands -dtype: Either NULL, or the data type to force the calculation as. -order: The order for the calculation/the output axes. -casting: What kind of casts should be permitted. -out: Either NULL, or an array into which the output should be placed. - -By default, the labels get placed in alphabetical order -at the end of the output. So, if c = einsum("i,j", a, b) -then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b) -then c[i,j] = a[j]*b[i]. - -Alternatively, you can control the output order or prevent -an axis from being summed/force an axis to be summed by providing -indices for the output. This allows us to turn 'trace' into -'diag', for example. -- diag(a) -> einsum("ii->i", a) -- sum(a, axis=0) -> einsum("i...->", a) - -Subscripts at the beginning and end may be specified by -putting an ellipsis "..." in the middle. For example, -the function einsum("i...i", a) takes the diagonal of -the first and last dimensions of the operand, and -einsum("ij...,jk...->ik...") takes the matrix product using -the first two indices of each operand instead of the last two. - -When there is only one operand, no axes being summed, and -no output parameter, this function returns a view -into the operand instead of making a copy. - -:: - - PyObject * - PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER - order, PyArray_Descr *dtype, int subok) - -Creates a new array with the same shape as the provided one, -with possible memory layout order and data type changes. - -prototype - The array the new one should be like. -order - NPY_CORDER - C-contiguous result. -NPY_FORTRANORDER - Fortran-contiguous result. -NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise. -NPY_KEEPORDER - Keeps the axis ordering of prototype. -dtype - If not NULL, overrides the data type of the result. -subok - If 1, use the prototype's array subtype, otherwise -always create a base-class array. - -NOTE: If dtype is not NULL, steals the dtype reference. - -:: - - int - PyArray_GetArrayParamsFromObject(PyObject *op, PyArray_Descr - *requested_dtype, npy_bool - writeable, PyArray_Descr - **out_dtype, int *out_ndim, npy_intp - *out_dims, PyArrayObject - **out_arr, PyObject *context) - -Retrieves the array parameters for viewing/converting an arbitrary -PyObject* to a NumPy array. This allows the "innate type and shape" -of Python list-of-lists to be discovered without -actually converting to an array. - -In some cases, such as structured arrays and the __array__ interface, -a data type needs to be used to make sense of the object. When -this is needed, provide a Descr for 'requested_dtype', otherwise -provide NULL. This reference is not stolen. 
Also, if the requested -dtype doesn't modify the interpretation of the input, out_dtype will -still get the "innate" dtype of the object, not the dtype passed -in 'requested_dtype'. - -If writing to the value in 'op' is desired, set the boolean -'writeable' to 1. This raises an error when 'op' is a scalar, list -of lists, or other non-writeable 'op'. - -Result: When success (0 return value) is returned, either out_arr -is filled with a non-NULL PyArrayObject and -the rest of the parameters are untouched, or out_arr is -filled with NULL, and the rest of the parameters are -filled. - -Typical usage: - -PyArrayObject *arr = NULL; -PyArray_Descr *dtype = NULL; -int ndim = 0; -npy_intp dims[NPY_MAXDIMS]; - -if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype, -&ndim, dims, &arr, NULL) < 0) { -return NULL; -} -if (arr == NULL) { -... validate/change dtype, validate flags, ndim, etc ... -// Could make custom strides here too -arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, -dims, NULL, -is_f_order ? NPY_ARRAY_F_CONTIGUOUS : 0, -NULL); -if (arr == NULL) { -return NULL; -} -if (PyArray_CopyObject(arr, op) < 0) { -Py_DECREF(arr); -return NULL; -} -} -else { -... in this case the other parameters weren't filled, just -validate and possibly copy arr itself ... -} -... use arr ... - -:: - - int - PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE - *modes, int n) - -Convert an object to an array of n NPY_CLIPMODE values. -This is intended to be used in functions where a different mode -could be applied to each axis, like in ravel_multi_index. - -:: - - PyObject * - PyArray_MatrixProduct2(PyObject *op1, PyObject - *op2, PyArrayObject*out) - -Numeric.matrixproduct(a,v,out) -just like inner product but does the swapaxes stuff on the fly - -:: - - npy_bool - NpyIter_IsFirstVisit(NpyIter *iter, int iop) - -Checks to see whether this is the first time the elements -of the specified reduction operand which the iterator points at are -being seen for the first time. The function returns -a reasonable answer for reduction operands and when buffering is -disabled. The answer may be incorrect for buffered non-reduction -operands. - -This function is intended to be used in EXTERNAL_LOOP mode only, -and will produce some wrong answers when that mode is not enabled. - -If this function returns true, the caller should also -check the inner loop stride of the operand, because if -that stride is 0, then only the first element of the innermost -external loop is being visited for the first time. - -WARNING: For performance reasons, 'iop' is not bounds-checked, -it is not confirmed that 'iop' is actually a reduction -operand, and it is not confirmed that EXTERNAL_LOOP -mode is enabled. These checks are the responsibility of -the caller, and should be done outside of any inner loops. - -:: - - int - PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj) - -Sets the 'base' attribute of the array. This steals a reference -to 'obj'. - -Returns 0 on success, -1 on failure. - -:: - - void - PyArray_CreateSortedStridePerm(int ndim, npy_intp - *strides, npy_stride_sort_item - *out_strideperm) - - -This function populates the first ndim elements -of strideperm with sorted descending by their absolute values. -For example, the stride array (4, -2, 12) becomes -[(2, 12), (0, 4), (1, -2)]. - -:: - - void - PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags) - - -Removes the axes flagged as True from the array, -modifying it in place. 
If an axis flagged for removal -has a shape entry bigger than one, this effectively selects -index zero for that axis. - -WARNING: If an axis flagged for removal has a shape equal to zero, -the array will point to invalid memory. The caller must -validate this! -If an axis flagged for removal has a shape larger then one, -the aligned flag (and in the future the contiguous flags), -may need explicite update. -(check also NPY_RELAXED_STRIDES_CHECKING) - -For example, this can be used to remove the reduction axes -from a reduction result once its computation is complete. - -:: - - void - PyArray_DebugPrint(PyArrayObject *obj) - -Prints the raw data of the ndarray in a form useful for debugging -low-level C issues. - -:: - - int - PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name) - - -This function does nothing if obj is writeable, and raises an exception -(and returns -1) if obj is not writeable. It may also do other -house-keeping, such as issuing warnings on arrays which are transitioning -to become views. Always call this function at some point before writing to -an array. - -'name' is a name for the array, used to give better error -messages. Something like "assignment destination", "output array", or even -just "array". - -:: - - int - PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) - - -Precondition: 'arr' is a copy of 'base' (though possibly with different -strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the -->base pointer on 'arr', so that when 'arr' is destructed, it will copy any -changes back to 'base'. - -Steals a reference to 'base'. - -Returns 0 on success, -1 on failure. - -:: - - void * - PyDataMem_NEW(size_t size) - -Allocates memory for array data. - -:: - - void - PyDataMem_FREE(void *ptr) - -Free memory for array data. - -:: - - void * - PyDataMem_RENEW(void *ptr, size_t size) - -Reallocate/resize memory for array data. - -:: - - PyDataMem_EventHookFunc * - PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void - *user_data, void **old_data) - -Sets the allocation event hook for numpy array data. -Takes a PyDataMem_EventHookFunc *, which has the signature: -void hook(void *old, void *new, size_t size, void *user_data). -Also takes a void *user_data, and void **old_data. - -Returns a pointer to the previous hook or NULL. If old_data is -non-NULL, the previous user_data pointer will be copied to it. - -If not NULL, hook will be called at the end of each PyDataMem_NEW/FREE/RENEW: -result = PyDataMem_NEW(size) -> (*hook)(NULL, result, size, user_data) -PyDataMem_FREE(ptr) -> (*hook)(ptr, NULL, 0, user_data) -result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data) - -When the hook is called, the GIL will be held by the calling -thread. The hook should be written to be reentrant, if it performs -operations that might cause new allocation events (such as the -creation/descruction numpy objects, or creating/destroying Python -objects which might cause a gc) - -:: - - void - PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject - **ret, int getmap) - - -:: - - PyObject * - PyArray_MapIterArray(PyArrayObject *a, PyObject *index) - - -Use advanced indexing to iterate an array. Please note -that most of this public API is currently not guaranteed -to stay the same between versions. If you plan on using -it, please consider adding more utility functions here -to accommodate new features. 
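To ground the ``PyDataMem_SetEventHook`` entry above, a hedged sketch of a logging hook (``log_hook`` and ``install_numpy_alloc_logging`` are hypothetical; assumes ``<stdio.h>`` in addition to the NumPy headers)::

    /* Sketch: matches the documented hook signature. */
    static void
    log_hook(void *oldp, void *newp, size_t size, void *user_data)
    {
        fprintf((FILE *)user_data, "numpy alloc: %p -> %p (%zu bytes)\n",
                oldp, newp, size);
    }

    static void
    install_numpy_alloc_logging(void)
    {
        void *prev_data = NULL;
        PyDataMem_EventHookFunc *prev_hook =
            PyDataMem_SetEventHook(log_hook, (void *)stderr, &prev_data);

        (void)prev_hook;    /* keep these if you plan to restore later */
        (void)prev_data;
    }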
- -:: - - void - PyArray_MapIterNext(PyArrayMapIterObject *mit) - -This function needs to update the state of the map iterator -and point mit->dataptr to the memory location of the next object. - -Note that this function never handles an extra operand but provides -compatibility for an old (exposed) API. - -:: - - int - PyArray_Partition(PyArrayObject *op, PyArrayObject *ktharray, int - axis, NPY_SELECTKIND which) - -Partition an array in-place. - -:: - - PyObject * - PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int - axis, NPY_SELECTKIND which) - -ArgPartition an array. - -:: - - int - PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind) - -Convert an object to a select kind. - -:: - - void * - PyDataMem_NEW_ZEROED(size_t size, size_t elsize) - -Allocates zeroed memory for array data. - diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarrayobject.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarrayobject.h deleted file mode 100644 index b8c7c3a2d38e6..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarrayobject.h +++ /dev/null @@ -1,237 +0,0 @@ -/* - * DON'T INCLUDE THIS DIRECTLY. - */ - -#ifndef NPY_NDARRAYOBJECT_H -#define NPY_NDARRAYOBJECT_H -#ifdef __cplusplus -#define CONFUSE_EMACS { -#define CONFUSE_EMACS2 } -extern "C" CONFUSE_EMACS -#undef CONFUSE_EMACS -#undef CONFUSE_EMACS2 -/* ... otherwise a semi-smart indenter (like emacs) tries to indent - everything when you're typing */ -#endif - -#include "ndarraytypes.h" - -/* Includes the "function" C-API -- these are all stored in a - list of pointers --- one for each file. - The two lists are concatenated into one in multiarray. - - They are available as import_array() -*/ - -#include "__multiarray_api.h" - - -/* C-API that requires the previous API to be defined */ - -#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type) - -#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) -#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) - -#define PyArray_HasArrayInterfaceType(op, type, context, out) \ - ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ - (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ - (((out)=PyArray_FromArrayAttr(op, type, context)) != \ - Py_NotImplemented)) - -#define PyArray_HasArrayInterface(op, out) \ - PyArray_HasArrayInterfaceType(op, NULL, NULL, out) - -#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ - (PyArray_NDIM((PyArrayObject *)op) == 0)) - -#define PyArray_IsScalar(obj, cls) \ - (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) - -#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ - PyArray_IsZeroDim(m)) - -#define PyArray_IsPythonNumber(obj) \ - (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \ - PyLong_Check(obj) || PyBool_Check(obj)) - -#define PyArray_IsPythonScalar(obj) \ - (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \ - PyUnicode_Check(obj)) - -#define PyArray_IsAnyScalar(obj) \ - (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) - -#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ - PyArray_CheckScalar(obj)) - -#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \ - || PyLong_Check(obj) \ - || PyArray_IsScalar((obj), Integer))
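The scalar-check macros above compose into a simple classification; a hedged sketch follows (the function name classify_scalarish and the PyObject *obj input are hypothetical, and nothing below is from the original header):

    /* Hedged sketch: classify an arbitrary object with the macros above. */
    static const char *
    classify_scalarish(PyObject *obj)
    {
        if (PyArray_IsPythonScalar(obj)) {
            return "python scalar";      /* plain number or string */
        }
        else if (PyArray_IsScalar(obj, Generic)) {
            return "numpy array scalar"; /* e.g. what indexing a 1-d array returns */
        }
        else if (PyArray_IsZeroDim(obj)) {
            return "0-d ndarray";        /* PyArray_CheckScalar(obj) is also true */
        }
        return "not scalar-like";
    }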
- - -#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \ - Py_INCREF(m), (m) : \ - (PyArrayObject *)(PyArray_Copy(m))) - -#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ - PyArray_CompareLists(PyArray_DIMS(a1), \ - PyArray_DIMS(a2), \ - PyArray_NDIM(a1))) - -#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) -#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) -#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) - -#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ - NULL) - -#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ - PyArray_DescrFromType(type), 0, 0, 0, NULL); - -#define PyArray_FROM_OTF(m, type, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ - (((flags) & NPY_ARRAY_ENSURECOPY) ? \ - ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) - -#define PyArray_FROMANY(m, type, min, max, flags) \ - PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ - (((flags) & NPY_ARRAY_ENSURECOPY) ? \ - (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) - -#define PyArray_ZEROS(m, dims, type, is_f_order) \ - PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) - -#define PyArray_EMPTY(m, dims, type, is_f_order) \ - PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) - -#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ - PyArray_NBYTES(obj)) - -#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) -#define NPY_REFCOUNT PyArray_REFCOUNT -#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) - -#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_DEFAULT, NULL) - -#define PyArray_EquivArrTypes(a1, a2) \ - PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) - -#define PyArray_EquivByteorders(b1, b2) \ - (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) - -#define PyArray_SimpleNew(nd, dims, typenum) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) - -#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ - PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ - data, 0, NPY_ARRAY_CARRAY, NULL) - -#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ - PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ - NULL, NULL, 0, NULL) - -#define PyArray_ToScalar(data, arr) \ - PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) - - -/* These might be faster without the dereferencing of obj - going on inside -- of course an optimizing compiler should - inline the constants inside a for loop, making it a moot point -*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0])) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1])) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2])) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDES(obj)[0] + \ - (j)*PyArray_STRIDES(obj)[1] + \ - (k)*PyArray_STRIDES(obj)[2] + \ - (l)*PyArray_STRIDES(obj)[3])) - -static NPY_INLINE void -PyArray_XDECREF_ERR(PyArrayObject *arr) -{ - if (arr != NULL) { - if (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY) { - PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr); - PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE); - PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); - } - 
Py_DECREF(arr); - } -} - -#define PyArray_DESCR_REPLACE(descr) do { \ - PyArray_Descr *_new_; \ - _new_ = PyArray_DescrNew(descr); \ - Py_XDECREF(descr); \ - descr = _new_; \ - } while(0) - -/* Copy should always return contiguous array */ -#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) - -#define PyArray_FromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_BEHAVED | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_DEFAULT | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ - PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ - max_depth, NPY_ARRAY_ENSURECOPY | \ - NPY_ARRAY_DEFAULT | \ - NPY_ARRAY_ENSUREARRAY, NULL) - -#define PyArray_Cast(mp, type_num) \ - PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) - -#define PyArray_Take(ap, items, axis) \ - PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) - -#define PyArray_Put(ap, items, values) \ - PyArray_PutTo(ap, items, values, NPY_RAISE) - -/* Compatibility with old Numeric stuff -- don't use in new code */ - -#define PyArray_FromDimsAndData(nd, d, type, data) \ - PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ - data) - - -/* - Check to see if this key in the dictionary is the "title" - entry of the tuple (i.e. a duplicate dictionary entry in the fields - dict. -*/ - -#define NPY_TITLE_KEY(key, value) ((PyTuple_GET_SIZE((value))==3) && \ - (PyTuple_GET_ITEM((value), 2) == (key))) - - -#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) -#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) - - -#ifdef __cplusplus -} -#endif - - -#endif /* NPY_NDARRAYOBJECT_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarraytypes.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarraytypes.h deleted file mode 100644 index 21ff8cd1ae894..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ndarraytypes.h +++ /dev/null @@ -1,1820 +0,0 @@ -#ifndef NDARRAYTYPES_H -#define NDARRAYTYPES_H - -#include "npy_common.h" -#include "npy_endian.h" -#include "npy_cpu.h" -#include "utils.h" - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - - - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. - * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. 
This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. - */ -#define NPY_FEATURE_VERSION NPY_API_VERSION - -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. - */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - -/* - * These characters correspond to the array type and the struct - * module - */ - -enum NPY_TYPECHAR { - NPY_BOOLLTR = '?', - NPY_BYTELTR = 'b', - NPY_UBYTELTR = 'B', - NPY_SHORTLTR = 'h', - NPY_USHORTLTR = 'H', - NPY_INTLTR = 'i', - NPY_UINTLTR = 'I', - NPY_LONGLTR = 'l', - NPY_ULONGLTR = 'L', - NPY_LONGLONGLTR = 'q', - NPY_ULONGLONGLTR = 'Q', - NPY_HALFLTR = 'e', - NPY_FLOATLTR = 'f', - NPY_DOUBLELTR = 'd', - NPY_LONGDOUBLELTR = 'g', - NPY_CFLOATLTR = 'F', - NPY_CDOUBLELTR = 'D', - NPY_CLONGDOUBLELTR = 'G', - NPY_OBJECTLTR = 'O', - NPY_STRINGLTR = 'S', - NPY_STRINGLTR2 = 'a', - NPY_UNICODELTR = 'U', - NPY_VOIDLTR = 'V', - NPY_DATETIMELTR = 'M', - NPY_TIMEDELTALTR = 'm', - NPY_CHARLTR = 'c', - - /* - * No Descriptor, just a define -- this let's - * Python users specify an array of integers - * large enough to hold a pointer on the - * platform - */ - NPY_INTPLTR = 'p', - NPY_UINTPLTR = 'P', - - /* - * These are for dtype 'kinds', not dtype 'typecodes' - * as the above are for. 
- */ - NPY_GENBOOLLTR ='b', - NPY_SIGNEDLTR = 'i', - NPY_UNSIGNEDLTR = 'u', - NPY_FLOATINGLTR = 'f', - NPY_COMPLEXLTR = 'c' -}; - -typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0, -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { - NPY_NOSCALAR=-1, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR -} NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) - -/* For specifying array memory layout or iteration order */ -typedef enum { - /* Fortran order if inputs are all Fortran, C otherwise */ - NPY_ANYORDER=-1, - /* C order */ - NPY_CORDER=0, - /* Fortran order */ - NPY_FORTRANORDER=1, - /* An order as close to the inputs as possible */ - NPY_KEEPORDER=2 -} NPY_ORDER; - -/* For specifying allowed casting in operations which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4, - - /* - * Temporary internal definition only, will be removed in upcoming - * release, see below - * */ - NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -/* The special not-a-time (NaT) value */ -#define NPY_DATETIME_NAT NPY_MIN_INT64 - -/* - * Upper bound on the length of a DATETIME ISO 8601 string - * YEAR: 21 (64-bit year) - * MONTH: 3 - * DAY: 3 - * HOURS: 3 - * MINUTES: 3 - * SECONDS: 3 - * ATTOSECONDS: 1 + 3*6 - * TIMEZONE: 5 - * NULL TERMINATOR: 1 - */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) - -typedef enum { - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ - /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10,/* nanoseconds */ - NPY_FR_ps = 11,/* picoseconds */ - NPY_FR_fs = 12,/* femtoseconds */ - NPY_FR_as = 13,/* attoseconds */ - NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ -} NPY_DATETIMEUNIT; - -/* - * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. - */ -#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC - -/* - * Business day conventions for mapping invalid business - * days to valid business days. - */ -typedef enum { - /* Go forward in time to the following business day. */ - NPY_BUSDAY_FORWARD, - NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, - /* Go backward in time to the preceding business day. */ - NPY_BUSDAY_BACKWARD, - NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, - /* - * Go forward in time to the following business day, unless it - * crosses a month boundary, in which case go backward - */ - NPY_BUSDAY_MODIFIEDFOLLOWING, - /* - * Go backward in time to the preceding business day, unless it - * crosses a month boundary, in which case go forward. 
- */ - NPY_BUSDAY_MODIFIEDPRECEDING, - /* Produce a NaT for non-business days. */ - NPY_BUSDAY_NAT, - /* Raise an exception for non-business days. */ - NPY_BUSDAY_RAISE -} NPY_BUSDAY_ROLL; - -/************************************************************ - * NumPy Auxiliary Data for inner loops, sort functions, etc. - ************************************************************/ - -/* - * When creating an auxiliary data struct, this should always appear - * as the first member, like this: - * - * typedef struct { - * NpyAuxData base; - * double constant; - * } constant_multiplier_aux_data; - */ -typedef struct NpyAuxData_tag NpyAuxData; - -/* Function pointers for freeing or cloning auxiliary data */ -typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); -typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); - -struct NpyAuxData_tag { - NpyAuxData_FreeFunc *free; - NpyAuxData_CloneFunc *clone; - /* To allow for a bit of expansion without breaking the ABI */ - void *reserved[2]; -}; - -/* Macros to use for freeing and cloning auxiliary data */ -#define NPY_AUXDATA_FREE(auxdata) \ - do { \ - if ((auxdata) != NULL) { \ - (auxdata)->free(auxdata); \ - } \ - } while(0) -#define NPY_AUXDATA_CLONE(auxdata) \ - ((auxdata)->clone(auxdata)) - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* - * Macros to define how array, and dimension/strides data is - * allocated. - */ - - /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. 
- */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); -typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); -typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. 
- * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. */ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* A sticky flag specifically for structured arrays */ -#define NPY_ALIGNED_STRUCT 0x80 - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - PyTypeObject *typeobj; - /* kind for this type */ - char kind; - /* unique-character representing this type */ - char type; - /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). - */ - char byteorder; - /* flags describing data type */ - char flags; - /* number representing this type */ - int type_num; - /* element size (itemsize) for this type */ - int elsize; - /* alignment needed for this type */ - int alignment; - /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - struct _arr_descr *subarray; - /* - * The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - PyObject *fields; - /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - PyObject *names; - /* - * a table of functions specific for each - * basic data descriptor - */ - PyArray_ArrFuncs *f; - /* Metadata about this dtype */ - PyObject *metadata; - /* - * Metadata specific to the C implementation - * of the particular dtype. This was added - * for NumPy 1.7.0. - */ - NpyAuxData *c_metadata; -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - * The main array object structure. - * - * It has been recommended to use the inline functions defined below - * (PyArray_DATA and friends) to access fields here for a number of - * releases. Direct access to the members themselves is deprecated. - * To ensure that your code does not use deprecated access, - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - * (or NPY_1_8_API_VERSION or higher as required). 
- */ -/* This struct will be moved to a private header in a future release */ -typedef struct tagPyArrayObject_fields { - PyObject_HEAD - /* Pointer to the raw data buffer */ - char *data; - /* The number of dimensions, also called 'ndim' */ - int nd; - /* The size in each dimension, also called 'shape' */ - npy_intp *dimensions; - /* - * Number of bytes to jump to get to the - * next element in each dimension - */ - npy_intp *strides; - /* - * This object is decref'd upon - * deletion of array. Except in the - * case of UPDATEIFCOPY which has - * special handling. - * - * For views it points to the original - * array, collapsed so no chains of - * views occur. - * - * For creation from buffer object it - * points to an object that shold be - * decref'd on deletion - * - * For UPDATEIFCOPY flag this is an - * array to-be-updated upon deletion - * of this one - */ - PyObject *base; - /* Pointer to type structure */ - PyArray_Descr *descr; - /* Flags describing array -- see below */ - int flags; - /* For weak references */ - PyObject *weakreflist; -} PyArrayObject_fields; - -/* - * To hide the implementation details, we only expose - * the Python struct HEAD. - */ -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -/* - * Can't put this in npy_deprecated_api.h like the others. - * PyArrayObject field access is deprecated as of NumPy 1.7. - */ -typedef PyArrayObject_fields PyArrayObject; -#else -typedef struct tagPyArrayObject { - PyObject_HEAD -} PyArrayObject; -#endif - -#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - -typedef struct { - NPY_DATETIMEUNIT base; - int num; -} PyArray_DatetimeMetaData; - -typedef struct { - NpyAuxData base; - PyArray_DatetimeMetaData meta; -} PyArray_DatetimeDTypeMetaData; - -/* - * This structure contains an exploded view of a date-time value. - * NaT is represented by year == NPY_DATETIME_NAT. - */ -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -/* This is not used internally. */ -typedef struct { - npy_int64 day; - npy_int32 sec, us, ps, as; -} npy_timedeltastruct; - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* - * Means c-style contiguous (last index varies the fastest). The data - * elements right after each other. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 - -/* - * Set if array is a contiguous Fortran array: the first index varies - * the fastest in memory (strides array is reverse of C-contiguous - * array) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 - -/* - * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a - * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with - * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS - * at the same time if they have either zero or one element. 
- * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional - * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements - * and the array is contiguous if ndarray.squeeze() is contiguous. - * I.e. dimensions for which `ndarray.shape[dimension] == 1` are - * ignored. - */ - -/* - * If set, the array owns the data: it will be free'd when the array - * is deleted. - * - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_OWNDATA 0x0004 - -/* - * An array never has the next four set; they're only used as parameter - * flags to the the various FromAny functions - * - * This flag may be requested in constructor functions. - */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_ARRAY_FORCECAST 0x0010 - -/* - * Always copy the array. Returned arrays are always CONTIGUOUS, - * ALIGNED, and WRITEABLE. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSURECOPY 0x0020 - -/* - * Make sure the returned array is a base-class ndarray - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSUREARRAY 0x0040 - -/* - * Make sure that the strides are in units of the element size Needed - * for some operations with record-arrays. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 - -/* - * Array data is aligned on the appropiate memory address for the type - * stored according to how the compiler would align things (e.g., an - * array of integers (4 bytes each) starts on a memory address that's - * a multiple of 4) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_ALIGNED 0x0100 - -/* - * Array data has the native endianness - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_NOTSWAPPED 0x0200 - -/* - * Array data is writeable - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_WRITEABLE 0x0400 - -/* - * If this flag is set, then base contains a pointer to an array of - * the same size that should be updated with the current contents of - * this array when this array is deallocated - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -/* - * NOTE: there are also internal flags defined in multiarray/arrayobject.h, - * which start at bit 31 and work down. 
- */ - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -/* This flag is for the array interface, not PyArrayObject */ -#define NPY_ARR_HAS_DESCR 0x0800 - - - - -/* - * Size of internal buffers used for alignment Make BUFSIZE a multiple - * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned - */ -#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) -#define NPY_BUFSIZE 8192 -/* buffer stress test size: */ -/*#define NPY_BUFSIZE 17*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) - -/* - * C API: consists of Macros and functions. The MACROS are defined - * here. 
- */ - - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -/* the variable is used in some places, so always define it */ -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); -#define NPY_END_THREADS do { if (_save) \ - { PyEval_RestoreThread(_save); _save = NULL;} } while (0); -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ - { _save = PyEval_SaveThread();} } while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); -#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -/********************************** - * The nditer object, added in 1.6 - **********************************/ - -/* The actual structure of the iterator is an internal detail */ -typedef struct NpyIter_InternalOnly NpyIter; - -/* Iterator function pointers that may be specialized */ -typedef int (NpyIter_IterNextFunc)(NpyIter *iter); -typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, - npy_intp *outcoords); - -/*** Global flags that may be passed to the iterator constructors ***/ - -/* Track an index representing C order */ -#define NPY_ITER_C_INDEX 0x00000001 -/* Track an index representing Fortran order */ -#define NPY_ITER_F_INDEX 0x00000002 -/* Track a multi-index */ -#define NPY_ITER_MULTI_INDEX 0x00000004 -/* User code external to the iterator does the 1-dimensional innermost loop */ -#define NPY_ITER_EXTERNAL_LOOP 0x00000008 -/* Convert all the operands to a common data type */ -#define NPY_ITER_COMMON_DTYPE 0x00000010 -/* Operands may hold references, requiring API access during iteration */ -#define NPY_ITER_REFS_OK 0x00000020 -/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ -#define NPY_ITER_ZEROSIZE_OK 0x00000040 -/* Permits reductions (size-0 stride with dimension size > 1) */ -#define NPY_ITER_REDUCE_OK 0x00000080 -/* Enables sub-range iteration */ -#define NPY_ITER_RANGED 0x00000100 -/* Enables buffering */ -#define NPY_ITER_BUFFERED 0x00000200 -/* When buffering is enabled, grows the inner loop if possible */ -#define NPY_ITER_GROWINNER 0x00000400 -/* Delay allocation of buffers until first Reset* call */ -#define NPY_ITER_DELAY_BUFALLOC 0x00000800 -/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ -#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 - -/*** Per-operand flags that may be 
passed to the iterator constructors ***/ - -/* The operand will be read from and written to */ -#define NPY_ITER_READWRITE 0x00010000 -/* The operand will only be read from */ -#define NPY_ITER_READONLY 0x00020000 -/* The operand will only be written to */ -#define NPY_ITER_WRITEONLY 0x00040000 -/* The operand's data must be in native byte order */ -#define NPY_ITER_NBO 0x00080000 -/* The operand's data must be aligned */ -#define NPY_ITER_ALIGNED 0x00100000 -/* The operand's data must be contiguous (within the inner loop) */ -#define NPY_ITER_CONTIG 0x00200000 -/* The operand may be copied to satisfy requirements */ -#define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ -#define NPY_ITER_UPDATEIFCOPY 0x00800000 -/* Allocate the operand if it is NULL */ -#define NPY_ITER_ALLOCATE 0x01000000 -/* If an operand is allocated, don't use any subtype */ -#define NPY_ITER_NO_SUBTYPE 0x02000000 -/* This is a virtual array slot, operand is NULL but temporary data is there */ -#define NPY_ITER_VIRTUAL 0x04000000 -/* Require that the dimension match the iterator dimensions exactly */ -#define NPY_ITER_NO_BROADCAST 0x08000000 -/* A mask is being used on this array, affects buffer -> array copy */ -#define NPY_ITER_WRITEMASKED 0x10000000 -/* This array is the mask for all WRITEMASKED operands */ -#define NPY_ITER_ARRAYMASK 0x20000000 - -#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff -#define NPY_ITER_PER_OP_FLAGS 0xffff0000 - - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* - * type of the function which translates a set of coordinates to a - * pointer to the data - */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) do { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} while (0) - -#define _PyArray_ITER_NEXT1(it) do { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} while (0) - -#define _PyArray_ITER_NEXT2(it) do { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} while (0) - -#define _PyArray_ITER_NEXT3(it) do { \ - if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ - (it)->coordinates[2]++; \ - (it)->dataptr += (it)->strides[2]; \ - } \ - else { \ - (it)->coordinates[2] = 0; \ - (it)->dataptr -= 
(it)->backstrides[2]; \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] \ - (it)->backstrides[1]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_NEXT(it) do { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - _PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} while (0) - -#define PyArray_ITER_GOTO(it, destination) do { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} while (0) - -#define PyArray_ITER_GOTO1D(it, ind) do { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp) (ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - * Any object passed to PyArray_Broadcast must be binary compatible - * with this structure. 
- */ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_NEXT(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_GOTO(multi, dest) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - - -/* - * Store the information needed for fancy-indexing over an array. The - * fields are slightly unordered to keep consec, dataptr and subspace - * where they were originally. - */ -typedef struct { - PyObject_HEAD - /* - * Multi-iterator portion --- needs to be present in this - * order to work with PyArray_Broadcast - */ - - int numiter; /* number of index-array - iterators */ - npy_intp size; /* size of broadcasted - result */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - NpyIter *outer; /* index objects - iterator */ - void *unused[NPY_MAXDIMS - 2]; - PyArrayObject *array; - /* Flat iterator for the indexed array. For compatibility solely. */ - PyArrayIterObject *ait; - - /* - * Subspace array. For binary compatibility (was an iterator, - * but only the check for NULL should be used). - */ - PyArrayObject *subspace; - - /* - * if subspace iteration, then this is the array of axes in - * the underlying array represented by the index objects - */ - int iteraxes[NPY_MAXDIMS]; - npy_intp fancy_strides[NPY_MAXDIMS]; - - /* pointer when all fancy indices are 0 */ - char *baseoffset; - - /* - * after binding consec denotes at which axis the fancy axes - * are inserted. - */ - int consec; - char *dataptr; - - int nd_fancy; - npy_intp fancy_dims[NPY_MAXDIMS]; - - /* Whether the iterator (any of the iterators) requires API */ - int needs_api; - - /* - * Extra op information. - */ - PyArrayObject *extra_op; - PyArray_Descr *extra_op_dtype; /* desired dtype */ - npy_uint32 *extra_op_flags; /* Iterator flags */ - - NpyIter *extra_op_iter; - NpyIter_IterNextFunc *extra_op_next; - char **extra_op_ptrs; - - /* - * Information about the iteration state. 
- */ - NpyIter_IterNextFunc *outer_next; - char **outer_ptrs; - npy_intp *outer_strides; - - /* - * Information about the subspace iterator. - */ - NpyIter *subspace_iter; - NpyIter_IterNextFunc *subspace_next; - char **subspace_ptrs; - npy_intp *subspace_strides; - - /* Count for the external loop (which ever it is) for API iteration */ - npy_intp iter_count; - -} PyArrayMapIterObject; - -enum { - NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, - NPY_NEIGHBORHOOD_ITER_ONE_PADDING, - NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, - NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, - NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING -}; - -typedef struct { - PyObject_HEAD - - /* - * PyArrayIterObject part: keep this in this exact order - */ - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; - - /* - * New members - */ - npy_intp nd; - - /* Dimensions is the dimension of the array */ - npy_intp dimensions[NPY_MAXDIMS]; - - /* - * Neighborhood points coordinates are computed relatively to the - * point pointed by _internal_iter - */ - PyArrayIterObject* _internal_iter; - /* - * To keep a reference to the representation of the constant value - * for constant padding - */ - char* constant; - - int mode; -} PyArrayNeighborhoodIterObject; - -/* - * Neighborhood iterator API - */ - -/* General: those work for any mode */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); -#if 0 -static NPY_INLINE int -PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); -#endif - -/* - * Include inline implementations - functions defined there are not - * considered public API - */ -#define _NPY_INCLUDE_NEIGHBORHOOD_IMP -#include "_neighborhood_iterator_imp.h" -#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP - -/* The default array type */ -#define NPY_DEFAULT_TYPE NPY_DOUBLE - -/* - * All sorts of useful ways to look into a PyArrayObject. It is recommended - * to use PyArrayObject * objects instead of always casting from PyObject *, - * for improved type checking. - * - * In many cases here the macro versions of the accessors are deprecated, - * but can't be immediately changed to inline functions because the - * preexisting macros accept PyObject * and do automatic casts. Inline - * functions accepting PyArrayObject * provides for some compile-time - * checking of correctness when working with these objects in C. - */ - -#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) - -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ - (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) - -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ - NPY_ARRAY_F_CONTIGUOUS : 0)) - -#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) -/* - * Changing access macros into functions, to allow for future hiding - * of the internal memory layout. This later hiding will allow the 2.x series - * to change the internal representation of arrays without affecting - * ABI compatibility. - */ - -static NPY_INLINE int -PyArray_NDIM(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->nd; -} - -static NPY_INLINE void * -PyArray_DATA(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE char * -PyArray_BYTES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE npy_intp * -PyArray_DIMS(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -static NPY_INLINE npy_intp * -PyArray_STRIDES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->strides; -} - -static NPY_INLINE npy_intp -PyArray_DIM(const PyArrayObject *arr, int idim) -{ - return ((PyArrayObject_fields *)arr)->dimensions[idim]; -} - -static NPY_INLINE npy_intp -PyArray_STRIDE(const PyArrayObject *arr, int istride) -{ - return ((PyArrayObject_fields *)arr)->strides[istride]; -} - -static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject * -PyArray_BASE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->base; -} - -static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr * -PyArray_DESCR(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE int -PyArray_FLAGS(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->flags; -} - -static NPY_INLINE npy_intp -PyArray_ITEMSIZE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->elsize; -} - -static NPY_INLINE int -PyArray_TYPE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->type_num; -} - -static NPY_INLINE int -PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) -{ - return (PyArray_FLAGS(arr) & flags) == flags; -} - -static NPY_INLINE PyObject * -PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) -{ - return ((PyArrayObject_fields *)arr)->descr->f->getitem( - (void *)itemptr, (PyArrayObject *)arr); -} - -static NPY_INLINE int -PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) -{ - return ((PyArrayObject_fields *)arr)->descr->f->setitem( - v, itemptr, arr); -} - -#else - -/* These macros are deprecated as of NumPy 1.7. 
*/ -#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) -#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) -#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) -#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) -#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) -#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) -#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) -#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) -#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) -#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) -#define PyArray_CHKFLAGS(m, FLAGS) \ - ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) -#define PyArray_ITEMSIZE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->elsize) -#define PyArray_TYPE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->type_num) -#define PyArray_GETITEM(obj,itemptr) \ - PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)) - -#define PyArray_SETITEM(obj,itemptr,v) \ - PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ - (char *)(itemptr), \ - (PyArrayObject *)(obj)) -#endif - -static NPY_INLINE PyArray_Descr * -PyArray_DTYPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE npy_intp * -PyArray_SHAPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -/* - * Enables the specified array flags. Does no checking, - * assumes you know what you're doing. - */ -static NPY_INLINE void -PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags |= flags; -} - -/* - * Clears the specified array flags. Does no checking, - * assumes you know what you're doing. 
- */ -static NPY_INLINE void -PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags &= ~flags; -} - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) - -#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ - ((type) == NPY_USHORT) || \ - ((type) == NPY_UINT) || \ - ((type) == NPY_ULONG) || \ - ((type) == NPY_ULONGLONG)) - -#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ - ((type) == NPY_SHORT) || \ - ((type) == NPY_INT) || \ - ((type) == NPY_LONG) || \ - ((type) == NPY_LONGLONG)) - -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) - -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ - ((type) == NPY_UNICODE)) - -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ - ((type) == NPY_DOUBLE) || \ - ((type) == NPY_CDOUBLE) || \ - ((type) == NPY_BOOL) || \ - ((type) == NPY_OBJECT )) - -#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ - ((type) <=NPY_VOID)) - -#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ - ((type) <=NPY_TIMEDELTA)) - -#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ - ((type) < NPY_USERDEF+ \ - NPY_NUMUSERTYPES)) - -#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ - PyTypeNum_ISUSERDEF(type)) - -#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) - - -#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) -#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) -#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) - -#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) -#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) -#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) -#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) -#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) -#define 
PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) -#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) -#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) -#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) -#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) -#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) -#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) -#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) - - /* - * FIXME: This should check for a flag on the data-type that - * states whether or not it is variable length. Because the - * ISFLEXIBLE check is hard-coded to the built-in data-types. - */ -#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) - -#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) - - -#define NPY_LITTLE '<' -#define NPY_BIG '>' -#define NPY_NATIVE '=' -#define NPY_SWAP 's' -#define NPY_IGNORE '|' - -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN -#define NPY_NATBYTE NPY_BIG -#define NPY_OPPBYTE NPY_LITTLE -#else -#define NPY_NATBYTE NPY_LITTLE -#define NPY_OPPBYTE NPY_BIG -#endif - -#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) -#define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) -#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - - -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) -#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) - -/************************************************************ - * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. - ************************************************************/ - -typedef struct { - npy_intp perm, stride; -} npy_stride_sort_item; - -/************************************************************ - * This is the form of the struct that's returned pointed by the - * PyCObject attribute of an array __array_struct__. See - * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full - * documentation. - ************************************************************/ -typedef struct { - int two; /* - * contains the integer 2 as a sanity - * check - */ - - int nd; /* number of dimensions */ - - char typekind; /* - * kind in array --- character code of - * typestr - */ - - int itemsize; /* size of each element */ - - int flags; /* - * how should be data interpreted. Valid - * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), - * ALIGNED (0x100), NOTSWAPPED (0x200), and - * WRITEABLE (0x400). 
ARR_HAS_DESCR (0x800) - * states that arrdescr field is present in - * structure - */ - - npy_intp *shape; /* - * A length-nd array of shape - * information - */ - - npy_intp *strides; /* A length-nd array of stride information */ - - void *data; /* A pointer to the first element of the array */ - - PyObject *descr; /* - * A list of fields or NULL (ignored if flags - * does not have ARR_HAS_DESCR flag set) - */ -} PyArrayInterface; - -/* - * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. - * See the documentation for PyDataMem_SetEventHook. - */ -typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, - void *user_data); - -/* - * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files - * npy_*_*_deprecated_api.h are only included from here and nowhere else. - */ -#ifdef NPY_DEPRECATED_INCLUDES -#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." -#endif -#define NPY_DEPRECATED_INCLUDES -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -#include "npy_1_7_deprecated_api.h" -#endif -/* - * There is no file npy_1_8_deprecated_api.h since there are no additional - * deprecated API features in NumPy 1.8. - * - * Note to maintainers: insert code like the following in future NumPy - * versions. - * - * #if !defined(NPY_NO_DEPRECATED_API) || \ - * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) - * #include "npy_1_9_deprecated_api.h" - * #endif - */ -#undef NPY_DEPRECATED_INCLUDES - -#endif /* NPY_ARRAYTYPES_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/noprefix.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/noprefix.h deleted file mode 100644 index 8306170876ba4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/noprefix.h +++ /dev/null @@ -1,209 +0,0 @@ -#ifndef NPY_NOPREFIX_H -#define NPY_NOPREFIX_H - -/* - * You can directly include noprefix.h as a backward - * compatibility measure - */ -#ifndef NPY_NO_PREFIX -#include "ndarrayobject.h" -#include "npy_interrupt.h" -#endif - -#define SIGSETJMP NPY_SIGSETJMP -#define SIGLONGJMP NPY_SIGLONGJMP -#define SIGJMP_BUF NPY_SIGJMP_BUF - -#define MAX_DIMS NPY_MAXDIMS - -#define longlong npy_longlong -#define ulonglong npy_ulonglong -#define Bool npy_bool -#define longdouble npy_longdouble -#define byte npy_byte - -#ifndef _BSD_SOURCE -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#endif - -#define ubyte npy_ubyte -#define ushort npy_ushort -#define uint npy_uint -#define ulong npy_ulong -#define cfloat npy_cfloat -#define cdouble npy_cdouble -#define clongdouble npy_clongdouble -#define Int8 npy_int8 -#define UInt8 npy_uint8 -#define Int16 npy_int16 -#define UInt16 npy_uint16 -#define Int32 npy_int32 -#define UInt32 npy_uint32 -#define Int64 npy_int64 -#define UInt64 npy_uint64 -#define Int128 npy_int128 -#define UInt128 npy_uint128 -#define Int256 npy_int256 -#define UInt256 npy_uint256 -#define Float16 npy_float16 -#define Complex32 npy_complex32 -#define Float32 npy_float32 -#define Complex64 npy_complex64 -#define Float64 npy_float64 -#define Complex128 npy_complex128 -#define Float80 npy_float80 -#define Complex160 npy_complex160 -#define Float96 npy_float96 -#define Complex192 npy_complex192 -#define Float128 npy_float128 -#define Complex256 npy_complex256 -#define intp npy_intp -#define uintp npy_uintp -#define datetime npy_datetime -#define timedelta npy_timedelta - -#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG -#define 
SIZEOF_INTP NPY_SIZEOF_INTP -#define SIZEOF_UINTP NPY_SIZEOF_UINTP -#define SIZEOF_HALF NPY_SIZEOF_HALF -#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE -#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME -#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA - -#define LONGLONG_FMT NPY_LONGLONG_FMT -#define ULONGLONG_FMT NPY_ULONGLONG_FMT -#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX -#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX - -#define MAX_INT8 127 -#define MIN_INT8 -128 -#define MAX_UINT8 255 -#define MAX_INT16 32767 -#define MIN_INT16 -32768 -#define MAX_UINT16 65535 -#define MAX_INT32 2147483647 -#define MIN_INT32 (-MAX_INT32 - 1) -#define MAX_UINT32 4294967295U -#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) -#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) -#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) -#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) -#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) -#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) - -#define MAX_BYTE NPY_MAX_BYTE -#define MIN_BYTE NPY_MIN_BYTE -#define MAX_UBYTE NPY_MAX_UBYTE -#define MAX_SHORT NPY_MAX_SHORT -#define MIN_SHORT NPY_MIN_SHORT -#define MAX_USHORT NPY_MAX_USHORT -#define MAX_INT NPY_MAX_INT -#define MIN_INT NPY_MIN_INT -#define MAX_UINT NPY_MAX_UINT -#define MAX_LONG NPY_MAX_LONG -#define MIN_LONG NPY_MIN_LONG -#define MAX_ULONG NPY_MAX_ULONG -#define MAX_LONGLONG NPY_MAX_LONGLONG -#define MIN_LONGLONG NPY_MIN_LONGLONG -#define MAX_ULONGLONG NPY_MAX_ULONGLONG -#define MIN_DATETIME NPY_MIN_DATETIME -#define MAX_DATETIME NPY_MAX_DATETIME -#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA -#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA - -#define BITSOF_BOOL NPY_BITSOF_BOOL -#define BITSOF_CHAR NPY_BITSOF_CHAR -#define BITSOF_SHORT NPY_BITSOF_SHORT -#define BITSOF_INT NPY_BITSOF_INT -#define BITSOF_LONG NPY_BITSOF_LONG -#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG -#define BITSOF_HALF NPY_BITSOF_HALF -#define BITSOF_FLOAT NPY_BITSOF_FLOAT -#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE -#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE -#define BITSOF_DATETIME NPY_BITSOF_DATETIME -#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA - -#define _pya_malloc PyArray_malloc -#define _pya_free PyArray_free -#define _pya_realloc PyArray_realloc - -#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF -#define BEGIN_THREADS NPY_BEGIN_THREADS -#define END_THREADS NPY_END_THREADS -#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF -#define ALLOW_C_API NPY_ALLOW_C_API -#define DISABLE_C_API NPY_DISABLE_C_API - -#define PY_FAIL NPY_FAIL -#define PY_SUCCEED NPY_SUCCEED - -#ifndef TRUE -#define TRUE NPY_TRUE -#endif - -#ifndef FALSE -#define FALSE NPY_FALSE -#endif - -#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT - -#define CONTIGUOUS NPY_CONTIGUOUS -#define C_CONTIGUOUS NPY_C_CONTIGUOUS -#define FORTRAN NPY_FORTRAN -#define F_CONTIGUOUS NPY_F_CONTIGUOUS -#define OWNDATA NPY_OWNDATA -#define FORCECAST NPY_FORCECAST -#define ENSURECOPY NPY_ENSURECOPY -#define ENSUREARRAY NPY_ENSUREARRAY -#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES -#define ALIGNED NPY_ALIGNED -#define NOTSWAPPED NPY_NOTSWAPPED -#define WRITEABLE NPY_WRITEABLE -#define UPDATEIFCOPY NPY_UPDATEIFCOPY -#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR -#define 
BEHAVED NPY_BEHAVED -#define BEHAVED_NS NPY_BEHAVED_NS -#define CARRAY NPY_CARRAY -#define CARRAY_RO NPY_CARRAY_RO -#define FARRAY NPY_FARRAY -#define FARRAY_RO NPY_FARRAY_RO -#define DEFAULT NPY_DEFAULT -#define IN_ARRAY NPY_IN_ARRAY -#define OUT_ARRAY NPY_OUT_ARRAY -#define INOUT_ARRAY NPY_INOUT_ARRAY -#define IN_FARRAY NPY_IN_FARRAY -#define OUT_FARRAY NPY_OUT_FARRAY -#define INOUT_FARRAY NPY_INOUT_FARRAY -#define UPDATE_ALL NPY_UPDATE_ALL - -#define OWN_DATA NPY_OWNDATA -#define BEHAVED_FLAGS NPY_BEHAVED -#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS -#define CARRAY_FLAGS_RO NPY_CARRAY_RO -#define CARRAY_FLAGS NPY_CARRAY -#define FARRAY_FLAGS NPY_FARRAY -#define FARRAY_FLAGS_RO NPY_FARRAY_RO -#define DEFAULT_FLAGS NPY_DEFAULT -#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS - -#ifndef MIN -#define MIN PyArray_MIN -#endif -#ifndef MAX -#define MAX PyArray_MAX -#endif -#define MAX_INTP NPY_MAX_INTP -#define MIN_INTP NPY_MIN_INTP -#define MAX_UINTP NPY_MAX_UINTP -#define INTP_FMT NPY_INTP_FMT - -#define REFCOUNT PyArray_REFCOUNT -#define MAX_ELSIZE NPY_MAX_ELSIZE - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_1_7_deprecated_api.h deleted file mode 100644 index 4c318bc4784c2..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_1_7_deprecated_api.h +++ /dev/null @@ -1,130 +0,0 @@ -#ifndef _NPY_1_7_DEPRECATED_API_H -#define _NPY_1_7_DEPRECATED_API_H - -#ifndef NPY_DEPRECATED_INCLUDES -#error "Should never include npy_*_*_deprecated_api directly." -#endif - -#if defined(_WIN32) -#define _WARN___STR2__(x) #x -#define _WARN___STR1__(x) _WARN___STR2__(x) -#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " -#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it by " \ - "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") -#elif defined(__GNUC__) -#warning "Using deprecated NumPy API, disable it by " \ - "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" -#endif -/* TODO: How to do this warning message for other compilers? */ - -/* - * This header exists to collect all dangerous/deprecated NumPy API - * as of NumPy 1.7. - * - * This is an attempt to remove bad API, the proliferation of macros, - * and namespace pollution currently produced by the NumPy headers. - */ - -/* These array flags are deprecated as of NumPy 1.7 */ -#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS - -/* - * The consistent NPY_ARRAY_* names which don't pollute the NPY_* - * namespace were added in NumPy 1.7. - * - * These versions of the carray flags are deprecated, but - * probably should only be removed after two releases instead of one. 
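For downstream code, the practical consequence of this deprecation header is a rename: every pre-1.7 flag spelled NPY_* has an NPY_ARRAY_* replacement, and defining NPY_NO_DEPRECATED_API hides the old names entirely. A minimal sketch of the migration, assuming a standard NumPy C-API extension; make_contiguous() is a hypothetical helper, not taken from this patch.

#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION  /* must precede any NumPy include */
#include <numpy/arrayobject.h>

static PyObject *
make_contiguous(PyObject *obj)
{
    /* NPY_ARRAY_IN_ARRAY is the 1.7+ spelling of the deprecated
     * NPY_IN_ARRAY alias mapped below. */
    return PyArray_FROM_OF(obj, NPY_ARRAY_IN_ARRAY);
}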
- */ -#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS -#define NPY_OWNDATA NPY_ARRAY_OWNDATA -#define NPY_FORCECAST NPY_ARRAY_FORCECAST -#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY -#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY -#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES -#define NPY_ALIGNED NPY_ARRAY_ALIGNED -#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED -#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE -#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY -#define NPY_BEHAVED NPY_ARRAY_BEHAVED -#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS -#define NPY_CARRAY NPY_ARRAY_CARRAY -#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO -#define NPY_DEFAULT NPY_ARRAY_DEFAULT -#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY -#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY -#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY -#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY -#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY -#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY -#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL - -/* This way of accessing the default type is deprecated as of NumPy 1.7 */ -#define PyArray_DEFAULT NPY_DEFAULT_TYPE - -/* These DATETIME bits aren't used internally */ -#if PY_VERSION_HEX >= 0x03000000 -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? NULL : \ - ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ - PyDict_GetItemString( \ - descr->metadata, NPY_METADATA_DTSTR), NULL)))) -#else -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? NULL : \ - ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ - PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) -#endif - -/* - * Deprecated as of NumPy 1.7, this kind of shortcut doesn't - * belong in the public API. - */ -#define NPY_AO PyArrayObject - -/* - * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't - * belong in the public API. - */ -#define fortran fortran_ - -/* - * Deprecated as of NumPy 1.7, as it is a namespace-polluting - * macro. - */ -#define FORTRAN_IF PyArray_FORTRAN_IF - -/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */ -#define NPY_METADATA_DTSTR "__timeunit__" - -/* - * Deprecated as of NumPy 1.7. - * The reasoning: - * - These are for datetime, but there's no datetime "namespace". - * - They just turn NPY_STR_ into "", which is just - * making something simple be indirected. - */ -#define NPY_STR_Y "Y" -#define NPY_STR_M "M" -#define NPY_STR_W "W" -#define NPY_STR_D "D" -#define NPY_STR_h "h" -#define NPY_STR_m "m" -#define NPY_STR_s "s" -#define NPY_STR_ms "ms" -#define NPY_STR_us "us" -#define NPY_STR_ns "ns" -#define NPY_STR_ps "ps" -#define NPY_STR_fs "fs" -#define NPY_STR_as "as" - -/* - * The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be - * removed in the next major release. - */ -#include "old_defines.h" - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_3kcompat.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_3kcompat.h deleted file mode 100644 index fec95779a1dfd..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_3kcompat.h +++ /dev/null @@ -1,506 +0,0 @@ -/* - * This is a convenience header file providing compatibility utilities - * for supporting Python 2 and Python 3 in the same code base. - * - * If you want to use this for your own projects, it's recommended to make a - * copy of it. 
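To make the compatibility pattern concrete: with this header included, a single spelling builds a native str object under both interpreters, because PyUString_* resolves to PyString_* on Python 2 and PyUnicode_* on Python 3. An illustrative sketch, not from the patch; make_label() is a hypothetical function.

#include "numpy/npy_3kcompat.h"

static PyObject *
make_label(int axis)
{
    /* Resolves to PyString_FromFormat on Python 2 and
     * PyUnicode_FromFormat on Python 3 (see the tables below). */
    return PyUString_FromFormat("axis %d", axis);
}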
Although the stuff below is unlikely to change, we don't provide - * strong backwards compatibility guarantees at the moment. - */ - -#ifndef _NPY_3KCOMPAT_H_ -#define _NPY_3KCOMPAT_H_ - -#include -#include - -#if PY_VERSION_HEX >= 0x03000000 -#ifndef NPY_PY3K -#define NPY_PY3K 1 -#endif -#endif - -#include "numpy/npy_common.h" -#include "numpy/ndarrayobject.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * PyInt -> PyLong - */ - -#if defined(NPY_PY3K) -/* Return True only if the long fits in a C long */ -static NPY_INLINE int PyInt_Check(PyObject *op) { - int overflow = 0; - if (!PyLong_Check(op)) { - return 0; - } - PyLong_AsLongAndOverflow(op, &overflow); - return (overflow == 0); -} - -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AsLong PyLong_AsLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsSsize_t PyLong_AsSsize_t - -/* NOTE: - * - * Since the PyLong type is very different from the fixed-range PyInt, - * we don't define PyInt_Type -> PyLong_Type. - */ -#endif /* NPY_PY3K */ - -/* - * PyString -> PyBytes - */ - -#if defined(NPY_PY3K) - -#define PyString_Type PyBytes_Type -#define PyString_Check PyBytes_Check -#define PyStringObject PyBytesObject -#define PyString_FromString PyBytes_FromString -#define PyString_FromStringAndSize PyBytes_FromStringAndSize -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsStringAndSize PyBytes_AsStringAndSize -#define PyString_FromFormat PyBytes_FromFormat -#define PyString_Concat PyBytes_Concat -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_Size PyBytes_Size - -#define PyUString_Type PyUnicode_Type -#define PyUString_Check PyUnicode_Check -#define PyUStringObject PyUnicodeObject -#define PyUString_FromString PyUnicode_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyUString_FromFormat PyUnicode_FromFormat -#define PyUString_Concat PyUnicode_Concat2 -#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel -#define PyUString_GET_SIZE PyUnicode_GET_SIZE -#define PyUString_Size PyUnicode_Size -#define PyUString_InternFromString PyUnicode_InternFromString -#define PyUString_Format PyUnicode_Format - -#else - -#define PyBytes_Type PyString_Type -#define PyBytes_Check PyString_Check -#define PyBytesObject PyStringObject -#define PyBytes_FromString PyString_FromString -#define PyBytes_FromStringAndSize PyString_FromStringAndSize -#define PyBytes_AS_STRING PyString_AS_STRING -#define PyBytes_AsStringAndSize PyString_AsStringAndSize -#define PyBytes_FromFormat PyString_FromFormat -#define PyBytes_Concat PyString_Concat -#define PyBytes_ConcatAndDel PyString_ConcatAndDel -#define PyBytes_AsString PyString_AsString -#define PyBytes_GET_SIZE PyString_GET_SIZE -#define PyBytes_Size PyString_Size - -#define PyUString_Type PyString_Type -#define PyUString_Check PyString_Check -#define PyUStringObject PyStringObject -#define PyUString_FromString PyString_FromString -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#define PyUString_FromFormat PyString_FromFormat -#define PyUString_Concat PyString_Concat -#define PyUString_ConcatAndDel PyString_ConcatAndDel -#define PyUString_GET_SIZE PyString_GET_SIZE -#define PyUString_Size PyString_Size -#define PyUString_InternFromString PyString_InternFromString -#define PyUString_Format PyString_Format - -#endif /* NPY_PY3K */ - - -static NPY_INLINE void -PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) -{ - PyObject 
*newobj; - newobj = PyUnicode_Concat(*left, right); - Py_DECREF(*left); - Py_DECREF(right); - *left = newobj; -} - -static NPY_INLINE void -PyUnicode_Concat2(PyObject **left, PyObject *right) -{ - PyObject *newobj; - newobj = PyUnicode_Concat(*left, right); - Py_DECREF(*left); - *left = newobj; -} - -/* - * PyFile_* compatibility - */ -#if defined(NPY_PY3K) -/* - * Get a FILE* handle to the file represented by the Python object - */ -static NPY_INLINE FILE* -npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) -{ - int fd, fd2; - PyObject *ret, *os; - npy_off_t pos; - FILE *handle; - - /* Flush first to ensure things end up in the file in the correct order */ - ret = PyObject_CallMethod(file, "flush", ""); - if (ret == NULL) { - return NULL; - } - Py_DECREF(ret); - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - return NULL; - } - - /* - * The handle needs to be dup'd because we have to call fclose - * at the end - */ - os = PyImport_ImportModule("os"); - if (os == NULL) { - return NULL; - } - ret = PyObject_CallMethod(os, "dup", "i", fd); - Py_DECREF(os); - if (ret == NULL) { - return NULL; - } - fd2 = PyNumber_AsSsize_t(ret, NULL); - Py_DECREF(ret); - - /* Convert to FILE* handle */ -#ifdef _WIN32 - handle = _fdopen(fd2, mode); -#else - handle = fdopen(fd2, mode); -#endif - if (handle == NULL) { - PyErr_SetString(PyExc_IOError, - "Getting a FILE* from a Python file object failed"); - } - - /* Record the original raw file handle position */ - *orig_pos = npy_ftell(handle); - if (*orig_pos == -1) { - PyErr_SetString(PyExc_IOError, "obtaining file position failed"); - fclose(handle); - return NULL; - } - - /* Seek raw handle to the Python-side position */ - ret = PyObject_CallMethod(file, "tell", ""); - if (ret == NULL) { - fclose(handle); - return NULL; - } - pos = PyLong_AsLongLong(ret); - Py_DECREF(ret); - if (PyErr_Occurred()) { - fclose(handle); - return NULL; - } - if (npy_fseek(handle, pos, SEEK_SET) == -1) { - PyErr_SetString(PyExc_IOError, "seeking file failed"); - fclose(handle); - return NULL; - } - return handle; -} - -/* - * Close the dup-ed file handle, and seek the Python one to the current position - */ -static NPY_INLINE int -npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) -{ - int fd; - PyObject *ret; - npy_off_t position; - - position = npy_ftell(handle); - - /* Close the FILE* handle */ - fclose(handle); - - /* - * Restore original file handle position, in order to not confuse - * Python-side data structures - */ - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - return -1; - } - if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { - PyErr_SetString(PyExc_IOError, "seeking file failed"); - return -1; - } - - if (position == -1) { - PyErr_SetString(PyExc_IOError, "obtaining file position failed"); - return -1; - } - - /* Seek Python-side handle to the FILE* handle position */ - ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - -static NPY_INLINE int -npy_PyFile_Check(PyObject *file) -{ - int fd; - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - PyErr_Clear(); - return 0; - } - return 1; -} - -/* - * DEPRECATED DO NOT USE - * use npy_PyFile_DupClose2 instead - * this function will mess ups python3 internal file object buffering - * Close the dup-ed file handle, and seek the Python one to the current position - */ -static NPY_INLINE int -npy_PyFile_DupClose(PyObject *file, FILE* handle) -{ - PyObject *ret; - Py_ssize_t 
position; - position = npy_ftell(handle); - fclose(handle); - - ret = PyObject_CallMethod(file, "seek", NPY_SSIZE_T_PYFMT "i", position, 0); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - - -#else - -/* DEPRECATED, DO NOT USE */ -#define npy_PyFile_DupClose(f, h, p) npy_PyFile_DupClose2((f), (h), (p)) - -/* use these */ -static NPY_INLINE FILE * -npy_PyFile_Dup2(PyObject *file, - const char *NPY_UNUSED(mode), npy_off_t *NPY_UNUSED(orig_pos)) -{ - return PyFile_AsFile(file); -} - -static NPY_INLINE int -npy_PyFile_DupClose2(PyObject *NPY_UNUSED(file), FILE* NPY_UNUSED(handle), - npy_off_t NPY_UNUSED(orig_pos)) -{ - return 0; -} - -#define npy_PyFile_Check PyFile_Check - -#endif - -/* - * DEPRECATED, DO NOT USE - * Use npy_PyFile_Dup2 instead. - * This function will mess up python3 internal file object buffering. - * Get a FILE* handle to the file represented by the Python object. - */ -static NPY_INLINE FILE* -npy_PyFile_Dup(PyObject *file, char *mode) -{ - npy_off_t orig; - if (DEPRECATE("npy_PyFile_Dup is deprecated, use " - "npy_PyFile_Dup2") < 0) { - return NULL; - } - - return npy_PyFile_Dup2(file, mode, &orig); -} - -static NPY_INLINE PyObject* -npy_PyFile_OpenFile(PyObject *filename, const char *mode) -{ - PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); - if (open == NULL) { - return NULL; - } - return PyObject_CallFunction(open, "Os", filename, mode); -} - -static NPY_INLINE int -npy_PyFile_CloseFile(PyObject *file) -{ - PyObject *ret; - - ret = PyObject_CallMethod(file, "close", NULL); - if (ret == NULL) { - return -1; - } - Py_DECREF(ret); - return 0; -} - -/* - * PyObject_Cmp - */ -#if defined(NPY_PY3K) -static NPY_INLINE int -PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) -{ - int v; - v = PyObject_RichCompareBool(i1, i2, Py_LT); - if (v == 0) { - *cmp = -1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_GT); - if (v == 0) { - *cmp = 1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_EQ); - if (v == 0) { - *cmp = 0; - return 1; - } - else { - *cmp = 0; - return -1; - } -} -#endif - -/* - * PyCObject functions adapted to PyCapsules. - * - * The main job here is to get rid of the improved error handling - * of PyCapsules. It's a shame... 
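The dup/close pair above is meant to be used as a bracket around raw FILE* I/O: npy_PyFile_Dup2 hands out an independent handle seeked to the Python-side position, and npy_PyFile_DupClose2 closes it and re-syncs the Python file object. A minimal sketch of that bracket, assuming a readable binary file object; read_magic() and its 4-byte header are hypothetical.

static int
read_magic(PyObject *file)
{
    npy_off_t orig_pos;
    char magic[4];
    FILE *fp = npy_PyFile_Dup2(file, "rb", &orig_pos);

    if (fp == NULL) {
        return -1;
    }
    if (fread(magic, 1, sizeof(magic), fp) != sizeof(magic)) {
        PyErr_SetString(PyExc_IOError, "could not read file header");
        npy_PyFile_DupClose2(file, fp, orig_pos);
        return -1;
    }
    /* Closes the duplicated handle and seeks the Python object to the
     * new position, so its internal buffering stays consistent. */
    return npy_PyFile_DupClose2(file, fp, orig_pos);
}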
- */ -#if PY_VERSION_HEX >= 0x03000000 - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) -{ - PyObject *ret = PyCapsule_New(ptr, NULL, dtor); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) -{ - PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); - if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { - PyErr_Clear(); - Py_DECREF(ret); - ret = NULL; - } - return ret; -} - -static NPY_INLINE void * -NpyCapsule_AsVoidPtr(PyObject *obj) -{ - void *ret = PyCapsule_GetPointer(obj, NULL); - if (ret == NULL) { - PyErr_Clear(); - } - return ret; -} - -static NPY_INLINE void * -NpyCapsule_GetDesc(PyObject *obj) -{ - return PyCapsule_GetContext(obj); -} - -static NPY_INLINE int -NpyCapsule_Check(PyObject *ptr) -{ - return PyCapsule_CheckExact(ptr); -} - -#else - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)) -{ - return PyCObject_FromVoidPtr(ptr, dtor); -} - -static NPY_INLINE PyObject * -NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, - void (*dtor)(void *, void *)) -{ - return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor); -} - -static NPY_INLINE void * -NpyCapsule_AsVoidPtr(PyObject *ptr) -{ - return PyCObject_AsVoidPtr(ptr); -} - -static NPY_INLINE void * -NpyCapsule_GetDesc(PyObject *obj) -{ - return PyCObject_GetDesc(obj); -} - -static NPY_INLINE int -NpyCapsule_Check(PyObject *ptr) -{ - return PyCObject_Check(ptr); -} - -#endif - -/* - * Hash value compatibility. - * As of Python 3.2 hash values are of type Py_hash_t. - * Previous versions use C long. - */ -#if PY_VERSION_HEX < 0x03020000 -typedef long npy_hash_t; -#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG -#else -typedef Py_hash_t npy_hash_t; -#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* _NPY_3KCOMPAT_H_ */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_common.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_common.h deleted file mode 100644 index 5cba8c9d2a3ce..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_common.h +++ /dev/null @@ -1,1046 +0,0 @@ -#ifndef _NPY_COMMON_H_ -#define _NPY_COMMON_H_ - -/* numpconfig.h is auto-generated */ -#include "numpyconfig.h" -#ifdef HAVE_NPY_CONFIG_H -#include -#endif - -/* - * gcc does not unroll even with -O3 - * use with care, unrolling on modern cpus rarely speeds things up - */ -#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS -#define NPY_GCC_UNROLL_LOOPS \ - __attribute__((optimize("unroll-loops"))) -#else -#define NPY_GCC_UNROLL_LOOPS -#endif - -/* highest gcc optimization level, enabled autovectorizer */ -#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 -#define NPY_GCC_OPT_3 __attribute__((optimize("O3"))) -#else -#define NPY_GCC_OPT_3 -#endif - -/* - * mark an argument (starting from 1) that must not be NULL and is not checked - * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check - */ -#ifdef HAVE_ATTRIBUTE_NONNULL -#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) -#else -#define NPY_GCC_NONNULL(n) -#endif - -#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS -#define NPY_HAVE_SSE_INTRINSICS -#endif - -#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD -#define NPY_HAVE_SSE2_INTRINSICS -#endif - -/* - * give a hint to the compiler which branch is more likely or unlikely - * to occur, e.g. 
rare error cases: - * - * if (NPY_UNLIKELY(failure == 0)) - * return NULL; - * - * the double !! is to cast the expression (e.g. NULL) to a boolean required by - * the intrinsic - */ -#ifdef HAVE___BUILTIN_EXPECT -#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) -#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define NPY_LIKELY(x) (x) -#define NPY_UNLIKELY(x) (x) -#endif - -#if defined(_MSC_VER) - #define NPY_INLINE __inline -#elif defined(__GNUC__) - #if defined(__STRICT_ANSI__) - #define NPY_INLINE __inline__ - #else - #define NPY_INLINE inline - #endif -#else - #define NPY_INLINE -#endif - -#ifdef HAVE___THREAD - #define NPY_TLS __thread -#else - #ifdef HAVE___DECLSPEC_THREAD_ - #define NPY_TLS __declspec(thread) - #else - #define NPY_TLS - #endif -#endif - -#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE - #define NPY_RETURNS_BORROWED_REF \ - __attribute__((cpychecker_returns_borrowed_ref)) -#else - #define NPY_RETURNS_BORROWED_REF -#endif - -#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE - #define NPY_STEALS_REF_TO_ARG(n) \ - __attribute__((cpychecker_steals_reference_to_arg(n))) -#else - #define NPY_STEALS_REF_TO_ARG(n) -#endif - -/* 64 bit file position support, also on win-amd64. Ticket #1660 */ -#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \ - defined(__MINGW32__) || defined(__MINGW64__) - #include - -/* mingw based on 3.4.5 has lseek but not ftell/fseek */ -#if defined(__MINGW32__) || defined(__MINGW64__) -extern int __cdecl _fseeki64(FILE *, long long, int); -extern long long __cdecl _ftelli64(FILE *); -#endif - - #define npy_fseek _fseeki64 - #define npy_ftell _ftelli64 - #define npy_lseek _lseeki64 - #define npy_off_t npy_int64 - - #if NPY_SIZEOF_INT == 8 - #define NPY_OFF_T_PYFMT "i" - #elif NPY_SIZEOF_LONG == 8 - #define NPY_OFF_T_PYFMT "l" - #elif NPY_SIZEOF_LONGLONG == 8 - #define NPY_OFF_T_PYFMT "L" - #else - #error Unsupported size for type off_t - #endif -#else -#ifdef HAVE_FSEEKO - #define npy_fseek fseeko -#else - #define npy_fseek fseek -#endif -#ifdef HAVE_FTELLO - #define npy_ftell ftello -#else - #define npy_ftell ftell -#endif - #define npy_lseek lseek - #define npy_off_t off_t - - #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT - #define NPY_OFF_T_PYFMT "h" - #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT - #define NPY_OFF_T_PYFMT "i" - #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG - #define NPY_OFF_T_PYFMT "l" - #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG - #define NPY_OFF_T_PYFMT "L" - #else - #error Unsupported size for type off_t - #endif -#endif - -/* enums for detected endianness */ -enum { - NPY_CPU_UNKNOWN_ENDIAN, - NPY_CPU_LITTLE, - NPY_CPU_BIG -}; - -/* - * This is to typedef npy_intp to the appropriate pointer size for this - * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. - */ -typedef Py_intptr_t npy_intp; -typedef Py_uintptr_t npy_uintp; - -/* - * Define sizes that were not defined in numpyconfig.h. 
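In practice the branch-hint and inline macros above combine naturally on hot paths: the error branch is annotated NPY_UNLIKELY so compilers that honor __builtin_expect lay out the common case as the fall-through. A minimal sketch; checked_div() is a hypothetical function, not from this patch.

static NPY_INLINE int
checked_div(npy_intp a, npy_intp b, npy_intp *out)
{
    if (NPY_UNLIKELY(b == 0)) {
        return -1;  /* rare error case, kept off the hot path */
    }
    *out = a / b;
    return 0;
}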
- */
-#define NPY_SIZEOF_CHAR 1
-#define NPY_SIZEOF_BYTE 1
-#define NPY_SIZEOF_DATETIME 8
-#define NPY_SIZEOF_TIMEDELTA 8
-#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T
-#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T
-#define NPY_SIZEOF_HALF 2
-#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
-#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
-#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
-
-#ifdef constchar
-#undef constchar
-#endif
-
-#define NPY_SSIZE_T_PYFMT "n"
-#define constchar char
-
-/* NPY_INTP_FMT Note:
- * Unlike the other NPY_*_FMT macros which are used with
- * PyOS_snprintf, NPY_INTP_FMT is used with PyErr_Format and
- * PyString_Format. These functions use different formatting
- * codes which are portably specified according to the Python
- * documentation. See ticket #1795.
- *
- * On Windows x64, the LONGLONG formatter should be used, but
- * in Python 2.6 the %lld formatter is not supported. In this
- * case we work around the problem by using the %zd formatter.
- */
-#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT
-    #define NPY_INTP NPY_INT
-    #define NPY_UINTP NPY_UINT
-    #define PyIntpArrType_Type PyIntArrType_Type
-    #define PyUIntpArrType_Type PyUIntArrType_Type
-    #define NPY_MAX_INTP NPY_MAX_INT
-    #define NPY_MIN_INTP NPY_MIN_INT
-    #define NPY_MAX_UINTP NPY_MAX_UINT
-    #define NPY_INTP_FMT "d"
-#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG
-    #define NPY_INTP NPY_LONG
-    #define NPY_UINTP NPY_ULONG
-    #define PyIntpArrType_Type PyLongArrType_Type
-    #define PyUIntpArrType_Type PyULongArrType_Type
-    #define NPY_MAX_INTP NPY_MAX_LONG
-    #define NPY_MIN_INTP NPY_MIN_LONG
-    #define NPY_MAX_UINTP NPY_MAX_ULONG
-    #define NPY_INTP_FMT "ld"
-#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG)
-    #define NPY_INTP NPY_LONGLONG
-    #define NPY_UINTP NPY_ULONGLONG
-    #define PyIntpArrType_Type PyLongLongArrType_Type
-    #define PyUIntpArrType_Type PyULongLongArrType_Type
-    #define NPY_MAX_INTP NPY_MAX_LONGLONG
-    #define NPY_MIN_INTP NPY_MIN_LONGLONG
-    #define NPY_MAX_UINTP NPY_MAX_ULONGLONG
-    #if (PY_VERSION_HEX >= 0x02070000)
-        #define NPY_INTP_FMT "lld"
-    #else
-        #define NPY_INTP_FMT "zd"
-    #endif
-#endif
-
-/*
- * We can only use C99 formats for npy_int_p if it is the same as
- * intp_t, hence the condition on HAVE_UNITPTR_T
- */
-#if (NPY_USE_C99_FORMATS) == 1 \
-        && (defined HAVE_UINTPTR_T) \
-        && (defined HAVE_INTTYPES_H)
-    #include <inttypes.h>
-    #undef NPY_INTP_FMT
-    #define NPY_INTP_FMT PRIdPTR
-#endif
-
-
-/*
- * Some platforms don't define bool, long long, or long double.
- * Handle that here.
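The note above is easiest to see in use: because NPY_INTP_FMT expands to a bare conversion code ("d", "ld", "lld" or PRIdPTR), it is spliced into the format string by literal concatenation, which keeps PyErr_Format portable across pointer sizes. A minimal sketch; bad_index() is a hypothetical function.

static void
bad_index(npy_intp index, npy_intp size)
{
    PyErr_Format(PyExc_IndexError,
                 "index %" NPY_INTP_FMT " is out of bounds for size %" NPY_INTP_FMT,
                 index, size);
}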
- */ -#define NPY_BYTE_FMT "hhd" -#define NPY_UBYTE_FMT "hhu" -#define NPY_SHORT_FMT "hd" -#define NPY_USHORT_FMT "hu" -#define NPY_INT_FMT "d" -#define NPY_UINT_FMT "u" -#define NPY_LONG_FMT "ld" -#define NPY_ULONG_FMT "lu" -#define NPY_HALF_FMT "g" -#define NPY_FLOAT_FMT "g" -#define NPY_DOUBLE_FMT "g" - - -#ifdef PY_LONG_LONG -typedef PY_LONG_LONG npy_longlong; -typedef unsigned PY_LONG_LONG npy_ulonglong; -# ifdef _MSC_VER -# define NPY_LONGLONG_FMT "I64d" -# define NPY_ULONGLONG_FMT "I64u" -# elif defined(__APPLE__) || defined(__FreeBSD__) -/* "%Ld" only parses 4 bytes -- "L" is floating modifier on MacOS X/BSD */ -# define NPY_LONGLONG_FMT "lld" -# define NPY_ULONGLONG_FMT "llu" -/* - another possible variant -- *quad_t works on *BSD, but is deprecated: - #define LONGLONG_FMT "qd" - #define ULONGLONG_FMT "qu" -*/ -# else -# define NPY_LONGLONG_FMT "Ld" -# define NPY_ULONGLONG_FMT "Lu" -# endif -# ifdef _MSC_VER -# define NPY_LONGLONG_SUFFIX(x) (x##i64) -# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) -# else -# define NPY_LONGLONG_SUFFIX(x) (x##LL) -# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) -# endif -#else -typedef long npy_longlong; -typedef unsigned long npy_ulonglong; -# define NPY_LONGLONG_SUFFIX(x) (x##L) -# define NPY_ULONGLONG_SUFFIX(x) (x##UL) -#endif - - -typedef unsigned char npy_bool; -#define NPY_FALSE 0 -#define NPY_TRUE 1 - - -#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE - typedef double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "g" -#else - typedef long double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "Lg" -#endif - -#ifndef Py_USING_UNICODE -#error Must use Python with unicode enabled. -#endif - - -typedef signed char npy_byte; -typedef unsigned char npy_ubyte; -typedef unsigned short npy_ushort; -typedef unsigned int npy_uint; -typedef unsigned long npy_ulong; - -/* These are for completeness */ -typedef char npy_char; -typedef short npy_short; -typedef int npy_int; -typedef long npy_long; -typedef float npy_float; -typedef double npy_double; - -/* - * Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being - * able to do .real/.imag. Will have to convert code first. - */ -#if 0 -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE) -typedef complex npy_cdouble; -#else -typedef struct { double real, imag; } npy_cdouble; -#endif - -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT) -typedef complex float npy_cfloat; -#else -typedef struct { float real, imag; } npy_cfloat; -#endif - -#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE) -typedef complex long double npy_clongdouble; -#else -typedef struct {npy_longdouble real, imag;} npy_clongdouble; -#endif -#endif -#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE -#error npy_cdouble definition is not compatible with C99 complex definition ! \ - Please contact Numpy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { double real, imag; } npy_cdouble; - -#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT -#error npy_cfloat definition is not compatible with C99 complex definition ! \ - Please contact Numpy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { float real, imag; } npy_cfloat; - -#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE -#error npy_clongdouble definition is not compatible with C99 complex definition ! 
\ - Please contact Numpy maintainers and give detailed information about your \ - compiler and platform -#endif -typedef struct { npy_longdouble real, imag; } npy_clongdouble; - -/* - * numarray-style bit-width typedefs - */ -#define NPY_MAX_INT8 127 -#define NPY_MIN_INT8 -128 -#define NPY_MAX_UINT8 255 -#define NPY_MAX_INT16 32767 -#define NPY_MIN_INT16 -32768 -#define NPY_MAX_UINT16 65535 -#define NPY_MAX_INT32 2147483647 -#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) -#define NPY_MAX_UINT32 4294967295U -#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) -#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) -#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) -#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) -#define NPY_MIN_DATETIME NPY_MIN_INT64 -#define NPY_MAX_DATETIME NPY_MAX_INT64 -#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 -#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 - - /* Need to find the number of bits for each type and - make definitions accordingly. - - C states that sizeof(char) == 1 by definition - - So, just using the sizeof keyword won't help. - - It also looks like Python itself uses sizeof(char) quite a - bit, which by definition should be 1 all the time. - - Idea: Make Use of CHAR_BIT which should tell us how many - BITS per CHARACTER - */ - - /* Include platform definitions -- These are in the C89/90 standard */ -#include -#define NPY_MAX_BYTE SCHAR_MAX -#define NPY_MIN_BYTE SCHAR_MIN -#define NPY_MAX_UBYTE UCHAR_MAX -#define NPY_MAX_SHORT SHRT_MAX -#define NPY_MIN_SHORT SHRT_MIN -#define NPY_MAX_USHORT USHRT_MAX -#define NPY_MAX_INT INT_MAX -#ifndef INT_MIN -#define INT_MIN (-INT_MAX - 1) -#endif -#define NPY_MIN_INT INT_MIN -#define NPY_MAX_UINT UINT_MAX -#define NPY_MAX_LONG LONG_MAX -#define NPY_MIN_LONG LONG_MIN -#define NPY_MAX_ULONG ULONG_MAX - -#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT) -#define NPY_BITSOF_CHAR CHAR_BIT -#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT) -#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) -#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) -#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) -#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) -#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT) -#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) -#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) -#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) -#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) -#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT) -#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT) -#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT) -#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) -#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) - -#if NPY_BITSOF_LONG == 8 -#define NPY_INT8 NPY_LONG -#define NPY_UINT8 NPY_ULONG - typedef long npy_int8; - typedef unsigned long npy_uint8; -#define PyInt8ScalarObject PyLongScalarObject -#define PyInt8ArrType_Type PyLongArrType_Type -#define 
PyUInt8ScalarObject PyULongScalarObject -#define PyUInt8ArrType_Type PyULongArrType_Type -#define NPY_INT8_FMT NPY_LONG_FMT -#define NPY_UINT8_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 16 -#define NPY_INT16 NPY_LONG -#define NPY_UINT16 NPY_ULONG - typedef long npy_int16; - typedef unsigned long npy_uint16; -#define PyInt16ScalarObject PyLongScalarObject -#define PyInt16ArrType_Type PyLongArrType_Type -#define PyUInt16ScalarObject PyULongScalarObject -#define PyUInt16ArrType_Type PyULongArrType_Type -#define NPY_INT16_FMT NPY_LONG_FMT -#define NPY_UINT16_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 32 -#define NPY_INT32 NPY_LONG -#define NPY_UINT32 NPY_ULONG - typedef long npy_int32; - typedef unsigned long npy_uint32; - typedef unsigned long npy_ucs4; -#define PyInt32ScalarObject PyLongScalarObject -#define PyInt32ArrType_Type PyLongArrType_Type -#define PyUInt32ScalarObject PyULongScalarObject -#define PyUInt32ArrType_Type PyULongArrType_Type -#define NPY_INT32_FMT NPY_LONG_FMT -#define NPY_UINT32_FMT NPY_ULONG_FMT -#elif NPY_BITSOF_LONG == 64 -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG - typedef long npy_int64; - typedef unsigned long npy_uint64; -#define PyInt64ScalarObject PyLongScalarObject -#define PyInt64ArrType_Type PyLongArrType_Type -#define PyUInt64ScalarObject PyULongScalarObject -#define PyUInt64ArrType_Type PyULongArrType_Type -#define NPY_INT64_FMT NPY_LONG_FMT -#define NPY_UINT64_FMT NPY_ULONG_FMT -#define MyPyLong_FromInt64 PyLong_FromLong -#define MyPyLong_AsInt64 PyLong_AsLong -#elif NPY_BITSOF_LONG == 128 -#define NPY_INT128 NPY_LONG -#define NPY_UINT128 NPY_ULONG - typedef long npy_int128; - typedef unsigned long npy_uint128; -#define PyInt128ScalarObject PyLongScalarObject -#define PyInt128ArrType_Type PyLongArrType_Type -#define PyUInt128ScalarObject PyULongScalarObject -#define PyUInt128ArrType_Type PyULongArrType_Type -#define NPY_INT128_FMT NPY_LONG_FMT -#define NPY_UINT128_FMT NPY_ULONG_FMT -#endif - -#if NPY_BITSOF_LONGLONG == 8 -# ifndef NPY_INT8 -# define NPY_INT8 NPY_LONGLONG -# define NPY_UINT8 NPY_ULONGLONG - typedef npy_longlong npy_int8; - typedef npy_ulonglong npy_uint8; -# define PyInt8ScalarObject PyLongLongScalarObject -# define PyInt8ArrType_Type PyLongLongArrType_Type -# define PyUInt8ScalarObject PyULongLongScalarObject -# define PyUInt8ArrType_Type PyULongLongArrType_Type -#define NPY_INT8_FMT NPY_LONGLONG_FMT -#define NPY_UINT8_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT8 -# define NPY_MIN_LONGLONG NPY_MIN_INT8 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 -#elif NPY_BITSOF_LONGLONG == 16 -# ifndef NPY_INT16 -# define NPY_INT16 NPY_LONGLONG -# define NPY_UINT16 NPY_ULONGLONG - typedef npy_longlong npy_int16; - typedef npy_ulonglong npy_uint16; -# define PyInt16ScalarObject PyLongLongScalarObject -# define PyInt16ArrType_Type PyLongLongArrType_Type -# define PyUInt16ScalarObject PyULongLongScalarObject -# define PyUInt16ArrType_Type PyULongLongArrType_Type -#define NPY_INT16_FMT NPY_LONGLONG_FMT -#define NPY_UINT16_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT16 -# define NPY_MIN_LONGLONG NPY_MIN_INT16 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 -#elif NPY_BITSOF_LONGLONG == 32 -# ifndef NPY_INT32 -# define NPY_INT32 NPY_LONGLONG -# define NPY_UINT32 NPY_ULONGLONG - typedef npy_longlong npy_int32; - typedef npy_ulonglong npy_uint32; - typedef npy_ulonglong npy_ucs4; -# define PyInt32ScalarObject PyLongLongScalarObject -# define PyInt32ArrType_Type PyLongLongArrType_Type -# define 
PyUInt32ScalarObject PyULongLongScalarObject -# define PyUInt32ArrType_Type PyULongLongArrType_Type -#define NPY_INT32_FMT NPY_LONGLONG_FMT -#define NPY_UINT32_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT32 -# define NPY_MIN_LONGLONG NPY_MIN_INT32 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 -#elif NPY_BITSOF_LONGLONG == 64 -# ifndef NPY_INT64 -# define NPY_INT64 NPY_LONGLONG -# define NPY_UINT64 NPY_ULONGLONG - typedef npy_longlong npy_int64; - typedef npy_ulonglong npy_uint64; -# define PyInt64ScalarObject PyLongLongScalarObject -# define PyInt64ArrType_Type PyLongLongArrType_Type -# define PyUInt64ScalarObject PyULongLongScalarObject -# define PyUInt64ArrType_Type PyULongLongArrType_Type -#define NPY_INT64_FMT NPY_LONGLONG_FMT -#define NPY_UINT64_FMT NPY_ULONGLONG_FMT -# define MyPyLong_FromInt64 PyLong_FromLongLong -# define MyPyLong_AsInt64 PyLong_AsLongLong -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT64 -# define NPY_MIN_LONGLONG NPY_MIN_INT64 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 -#elif NPY_BITSOF_LONGLONG == 128 -# ifndef NPY_INT128 -# define NPY_INT128 NPY_LONGLONG -# define NPY_UINT128 NPY_ULONGLONG - typedef npy_longlong npy_int128; - typedef npy_ulonglong npy_uint128; -# define PyInt128ScalarObject PyLongLongScalarObject -# define PyInt128ArrType_Type PyLongLongArrType_Type -# define PyUInt128ScalarObject PyULongLongScalarObject -# define PyUInt128ArrType_Type PyULongLongArrType_Type -#define NPY_INT128_FMT NPY_LONGLONG_FMT -#define NPY_UINT128_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT128 -# define NPY_MIN_LONGLONG NPY_MIN_INT128 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 -#elif NPY_BITSOF_LONGLONG == 256 -# define NPY_INT256 NPY_LONGLONG -# define NPY_UINT256 NPY_ULONGLONG - typedef npy_longlong npy_int256; - typedef npy_ulonglong npy_uint256; -# define PyInt256ScalarObject PyLongLongScalarObject -# define PyInt256ArrType_Type PyLongLongArrType_Type -# define PyUInt256ScalarObject PyULongLongScalarObject -# define PyUInt256ArrType_Type PyULongLongArrType_Type -#define NPY_INT256_FMT NPY_LONGLONG_FMT -#define NPY_UINT256_FMT NPY_ULONGLONG_FMT -# define NPY_MAX_LONGLONG NPY_MAX_INT256 -# define NPY_MIN_LONGLONG NPY_MIN_INT256 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 -#endif - -#if NPY_BITSOF_INT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_INT -#define NPY_UINT8 NPY_UINT - typedef int npy_int8; - typedef unsigned int npy_uint8; -# define PyInt8ScalarObject PyIntScalarObject -# define PyInt8ArrType_Type PyIntArrType_Type -# define PyUInt8ScalarObject PyUIntScalarObject -# define PyUInt8ArrType_Type PyUIntArrType_Type -#define NPY_INT8_FMT NPY_INT_FMT -#define NPY_UINT8_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_INT -#define NPY_UINT16 NPY_UINT - typedef int npy_int16; - typedef unsigned int npy_uint16; -# define PyInt16ScalarObject PyIntScalarObject -# define PyInt16ArrType_Type PyIntArrType_Type -# define PyUInt16ScalarObject PyIntUScalarObject -# define PyUInt16ArrType_Type PyIntUArrType_Type -#define NPY_INT16_FMT NPY_INT_FMT -#define NPY_UINT16_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT - typedef int npy_int32; - typedef unsigned int npy_uint32; - typedef unsigned int npy_ucs4; -# define PyInt32ScalarObject PyIntScalarObject -# define PyInt32ArrType_Type PyIntArrType_Type -# define PyUInt32ScalarObject PyUIntScalarObject -# define PyUInt32ArrType_Type PyUIntArrType_Type -#define 
NPY_INT32_FMT NPY_INT_FMT -#define NPY_UINT32_FMT NPY_UINT_FMT -#endif -#elif NPY_BITSOF_INT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_INT -#define NPY_UINT64 NPY_UINT - typedef int npy_int64; - typedef unsigned int npy_uint64; -# define PyInt64ScalarObject PyIntScalarObject -# define PyInt64ArrType_Type PyIntArrType_Type -# define PyUInt64ScalarObject PyUIntScalarObject -# define PyUInt64ArrType_Type PyUIntArrType_Type -#define NPY_INT64_FMT NPY_INT_FMT -#define NPY_UINT64_FMT NPY_UINT_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_INT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_INT -#define NPY_UINT128 NPY_UINT - typedef int npy_int128; - typedef unsigned int npy_uint128; -# define PyInt128ScalarObject PyIntScalarObject -# define PyInt128ArrType_Type PyIntArrType_Type -# define PyUInt128ScalarObject PyUIntScalarObject -# define PyUInt128ArrType_Type PyUIntArrType_Type -#define NPY_INT128_FMT NPY_INT_FMT -#define NPY_UINT128_FMT NPY_UINT_FMT -#endif -#endif - -#if NPY_BITSOF_SHORT == 8 -#ifndef NPY_INT8 -#define NPY_INT8 NPY_SHORT -#define NPY_UINT8 NPY_USHORT - typedef short npy_int8; - typedef unsigned short npy_uint8; -# define PyInt8ScalarObject PyShortScalarObject -# define PyInt8ArrType_Type PyShortArrType_Type -# define PyUInt8ScalarObject PyUShortScalarObject -# define PyUInt8ArrType_Type PyUShortArrType_Type -#define NPY_INT8_FMT NPY_SHORT_FMT -#define NPY_UINT8_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT - typedef short npy_int16; - typedef unsigned short npy_uint16; -# define PyInt16ScalarObject PyShortScalarObject -# define PyInt16ArrType_Type PyShortArrType_Type -# define PyUInt16ScalarObject PyUShortScalarObject -# define PyUInt16ArrType_Type PyUShortArrType_Type -#define NPY_INT16_FMT NPY_SHORT_FMT -#define NPY_UINT16_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_SHORT -#define NPY_UINT32 NPY_USHORT - typedef short npy_int32; - typedef unsigned short npy_uint32; - typedef unsigned short npy_ucs4; -# define PyInt32ScalarObject PyShortScalarObject -# define PyInt32ArrType_Type PyShortArrType_Type -# define PyUInt32ScalarObject PyUShortScalarObject -# define PyUInt32ArrType_Type PyUShortArrType_Type -#define NPY_INT32_FMT NPY_SHORT_FMT -#define NPY_UINT32_FMT NPY_USHORT_FMT -#endif -#elif NPY_BITSOF_SHORT == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_SHORT -#define NPY_UINT64 NPY_USHORT - typedef short npy_int64; - typedef unsigned short npy_uint64; -# define PyInt64ScalarObject PyShortScalarObject -# define PyInt64ArrType_Type PyShortArrType_Type -# define PyUInt64ScalarObject PyUShortScalarObject -# define PyUInt64ArrType_Type PyUShortArrType_Type -#define NPY_INT64_FMT NPY_SHORT_FMT -#define NPY_UINT64_FMT NPY_USHORT_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_SHORT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_SHORT -#define NPY_UINT128 NPY_USHORT - typedef short npy_int128; - typedef unsigned short npy_uint128; -# define PyInt128ScalarObject PyShortScalarObject -# define PyInt128ArrType_Type PyShortArrType_Type -# define PyUInt128ScalarObject PyUShortScalarObject -# define PyUInt128ArrType_Type PyUShortArrType_Type -#define NPY_INT128_FMT NPY_SHORT_FMT -#define NPY_UINT128_FMT NPY_USHORT_FMT -#endif -#endif - - -#if NPY_BITSOF_CHAR == 8 -#ifndef NPY_INT8 -#define NPY_INT8 
NPY_BYTE -#define NPY_UINT8 NPY_UBYTE - typedef signed char npy_int8; - typedef unsigned char npy_uint8; -# define PyInt8ScalarObject PyByteScalarObject -# define PyInt8ArrType_Type PyByteArrType_Type -# define PyUInt8ScalarObject PyUByteScalarObject -# define PyUInt8ArrType_Type PyUByteArrType_Type -#define NPY_INT8_FMT NPY_BYTE_FMT -#define NPY_UINT8_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 16 -#ifndef NPY_INT16 -#define NPY_INT16 NPY_BYTE -#define NPY_UINT16 NPY_UBYTE - typedef signed char npy_int16; - typedef unsigned char npy_uint16; -# define PyInt16ScalarObject PyByteScalarObject -# define PyInt16ArrType_Type PyByteArrType_Type -# define PyUInt16ScalarObject PyUByteScalarObject -# define PyUInt16ArrType_Type PyUByteArrType_Type -#define NPY_INT16_FMT NPY_BYTE_FMT -#define NPY_UINT16_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 32 -#ifndef NPY_INT32 -#define NPY_INT32 NPY_BYTE -#define NPY_UINT32 NPY_UBYTE - typedef signed char npy_int32; - typedef unsigned char npy_uint32; - typedef unsigned char npy_ucs4; -# define PyInt32ScalarObject PyByteScalarObject -# define PyInt32ArrType_Type PyByteArrType_Type -# define PyUInt32ScalarObject PyUByteScalarObject -# define PyUInt32ArrType_Type PyUByteArrType_Type -#define NPY_INT32_FMT NPY_BYTE_FMT -#define NPY_UINT32_FMT NPY_UBYTE_FMT -#endif -#elif NPY_BITSOF_CHAR == 64 -#ifndef NPY_INT64 -#define NPY_INT64 NPY_BYTE -#define NPY_UINT64 NPY_UBYTE - typedef signed char npy_int64; - typedef unsigned char npy_uint64; -# define PyInt64ScalarObject PyByteScalarObject -# define PyInt64ArrType_Type PyByteArrType_Type -# define PyUInt64ScalarObject PyUByteScalarObject -# define PyUInt64ArrType_Type PyUByteArrType_Type -#define NPY_INT64_FMT NPY_BYTE_FMT -#define NPY_UINT64_FMT NPY_UBYTE_FMT -# define MyPyLong_FromInt64 PyLong_FromLong -# define MyPyLong_AsInt64 PyLong_AsLong -#endif -#elif NPY_BITSOF_CHAR == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_BYTE -#define NPY_UINT128 NPY_UBYTE - typedef signed char npy_int128; - typedef unsigned char npy_uint128; -# define PyInt128ScalarObject PyByteScalarObject -# define PyInt128ArrType_Type PyByteArrType_Type -# define PyUInt128ScalarObject PyUByteScalarObject -# define PyUInt128ArrType_Type PyUByteArrType_Type -#define NPY_INT128_FMT NPY_BYTE_FMT -#define NPY_UINT128_FMT NPY_UBYTE_FMT -#endif -#endif - - - -#if NPY_BITSOF_DOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_DOUBLE -#define NPY_COMPLEX64 NPY_CDOUBLE - typedef double npy_float32; - typedef npy_cdouble npy_complex64; -# define PyFloat32ScalarObject PyDoubleScalarObject -# define PyComplex64ScalarObject PyCDoubleScalarObject -# define PyFloat32ArrType_Type PyDoubleArrType_Type -# define PyComplex64ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX128 NPY_CDOUBLE - typedef double npy_float64; - typedef npy_cdouble npy_complex128; -# define PyFloat64ScalarObject PyDoubleScalarObject -# define PyComplex128ScalarObject PyCDoubleScalarObject -# define PyFloat64ArrType_Type PyDoubleArrType_Type -# define PyComplex128ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_DOUBLE -#define NPY_COMPLEX160 NPY_CDOUBLE - typedef double npy_float80; - typedef npy_cdouble npy_complex160; -# define 
PyFloat80ScalarObject PyDoubleScalarObject -# define PyComplex160ScalarObject PyCDoubleScalarObject -# define PyFloat80ArrType_Type PyDoubleArrType_Type -# define PyComplex160ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_DOUBLE -#define NPY_COMPLEX192 NPY_CDOUBLE - typedef double npy_float96; - typedef npy_cdouble npy_complex192; -# define PyFloat96ScalarObject PyDoubleScalarObject -# define PyComplex192ScalarObject PyCDoubleScalarObject -# define PyFloat96ArrType_Type PyDoubleArrType_Type -# define PyComplex192ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT -#endif -#elif NPY_BITSOF_DOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_DOUBLE -#define NPY_COMPLEX256 NPY_CDOUBLE - typedef double npy_float128; - typedef npy_cdouble npy_complex256; -# define PyFloat128ScalarObject PyDoubleScalarObject -# define PyComplex256ScalarObject PyCDoubleScalarObject -# define PyFloat128ArrType_Type PyDoubleArrType_Type -# define PyComplex256ArrType_Type PyCDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT -#endif -#endif - - - -#if NPY_BITSOF_FLOAT == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_COMPLEX64 NPY_CFLOAT - typedef float npy_float32; - typedef npy_cfloat npy_complex64; -# define PyFloat32ScalarObject PyFloatScalarObject -# define PyComplex64ScalarObject PyCFloatScalarObject -# define PyFloat32ArrType_Type PyFloatArrType_Type -# define PyComplex64ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT32_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_FLOAT -#define NPY_COMPLEX128 NPY_CFLOAT - typedef float npy_float64; - typedef npy_cfloat npy_complex128; -# define PyFloat64ScalarObject PyFloatScalarObject -# define PyComplex128ScalarObject PyCFloatScalarObject -# define PyFloat64ArrType_Type PyFloatArrType_Type -# define PyComplex128ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT64_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_FLOAT -#define NPY_COMPLEX160 NPY_CFLOAT - typedef float npy_float80; - typedef npy_cfloat npy_complex160; -# define PyFloat80ScalarObject PyFloatScalarObject -# define PyComplex160ScalarObject PyCFloatScalarObject -# define PyFloat80ArrType_Type PyFloatArrType_Type -# define PyComplex160ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT80_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_FLOAT -#define NPY_COMPLEX192 NPY_CFLOAT - typedef float npy_float96; - typedef npy_cfloat npy_complex192; -# define PyFloat96ScalarObject PyFloatScalarObject -# define PyComplex192ScalarObject PyCFloatScalarObject -# define PyFloat96ArrType_Type PyFloatArrType_Type -# define PyComplex192ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT96_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT -#endif -#elif NPY_BITSOF_FLOAT == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_FLOAT -#define NPY_COMPLEX256 NPY_CFLOAT - typedef float npy_float128; - typedef npy_cfloat npy_complex256; -# define PyFloat128ScalarObject PyFloatScalarObject -# define PyComplex256ScalarObject 
PyCFloatScalarObject -# define PyFloat128ArrType_Type PyFloatArrType_Type -# define PyComplex256ArrType_Type PyCFloatArrType_Type -#define NPY_FLOAT128_FMT NPY_FLOAT_FMT -#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT -#endif -#endif - -/* half/float16 isn't a floating-point type in C */ -#define NPY_FLOAT16 NPY_HALF -typedef npy_uint16 npy_half; -typedef npy_half npy_float16; - -#if NPY_BITSOF_LONGDOUBLE == 32 -#ifndef NPY_FLOAT32 -#define NPY_FLOAT32 NPY_LONGDOUBLE -#define NPY_COMPLEX64 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float32; - typedef npy_clongdouble npy_complex64; -# define PyFloat32ScalarObject PyLongDoubleScalarObject -# define PyComplex64ScalarObject PyCLongDoubleScalarObject -# define PyFloat32ArrType_Type PyLongDoubleArrType_Type -# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 64 -#ifndef NPY_FLOAT64 -#define NPY_FLOAT64 NPY_LONGDOUBLE -#define NPY_COMPLEX128 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float64; - typedef npy_clongdouble npy_complex128; -# define PyFloat64ScalarObject PyLongDoubleScalarObject -# define PyComplex128ScalarObject PyCLongDoubleScalarObject -# define PyFloat64ArrType_Type PyLongDoubleArrType_Type -# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 80 -#ifndef NPY_FLOAT80 -#define NPY_FLOAT80 NPY_LONGDOUBLE -#define NPY_COMPLEX160 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float80; - typedef npy_clongdouble npy_complex160; -# define PyFloat80ScalarObject PyLongDoubleScalarObject -# define PyComplex160ScalarObject PyCLongDoubleScalarObject -# define PyFloat80ArrType_Type PyLongDoubleArrType_Type -# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 96 -#ifndef NPY_FLOAT96 -#define NPY_FLOAT96 NPY_LONGDOUBLE -#define NPY_COMPLEX192 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float96; - typedef npy_clongdouble npy_complex192; -# define PyFloat96ScalarObject PyLongDoubleScalarObject -# define PyComplex192ScalarObject PyCLongDoubleScalarObject -# define PyFloat96ArrType_Type PyLongDoubleArrType_Type -# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 128 -#ifndef NPY_FLOAT128 -#define NPY_FLOAT128 NPY_LONGDOUBLE -#define NPY_COMPLEX256 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float128; - typedef npy_clongdouble npy_complex256; -# define PyFloat128ScalarObject PyLongDoubleScalarObject -# define PyComplex256ScalarObject PyCLongDoubleScalarObject -# define PyFloat128ArrType_Type PyLongDoubleArrType_Type -# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT -#endif -#elif NPY_BITSOF_LONGDOUBLE == 256 -#define NPY_FLOAT256 NPY_LONGDOUBLE -#define NPY_COMPLEX512 NPY_CLONGDOUBLE - typedef npy_longdouble npy_float256; - typedef npy_clongdouble npy_complex512; -# define PyFloat256ScalarObject PyLongDoubleScalarObject -# define PyComplex512ScalarObject PyCLongDoubleScalarObject -# define PyFloat256ArrType_Type PyLongDoubleArrType_Type -# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type 
-#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT -#endif - -/* datetime typedefs */ -typedef npy_int64 npy_timedelta; -typedef npy_int64 npy_datetime; -#define NPY_DATETIME_FMT NPY_INT64_FMT -#define NPY_TIMEDELTA_FMT NPY_INT64_FMT - -/* End of typedefs for numarray style bit-width names */ - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_cpu.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_cpu.h deleted file mode 100644 index 24d4ce1fc8ce8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_cpu.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - * This set (target) cpu specific macros: - * - Possible values: - * NPY_CPU_X86 - * NPY_CPU_AMD64 - * NPY_CPU_PPC - * NPY_CPU_PPC64 - * NPY_CPU_PPC64LE - * NPY_CPU_SPARC - * NPY_CPU_S390 - * NPY_CPU_IA64 - * NPY_CPU_HPPA - * NPY_CPU_ALPHA - * NPY_CPU_ARMEL - * NPY_CPU_ARMEB - * NPY_CPU_SH_LE - * NPY_CPU_SH_BE - */ -#ifndef _NPY_CPUARCH_H_ -#define _NPY_CPUARCH_H_ - -#include "numpyconfig.h" - -#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) - /* - * __i386__ is defined by gcc and Intel compiler on Linux, - * _M_IX86 by VS compiler, - * i386 by Sun compilers on opensolaris at least - */ - #define NPY_CPU_X86 -#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) - /* - * both __x86_64__ and __amd64__ are defined by gcc - * __x86_64 defined by sun compiler on opensolaris at least - * _M_AMD64 defined by MS compiler - */ - #define NPY_CPU_AMD64 -#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) - /* - * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, - * but can't find it ATM - * _ARCH_PPC is used by at least gcc on AIX - */ - #define NPY_CPU_PPC -#elif defined(__ppc64le__) - #define NPY_CPU_PPC64LE -#elif defined(__ppc64__) - #define NPY_CPU_PPC64 -#elif defined(__sparc__) || defined(__sparc) - /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ - #define NPY_CPU_SPARC -#elif defined(__s390__) - #define NPY_CPU_S390 -#elif defined(__ia64) - #define NPY_CPU_IA64 -#elif defined(__hppa) - #define NPY_CPU_HPPA -#elif defined(__alpha__) - #define NPY_CPU_ALPHA -#elif defined(__arm__) && defined(__ARMEL__) - #define NPY_CPU_ARMEL -#elif defined(__arm__) && defined(__ARMEB__) - #define NPY_CPU_ARMEB -#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) - #define NPY_CPU_SH_LE -#elif defined(__sh__) && defined(__BIG_ENDIAN__) - #define NPY_CPU_SH_BE -#elif defined(__MIPSEL__) - #define NPY_CPU_MIPSEL -#elif defined(__MIPSEB__) - #define NPY_CPU_MIPSEB -#elif defined(__or1k__) - #define NPY_CPU_OR1K -#elif defined(__aarch64__) - #define NPY_CPU_AARCH64 -#elif defined(__mc68000__) - #define NPY_CPU_M68K -#else - #error Unknown CPU, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) -#endif - -/* - This "white-lists" the architectures that we know don't require - pointer alignment. We white-list, since the memcpy version will - work everywhere, whereas assignment will only work where pointer - dereferencing doesn't require alignment. - - TODO: There may be more architectures we can white list. 
-*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) - #define NPY_COPY_PYOBJECT_PTR(dst, src) (*((PyObject **)(dst)) = *((PyObject **)(src))) -#else - #if NPY_SIZEOF_PY_INTPTR_T == 4 - #define NPY_COPY_PYOBJECT_PTR(dst, src) \ - ((char*)(dst))[0] = ((char*)(src))[0]; \ - ((char*)(dst))[1] = ((char*)(src))[1]; \ - ((char*)(dst))[2] = ((char*)(src))[2]; \ - ((char*)(dst))[3] = ((char*)(src))[3]; - #elif NPY_SIZEOF_PY_INTPTR_T == 8 - #define NPY_COPY_PYOBJECT_PTR(dst, src) \ - ((char*)(dst))[0] = ((char*)(src))[0]; \ - ((char*)(dst))[1] = ((char*)(src))[1]; \ - ((char*)(dst))[2] = ((char*)(src))[2]; \ - ((char*)(dst))[3] = ((char*)(src))[3]; \ - ((char*)(dst))[4] = ((char*)(src))[4]; \ - ((char*)(dst))[5] = ((char*)(src))[5]; \ - ((char*)(dst))[6] = ((char*)(src))[6]; \ - ((char*)(dst))[7] = ((char*)(src))[7]; - #else - #error Unknown architecture, please report this to numpy maintainers with \ - information about your platform (OS, CPU and compiler) - #endif -#endif - -#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)) -#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1 -#else -#define NPY_CPU_HAVE_UNALIGNED_ACCESS 0 -#endif - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_endian.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_endian.h deleted file mode 100644 index 3ba03d0e38724..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_endian.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef _NPY_ENDIAN_H_ -#define _NPY_ENDIAN_H_ - -/* - * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in - * endian.h - */ - -#ifdef NPY_HAVE_ENDIAN_H - /* Use endian.h if available */ - #include <endian.h> - - #define NPY_BYTE_ORDER __BYTE_ORDER - #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN - #define NPY_BIG_ENDIAN __BIG_ENDIAN -#else - /* Set endianness info using target CPU */ - #include "npy_cpu.h" - - #define NPY_LITTLE_ENDIAN 1234 - #define NPY_BIG_ENDIAN 4321 - - #if defined(NPY_CPU_X86) \ - || defined(NPY_CPU_AMD64) \ - || defined(NPY_CPU_IA64) \ - || defined(NPY_CPU_ALPHA) \ - || defined(NPY_CPU_ARMEL) \ - || defined(NPY_CPU_AARCH64) \ - || defined(NPY_CPU_SH_LE) \ - || defined(NPY_CPU_MIPSEL) \ - || defined(NPY_CPU_PPC64LE) - #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN - #elif defined(NPY_CPU_PPC) \ - || defined(NPY_CPU_SPARC) \ - || defined(NPY_CPU_S390) \ - || defined(NPY_CPU_HPPA) \ - || defined(NPY_CPU_PPC64) \ - || defined(NPY_CPU_ARMEB) \ - || defined(NPY_CPU_SH_BE) \ - || defined(NPY_CPU_MIPSEB) \ - || defined(NPY_CPU_OR1K) \ - || defined(NPY_CPU_M68K) - #define NPY_BYTE_ORDER NPY_BIG_ENDIAN - #else - #error Unknown CPU: can not set endianness - #endif -#endif - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_interrupt.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_interrupt.h deleted file mode 100644 index f71fd689ebfb5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_interrupt.h +++ /dev/null @@ -1,117 +0,0 @@ - -/* Signal handling: - -This header file defines macros that allow your code to handle -interrupts received during processing. Interrupts that -could reasonably be handled: - -SIGINT, SIGABRT, SIGALRM, SIGSEGV - -****Warning*************** - -Do not allow code that creates temporary memory or increases reference -counts of Python objects to be interrupted unless you handle it -differently.
- -************************** - -The mechanism for handling interrupts is conceptually simple: - - - replace the signal handler with our own home-grown version - and store the old one. - - run the code to be interrupted -- if an interrupt occurs - the handler should basically just cause a return to the - calling function for finish work. - - restore the old signal handler - -Of course, every code that allows interrupts must account for -returning via the interrupt and handle clean-up correctly. But, -even still, the simple paradigm is complicated by at least three -factors. - - 1) platform portability (i.e. Microsoft says not to use longjmp - to return from signal handling. They have a __try and __except - extension to C instead but what about mingw?). - - 2) how to handle threads: apparently whether signals are delivered to - every thread of the process or the "invoking" thread is platform - dependent. --- we don't handle threads for now. - - 3) do we need to worry about re-entrance. For now, assume the - code will not call-back into itself. - -Ideas: - - 1) Start by implementing an approach that works on platforms that - can use setjmp and longjmp functionality and does nothing - on other platforms. - - 2) Ignore threads --- i.e. do not mix interrupt handling and threads - - 3) Add a default signal_handler function to the C-API but have the rest - use macros. - - -Simple Interface: - - -In your C-extension: around a block of code you want to be interruptable -with a SIGINT - -NPY_SIGINT_ON -[code] -NPY_SIGINT_OFF - -In order for this to work correctly, the -[code] block must not allocate any memory or alter the reference count of any -Python objects. In other words [code] must be interruptible so that continuation -after NPY_SIGINT_OFF will only be "missing some computations" - -Interrupt handling does not work well with threads. 
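The "Simple Interface" described above is easiest to see in context. Below is a minimal sketch, not part of this patch, of a C extension helper guarded by these macros; the function name and its arguments are invented for illustration, and it assumes the NumPy C-API has already been initialized with import_array():

    /* Hypothetical helper: sum a buffer, abandoning the loop if the
     * user hits Ctrl-C. The guarded block allocates no memory and
     * touches no Python reference counts, so an interrupt can only
     * leave the sum incomplete, exactly the situation the header's
     * "missing some computations" remark describes. */
    #include <Python.h>
    #include <numpy/arrayobject.h>
    #include <numpy/npy_interrupt.h>

    static double
    sum_interruptible(const double *data, long n)
    {
        double total = 0.0;
        long i;

        NPY_SIGINT_ON              /* install SIGINT handler, sigsetjmp */
        for (i = 0; i < n; i++) {
            total += data[i];      /* pure arithmetic: safe to abandon */
        }
        NPY_SIGINT_OFF             /* longjmp target; restore handler */

        return total;              /* partial if interrupted */
    }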
- -*/ - -/* Add signal handling macros - Make the global variable and signal handler part of the C-API -*/ - -#ifndef NPY_INTERRUPT_H -#define NPY_INTERRUPT_H - -#ifndef NPY_NO_SIGNAL - -#include <setjmp.h> -#include <signal.h> - -#ifndef sigsetjmp - -#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1) -#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) -#define NPY_SIGJMP_BUF jmp_buf - -#else - -#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) -#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) -#define NPY_SIGJMP_BUF sigjmp_buf - -#endif - -# define NPY_SIGINT_ON { \ - PyOS_sighandler_t _npy_sig_save; \ - _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ - if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ - 1) == 0) { \ - -# define NPY_SIGINT_OFF } \ - PyOS_setsig(SIGINT, _npy_sig_save); \ - } - -#else /* NPY_NO_SIGNAL */ - -#define NPY_SIGINT_ON -#define NPY_SIGINT_OFF - -#endif /* HAVE_SIGSETJMP */ - -#endif /* NPY_INTERRUPT_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_math.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_math.h deleted file mode 100644 index b7920460d88ad..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_math.h +++ /dev/null @@ -1,479 +0,0 @@ -#ifndef __NPY_MATH_C99_H_ -#define __NPY_MATH_C99_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include <math.h> -#ifdef __SUNPRO_CC -#include <sunmath.h> -#endif -#ifdef HAVE_NPY_CONFIG_H -#include <npy_config.h> -#endif -#include <numpy/npy_common.h> - - -/* - * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 - * for INFINITY) - * - * XXX: I should test whether INFINITY and NAN are available on the platform - */ -NPY_INLINE static float __npy_inff(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_nanf(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_pzerof(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; - return __bint.__f; -} - -NPY_INLINE static float __npy_nzerof(void) -{ - const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; - return __bint.__f; -} - -#define NPY_INFINITYF __npy_inff() -#define NPY_NANF __npy_nanf() -#define NPY_PZEROF __npy_pzerof() -#define NPY_NZEROF __npy_nzerof() - -#define NPY_INFINITY ((npy_double)NPY_INFINITYF) -#define NPY_NAN ((npy_double)NPY_NANF) -#define NPY_PZERO ((npy_double)NPY_PZEROF) -#define NPY_NZERO ((npy_double)NPY_NZEROF) - -#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) -#define NPY_NANL ((npy_longdouble)NPY_NANF) -#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) -#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) - -/* - * Useful constants - */ -#define NPY_E 2.718281828459045235360287471352662498 /* e */ -#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ -#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ -#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ -#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ -#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ -#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ -#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ -#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ -#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ -#define NPY_EULER 0.577215664901532860606512090082402431 /* 
Euler constant */ -#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ -#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ - -#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ -#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ -#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ -#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ -#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ -#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ -#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ -#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ -#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ -#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ -#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constan*/ -#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ -#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ - -#define NPY_El 2.718281828459045235360287471352662498L /* e */ -#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ -#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ -#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ -#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ -#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ -#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ -#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ -#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ -#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ -#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constan*/ -#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ -#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ - -/* - * C99 double math funcs - */ -double npy_sin(double x); -double npy_cos(double x); -double npy_tan(double x); -double npy_sinh(double x); -double npy_cosh(double x); -double npy_tanh(double x); - -double npy_asin(double x); -double npy_acos(double x); -double npy_atan(double x); -double npy_aexp(double x); -double npy_alog(double x); -double npy_asqrt(double x); -double npy_afabs(double x); - -double npy_log(double x); -double npy_log10(double x); -double npy_exp(double x); -double npy_sqrt(double x); - -double npy_fabs(double x); -double npy_ceil(double x); -double npy_fmod(double x, double y); -double npy_floor(double x); - -double npy_expm1(double x); -double npy_log1p(double x); -double npy_hypot(double x, double y); -double npy_acosh(double x); -double npy_asinh(double xx); -double npy_atanh(double x); -double npy_rint(double x); -double npy_trunc(double x); -double npy_exp2(double x); -double npy_log2(double x); - -double npy_atan2(double x, double y); -double npy_pow(double x, double y); -double npy_modf(double x, double* y); - -double npy_copysign(double x, double y); -double npy_nextafter(double x, double y); -double npy_spacing(double x); - -/* - * IEEE 754 fpu handling. 
Those are guaranteed to be macros - */ - -/* use builtins to avoid function calls in tight loops - * only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISNAN - #define npy_isnan(x) __builtin_isnan(x) -#else - #ifndef NPY_HAVE_DECL_ISNAN - #define npy_isnan(x) ((x) != (x)) - #else - #ifdef _MSC_VER - #define npy_isnan(x) _isnan((x)) - #else - #define npy_isnan(x) isnan(x) - #endif - #endif -#endif - - -/* only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISFINITE - #define npy_isfinite(x) __builtin_isfinite(x) -#else - #ifndef NPY_HAVE_DECL_ISFINITE - #ifdef _MSC_VER - #define npy_isfinite(x) _finite((x)) - #else - #define npy_isfinite(x) !npy_isnan((x) + (-x)) - #endif - #else - #define npy_isfinite(x) isfinite((x)) - #endif -#endif - -/* only available if npy_config.h is available (= numpys own build) */ -#if HAVE___BUILTIN_ISINF - #define npy_isinf(x) __builtin_isinf(x) -#else - #ifndef NPY_HAVE_DECL_ISINF - #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) - #else - #ifdef _MSC_VER - #define npy_isinf(x) (!_finite((x)) && !_isnan((x))) - #else - #define npy_isinf(x) isinf((x)) - #endif - #endif -#endif - -#ifndef NPY_HAVE_DECL_SIGNBIT - int _npy_signbit_f(float x); - int _npy_signbit_d(double x); - int _npy_signbit_ld(long double x); - #define npy_signbit(x) \ - (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ - : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \ - : _npy_signbit_f (x)) -#else - #define npy_signbit(x) signbit((x)) -#endif - -/* - * float C99 math functions - */ - -float npy_sinf(float x); -float npy_cosf(float x); -float npy_tanf(float x); -float npy_sinhf(float x); -float npy_coshf(float x); -float npy_tanhf(float x); -float npy_fabsf(float x); -float npy_floorf(float x); -float npy_ceilf(float x); -float npy_rintf(float x); -float npy_truncf(float x); -float npy_sqrtf(float x); -float npy_log10f(float x); -float npy_logf(float x); -float npy_expf(float x); -float npy_expm1f(float x); -float npy_asinf(float x); -float npy_acosf(float x); -float npy_atanf(float x); -float npy_asinhf(float x); -float npy_acoshf(float x); -float npy_atanhf(float x); -float npy_log1pf(float x); -float npy_exp2f(float x); -float npy_log2f(float x); - -float npy_atan2f(float x, float y); -float npy_hypotf(float x, float y); -float npy_powf(float x, float y); -float npy_fmodf(float x, float y); - -float npy_modff(float x, float* y); - -float npy_copysignf(float x, float y); -float npy_nextafterf(float x, float y); -float npy_spacingf(float x); - -/* - * float C99 math functions - */ - -npy_longdouble npy_sinl(npy_longdouble x); -npy_longdouble npy_cosl(npy_longdouble x); -npy_longdouble npy_tanl(npy_longdouble x); -npy_longdouble npy_sinhl(npy_longdouble x); -npy_longdouble npy_coshl(npy_longdouble x); -npy_longdouble npy_tanhl(npy_longdouble x); -npy_longdouble npy_fabsl(npy_longdouble x); -npy_longdouble npy_floorl(npy_longdouble x); -npy_longdouble npy_ceill(npy_longdouble x); -npy_longdouble npy_rintl(npy_longdouble x); -npy_longdouble npy_truncl(npy_longdouble x); -npy_longdouble npy_sqrtl(npy_longdouble x); -npy_longdouble npy_log10l(npy_longdouble x); -npy_longdouble npy_logl(npy_longdouble x); -npy_longdouble npy_expl(npy_longdouble x); -npy_longdouble npy_expm1l(npy_longdouble x); -npy_longdouble npy_asinl(npy_longdouble x); -npy_longdouble npy_acosl(npy_longdouble x); -npy_longdouble npy_atanl(npy_longdouble x); -npy_longdouble npy_asinhl(npy_longdouble x); -npy_longdouble 
npy_acoshl(npy_longdouble x); -npy_longdouble npy_atanhl(npy_longdouble x); -npy_longdouble npy_log1pl(npy_longdouble x); -npy_longdouble npy_exp2l(npy_longdouble x); -npy_longdouble npy_log2l(npy_longdouble x); - -npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); - -npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); - -npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_spacingl(npy_longdouble x); - -/* - * Non standard functions - */ -double npy_deg2rad(double x); -double npy_rad2deg(double x); -double npy_logaddexp(double x, double y); -double npy_logaddexp2(double x, double y); - -float npy_deg2radf(float x); -float npy_rad2degf(float x); -float npy_logaddexpf(float x, float y); -float npy_logaddexp2f(float x, float y); - -npy_longdouble npy_deg2radl(npy_longdouble x); -npy_longdouble npy_rad2degl(npy_longdouble x); -npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); - -#define npy_degrees npy_rad2deg -#define npy_degreesf npy_rad2degf -#define npy_degreesl npy_rad2degl - -#define npy_radians npy_deg2rad -#define npy_radiansf npy_deg2radf -#define npy_radiansl npy_deg2radl - -/* - * Complex declarations - */ - -/* - * C99 specifies that complex numbers have the same representation as - * an array of two elements, where the first element is the real part - * and the second element is the imaginary part. - */ -#define __NPY_CPACK_IMP(x, y, type, ctype) \ - union { \ - ctype z; \ - type a[2]; \ - } z1;; \ - \ - z1.a[0] = (x); \ - z1.a[1] = (y); \ - \ - return z1.z; - -static NPY_INLINE npy_cdouble npy_cpack(double x, double y) -{ - __NPY_CPACK_IMP(x, y, double, npy_cdouble); -} - -static NPY_INLINE npy_cfloat npy_cpackf(float x, float y) -{ - __NPY_CPACK_IMP(x, y, float, npy_cfloat); -} - -static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y) -{ - __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble); -} -#undef __NPY_CPACK_IMP - -/* - * Same remark as above, but in the other direction: extract first/second - * member of complex number, assuming a C99-compatible representation - * - * Those are defineds as static inline, and such as a reasonable compiler would - * most likely compile this to one or two instructions (on CISC at least) - */ -#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \ - union { \ - ctype z; \ - type a[2]; \ - } __z_repr; \ - __z_repr.z = z; \ - \ - return __z_repr.a[index]; - -static NPY_INLINE double npy_creal(npy_cdouble z) -{ - __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble); -} - -static NPY_INLINE double npy_cimag(npy_cdouble z) -{ - __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble); -} - -static NPY_INLINE float npy_crealf(npy_cfloat z) -{ - __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat); -} - -static NPY_INLINE float npy_cimagf(npy_cfloat z) -{ - __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat); -} - -static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z) -{ - __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble); -} - -static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z) -{ - __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble); -} -#undef __NPY_CEXTRACT_IMP - -/* - * Double precision complex functions - */ -double 
npy_cabs(npy_cdouble z); -double npy_carg(npy_cdouble z); - -npy_cdouble npy_cexp(npy_cdouble z); -npy_cdouble npy_clog(npy_cdouble z); -npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); - -npy_cdouble npy_csqrt(npy_cdouble z); - -npy_cdouble npy_ccos(npy_cdouble z); -npy_cdouble npy_csin(npy_cdouble z); - -/* - * Single precision complex functions - */ -float npy_cabsf(npy_cfloat z); -float npy_cargf(npy_cfloat z); - -npy_cfloat npy_cexpf(npy_cfloat z); -npy_cfloat npy_clogf(npy_cfloat z); -npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); - -npy_cfloat npy_csqrtf(npy_cfloat z); - -npy_cfloat npy_ccosf(npy_cfloat z); -npy_cfloat npy_csinf(npy_cfloat z); - -/* - * Extended precision complex functions - */ -npy_longdouble npy_cabsl(npy_clongdouble z); -npy_longdouble npy_cargl(npy_clongdouble z); - -npy_clongdouble npy_cexpl(npy_clongdouble z); -npy_clongdouble npy_clogl(npy_clongdouble z); -npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); - -npy_clongdouble npy_csqrtl(npy_clongdouble z); - -npy_clongdouble npy_ccosl(npy_clongdouble z); -npy_clongdouble npy_csinl(npy_clongdouble z); - -/* - * Functions that set the floating point error - * status word. - */ - -/* - * platform-dependent code translates floating point - * status to an integer sum of these values - */ -#define NPY_FPE_DIVIDEBYZERO 1 -#define NPY_FPE_OVERFLOW 2 -#define NPY_FPE_UNDERFLOW 4 -#define NPY_FPE_INVALID 8 - -int npy_get_floatstatus(void); -int npy_clear_floatstatus(void); -void npy_set_floatstatus_divbyzero(void); -void npy_set_floatstatus_overflow(void); -void npy_set_floatstatus_underflow(void); -void npy_set_floatstatus_invalid(void); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_no_deprecated_api.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_no_deprecated_api.h deleted file mode 100644 index 6183dc2784a78..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_no_deprecated_api.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * This include file is provided for inclusion in Cython *.pyd files where - * one would like to define the NPY_NO_DEPRECATED_API macro. It can be - * included by - * - * cdef extern from "npy_no_deprecated_api.h": pass - * - */ -#ifndef NPY_NO_DEPRECATED_API - -/* put this check here since there may be multiple includes in C extensions. */ -#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \ - defined(OLD_DEFINES_H) -#error "npy_no_deprecated_api.h" must be first among numpy includes. 
-#else -#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#endif - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_os.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_os.h deleted file mode 100644 index 9228c3916eab5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/npy_os.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _NPY_OS_H_ -#define _NPY_OS_H_ - -#if defined(linux) || defined(__linux) || defined(__linux__) - #define NPY_OS_LINUX -#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ - defined(__OpenBSD__) || defined(__DragonFly__) - #define NPY_OS_BSD - #ifdef __FreeBSD__ - #define NPY_OS_FREEBSD - #elif defined(__NetBSD__) - #define NPY_OS_NETBSD - #elif defined(__OpenBSD__) - #define NPY_OS_OPENBSD - #elif defined(__DragonFly__) - #define NPY_OS_DRAGONFLY - #endif -#elif defined(sun) || defined(__sun) - #define NPY_OS_SOLARIS -#elif defined(__CYGWIN__) - #define NPY_OS_CYGWIN -#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) - #define NPY_OS_WIN32 -#elif defined(__APPLE__) - #define NPY_OS_DARWIN -#else - #define NPY_OS_UNKNOWN -#endif - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/numpyconfig.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/numpyconfig.h deleted file mode 100644 index 9d6dce004ac59..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/numpyconfig.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef _NPY_NUMPYCONFIG_H_ -#define _NPY_NUMPYCONFIG_H_ - -#include "_numpyconfig.h" - -/* - * On Mac OS X, because there is only one configuration stage for all the archs - * in universal builds, any macro which depends on the arch needs to be - * harcoded - */ -#ifdef __APPLE__ - #undef NPY_SIZEOF_LONG - #undef NPY_SIZEOF_PY_INTPTR_T - - #ifdef __LP64__ - #define NPY_SIZEOF_LONG 8 - #define NPY_SIZEOF_PY_INTPTR_T 8 - #else - #define NPY_SIZEOF_LONG 4 - #define NPY_SIZEOF_PY_INTPTR_T 4 - #endif -#endif - -/** - * To help with the NPY_NO_DEPRECATED_API macro, we include API version - * numbers for specific versions of NumPy. To exclude all API that was - * deprecated as of 1.7, add the following before #including any NumPy - * headers: - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - */ -#define NPY_1_7_API_VERSION 0x00000007 -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_1_9_API_VERSION 0x00000008 - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/old_defines.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/old_defines.h deleted file mode 100644 index abf81595ae160..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/old_defines.h +++ /dev/null @@ -1,187 +0,0 @@ -/* This header is deprecated as of NumPy 1.7 */ -#ifndef OLD_DEFINES_H -#define OLD_DEFINES_H - -#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION -#error The header "old_defines.h" is deprecated as of NumPy 1.7. 
-#endif - -#define NDARRAY_VERSION NPY_VERSION - -#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE -#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE -#define PyArray_BUFSIZE NPY_BUFSIZE - -#define PyArray_PRIORITY NPY_PRIORITY -#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY -#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE - -#define NPY_MAX PyArray_MAX -#define NPY_MIN PyArray_MIN - -#define PyArray_TYPES NPY_TYPES -#define PyArray_BOOL NPY_BOOL -#define PyArray_BYTE NPY_BYTE -#define PyArray_UBYTE NPY_UBYTE -#define PyArray_SHORT NPY_SHORT -#define PyArray_USHORT NPY_USHORT -#define PyArray_INT NPY_INT -#define PyArray_UINT NPY_UINT -#define PyArray_LONG NPY_LONG -#define PyArray_ULONG NPY_ULONG -#define PyArray_LONGLONG NPY_LONGLONG -#define PyArray_ULONGLONG NPY_ULONGLONG -#define PyArray_HALF NPY_HALF -#define PyArray_FLOAT NPY_FLOAT -#define PyArray_DOUBLE NPY_DOUBLE -#define PyArray_LONGDOUBLE NPY_LONGDOUBLE -#define PyArray_CFLOAT NPY_CFLOAT -#define PyArray_CDOUBLE NPY_CDOUBLE -#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE -#define PyArray_OBJECT NPY_OBJECT -#define PyArray_STRING NPY_STRING -#define PyArray_UNICODE NPY_UNICODE -#define PyArray_VOID NPY_VOID -#define PyArray_DATETIME NPY_DATETIME -#define PyArray_TIMEDELTA NPY_TIMEDELTA -#define PyArray_NTYPES NPY_NTYPES -#define PyArray_NOTYPE NPY_NOTYPE -#define PyArray_CHAR NPY_CHAR -#define PyArray_USERDEF NPY_USERDEF -#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES - -#define PyArray_INTP NPY_INTP -#define PyArray_UINTP NPY_UINTP - -#define PyArray_INT8 NPY_INT8 -#define PyArray_UINT8 NPY_UINT8 -#define PyArray_INT16 NPY_INT16 -#define PyArray_UINT16 NPY_UINT16 -#define PyArray_INT32 NPY_INT32 -#define PyArray_UINT32 NPY_UINT32 - -#ifdef NPY_INT64 -#define PyArray_INT64 NPY_INT64 -#define PyArray_UINT64 NPY_UINT64 -#endif - -#ifdef NPY_INT128 -#define PyArray_INT128 NPY_INT128 -#define PyArray_UINT128 NPY_UINT128 -#endif - -#ifdef NPY_FLOAT16 -#define PyArray_FLOAT16 NPY_FLOAT16 -#define PyArray_COMPLEX32 NPY_COMPLEX32 -#endif - -#ifdef NPY_FLOAT80 -#define PyArray_FLOAT80 NPY_FLOAT80 -#define PyArray_COMPLEX160 NPY_COMPLEX160 -#endif - -#ifdef NPY_FLOAT96 -#define PyArray_FLOAT96 NPY_FLOAT96 -#define PyArray_COMPLEX192 NPY_COMPLEX192 -#endif - -#ifdef NPY_FLOAT128 -#define PyArray_FLOAT128 NPY_FLOAT128 -#define PyArray_COMPLEX256 NPY_COMPLEX256 -#endif - -#define PyArray_FLOAT32 NPY_FLOAT32 -#define PyArray_COMPLEX64 NPY_COMPLEX64 -#define PyArray_FLOAT64 NPY_FLOAT64 -#define PyArray_COMPLEX128 NPY_COMPLEX128 - - -#define PyArray_TYPECHAR NPY_TYPECHAR -#define PyArray_BOOLLTR NPY_BOOLLTR -#define PyArray_BYTELTR NPY_BYTELTR -#define PyArray_UBYTELTR NPY_UBYTELTR -#define PyArray_SHORTLTR NPY_SHORTLTR -#define PyArray_USHORTLTR NPY_USHORTLTR -#define PyArray_INTLTR NPY_INTLTR -#define PyArray_UINTLTR NPY_UINTLTR -#define PyArray_LONGLTR NPY_LONGLTR -#define PyArray_ULONGLTR NPY_ULONGLTR -#define PyArray_LONGLONGLTR NPY_LONGLONGLTR -#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR -#define PyArray_HALFLTR NPY_HALFLTR -#define PyArray_FLOATLTR NPY_FLOATLTR -#define PyArray_DOUBLELTR NPY_DOUBLELTR -#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR -#define PyArray_CFLOATLTR NPY_CFLOATLTR -#define PyArray_CDOUBLELTR NPY_CDOUBLELTR -#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR -#define PyArray_OBJECTLTR NPY_OBJECTLTR -#define PyArray_STRINGLTR NPY_STRINGLTR -#define PyArray_STRINGLTR2 NPY_STRINGLTR2 -#define PyArray_UNICODELTR NPY_UNICODELTR -#define PyArray_VOIDLTR NPY_VOIDLTR -#define PyArray_DATETIMELTR NPY_DATETIMELTR -#define 
PyArray_TIMEDELTALTR NPY_TIMEDELTALTR -#define PyArray_CHARLTR NPY_CHARLTR -#define PyArray_INTPLTR NPY_INTPLTR -#define PyArray_UINTPLTR NPY_UINTPLTR -#define PyArray_GENBOOLLTR NPY_GENBOOLLTR -#define PyArray_SIGNEDLTR NPY_SIGNEDLTR -#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR -#define PyArray_FLOATINGLTR NPY_FLOATINGLTR -#define PyArray_COMPLEXLTR NPY_COMPLEXLTR - -#define PyArray_QUICKSORT NPY_QUICKSORT -#define PyArray_HEAPSORT NPY_HEAPSORT -#define PyArray_MERGESORT NPY_MERGESORT -#define PyArray_SORTKIND NPY_SORTKIND -#define PyArray_NSORTS NPY_NSORTS - -#define PyArray_NOSCALAR NPY_NOSCALAR -#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR -#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR -#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR -#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR -#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR -#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR -#define PyArray_SCALARKIND NPY_SCALARKIND -#define PyArray_NSCALARKINDS NPY_NSCALARKINDS - -#define PyArray_ANYORDER NPY_ANYORDER -#define PyArray_CORDER NPY_CORDER -#define PyArray_FORTRANORDER NPY_FORTRANORDER -#define PyArray_ORDER NPY_ORDER - -#define PyDescr_ISBOOL PyDataType_ISBOOL -#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED -#define PyDescr_ISSIGNED PyDataType_ISSIGNED -#define PyDescr_ISINTEGER PyDataType_ISINTEGER -#define PyDescr_ISFLOAT PyDataType_ISFLOAT -#define PyDescr_ISNUMBER PyDataType_ISNUMBER -#define PyDescr_ISSTRING PyDataType_ISSTRING -#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX -#define PyDescr_ISPYTHON PyDataType_ISPYTHON -#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE -#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF -#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED -#define PyDescr_ISOBJECT PyDataType_ISOBJECT -#define PyDescr_HASFIELDS PyDataType_HASFIELDS - -#define PyArray_LITTLE NPY_LITTLE -#define PyArray_BIG NPY_BIG -#define PyArray_NATIVE NPY_NATIVE -#define PyArray_SWAP NPY_SWAP -#define PyArray_IGNORE NPY_IGNORE - -#define PyArray_NATBYTE NPY_NATBYTE -#define PyArray_OPPBYTE NPY_OPPBYTE - -#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE - -#define PyArray_USE_PYMEM NPY_USE_PYMEM - -#define PyArray_RemoveLargest PyArray_RemoveSmallest - -#define PyArray_UCS4 npy_ucs4 - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/oldnumeric.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/oldnumeric.h deleted file mode 100644 index 748f06da31d26..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/oldnumeric.h +++ /dev/null @@ -1,23 +0,0 @@ -#include "arrayobject.h" - -#ifndef REFCOUNT -# define REFCOUNT NPY_REFCOUNT -# define MAX_ELSIZE 16 -#endif - -#define PyArray_UNSIGNED_TYPES -#define PyArray_SBYTE NPY_BYTE -#define PyArray_CopyArray PyArray_CopyInto -#define _PyArray_multiply_list PyArray_MultiplyIntList -#define PyArray_ISSPACESAVER(m) NPY_FALSE -#define PyScalarArray_Check PyArray_CheckScalar - -#define CONTIGUOUS NPY_CONTIGUOUS -#define OWN_DIMENSIONS 0 -#define OWN_STRIDES 0 -#define OWN_DATA NPY_OWNDATA -#define SAVESPACE 0 -#define SAVESPACEBIT 0 - -#undef import_array -#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufunc_api.txt b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufunc_api.txt deleted file mode 100644 index 606037f35108c..0000000000000 --- 
a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufunc_api.txt +++ /dev/null @@ -1,321 +0,0 @@ - -================= -Numpy Ufunc C-API -================= -:: - - PyObject * - PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void - **data, char *types, int ntypes, int nin, int - nout, int identity, const char *name, const - char *doc, int check_return) - - -:: - - int - PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int - usertype, PyUFuncGenericFunction - function, int *arg_types, void *data) - - -:: - - int - PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject - *kwds, PyArrayObject **op) - - -This generic function is called with the ufunc object, the arguments to it, -and an array of (pointers to) PyArrayObjects which are NULL. - -'op' is an array of at least NPY_MAXARGS PyArrayObject *. - -:: - - void - PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - int - PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject - **errobj) - - -On return, if errobj is populated with a non-NULL value, the caller -owns a new reference to errobj. 
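As a reading aid for the PyUFunc_FromFuncAndData signature at the top of this listing, here is a minimal sketch of a one-loop ufunc. The "halve" loop, its arrays, and the wrapper function are invented for illustration; only the C-API names and the argument order come from the listing, and the sketch assumes import_array() and import_umath() have already run in the module init:

::

    /* One strided double->double inner loop plus its registration. */
    #include <Python.h>
    #include <numpy/ndarraytypes.h>
    #include <numpy/ufuncobject.h>

    static void
    halve_loop(char **args, npy_intp *dimensions, npy_intp *steps, void *data)
    {
        npy_intp i, n = dimensions[0];       /* number of elements */
        char *in = args[0], *out = args[1];  /* raw pointers, byte strides */

        for (i = 0; i < n; i++) {
            *(double *)out = 0.5 * *(double *)in;
            in += steps[0];                  /* advance by byte stride */
            out += steps[1];
        }
    }

    static PyUFuncGenericFunction halve_funcs[] = {&halve_loop};
    static char halve_types[] = {NPY_DOUBLE, NPY_DOUBLE};  /* nin + nout */
    static void *halve_data[] = {NULL};

    /* ntypes=1, nin=1, nout=1, identity=PyUFunc_None, check_return=0 */
    static PyObject *
    make_halve(void)
    {
        return PyUFunc_FromFuncAndData(halve_funcs, halve_data, halve_types,
                                       1, 1, 1, PyUFunc_None,
                                       "halve", "Return x/2 elementwise.", 0);
    }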
- -:: - - int - PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) - - -:: - - void - PyUFunc_clearfperr() - - -:: - - int - PyUFunc_getfperr(void ) - - -:: - - int - PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int - *first) - - -:: - - int - PyUFunc_ReplaceLoopBySignature(PyUFuncObject - *func, PyUFuncGenericFunction - newfunc, int - *signature, PyUFuncGenericFunction - *oldfunc) - - -:: - - PyObject * - PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void - **data, char *types, int - ntypes, int nin, int nout, int - identity, const char *name, const - char *doc, int check_return, const - char *signature) - - -:: - - int - PyUFunc_SetUsesArraysAsData(void **data, size_t i) - - -:: - - void - PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void - *func) - - -:: - - void - PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - void - PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp - *steps, void *func) - - -:: - - int - PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING - casting, PyArrayObject - **operands, PyObject - *type_tup, PyArray_Descr **out_dtypes) - - -This function applies the default type resolution rules -for the provided ufunc. - -Returns 0 on success, -1 on error. - -:: - - int - PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING - casting, PyArrayObject - **operands, PyArray_Descr **dtypes) - - -Validates that the input operands can be cast to -the input types, and the output types can be cast to -the output operands where provided. - -Returns 0 on success, -1 (with exception raised) on validation failure. - -:: - - int - PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr - *user_dtype, PyUFuncGenericFunction - function, PyArray_Descr - **arg_dtypes, void *data) - - diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufuncobject.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufuncobject.h deleted file mode 100644 index a24a0d83774fb..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/ufuncobject.h +++ /dev/null @@ -1,375 +0,0 @@ -#ifndef Py_UFUNCOBJECT_H -#define Py_UFUNCOBJECT_H - -#include <numpy/npy_math.h> -#include <numpy/npy_common.h> - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * The legacy generic inner loop for a standard element-wise or - * generalized ufunc. - */ -typedef void (*PyUFuncGenericFunction) - (char **args, - npy_intp *dimensions, - npy_intp *strides, - void *innerloopdata); - -/* - * The most generic one-dimensional inner loop for - * a standard element-wise ufunc. This typedef is also - * more consistent with the other NumPy function pointer typedefs - * than PyUFuncGenericFunction. - */ -typedef void (PyUFunc_StridedInnerLoopFunc)( - char **dataptrs, npy_intp *strides, - npy_intp count, - NpyAuxData *innerloopdata); - -/* - * The most generic one-dimensional inner loop for - * a masked standard element-wise ufunc. "Masked" here means that it skips - * doing calculations on any items for which the maskptr array has a true - * value.
- */ -typedef void (PyUFunc_MaskedStridedInnerLoopFunc)( - char **dataptrs, npy_intp *strides, - char *maskptr, npy_intp mask_stride, - npy_intp count, - NpyAuxData *innerloopdata); - -/* Forward declaration for the type resolver and loop selector typedefs */ -struct _tagPyUFuncObject; - -/* - * Given the operands for calling a ufunc, should determine the - * calculation input and output data types and return an inner loop function. - * This function should validate that the casting rule is being followed, - * and fail if it is not. - * - * For backwards compatibility, the regular type resolution function does not - * support auxiliary data with object semantics. The type resolution call - * which returns a masked generic function returns a standard NpyAuxData - * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros - * work. - * - * ufunc: The ufunc object. - * casting: The 'casting' parameter provided to the ufunc. - * operands: An array of length (ufunc->nin + ufunc->nout), - * with the output parameters possibly NULL. - * type_tup: Either NULL, or the type_tup passed to the ufunc. - * out_dtypes: An array which should be populated with new - * references to (ufunc->nin + ufunc->nout) new - * dtypes, one for each input and output. These - * dtypes should all be in native-endian format. - * - * Should return 0 on success, -1 on failure (with exception set), - * or -2 if Py_NotImplemented should be returned. - */ -typedef int (PyUFunc_TypeResolutionFunc)( - struct _tagPyUFuncObject *ufunc, - NPY_CASTING casting, - PyArrayObject **operands, - PyObject *type_tup, - PyArray_Descr **out_dtypes); - -/* - * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc, - * and an array of fixed strides (the array will contain NPY_MAX_INTP for - * strides which are not necessarily fixed), returns an inner loop - * with associated auxiliary data. - * - * For backwards compatibility, there is a variant of the inner loop - * selection which returns an inner loop irrespective of the strides, - * and with a void* static auxiliary data instead of an NpyAuxData * - * dynamically allocatable auxiliary data. - * - * ufunc: The ufunc object. - * dtypes: An array which has been populated with dtypes, - * in most cases by the type resolution funciton - * for the same ufunc. - * fixed_strides: For each input/output, either the stride that - * will be used every time the function is called - * or NPY_MAX_INTP if the stride might change or - * is not known ahead of time. The loop selection - * function may use this stride to pick inner loops - * which are optimized for contiguous or 0-stride - * cases. - * out_innerloop: Should be populated with the correct ufunc inner - * loop for the given type. - * out_innerloopdata: Should be populated with the void* data to - * be passed into the out_innerloop function. - * out_needs_api: If the inner loop needs to use the Python API, - * should set the to 1, otherwise should leave - * this untouched. 
- */ -typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyUFuncGenericFunction *out_innerloop, - void **out_innerloopdata, - int *out_needs_api); -typedef int (PyUFunc_InnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - npy_intp *fixed_strides, - PyUFunc_StridedInnerLoopFunc **out_innerloop, - NpyAuxData **out_innerloopdata, - int *out_needs_api); -typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)( - struct _tagPyUFuncObject *ufunc, - PyArray_Descr **dtypes, - PyArray_Descr *mask_dtype, - npy_intp *fixed_strides, - npy_intp fixed_mask_stride, - PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop, - NpyAuxData **out_innerloopdata, - int *out_needs_api); - -typedef struct _tagPyUFuncObject { - PyObject_HEAD - /* - * nin: Number of inputs - * nout: Number of outputs - * nargs: Always nin + nout (Why is it stored?) - */ - int nin, nout, nargs; - - /* Identity for reduction, either PyUFunc_One or PyUFunc_Zero */ - int identity; - - /* Array of one-dimensional core loops */ - PyUFuncGenericFunction *functions; - /* Array of funcdata that gets passed into the functions */ - void **data; - /* The number of elements in 'functions' and 'data' */ - int ntypes; - - /* Does not appear to be used */ - int check_return; - - /* The name of the ufunc */ - const char *name; - - /* Array of type numbers, of size ('nargs' * 'ntypes') */ - char *types; - - /* Documentation string */ - const char *doc; - - void *ptr; - PyObject *obj; - PyObject *userloops; - - /* generalized ufunc parameters */ - - /* 0 for scalar ufunc; 1 for generalized ufunc */ - int core_enabled; - /* number of distinct dimension names in signature */ - int core_num_dim_ix; - - /* - * dimension indices of input/output argument k are stored in - * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] - */ - - /* numbers of core dimensions of each argument */ - int *core_num_dims; - /* - * dimension indices in a flatted form; indices - * are in the range of [0,core_num_dim_ix) - */ - int *core_dim_ixs; - /* - * positions of 1st core dimensions of each - * argument in core_dim_ixs - */ - int *core_offsets; - /* signature string for printing purpose */ - char *core_signature; - - /* - * A function which resolves the types and fills an array - * with the dtypes for the inputs and outputs. - */ - PyUFunc_TypeResolutionFunc *type_resolver; - /* - * A function which returns an inner loop written for - * NumPy 1.6 and earlier ufuncs. This is for backwards - * compatibility, and may be NULL if inner_loop_selector - * is specified. - */ - PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; - /* - * A function which returns an inner loop for the new mechanism - * in NumPy 1.7 and later. If provided, this is used, otherwise - * if NULL the legacy_inner_loop_selector is used instead. - */ - PyUFunc_InnerLoopSelectionFunc *inner_loop_selector; - /* - * A function which returns a masked inner loop for the ufunc. - */ - PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector; - - /* - * List of flags for each operand when ufunc is called by nditer object. - * These flags will be used in addition to the default flags for each - * operand set by nditer object. - */ - npy_uint32 *op_flags; - - /* - * List of global flags used when ufunc is called by nditer object. - * These flags will be used in addition to the default global flags - * set by nditer object. 
- */ - npy_uint32 iter_flags; -} PyUFuncObject; - -#include "arrayobject.h" - -#define UFUNC_ERR_IGNORE 0 -#define UFUNC_ERR_WARN 1 -#define UFUNC_ERR_RAISE 2 -#define UFUNC_ERR_CALL 3 -#define UFUNC_ERR_PRINT 4 -#define UFUNC_ERR_LOG 5 - - /* Python side integer mask */ - -#define UFUNC_MASK_DIVIDEBYZERO 0x07 -#define UFUNC_MASK_OVERFLOW 0x3f -#define UFUNC_MASK_UNDERFLOW 0x1ff -#define UFUNC_MASK_INVALID 0xfff - -#define UFUNC_SHIFT_DIVIDEBYZERO 0 -#define UFUNC_SHIFT_OVERFLOW 3 -#define UFUNC_SHIFT_UNDERFLOW 6 -#define UFUNC_SHIFT_INVALID 9 - - -#define UFUNC_OBJ_ISOBJECT 1 -#define UFUNC_OBJ_NEEDS_API 2 - - /* Default user error mode */ -#define UFUNC_ERR_DEFAULT \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ - (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) - -#if NPY_ALLOW_THREADS -#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); -#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); -#else -#define NPY_LOOP_BEGIN_THREADS -#define NPY_LOOP_END_THREADS -#endif - -/* - * UFunc has unit of 1, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_One 1 -/* - * UFunc has unit of 0, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_Zero 0 -/* - * UFunc has no unit, and the order of operations cannot be reordered. - * This case does not allow reduction with multiple axes at once. - */ -#define PyUFunc_None -1 -/* - * UFunc has no unit, and the order of operations can be reordered - * This case allows reduction with multiple axes at once. - */ -#define PyUFunc_ReorderableNone -2 - -#define UFUNC_REDUCE 0 -#define UFUNC_ACCUMULATE 1 -#define UFUNC_REDUCEAT 2 -#define UFUNC_OUTER 3 - - -typedef struct { - int nin; - int nout; - PyObject *callable; -} PyUFunc_PyFuncData; - -/* A linked-list of function information for - user-defined 1-d loops. - */ -typedef struct _loop1d_info { - PyUFuncGenericFunction func; - void *data; - int *arg_types; - struct _loop1d_info *next; - int nargs; - PyArray_Descr **arg_dtypes; -} PyUFunc_Loop1d; - - -#include "__ufunc_api.h" - -#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" - -#define UFUNC_CHECK_ERROR(arg) \ - do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \ - ((arg)->errormask && \ - PyUFunc_checkfperr((arg)->errormask, \ - (arg)->errobj, \ - &(arg)->first))) \ - goto fail;} while (0) - - -/* keep in sync with ieee754.c.src */ -#if defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \ - (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \ - defined(__NetBSD__) || \ - defined(__GLIBC__) || defined(__APPLE__) || \ - defined(__CYGWIN__) || defined(__MINGW32__) || \ - (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) || \ - defined(_AIX) || \ - defined(_MSC_VER) || \ - defined(__osf__) && defined(__alpha) -#else -#define NO_FLOATING_POINT_SUPPORT -#endif - - -/* - * THESE MACROS ARE DEPRECATED. - * Use npy_set_floatstatus_* in the npymath library. 
- */ -#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO -#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW -#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW -#define UFUNC_FPE_INVALID NPY_FPE_INVALID - -#define UFUNC_CHECK_STATUS(ret) \ - { \ - ret = npy_clear_floatstatus(); \ - } -#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() -#define generate_overflow_error() npy_set_floatstatus_overflow() - - /* Make sure it gets defined if it isn't already */ -#ifndef UFUNC_NOFPE -/* Clear the floating point exception default of Borland C++ */ -#if defined(__BORLANDC__) -#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); -#else -#define UFUNC_NOFPE -#endif -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_UFUNCOBJECT_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/utils.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/utils.h deleted file mode 100644 index cc968a35442d5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/include/numpy/utils.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __NUMPY_UTILS_HEADER__ -#define __NUMPY_UTILS_HEADER__ - -#ifndef __COMP_NPY_UNUSED - #if defined(__GNUC__) - #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) - # elif defined(__ICC) - #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) - #else - #define __COMP_NPY_UNUSED - #endif -#endif - -/* Use this to tag a variable as not used. It will remove unused variable - * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable - * to avoid accidental use */ -#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED - -#endif diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/info.py deleted file mode 100644 index 241f209b556ef..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/info.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Defines a multi-dimensional array and useful procedures for Numerical computation. - -Functions - -- array - NumPy Array construction -- zeros - Return an array of all zeros -- empty - Return an unitialized array -- shape - Return shape of sequence or array -- rank - Return number of dimensions -- size - Return number of elements in entire array or a - certain dimension -- fromstring - Construct array from (byte) string -- take - Select sub-arrays using sequence of indices -- put - Set sub-arrays using sequence of 1-D indices -- putmask - Set portion of arrays using a mask -- reshape - Return array with new shape -- repeat - Repeat elements of array -- choose - Construct new array from indexed array tuple -- correlate - Correlate two 1-d arrays -- searchsorted - Search for element in 1-d array -- sum - Total sum over a specified dimension -- average - Average, possibly weighted, over axis or array. 
-- cumsum - Cumulative sum over a specified dimension -- product - Total product over a specified dimension -- cumproduct - Cumulative product over a specified dimension -- alltrue - Logical and over an entire axis -- sometrue - Logical or over an entire axis -- allclose - Tests if sequences are essentially equal - -More Functions: - -- arange - Return regularly spaced array -- asarray - Guarantee NumPy array -- convolve - Convolve two 1-d arrays -- swapaxes - Exchange axes -- concatenate - Join arrays together -- transpose - Permute axes -- sort - Sort elements of array -- argsort - Indices of sorted array -- argmax - Index of largest value -- argmin - Index of smallest value -- inner - Inner product of two arrays -- dot - Dot product (matrix multiplication) -- outer - Outer product of two arrays -- resize - Return array with arbitrary new shape -- indices - Tuple of indices -- fromfunction - Construct array from universal function -- diagonal - Return diagonal array -- trace - Trace of array -- dump - Dump array to file object (pickle) -- dumps - Return pickled string representing data -- load - Return array stored in file object -- loads - Return array from pickled string -- ravel - Return array as 1-D -- nonzero - Indices of nonzero elements for 1-D array -- shape - Shape of array -- where - Construct array from binary result -- compress - Elements of array where condition is true -- clip - Clip array between two values -- ones - Array of all ones -- identity - 2-D identity array (matrix) - -(Universal) Math Functions - - add logical_or exp - subtract logical_xor log - multiply logical_not log10 - divide maximum sin - divide_safe minimum sinh - conjugate bitwise_and sqrt - power bitwise_or tan - absolute bitwise_xor tanh - negative invert ceil - greater left_shift fabs - greater_equal right_shift floor - less arccos arctan2 - less_equal arcsin fmod - equal arctan hypot - not_equal cos around - logical_and cosh sign - arccosh arcsinh arctanh - -""" -from __future__ import division, absolute_import, print_function - -depends = ['testing'] -global_symbols = ['*'] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/mlib.ini b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/mlib.ini deleted file mode 100644 index 5840f5e1bc167..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/mlib.ini +++ /dev/null @@ -1,12 +0,0 @@ -[meta] -Name = mlib -Description = Math library used with this version of numpy -Version = 1.0 - -[default] -Libs=-lm -Cflags= - -[msvc] -Libs=m.lib -Cflags= diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/npymath.ini b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/npymath.ini deleted file mode 100644 index 3e465ad2aceaf..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/lib/npy-pkg-config/npymath.ini +++ /dev/null @@ -1,20 +0,0 @@ -[meta] -Name=npymath -Description=Portable, core math library implementing C99 standard -Version=0.1 - -[variables] -pkgname=numpy.core -prefix=${pkgdir} -libdir=${prefix}/lib -includedir=${prefix}/include - -[default] -Libs=-L${libdir} -lnpymath -Cflags=-I${includedir} -Requires=mlib - -[msvc] -Libs=/LIBPATH:${libdir} npymath.lib -Cflags=/INCLUDE:${includedir} -Requires=mlib diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/machar.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/machar.py deleted file mode 100644 index 9eb4430a62052..0000000000000 ---
a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/machar.py +++ /dev/null @@ -1,338 +0,0 @@ -""" -Machine arithmetics - determine the parameters of the -floating-point arithmetic system - -Author: Pearu Peterson, September 2003 - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['MachAr'] - -from numpy.core.fromnumeric import any -from numpy.core.numeric import errstate - -# Need to speed this up...especially for longfloat - -class MachAr(object): - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. - machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, subtracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. - iexp : int - Number of bits in the exponent (including its sign and bias). - minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating point number ``beta**minexp`` (the smallest [in - magnitude] usable floating value). - maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). - irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - Same as `xmin`. - huge : float - Same as `xmax`. - precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. - float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. - iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. - - """ - def __init__(self, float_conv=float,int_conv=int, - float_to_float=float, - float_to_str = lambda v:'%24.16e' % v, - title = 'Python floating point number'): - """ - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the running arch.
- with errstate(under='ignore'): - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? Aren't they 2 and 2.0? - # Determine ibeta and beta - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - b = one - for _ in range(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp-a) - if any(itemp != 0): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in range(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - - betah = beta / two - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - temp = a + betah - irnd = 0 - if any(temp-a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd==0 and any(temp-tempa != zero): - irnd = 2 - - # Determine negep and epsneg - negep = it + 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in range(max_iterN): - temp = one - a - if any(temp-one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError("could not determine machine tolerance " - "for 'negep', locals() -> %s" % (locals())) - else: - raise RuntimeError(msg % (_, one.dtype)) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in range(max_iterN): - temp = one + a - if any(temp-one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError(msg % (_, one.dtype)) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd==0 and any(temp*one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in range(max_iterN): - y = z - z = y*y - a = z*one # Check here for underflow - temp = z*t - if any(a+a == zero) or any(abs(z)>=y): - break - temp1 = temp * betain - if any(temp1*beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError(msg % (_, one.dtype)) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in range(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any(a+a != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1*beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError(msg % (_, one.dtype)) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = maxexp - 1 - if any(a != y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax*one != xmax): - xmax = one - beta*epsneg - xmax = xmax / 
(xmin*beta*beta*beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta==2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - - import math - self.precision = int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - return '''\ -Machine parameters for %(title)s ---------------------------------------------------------------------- -ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s -machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon) -negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg) -minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny) -maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge) ---------------------------------------------------------------------- -''' % self.__dict__ - - -if __name__ == '__main__': - print(MachAr()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/memmap.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/memmap.py deleted file mode 100644 index b1c96ee293ae8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/memmap.py +++ /dev/null @@ -1,308 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__all__ = ['memmap'] - -import warnings -import sys - -import numpy as np -from .numeric import uint8, ndarray, dtype -from numpy.compat import long, basestring - -dtypedescr = dtype -valid_filemodes = ["r", "c", "r+", "w+"] -writeable_filemodes = ["r+", "w+"] - -mode_equivalents = { - "readonly":"r", - "copyonwrite":"c", - "readwrite":"r+", - "write":"w+" - } - -class memmap(ndarray): - """ - Create a memory-map to an array stored in a *binary* file on disk. - - Memory-mapped files are used for accessing small segments of large files - on disk, without reading the entire file into memory. Numpy's - memmap's are array-like objects. This differs from Python's ``mmap`` - module, which uses file-like objects. - - This subclass of ndarray has some unpleasant interactions with - some operations, because it doesn't quite fit properly as a subclass. - An alternative to using this subclass is to create the ``mmap`` - object yourself, then create an ndarray with ndarray.__new__ directly, - passing the object created in its 'buffer=' parameter. - - This class may at some point be turned into a factory function - which returns a view into an mmap buffer. - - Delete the memmap instance to close. - - - Parameters - ---------- - filename : str or file-like object - The file name or file object to be used as the array data buffer. - dtype : data-type, optional - The data-type used to interpret the file contents. - Default is `uint8`. 
- mode : {'r+', 'r', 'w+', 'c'}, optional - The file is opened in this mode: - - +------+-------------------------------------------------------------+ - | 'r' | Open existing file for reading only. | - +------+-------------------------------------------------------------+ - | 'r+' | Open existing file for reading and writing. | - +------+-------------------------------------------------------------+ - | 'w+' | Create or overwrite existing file for reading and writing. | - +------+-------------------------------------------------------------+ - | 'c' | Copy-on-write: assignments affect data in memory, but | - | | changes are not saved to disk. The file on disk is | - | | read-only. | - +------+-------------------------------------------------------------+ - - Default is 'r+'. - offset : int, optional - In the file, array data starts at this offset. Since `offset` is - measured in bytes, it should normally be a multiple of the byte-size - of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of - file are valid; The file will be extended to accommodate the - additional data. By default, ``memmap`` will start at the beginning of - the file, even if ``filename`` is a file pointer ``fp`` and - ``fp.tell() != 0``. - shape : tuple, optional - The desired shape of the array. If ``mode == 'r'`` and the number - of remaining bytes after `offset` is not a multiple of the byte-size - of `dtype`, you must specify `shape`. By default, the returned array - will be 1-D with the number of elements determined by file size - and data-type. - order : {'C', 'F'}, optional - Specify the order of the ndarray memory layout: C (row-major) or - Fortran (column-major). This only has an effect if the shape is - greater than 1-D. The default order is 'C'. - - Attributes - ---------- - filename : str - Path to the mapped file. - offset : int - Offset position in the file. - mode : str - File mode. - - Methods - ------- - flush - Flush any changes in memory to file on disk. - When you delete a memmap object, flush is called first to write - changes to disk before removing the object. - - - Notes - ----- - The memmap object can be used anywhere an ndarray is accepted. - Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns - ``True``. - - Memory-mapped arrays use the Python memory-map object which - (prior to Python 2.5) does not allow files to be larger than a - certain size depending on the platform. This size is always < 2GB - even on 64-bit systems. - - Examples - -------- - >>> data = np.arange(12, dtype='float32') - >>> data.resize((3,4)) - - This example uses a temporary file so that doctest doesn't write - files to your directory. You would use a 'normal' filename. 
- - >>> from tempfile import mkdtemp - >>> import os.path as path - >>> filename = path.join(mkdtemp(), 'newfile.dat') - - Create a memmap with dtype and shape that matches our data: - - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) - >>> fp - memmap([[ 0., 0., 0., 0.], - [ 0., 0., 0., 0.], - [ 0., 0., 0., 0.]], dtype=float32) - - Write data to memmap array: - - >>> fp[:] = data[:] - >>> fp - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - >>> fp.filename == path.abspath(filename) - True - - Deletion flushes memory changes to disk before removing the object: - - >>> del fp - - Load the memmap and verify data was stored: - - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) - >>> newfp - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - Read-only memmap: - - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) - >>> fpr.flags.writeable - False - - Copy-on-write memmap: - - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) - >>> fpc.flags.writeable - True - - It's possible to assign to copy-on-write array, but values are only - written into the memory copy of the array, and not written to disk: - - >>> fpc - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - >>> fpc[0,:] = 0 - >>> fpc - memmap([[ 0., 0., 0., 0.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - File on disk is unchanged: - - >>> fpr - memmap([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]], dtype=float32) - - Offset into a memmap: - - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) - >>> fpo - memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) - - """ - - __array_priority__ = -100.0 - def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, - shape=None, order='C'): - # Import here to minimize 'import numpy' overhead - import mmap - import os.path - try: - mode = mode_equivalents[mode] - except KeyError: - if mode not in valid_filemodes: - raise ValueError("mode must be one of %s" % - (valid_filemodes + list(mode_equivalents.keys()))) - - if hasattr(filename, 'read'): - fid = filename - own_file = False - else: - fid = open(filename, (mode == 'c' and 'r' or mode)+'b') - own_file = True - - if (mode == 'w+') and shape is None: - raise ValueError("shape must be given") - - fid.seek(0, 2) - flen = fid.tell() - descr = dtypedescr(dtype) - _dbytes = descr.itemsize - - if shape is None: - bytes = flen - offset - if (bytes % _dbytes): - fid.close() - raise ValueError("Size of available data is not a " - "multiple of the data-type size.") - size = bytes // _dbytes - shape = (size,) - else: - if not isinstance(shape, tuple): - shape = (shape,) - size = 1 - for k in shape: - size *= k - - bytes = long(offset + size*_dbytes) - - if mode == 'w+' or (mode == 'r+' and flen < bytes): - fid.seek(bytes - 1, 0) - fid.write(np.compat.asbytes('\0')) - fid.flush() - - if mode == 'c': - acc = mmap.ACCESS_COPY - elif mode == 'r': - acc = mmap.ACCESS_READ - else: - acc = mmap.ACCESS_WRITE - - start = offset - offset % mmap.ALLOCATIONGRANULARITY - bytes -= start - offset -= start - mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) - - self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, - offset=offset, order=order) - self._mmap = mm - self.offset = offset - self.mode = mode - - if isinstance(filename, basestring): - self.filename = os.path.abspath(filename) - # 
py3 returns int for TemporaryFile().name - elif (hasattr(filename, "name") and - isinstance(filename.name, basestring)): - self.filename = os.path.abspath(filename.name) - # same as memmap copies (e.g. memmap + 1) - else: - self.filename = None - - if own_file: - fid.close() - - return self - - def __array_finalize__(self, obj): - if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): - self._mmap = obj._mmap - self.filename = obj.filename - self.offset = obj.offset - self.mode = obj.mode - else: - self._mmap = None - self.filename = None - self.offset = None - self.mode = None - - def flush(self): - """ - Write any changes in the array to the file on disk. - - For further information, see `memmap`. - - Parameters - ---------- - None - - See Also - -------- - memmap - - """ - if self.base is not None and hasattr(self.base, 'flush'): - self.base.flush() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray.py deleted file mode 100644 index 123cb89678287..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'multiarray.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray_tests.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray_tests.py deleted file mode 100644 index 5f5ee01ef2a0d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/multiarray_tests.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'multiarray_tests.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numeric.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numeric.py deleted file mode 100644 index 5d7407ce0de9a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numeric.py +++ /dev/null @@ -1,2842 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import warnings -import collections -from . import multiarray -from . import umath -from .umath import (invert, sin, UFUNC_BUFSIZE_DEFAULT, ERR_IGNORE, - ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, - ERR_DEFAULT, PINF, NAN) -from . 
import numerictypes -from .numerictypes import longlong, intc, int_, float_, complex_, bool_ - -if sys.version_info[0] >= 3: - import pickle - basestring = str -else: - import cPickle as pickle - -loads = pickle.loads - - -__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', - 'arange', 'array', 'zeros', 'count_nonzero', - 'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile', - 'frombuffer', 'int_asbuffer', 'where', 'argwhere', 'copyto', - 'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops', - 'can_cast', 'promote_types', 'min_scalar_type', 'result_type', - 'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray', - 'isfortran', 'empty_like', 'zeros_like', 'ones_like', - 'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot', - 'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot', - 'array2string', 'get_printoptions', 'set_printoptions', - 'array_repr', 'array_str', 'set_string_function', - 'little_endian', 'require', - 'fromiter', 'array_equal', 'array_equiv', - 'indices', 'fromfunction', 'isclose', - 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', - 'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask', - 'seterr', 'geterr', 'setbufsize', 'getbufsize', - 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero', - 'Inf', 'inf', 'infty', 'Infinity', - 'nan', 'NaN', 'False_', 'True_', 'bitwise_not', - 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS', - 'ComplexWarning', 'may_share_memory', 'full', 'full_like'] - -if sys.version_info[0] < 3: - __all__.extend(['getbuffer', 'newbuffer']) - - -class ComplexWarning(RuntimeWarning): - """ - The warning raised when casting a complex dtype to a real dtype. - - As implemented, casting a complex number to a real discards its imaginary - part, but this behavior may not be what the user actually wants. - - """ - pass - -bitwise_not = invert - -CLIP = multiarray.CLIP -WRAP = multiarray.WRAP -RAISE = multiarray.RAISE -MAXDIMS = multiarray.MAXDIMS -ALLOW_THREADS = multiarray.ALLOW_THREADS -BUFSIZE = multiarray.BUFSIZE - -ndarray = multiarray.ndarray -flatiter = multiarray.flatiter -nditer = multiarray.nditer -nested_iters = multiarray.nested_iters -broadcast = multiarray.broadcast -dtype = multiarray.dtype -copyto = multiarray.copyto -ufunc = type(sin) - - -def zeros_like(a, dtype=None, order='K', subok=True): - """ - Return an array of zeros with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - dtype : data-type, optional - .. versionadded:: 1.6.0 - Overrides the data type of the result. - order : {'C', 'F', 'A', or 'K'}, optional - .. versionadded:: 1.6.0 - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - - Returns - ------- - out : ndarray - Array of zeros with the same shape and type as `a`. - - See Also - -------- - ones_like : Return an array of ones with shape and type of input. - empty_like : Return an empty array with shape and type of input. - zeros : Return a new array setting values to zero. - ones : Return a new array setting values to one. - empty : Return a new uninitialized array. 
- - Examples -------- - >>> x = np.arange(6) - >>> x = x.reshape((2, 3)) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.zeros_like(x) - array([[0, 0, 0], - [0, 0, 0]]) - - >>> y = np.arange(3, dtype=np.float) - >>> y - array([ 0., 1., 2.]) - >>> np.zeros_like(y) - array([ 0., 0., 0.]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok) - # needed instead of a 0 to get same result as zeros for string dtypes - z = zeros(1, dtype=res.dtype) - multiarray.copyto(res, z, casting='unsafe') - return res - -def ones(shape, dtype=None, order='C'): - """ - Return a new array of given shape and type, filled with ones. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default is - `numpy.float64`. - order : {'C', 'F'}, optional - Whether to store multidimensional data in C- or Fortran-contiguous - (row- or column-wise) order in memory. - - Returns - ------- - out : ndarray - Array of ones with the given shape, dtype, and order. - - See Also - -------- - zeros, ones_like - - Examples - -------- - >>> np.ones(5) - array([ 1., 1., 1., 1., 1.]) - - >>> np.ones((5,), dtype=np.int) - array([1, 1, 1, 1, 1]) - - >>> np.ones((2, 1)) - array([[ 1.], - [ 1.]]) - - >>> s = (2,2) - >>> np.ones(s) - array([[ 1., 1.], - [ 1., 1.]]) - - """ - a = empty(shape, dtype, order) - multiarray.copyto(a, 1, casting='unsafe') - return a - -def ones_like(a, dtype=None, order='K', subok=True): - """ - Return an array of ones with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - dtype : data-type, optional - .. versionadded:: 1.6.0 - Overrides the data type of the result. - order : {'C', 'F', 'A', or 'K'}, optional - .. versionadded:: 1.6.0 - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - - Returns - ------- - out : ndarray - Array of ones with the same shape and type as `a`. - - See Also - -------- - zeros_like : Return an array of zeros with shape and type of input. - empty_like : Return an empty array with shape and type of input. - zeros : Return a new array setting values to zero. - ones : Return a new array setting values to one. - empty : Return a new uninitialized array. - - Examples - -------- - >>> x = np.arange(6) - >>> x = x.reshape((2, 3)) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.ones_like(x) - array([[1, 1, 1], - [1, 1, 1]]) - - >>> y = np.arange(3, dtype=np.float) - >>> y - array([ 0., 1., 2.]) - >>> np.ones_like(y) - array([ 1., 1., 1.]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok) - multiarray.copyto(res, 1, casting='unsafe') - return res - -def full(shape, fill_value, dtype=None, order='C'): - """ - Return a new array of given shape and type, filled with `fill_value`. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the new array, e.g., ``(2, 3)`` or ``2``. - fill_value : scalar - Fill value. - dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default - is chosen as `np.array(fill_value).dtype`.
- order : {'C', 'F'}, optional - Whether to store multidimensional data in C- or Fortran-contiguous - (row- or column-wise) order in memory. - - Returns - ------- - out : ndarray - Array of `fill_value` with the given shape, dtype, and order. - - See Also - -------- - zeros_like : Return an array of zeros with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - empty_like : Return an empty array with shape and type of input. - full_like : Fill an array with shape and type of input. - zeros : Return a new array setting values to zero. - ones : Return a new array setting values to one. - empty : Return a new uninitialized array. - - Examples - -------- - >>> np.full((2, 2), np.inf) - array([[ inf, inf], - [ inf, inf]]) - >>> np.full((2, 2), 10, dtype=np.int) - array([[10, 10], - [10, 10]]) - - """ - a = empty(shape, dtype, order) - multiarray.copyto(a, fill_value, casting='unsafe') - return a - -def full_like(a, fill_value, dtype=None, order='K', subok=True): - """ - Return a full array with the same shape and type as a given array. - - Parameters - ---------- - a : array_like - The shape and data-type of `a` define these same attributes of - the returned array. - fill_value : scalar - Fill value. - dtype : data-type, optional - Overrides the data type of the result. - order : {'C', 'F', 'A', or 'K'}, optional - Overrides the memory layout of the result. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. - subok : bool, optional. - If True, then the newly created array will use the sub-class - type of 'a', otherwise it will be a base-class array. Defaults - to True. - - Returns - ------- - out : ndarray - Array of `fill_value` with the same shape and type as `a`. - - See Also - -------- - zeros_like : Return an array of zeros with shape and type of input. - ones_like : Return an array of ones with shape and type of input. - empty_like : Return an empty array with shape and type of input. - zeros : Return a new array setting values to zero. - ones : Return a new array setting values to one. - empty : Return a new uninitialized array. - full : Fill a new array. 
- - Examples - -------- - >>> x = np.arange(6, dtype=np.int) - >>> np.full_like(x, 1) - array([1, 1, 1, 1, 1, 1]) - >>> np.full_like(x, 0.1) - array([0, 0, 0, 0, 0, 0]) - >>> np.full_like(x, 0.1, dtype=np.double) - array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> np.full_like(x, np.nan, dtype=np.double) - array([ nan, nan, nan, nan, nan, nan]) - - >>> y = np.arange(6, dtype=np.double) - >>> np.full_like(y, 0.1) - array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - - """ - res = empty_like(a, dtype=dtype, order=order, subok=subok) - multiarray.copyto(res, fill_value, casting='unsafe') - return res - - -def extend_all(module): - adict = {} - for a in __all__: - adict[a] = 1 - try: - mall = getattr(module, '__all__') - except AttributeError: - mall = [k for k in module.__dict__.keys() if not k.startswith('_')] - for a in mall: - if a not in adict: - __all__.append(a) - -newaxis = None - - -arange = multiarray.arange -array = multiarray.array -zeros = multiarray.zeros -count_nonzero = multiarray.count_nonzero -empty = multiarray.empty -empty_like = multiarray.empty_like -fromstring = multiarray.fromstring -fromiter = multiarray.fromiter -fromfile = multiarray.fromfile -frombuffer = multiarray.frombuffer -may_share_memory = multiarray.may_share_memory -if sys.version_info[0] < 3: - newbuffer = multiarray.newbuffer - getbuffer = multiarray.getbuffer -int_asbuffer = multiarray.int_asbuffer -where = multiarray.where -concatenate = multiarray.concatenate -fastCopyAndTranspose = multiarray._fastCopyAndTranspose -set_numeric_ops = multiarray.set_numeric_ops -can_cast = multiarray.can_cast -promote_types = multiarray.promote_types -min_scalar_type = multiarray.min_scalar_type -result_type = multiarray.result_type -lexsort = multiarray.lexsort -compare_chararrays = multiarray.compare_chararrays -putmask = multiarray.putmask -einsum = multiarray.einsum - -def asarray(a, dtype=None, order=None): - """ - Convert the input to an array. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('F' for FORTRAN) - memory representation. Defaults to 'C'. - - Returns - ------- - out : ndarray - Array interpretation of `a`. No copy is performed if the input - is already an ndarray. If `a` is a subclass of ndarray, a base - class ndarray is returned. - - See Also - -------- - asanyarray : Similar function which passes through subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. 
- - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asarray(a) - array([1, 2]) - - Existing arrays are not copied: - - >>> a = np.array([1, 2]) - >>> np.asarray(a) is a - True - - If `dtype` is set, array is copied only if dtype does not match: - - >>> a = np.array([1, 2], dtype=np.float32) - >>> np.asarray(a, dtype=np.float32) is a - True - >>> np.asarray(a, dtype=np.float64) is a - False - - Contrary to `asanyarray`, ndarray subclasses are not passed through: - - >>> issubclass(np.matrix, np.ndarray) - True - >>> a = np.matrix([[1, 2]]) - >>> np.asarray(a) is a - False - >>> np.asanyarray(a) is a - True - - """ - return array(a, dtype, copy=False, order=order) - -def asanyarray(a, dtype=None, order=None): - """ - Convert the input to an ndarray, but pass ndarray subclasses through. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes scalars, lists, lists of tuples, tuples, tuples of tuples, - tuples of lists, and ndarrays. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('F') memory - representation. Defaults to 'C'. - - Returns - ------- - out : ndarray or an ndarray subclass - Array interpretation of `a`. If `a` is an ndarray or a subclass - of ndarray, it is returned as-is and no copy is performed. - - See Also - -------- - asarray : Similar function which always returns ndarrays. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and - Infs. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array: - - >>> a = [1, 2] - >>> np.asanyarray(a) - array([1, 2]) - - Instances of `ndarray` subclasses are passed through as-is: - - >>> a = np.matrix([1, 2]) - >>> np.asanyarray(a) is a - True - - """ - return array(a, dtype, copy=False, order=order, subok=True) - -def ascontiguousarray(a, dtype=None): - """ - Return a contiguous array in memory (C order). - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - Data-type of returned array. - - Returns - ------- - out : ndarray - Contiguous array of same shape and content as `a`, with type `dtype` - if specified. - - See Also - -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> np.ascontiguousarray(x, dtype=np.float32) - array([[ 0., 1., 2.], - [ 3., 4., 5.]], dtype=float32) - >>> x.flags['C_CONTIGUOUS'] - True - - """ - return array(a, dtype, copy=False, order='C', ndmin=1) - -def asfortranarray(a, dtype=None): - """ - Return an array laid out in Fortran order in memory. - - Parameters - ---------- - a : array_like - Input array. - dtype : str or dtype object, optional - By default, the data-type is inferred from the input data. - - Returns - ------- - out : ndarray - The input `a` in Fortran, or column-major, order. - - See Also - -------- - ascontiguousarray : Convert input to a contiguous (C order) array. 
- asanyarray : Convert input to an ndarray with either row or - column-major memory order. - require : Return an ndarray that satisfies requirements. - ndarray.flags : Information about the memory layout of the array. - - Examples -------- - >>> x = np.arange(6).reshape(2,3) - >>> y = np.asfortranarray(x) - >>> x.flags['F_CONTIGUOUS'] - False - >>> y.flags['F_CONTIGUOUS'] - True - - """ - return array(a, dtype, copy=False, order='F', ndmin=1) - -def require(a, dtype=None, requirements=None): - """ - Return an ndarray of the provided type that satisfies requirements. - - This function is useful to be sure that an array with the correct flags - is returned for passing to compiled code (perhaps through ctypes). - - Parameters - ---------- - a : array_like - The object to be converted to a type-and-requirement-satisfying array. - dtype : data-type - The required data-type, the default data-type is float64. - requirements : str or list of str - The requirements list can be any of the following - - * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array - * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array - * 'ALIGNED' ('A') - ensure a data-type aligned array - * 'WRITEABLE' ('W') - ensure a writable array - * 'OWNDATA' ('O') - ensure an array that owns its own data - - See Also - -------- - asarray : Convert input to an ndarray. - asanyarray : Convert to an ndarray, but pass through ndarray subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. - ndarray.flags : Information about the memory layout of the array. - - Notes - ----- - The returned array will be guaranteed to have the listed requirements - by making a copy if needed. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> x.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : False - OWNDATA : False - WRITEABLE : True - ALIGNED : True - UPDATEIFCOPY : False - - >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) - >>> y.flags - C_CONTIGUOUS : False - F_CONTIGUOUS : True - OWNDATA : True - WRITEABLE : True - ALIGNED : True - UPDATEIFCOPY : False - - """ - if requirements is None: - requirements = [] - else: - requirements = [x.upper() for x in requirements] - - if not requirements: - return asanyarray(a, dtype=dtype) - - if 'ENSUREARRAY' in requirements or 'E' in requirements: - subok = False - else: - subok = True - - arr = array(a, dtype=dtype, copy=False, subok=subok) - - copychar = 'A' - if 'FORTRAN' in requirements or \ - 'F_CONTIGUOUS' in requirements or \ - 'F' in requirements: - copychar = 'F' - elif 'CONTIGUOUS' in requirements or \ - 'C_CONTIGUOUS' in requirements or \ - 'C' in requirements: - copychar = 'C' - - for prop in requirements: - if not arr.flags[prop]: - arr = arr.copy(copychar) - break - return arr - -def isfortran(a): - """ - Returns True if array is arranged in Fortran-order in memory - and not C-order. - - Parameters - ---------- - a : ndarray - Input array. - - - Examples - -------- - - np.array allows one to specify whether the array is written in C-contiguous - order (last index varies the fastest), or FORTRAN-contiguous order in - memory (first index varies the fastest).
- - >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(a) - False - - >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN') - >>> b - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(b) - True - - - The transpose of a C-ordered array is a FORTRAN-ordered array. - - >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') - >>> a - array([[1, 2, 3], - [4, 5, 6]]) - >>> np.isfortran(a) - False - >>> b = a.T - >>> b - array([[1, 4], - [2, 5], - [3, 6]]) - >>> np.isfortran(b) - True - - C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. - - >>> np.isfortran(np.array([1, 2], order='FORTRAN')) - False - - """ - return a.flags.fnc - -def argwhere(a): - """ - Find the indices of array elements that are non-zero, grouped by element. - - Parameters - ---------- - a : array_like - Input data. - - Returns - ------- - index_array : ndarray - Indices of elements that are non-zero. Indices are grouped by element. - - See Also - -------- - where, nonzero - - Notes - ----- - ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. - - The output of ``argwhere`` is not suitable for indexing arrays. - For this purpose use ``where(a)`` instead. - - Examples - -------- - >>> x = np.arange(6).reshape(2,3) - >>> x - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.argwhere(x>1) - array([[0, 2], - [1, 0], - [1, 1], - [1, 2]]) - - """ - return transpose(nonzero(a)) - -def flatnonzero(a): - """ - Return indices that are non-zero in the flattened version of a. - - This is equivalent to a.ravel().nonzero()[0]. - - Parameters - ---------- - a : ndarray - Input array. - - Returns - ------- - res : ndarray - Output array, containing the indices of the elements of `a.ravel()` - that are non-zero. - - See Also - -------- - nonzero : Return the indices of the non-zero elements of the input array. - ravel : Return a 1-D array containing the elements of the input array. - - Examples - -------- - >>> x = np.arange(-2, 3) - >>> x - array([-2, -1, 0, 1, 2]) - >>> np.flatnonzero(x) - array([0, 1, 3, 4]) - - Use the indices of the non-zero elements as an index array to extract - these elements: - - >>> x.ravel()[np.flatnonzero(x)] - array([-2, -1, 1, 2]) - - """ - return a.ravel().nonzero()[0] - -_mode_from_name_dict = {'v': 0, - 's' : 1, - 'f' : 2} - -def _mode_from_name(mode): - if isinstance(mode, basestring): - return _mode_from_name_dict[mode.lower()[0]] - return mode - -def correlate(a, v, mode='valid', old_behavior=False): - """ - Cross-correlation of two 1-dimensional sequences. - - This function computes the correlation as generally defined in signal - processing texts:: - - c_{av}[k] = sum_n a[n+k] * conj(v[n]) - - with a and v sequences being zero-padded where necessary and conj being - the conjugate. - - Parameters - ---------- - a, v : array_like - Input sequences. - mode : {'valid', 'same', 'full'}, optional - Refer to the `convolve` docstring. Note that the default - is `valid`, unlike `convolve`, which uses `full`. - old_behavior : bool - If True, uses the old behavior from Numeric, - (correlate(a,v) == correlate(v,a), and the conjugate is not taken - for complex arrays). If False, uses the conventional signal - processing definition. - - Returns - ------- - out : ndarray - Discrete cross-correlation of `a` and `v`. - - See Also - -------- - convolve : Discrete, linear convolution of two one-dimensional sequences. 
- - Notes - ----- - The definition of correlation above is not unique and sometimes correlation - may be defined differently. Another common definition is:: - - c'_{av}[k] = sum_n a[n] conj(v[n+k]) - - which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``. - - Examples - -------- - >>> np.correlate([1, 2, 3], [0, 1, 0.5]) - array([ 3.5]) - >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") - array([ 2. , 3.5, 3. ]) - >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") - array([ 0.5, 2. , 3.5, 3. , 0. ]) - - Using complex sequences: - - >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') - array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) - - Note that you get the time reversed, complex conjugated result - when the two input sequences change places, i.e., - ``c_{va}[k] = c^{*}_{av}[-k]``: - - >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') - array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) - - """ - mode = _mode_from_name(mode) -# the old behavior should be made available under a different name, see thread -# http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630 - if old_behavior: - warnings.warn(""" -The old behavior of correlate was deprecated for 1.4.0, and will be completely removed -for NumPy 2.0. - -The new behavior fits the conventional definition of correlation: inputs are -never swapped, and the second argument is conjugated for complex arrays.""", - DeprecationWarning) - return multiarray.correlate(a, v, mode) - else: - return multiarray.correlate2(a, v, mode) - -def convolve(a,v,mode='full'): - """ - Returns the discrete, linear convolution of two one-dimensional sequences. - - The convolution operator is often seen in signal processing, where it - models the effect of a linear time-invariant system on a signal [1]_. In - probability theory, the sum of two independent random variables is - distributed according to the convolution of their individual - distributions. - - If `v` is longer than `a`, the arrays are swapped before computation. - - Parameters - ---------- - a : (N,) array_like - First one-dimensional input array. - v : (M,) array_like - Second one-dimensional input array. - mode : {'full', 'valid', 'same'}, optional - 'full': - By default, mode is 'full'. This returns the convolution - at each point of overlap, with an output shape of (N+M-1,). At - the end-points of the convolution, the signals do not overlap - completely, and boundary effects may be seen. - - 'same': - Mode `same` returns output of length ``max(M, N)``. Boundary - effects are still visible. - - 'valid': - Mode `valid` returns output of length - ``max(M, N) - min(M, N) + 1``. The convolution product is only given - for points where the signals overlap completely. Values outside - the signal boundary have no effect. - - Returns - ------- - out : ndarray - Discrete, linear convolution of `a` and `v`. - - See Also - -------- - scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier - Transform. - scipy.linalg.toeplitz : Used to construct the convolution operator. - polymul : Polynomial multiplication. Same output as convolve, but also - accepts poly1d objects as input. - - Notes - ----- - The discrete convolution operation is defined as - - .. 
math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m] - - It can be shown that a convolution :math:`x(t) * y(t)` in time/space - is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier - domain, after appropriate padding (padding is necessary to prevent - circular convolution). Since multiplication is more efficient (faster) - than convolution, the function `scipy.signal.fftconvolve` exploits the - FFT to calculate the convolution of large data-sets. - - References - ---------- - .. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution. - - Examples - -------- - Note how the convolution operator flips the second array - before "sliding" the two across one another: - - >>> np.convolve([1, 2, 3], [0, 1, 0.5]) - array([ 0. , 1. , 2.5, 4. , 1.5]) - - Only return the middle values of the convolution. - Contains boundary effects, where zeros are taken - into account: - - >>> np.convolve([1,2,3],[0,1,0.5], 'same') - array([ 1. , 2.5, 4. ]) - - The two arrays are of the same length, so there - is only one position where they completely overlap: - - >>> np.convolve([1,2,3],[0,1,0.5], 'valid') - array([ 2.5]) - - """ - a, v = array(a, ndmin=1), array(v, ndmin=1) - if (len(v) > len(a)): - a, v = v, a - if len(a) == 0 : - raise ValueError('a cannot be empty') - if len(v) == 0 : - raise ValueError('v cannot be empty') - mode = _mode_from_name(mode) - return multiarray.correlate(a, v[::-1], mode) - -def outer(a, b, out=None): - """ - Compute the outer product of two vectors. - - Given two vectors, ``a = [a0, a1, ..., aM]`` and - ``b = [b0, b1, ..., bN]``, - the outer product [1]_ is:: - - [[a0*b0 a0*b1 ... a0*bN ] - [a1*b0 . - [ ... . - [aM*b0 aM*bN ]] - - Parameters - ---------- - a : (M,) array_like - First input vector. Input is flattened if - not already 1-dimensional. - b : (N,) array_like - Second input vector. Input is flattened if - not already 1-dimensional. - out : (M, N) ndarray, optional - A location where the result is stored - - .. versionadded:: 1.9.0 - - Returns - ------- - out : (M, N) ndarray - ``out[i, j] = a[i] * b[j]`` - - See also - -------- - inner, einsum - - References - ---------- - .. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd - ed., Baltimore, MD, Johns Hopkins University Press, 1996, - pg. 8. 
- - Examples - -------- - Make a (*very* coarse) grid for computing a Mandelbrot set: - - >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) - >>> rl - array([[-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.], - [-2., -1., 0., 1., 2.]]) - >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) - >>> im - array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], - [ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], - [ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) - >>> grid = rl + im - >>> grid - array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], - [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], - [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], - [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], - [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) - - An example using a "vector" of letters: - - >>> x = np.array(['a', 'b', 'c'], dtype=object) - >>> np.outer(x, [1, 2, 3]) - array([[a, aa, aaa], - [b, bb, bbb], - [c, cc, ccc]], dtype=object) - - """ - a = asarray(a) - b = asarray(b) - return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis,:], out) - -# try to import blas optimized dot if available -envbak = os.environ.copy() -try: - # importing this changes the dot function for basic 4 types - # to blas-optimized versions. - - # disables openblas affinity setting of the main thread that limits - # python threads or processes to one core - if 'OPENBLAS_MAIN_FREE' not in os.environ: - os.environ['OPENBLAS_MAIN_FREE'] = '1' - if 'GOTOBLAS_MAIN_FREE' not in os.environ: - os.environ['GOTOBLAS_MAIN_FREE'] = '1' - from ._dotblas import dot, vdot, inner, alterdot, restoredot -except ImportError: - # docstrings are in add_newdocs.py - inner = multiarray.inner - dot = multiarray.dot - def vdot(a, b): - return dot(asarray(a).ravel().conj(), asarray(b).ravel()) - def alterdot(): - pass - def restoredot(): - pass -finally: - os.environ.clear() - os.environ.update(envbak) - del envbak - -def tensordot(a, b, axes=2): - """ - Compute tensor dot product along specified axes for arrays >= 1-D. - - Given two tensors (arrays of dimension greater than or equal to one), - `a` and `b`, and an array_like object containing two array_like - objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s - elements (components) over the axes specified by ``a_axes`` and - ``b_axes``. The third argument can be a single non-negative - integer_like scalar, ``N``; if it is such, then the last ``N`` - dimensions of `a` and the first ``N`` dimensions of `b` are summed - over. - - Parameters - ---------- - a, b : array_like, len(shape) >= 1 - Tensors to "dot". - axes : variable type - * integer_like scalar - Number of axes to sum over (applies to both arrays); or - * (2,) array_like, both elements array_like of the same length - List of axes to be summed over, first sequence applying to `a`, - second to `b`. - - See Also - -------- - dot, einsum - - Notes - ----- - When there is more than one axis to sum over - and they are not the last - (first) axes of `a` (`b`) - the argument `axes` should consist of - two sequences of the same length, with the first axis to sum over given - first in both sequences, the second axis second, and so forth. 
- - Examples - -------- - A "traditional" example: - - >>> a = np.arange(60.).reshape(3,4,5) - >>> b = np.arange(24.).reshape(4,3,2) - >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) - >>> c.shape - (5, 2) - >>> c - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) - >>> # A slower but equivalent way of computing the same... - >>> d = np.zeros((5,2)) - >>> for i in range(5): - ... for j in range(2): - ... for k in range(3): - ... for n in range(4): - ... d[i,j] += a[k,n,i] * b[n,k,j] - >>> c == d - array([[ True, True], - [ True, True], - [ True, True], - [ True, True], - [ True, True]], dtype=bool) - - An extended example taking advantage of the overloading of + and \\*: - - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) - >>> a; A - array([[[1, 2], - [3, 4]], - [[5, 6], - [7, 8]]]) - array([[a, b], - [c, d]], dtype=object) - - >>> np.tensordot(a, A) # third argument default is 2 - array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object) - - >>> np.tensordot(a, A, 1) - array([[[acc, bdd], - [aaacccc, bbbdddd]], - [[aaaaacccccc, bbbbbdddddd], - [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object) - - >>> np.tensordot(a, A, 0) # "Left for reader" (result too long to incl.) - array([[[[[a, b], - [c, d]], - ... - - >>> np.tensordot(a, A, (0, 1)) - array([[[abbbbb, cddddd], - [aabbbbbb, ccdddddd]], - [[aaabbbbbbb, cccddddddd], - [aaaabbbbbbbb, ccccdddddddd]]], dtype=object) - - >>> np.tensordot(a, A, (2, 1)) - array([[[abb, cdd], - [aaabbbb, cccdddd]], - [[aaaaabbbbbb, cccccdddddd], - [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object) - - >>> np.tensordot(a, A, ((0, 1), (0, 1))) - array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object) - - >>> np.tensordot(a, A, ((2, 1), (1, 0))) - array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object) - - """ - try: - iter(axes) - except: - axes_a = list(range(-axes, 0)) - axes_b = list(range(0, axes)) - else: - axes_a, axes_b = axes - try: - na = len(axes_a) - axes_a = list(axes_a) - except TypeError: - axes_a = [axes_a] - na = 1 - try: - nb = len(axes_b) - axes_b = list(axes_b) - except TypeError: - axes_b = [axes_b] - nb = 1 - - a, b = asarray(a), asarray(b) - as_ = a.shape - nda = len(a.shape) - bs = b.shape - ndb = len(b.shape) - equal = True - if (na != nb): equal = False - else: - for k in range(na): - if as_[axes_a[k]] != bs[axes_b[k]]: - equal = False - break - if axes_a[k] < 0: - axes_a[k] += nda - if axes_b[k] < 0: - axes_b[k] += ndb - if not equal: - raise ValueError("shape-mismatch for sum") - - # Move the axes to sum over to the end of "a" - # and to the front of "b" - notin = [k for k in range(nda) if k not in axes_a] - newaxes_a = notin + axes_a - N2 = 1 - for axis in axes_a: - N2 *= as_[axis] - newshape_a = (-1, N2) - olda = [as_[axis] for axis in notin] - - notin = [k for k in range(ndb) if k not in axes_b] - newaxes_b = axes_b + notin - N2 = 1 - for axis in axes_b: - N2 *= bs[axis] - newshape_b = (N2, -1) - oldb = [bs[axis] for axis in notin] - - at = a.transpose(newaxes_a).reshape(newshape_a) - bt = b.transpose(newaxes_b).reshape(newshape_b) - res = dot(at, bt) - return res.reshape(olda + oldb) - -def roll(a, shift, axis=None): - """ - Roll array elements along a given axis. - - Elements that roll beyond the last position are re-introduced at - the first. - - Parameters - ---------- - a : array_like - Input array. - shift : int - The number of places by which elements are shifted. 
- axis : int, optional - The axis along which elements are shifted. By default, the array - is flattened before shifting, after which the original - shape is restored. - - Returns - ------- - res : ndarray - Output array, with the same shape as `a`. - - See Also - -------- - rollaxis : Roll the specified axis backwards, until it lies in a - given position. - - Examples - -------- - >>> x = np.arange(10) - >>> np.roll(x, 2) - array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) - - >>> x2 = np.reshape(x, (2,5)) - >>> x2 - array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]]) - >>> np.roll(x2, 1) - array([[9, 0, 1, 2, 3], - [4, 5, 6, 7, 8]]) - >>> np.roll(x2, 1, axis=0) - array([[5, 6, 7, 8, 9], - [0, 1, 2, 3, 4]]) - >>> np.roll(x2, 1, axis=1) - array([[4, 0, 1, 2, 3], - [9, 5, 6, 7, 8]]) - - """ - a = asanyarray(a) - if axis is None: - n = a.size - reshape = True - else: - try: - n = a.shape[axis] - except IndexError: - raise ValueError('axis must be >= 0 and < %d' % a.ndim) - reshape = False - if n == 0: - return a - shift %= n - indexes = concatenate((arange(n - shift, n), arange(n - shift))) - res = a.take(indexes, axis) - if reshape: - res = res.reshape(a.shape) - return res - -def rollaxis(a, axis, start=0): - """ - Roll the specified axis backwards, until it lies in a given position. - - Parameters - ---------- - a : ndarray - Input array. - axis : int - The axis to roll backwards. The positions of the other axes do not - change relative to one another. - start : int, optional - The axis is rolled until it lies before this position. The default, - 0, results in a "complete" roll. - - Returns - ------- - res : ndarray - Output array. - - See Also - -------- - roll : Roll the elements of an array by a number of positions along a - given axis. - - Examples - -------- - >>> a = np.ones((3,4,5,6)) - >>> np.rollaxis(a, 3, 1).shape - (3, 6, 4, 5) - >>> np.rollaxis(a, 2).shape - (5, 3, 4, 6) - >>> np.rollaxis(a, 1, 4).shape - (3, 5, 6, 4) - - """ - n = a.ndim - if axis < 0: - axis += n - if start < 0: - start += n - msg = 'rollaxis: %s (%d) must be >=0 and < %d' - if not (0 <= axis < n): - raise ValueError(msg % ('axis', axis, n)) - if not (0 <= start < n+1): - raise ValueError(msg % ('start', start, n+1)) - if (axis < start): # it's been removed - start -= 1 - if axis==start: - return a - axes = list(range(0, n)) - axes.remove(axis) - axes.insert(start, axis) - return a.transpose(axes) - -# fix hack in scipy which imports this function -def _move_axis_to_0(a, axis): - return rollaxis(a, axis, 0) - -def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): - """ - Return the cross product of two (arrays of) vectors. - - The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular - to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors - are defined by the last axis of `a` and `b` by default, and these axes - can have dimensions 2 or 3. Where the dimension of either `a` or `b` is - 2, the third component of the input vector is assumed to be zero and the - cross product calculated accordingly. In cases where both input vectors - have dimension 2, the z-component of the cross product is returned. - - Parameters - ---------- - a : array_like - Components of the first vector(s). - b : array_like - Components of the second vector(s). - axisa : int, optional - Axis of `a` that defines the vector(s). By default, the last axis. - axisb : int, optional - Axis of `b` that defines the vector(s). By default, the last axis. - axisc : int, optional - Axis of `c` containing the cross product vector(s). 
By default, the - last axis. - axis : int, optional - If defined, the axis of `a`, `b` and `c` that defines the vector(s) - and cross product(s). Overrides `axisa`, `axisb` and `axisc`. - - Returns - ------- - c : ndarray - Vector cross product(s). - - Raises - ------ - ValueError - When the dimension of the vector(s) in `a` and/or `b` does not - equal 2 or 3. - - See Also - -------- - inner : Inner product - outer : Outer product. - ix_ : Construct index arrays. - - Notes - ----- - .. versionadded:: 1.9.0 - Supports full broadcasting of the inputs. - - Examples - -------- - Vector cross-product. - - >>> x = [1, 2, 3] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([-3, 6, -3]) - - One vector with dimension 2. - - >>> x = [1, 2] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Equivalently: - - >>> x = [1, 2, 0] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Both vectors with dimension 2. - - >>> x = [1,2] - >>> y = [4,5] - >>> np.cross(x, y) - -3 - - Multiple vector cross-products. Note that the direction of the cross - product vector is defined by the `right-hand rule`. - - >>> x = np.array([[1,2,3], [4,5,6]]) - >>> y = np.array([[4,5,6], [1,2,3]]) - >>> np.cross(x, y) - array([[-3, 6, -3], - [ 3, -6, 3]]) - - The orientation of `c` can be changed using the `axisc` keyword. - - >>> np.cross(x, y, axisc=0) - array([[-3, 3], - [ 6, -6], - [-3, 3]]) - - Change the vector definition of `x` and `y` using `axisa` and `axisb`. - - >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) - >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) - >>> np.cross(x, y) - array([[ -6, 12, -6], - [ 0, 0, 0], - [ 6, -12, 6]]) - >>> np.cross(x, y, axisa=0, axisb=0) - array([[-24, 48, -24], - [-30, 60, -30], - [-36, 72, -36]]) - - """ - if axis is not None: - axisa, axisb, axisc = (axis,) * 3 - a = asarray(a) - b = asarray(b) - # Move working axis to the end of the shape - a = rollaxis(a, axisa, a.ndim) - b = rollaxis(b, axisb, b.ndim) - msg = ("incompatible dimensions for cross product\n" - "(dimension must be 2 or 3)") - if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): - raise ValueError(msg) - - # Create the output array - shape = broadcast(a[..., 0], b[..., 0]).shape - if a.shape[-1] == 3 or b.shape[-1] == 3: - shape += (3,) - dtype = promote_types(a.dtype, b.dtype) - cp = empty(shape, dtype) - - # create local aliases for readability - a0 = a[..., 0] - a1 = a[..., 1] - if a.shape[-1] == 3: - a2 = a[..., 2] - b0 = b[..., 0] - b1 = b[..., 1] - if b.shape[-1] == 3: - b2 = b[..., 2] - if cp.ndim != 0 and cp.shape[-1] == 3: - cp0 = cp[..., 0] - cp1 = cp[..., 1] - cp2 = cp[..., 2] - - if a.shape[-1] == 2: - if b.shape[-1] == 2: - # a0 * b1 - a1 * b0 - multiply(a0, b1, out=cp) - cp -= a1 * b0 - if cp.ndim == 0: - return cp - else: - # This works because we are moving the last axis - return rollaxis(cp, -1, axisc) - else: - # cp0 = a1 * b2 - 0 (a2 = 0) - # cp1 = 0 - a0 * b2 (a2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - multiply(a0, b2, out=cp1) - negative(cp1, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - elif a.shape[-1] == 3: - if b.shape[-1] == 3: - # cp0 = a1 * b2 - a2 * b1 - # cp1 = a2 * b0 - a0 * b2 - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - tmp = array(a2 * b1) - cp0 -= tmp - multiply(a2, b0, out=cp1) - multiply(a0, b2, out=tmp) - cp1 -= tmp - multiply(a0, b1, out=cp2) - multiply(a1, b0, out=tmp) - cp2 -= tmp - else: - # cp0 = 0 - a2 * b1 (b2 = 0) - # cp1 = a2 * b0 - 0 (b2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a2, b1, 
out=cp0) - negative(cp0, out=cp0) - multiply(a2, b0, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - - if cp.ndim == 1: - return cp - else: - # This works because we are moving the last axis - return rollaxis(cp, -1, axisc) - -#Use numarray's printing function -from .arrayprint import array2string, get_printoptions, set_printoptions - -_typelessdata = [int_, float_, complex_] -if issubclass(intc, int): - _typelessdata.append(intc) - -if issubclass(longlong, int): - _typelessdata.append(longlong) - -def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): - """ - Return the string representation of an array. - - Parameters - ---------- - arr : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters split the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing precision - (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero, default is False. Very small - is defined by `precision`, if the precision is 8 then - numbers smaller than 5e-9 are represented as zero. - - Returns - ------- - string : str - The string representation of an array. - - See Also - -------- - array_str, array2string, set_printoptions - - Examples - -------- - >>> np.array_repr(np.array([1,2])) - 'array([1, 2])' - >>> np.array_repr(np.ma.array([0.])) - 'MaskedArray([ 0.])' - >>> np.array_repr(np.array([], np.int32)) - 'array([], dtype=int32)' - - >>> x = np.array([1e-6, 4e-7, 2, 3]) - >>> np.array_repr(x, precision=6, suppress_small=True) - 'array([ 0.000001, 0. , 2. , 3. ])' - - """ - if arr.size > 0 or arr.shape==(0,): - lst = array2string(arr, max_line_width, precision, suppress_small, - ', ', "array(") - else: # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(arr.shape),) - - if arr.__class__ is not ndarray: - cName= arr.__class__.__name__ - else: - cName = "array" - - skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0 - - if skipdtype: - return "%s(%s)" % (cName, lst) - else: - typename = arr.dtype.name - # Quote typename in the output if it is "complex". - if typename and not (typename[0].isalpha() and typename.isalnum()): - typename = "'%s'" % typename - - lf = '' - if issubclass(arr.dtype.type, flexible): - if arr.dtype.names: - typename = "%s" % str(arr.dtype) - else: - typename = "'%s'" % str(arr.dtype) - lf = '\n'+' '*len("array(") - return cName + "(%s, %sdtype=%s)" % (lst, lf, typename) - -def array_str(a, max_line_width=None, precision=None, suppress_small=None): - """ - Return a string representation of the data in an array. - - The data in the array is returned as a single string. This function is - similar to `array_repr`, the difference being that `array_repr` also - returns information on the kind of array and its data type. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. The - default is, indirectly, 75. - precision : int, optional - Floating point precision. Default is the current printing precision - (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. 
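The ``_typelessdata`` check above is what decides whether ``array_repr`` spells out the dtype; a small illustration, plain NumPy and not part of the patch:

import numpy as np

# Default int/float/complex dtypes are in _typelessdata, so the dtype
# is omitted from repr(); any other dtype is printed explicitly.
assert repr(np.array([1, 2])) == 'array([1, 2])'
assert repr(np.array([1, 2], dtype=np.int8)) == 'array([1, 2], dtype=int8)'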
- - See Also - -------- - array2string, array_repr, set_printoptions - - Examples - -------- - >>> np.array_str(np.arange(3)) - '[0 1 2]' - - """ - return array2string(a, max_line_width, precision, suppress_small, ' ', "", str) - -def set_string_function(f, repr=True): - """ - Set a Python function to be used when pretty printing arrays. - - Parameters - ---------- - f : function or None - Function to be used to pretty print arrays. The function should expect - a single array argument and return a string of the representation of - the array. If None, the function is reset to the default NumPy function - to print arrays. - repr : bool, optional - If True (default), the function for pretty printing (``__repr__``) - is set, if False the function that returns the default string - representation (``__str__``) is set. - - See Also - -------- - set_printoptions, get_printoptions - - Examples - -------- - >>> def pprint(arr): - ... return 'HA! - What are you going to do now?' - ... - >>> np.set_string_function(pprint) - >>> a = np.arange(10) - >>> a - HA! - What are you going to do now? - >>> print a - [0 1 2 3 4 5 6 7 8 9] - - We can reset the function to the default: - - >>> np.set_string_function(None) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - `repr` affects either pretty printing or normal string representation. - Note that ``__repr__`` is still affected by setting ``__str__`` - because the width of each array element in the returned string becomes - equal to the length of the result of ``__str__()``. - - >>> x = np.arange(4) - >>> np.set_string_function(lambda x:'random', repr=False) - >>> x.__str__() - 'random' - >>> x.__repr__() - 'array([ 0, 1, 2, 3])' - - """ - if f is None: - if repr: - return multiarray.set_string_function(array_repr, 1) - else: - return multiarray.set_string_function(array_str, 0) - else: - return multiarray.set_string_function(f, repr) - -set_string_function(array_str, 0) -set_string_function(array_repr, 1) - -little_endian = (sys.byteorder == 'little') - - -def indices(dimensions, dtype=int): - """ - Return an array representing the indices of a grid. - - Compute an array where the subarrays contain index values 0,1,... - varying only along the corresponding axis. - - Parameters - ---------- - dimensions : sequence of ints - The shape of the grid. - dtype : dtype, optional - Data type of the result. - - Returns - ------- - grid : ndarray - The array of grid indices, - ``grid.shape = (len(dimensions),) + tuple(dimensions)``. - - See Also - -------- - mgrid, meshgrid - - Notes - ----- - The output shape is obtained by prepending the number of dimensions - in front of the tuple of dimensions, i.e. if `dimensions` is a tuple - ``(r0, ..., rN-1)`` of length ``N``, the output shape is - ``(N,r0,...,rN-1)``. - - The subarrays ``grid[k]`` contains the N-D array of indices along the - ``k-th`` axis. Explicitly:: - - grid[k,i0,i1,...,iN-1] = ik - - Examples - -------- - >>> grid = np.indices((2, 3)) - >>> grid.shape - (2, 2, 3) - >>> grid[0] # row indices - array([[0, 0, 0], - [1, 1, 1]]) - >>> grid[1] # column indices - array([[0, 1, 2], - [0, 1, 2]]) - - The indices can be used as an index into an array. - - >>> x = np.arange(20).reshape(5, 4) - >>> row, col = np.indices((2, 3)) - >>> x[row, col] - array([[0, 1, 2], - [4, 5, 6]]) - - Note that it would be more straightforward in the above example to - extract the required elements directly with ``x[:2, :3]``. 
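A short check, illustrative only, of the identity ``grid[k, i0, ..., iN-1] = ik`` stated in the Notes above:

import numpy as np

grid = np.indices((2, 3))
assert grid.shape == (2, 2, 3)         # (N,) + dimensions, with N == 2
for i0 in range(2):
    for i1 in range(3):
        assert grid[0, i0, i1] == i0   # grid[0] varies along axis 0
        assert grid[1, i0, i1] == i1   # grid[1] varies along axis 1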
- - """ - dimensions = tuple(dimensions) - N = len(dimensions) - if N == 0: - return array([], dtype=dtype) - res = empty((N,)+dimensions, dtype=dtype) - for i, dim in enumerate(dimensions): - tmp = arange(dim, dtype=dtype) - tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1) - newdim = dimensions[:i] + (1,)+ dimensions[i+1:] - val = zeros(newdim, dtype) - add(tmp, val, res[i]) - return res - -def fromfunction(function, shape, **kwargs): - """ - Construct an array by executing a function over each coordinate. - - The resulting array therefore has a value ``fn(x, y, z)`` at - coordinate ``(x, y, z)``. - - Parameters - ---------- - function : callable - The function is called with N parameters, where N is the rank of - `shape`. Each parameter represents the coordinates of the array - varying along a specific axis. For example, if `shape` - were ``(2, 2)``, then the parameters in turn be (0, 0), (0, 1), - (1, 0), (1, 1). - shape : (N,) tuple of ints - Shape of the output array, which also determines the shape of - the coordinate arrays passed to `function`. - dtype : data-type, optional - Data-type of the coordinate arrays passed to `function`. - By default, `dtype` is float. - - Returns - ------- - fromfunction : any - The result of the call to `function` is passed back directly. - Therefore the shape of `fromfunction` is completely determined by - `function`. If `function` returns a scalar value, the shape of - `fromfunction` would match the `shape` parameter. - - See Also - -------- - indices, meshgrid - - Notes - ----- - Keywords other than `dtype` are passed to `function`. - - Examples - -------- - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) - array([[ True, False, False], - [False, True, False], - [False, False, True]], dtype=bool) - - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) - array([[0, 1, 2], - [1, 2, 3], - [2, 3, 4]]) - - """ - dtype = kwargs.pop('dtype', float) - args = indices(shape, dtype=dtype) - return function(*args,**kwargs) - -def isscalar(num): - """ - Returns True if the type of `num` is a scalar type. - - Parameters - ---------- - num : any - Input argument, can be of any type and shape. - - Returns - ------- - val : bool - True if `num` is a scalar type, False if it is not. - - Examples - -------- - >>> np.isscalar(3.1) - True - >>> np.isscalar([3.1]) - False - >>> np.isscalar(False) - True - - """ - if isinstance(num, generic): - return True - else: - return type(num) in ScalarType - -_lkup = { - '0':'0000', - '1':'0001', - '2':'0010', - '3':'0011', - '4':'0100', - '5':'0101', - '6':'0110', - '7':'0111', - '8':'1000', - '9':'1001', - 'a':'1010', - 'b':'1011', - 'c':'1100', - 'd':'1101', - 'e':'1110', - 'f':'1111', - 'A':'1010', - 'B':'1011', - 'C':'1100', - 'D':'1101', - 'E':'1110', - 'F':'1111', - 'L':''} - -def binary_repr(num, width=None): - """ - Return the binary representation of the input number as a string. - - For negative numbers, if width is not given, a minus sign is added to the - front. If width is given, the two's complement of the number is - returned, with respect to that width. - - In a two's-complement system negative numbers are represented by the two's - complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement - system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. - - Parameters - ---------- - num : int - Only an integer decimal number can be used. 
- width : int, optional - The length of the returned string if `num` is positive, the length of - the two's complement if `num` is negative. - - Returns - ------- - bin : str - Binary representation of `num` or two's complement of `num`. - - See Also - -------- - base_repr: Return a string representation of a number in the given base - system. - - Notes - ----- - `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x - faster. - - References - ---------- - .. [1] Wikipedia, "Two's complement", - http://en.wikipedia.org/wiki/Two's_complement - - Examples - -------- - >>> np.binary_repr(3) - '11' - >>> np.binary_repr(-3) - '-11' - >>> np.binary_repr(3, width=4) - '0011' - - The two's complement is returned when the input number is negative and - width is specified: - - >>> np.binary_repr(-3, width=4) - '1101' - - """ - # ' <-- unbreak Emacs fontification - sign = '' - if num < 0: - if width is None: - sign = '-' - num = -num - else: - # replace num with its 2-complement - num = 2**width + num - elif num == 0: - return '0'*(width or 1) - ostr = hex(num) - bin = ''.join([_lkup[ch] for ch in ostr[2:]]) - bin = bin.lstrip('0') - if width is not None: - bin = bin.zfill(width) - return sign + bin - -def base_repr(number, base=2, padding=0): - """ - Return a string representation of a number in the given base system. - - Parameters - ---------- - number : int - The value to convert. Only positive values are handled. - base : int, optional - Convert `number` to the `base` number system. The valid range is 2-36, - the default value is 2. - padding : int, optional - Number of zeros padded on the left. Default is 0 (no padding). - - Returns - ------- - out : str - String representation of `number` in `base` system. - - See Also - -------- - binary_repr : Faster version of `base_repr` for base 2. - - Examples - -------- - >>> np.base_repr(5) - '101' - >>> np.base_repr(6, 5) - '11' - >>> np.base_repr(7, base=5, padding=3) - '00012' - - >>> np.base_repr(10, base=16) - 'A' - >>> np.base_repr(32, base=16) - '20' - - """ - digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' - if base > len(digits): - raise ValueError("Bases greater than 36 not handled in base_repr.") - - num = abs(number) - res = [] - while num: - res.append(digits[num % base]) - num //= base - if padding: - res.append('0' * padding) - if number < 0: - res.append('-') - return ''.join(reversed(res or '0')) - - -def load(file): - """ - Wrapper around cPickle.load which accepts either a file-like object or - a filename. - - Note that the NumPy binary format is not based on pickle/cPickle anymore. - For details on the preferred way of loading and saving files, see `load` - and `save`. - - See Also - -------- - load, save - - """ - if isinstance(file, type("")): - file = open(file, "rb") - return pickle.load(file) - -# These are all essentially abbreviations -# These might wind up in a special abbreviations module - -def _maketup(descr, val): - dt = dtype(descr) - # Place val in all scalar tuples: - fields = dt.fields - if fields is None: - return val - else: - res = [_maketup(fields[name][0], val) for name in dt.names] - return tuple(res) - -def identity(n, dtype=None): - """ - Return the identity array. - - The identity array is a square array with ones on - the main diagonal. - - Parameters - ---------- - n : int - Number of rows (and columns) in `n` x `n` output. - dtype : data-type, optional - Data-type of the output. Defaults to ``float``. 
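To make the two's-complement branch above concrete, a small sketch (plain NumPy, not part of the file):

import numpy as np

# Non-negative input: binary_repr(n, w) is base_repr in base 2, zero-padded.
assert np.binary_repr(10, width=8) == '00001010'
assert np.base_repr(10, base=2).zfill(8) == '00001010'

# Negative input with an explicit width: the implementation substitutes
# 2**width + num, i.e. the two's complement at that width.
assert np.binary_repr(-3, width=4) == '1101'   # 2**4 - 3 == 13 == 0b1101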
- - Returns - ------- - out : ndarray - `n` x `n` array with its main diagonal set to one, - and all other elements 0. - - Examples - -------- - >>> np.identity(3) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - - """ - from numpy import eye - return eye(n, dtype=dtype) - -def allclose(a, b, rtol=1.e-5, atol=1.e-8): - """ - Returns True if two arrays are element-wise equal within a tolerance. - - The tolerance values are positive, typically very small numbers. The - relative difference (`rtol` * abs(`b`)) and the absolute difference - `atol` are added together to compare against the absolute difference - between `a` and `b`. - - If either array contains one or more NaNs, False is returned. - Infs are treated as equal if they are in the same place and of the same - sign in both arrays. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - rtol : float - The relative tolerance parameter (see Notes). - atol : float - The absolute tolerance parameter (see Notes). - - Returns - ------- - allclose : bool - Returns True if the two arrays are equal within the given - tolerance; False otherwise. - - See Also - -------- - isclose, all, any - - Notes - ----- - If the following equation is element-wise True, then allclose returns - True. - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - The above equation is not symmetric in `a` and `b`, so that - `allclose(a, b)` might be different from `allclose(b, a)` in - some rare cases. - - Examples - -------- - >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) - False - >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) - True - >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) - False - >>> np.allclose([1.0, np.nan], [1.0, np.nan]) - False - - """ - x = array(a, copy=False, ndmin=1) - y = array(b, copy=False, ndmin=1) - - # make sure y is an inexact type to avoid abs(MIN_INT); will cause - # casting of x later. - dtype = multiarray.result_type(y, 1.) - y = array(y, dtype=dtype, copy=False) - - xinf = isinf(x) - yinf = isinf(y) - if any(xinf) or any(yinf): - # Check that x and y have inf's only in the same positions - if not all(xinf == yinf): - return False - # Check that sign of inf's in x and y is the same - if not all(x[xinf] == y[xinf]): - return False - - x = x[~xinf] - y = y[~xinf] - - # ignore invalid fpe's - with errstate(invalid='ignore'): - r = all(less_equal(abs(x - y), atol + rtol * abs(y))) - - return r - -def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): - """ - Returns a boolean array where two arrays are element-wise equal within a - tolerance. - - The tolerance values are positive, typically very small numbers. The - relative difference (`rtol` * abs(`b`)) and the absolute difference - `atol` are added together to compare against the absolute difference - between `a` and `b`. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - rtol : float - The relative tolerance parameter (see Notes). - atol : float - The absolute tolerance parameter (see Notes). - equal_nan : bool - Whether to compare NaN's as equal. If True, NaN's in `a` will be - considered equal to NaN's in `b` in the output array. - - Returns - ------- - y : array_like - Returns a boolean array of where `a` and `b` are equal within the - given tolerance. If both `a` and `b` are scalars, returns a single - boolean value. - - See Also - -------- - allclose - - Notes - ----- - .. 
versionadded:: 1.7.0 - - For finite values, isclose uses the following equation to test whether - two floating point values are equivalent. - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - The above equation is not symmetric in `a` and `b`, so that - `isclose(a, b)` might be different from `isclose(b, a)` in - some rare cases. - - Examples - -------- - >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) - array([True, False]) - >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) - array([True, True]) - >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) - array([False, True]) - >>> np.isclose([1.0, np.nan], [1.0, np.nan]) - array([True, False]) - >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) - array([True, True]) - """ - def within_tol(x, y, atol, rtol): - with errstate(invalid='ignore'): - result = less_equal(abs(x-y), atol + rtol * abs(y)) - if isscalar(a) and isscalar(b): - result = bool(result) - return result - - x = array(a, copy=False, subok=True, ndmin=1) - y = array(b, copy=False, subok=True, ndmin=1) - xfin = isfinite(x) - yfin = isfinite(y) - if all(xfin) and all(yfin): - return within_tol(x, y, atol, rtol) - else: - finite = xfin & yfin - cond = zeros_like(finite, subok=True) - # Because we're using boolean indexing, x & y must be the same shape. - # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in - # lib.stride_tricks, though, so we can't import it here. - x = x * ones_like(cond) - y = y * ones_like(cond) - # Avoid subtraction with infinite/nan values... - cond[finite] = within_tol(x[finite], y[finite], atol, rtol) - # Check for equality of infinite values... - cond[~finite] = (x[~finite] == y[~finite]) - if equal_nan: - # Make NaN == NaN - both_nan = isnan(x) & isnan(y) - cond[both_nan] = both_nan[both_nan] - return cond - -def array_equal(a1, a2): - """ - True if two arrays have the same shape and elements, False otherwise. - - Parameters - ---------- - a1, a2 : array_like - Input arrays. - - Returns - ------- - b : bool - Returns True if the arrays are equal. - - See Also - -------- - allclose: Returns True if two arrays are element-wise equal within a - tolerance. - array_equiv: Returns True if input arrays are shape consistent and all - elements equal. - - Examples - -------- - >>> np.array_equal([1, 2], [1, 2]) - True - >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) - True - >>> np.array_equal([1, 2], [1, 2, 3]) - False - >>> np.array_equal([1, 2], [1, 4]) - False - - """ - try: - a1, a2 = asarray(a1), asarray(a2) - except: - return False - if a1.shape != a2.shape: - return False - return bool(asarray(a1 == a2).all()) - -def array_equiv(a1, a2): - """ - Returns True if input arrays are shape consistent and all elements equal. - - Shape consistent means they are either the same shape, or one input array - can be broadcasted to create the same shape as the other one. - - Parameters - ---------- - a1, a2 : array_like - Input arrays. - - Returns - ------- - out : bool - True if equivalent, False otherwise. 
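A brief illustration, not part of the file, of the asymmetry noted above: the test compares against ``atol + rtol * absolute(b)``, so swapping the arguments can change the answer.

import numpy as np

# |1.0 - 1.1| is about 0.1; the tolerance scales with the *second* argument.
print(np.isclose(1.0, 1.1, rtol=0.095))   # True : 0.1 <= 0.095 * 1.1
print(np.isclose(1.1, 1.0, rtol=0.095))   # False: 0.1 >  0.095 * 1.0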
- - Examples - -------- - >>> np.array_equiv([1, 2], [1, 2]) - True - >>> np.array_equiv([1, 2], [1, 3]) - False - - Showing the shape equivalence: - - >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) - True - >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) - False - - >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) - False - - """ - try: - a1, a2 = asarray(a1), asarray(a2) - except: - return False - try: - multiarray.broadcast(a1, a2) - except: - return False - - return bool(asarray(a1 == a2).all()) - - -_errdict = {"ignore":ERR_IGNORE, - "warn":ERR_WARN, - "raise":ERR_RAISE, - "call":ERR_CALL, - "print":ERR_PRINT, - "log":ERR_LOG} - -_errdict_rev = {} -for key in _errdict.keys(): - _errdict_rev[_errdict[key]] = key -del key - -def seterr(all=None, divide=None, over=None, under=None, invalid=None): - """ - Set how floating-point errors are handled. - - Note that operations on integer scalar types (such as `int16`) are - handled like floating point, and are affected by these settings. - - Parameters - ---------- - all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Set treatment for all types of floating-point errors at once: - - - ignore: Take no action when the exception occurs. - - warn: Print a `RuntimeWarning` (via the Python `warnings` module). - - raise: Raise a `FloatingPointError`. - - call: Call a function specified using the `seterrcall` function. - - print: Print a warning directly to ``stdout``. - - log: Record error in a Log object specified by `seterrcall`. - - The default is not to change the current behavior. - divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for division by zero. - over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for floating-point overflow. - under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for floating-point underflow. - invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional - Treatment for invalid floating-point operation. - - Returns - ------- - old_settings : dict - Dictionary containing the old settings. - - See also - -------- - seterrcall : Set a callback function for the 'call' mode. - geterr, geterrcall, errstate - - Notes - ----- - The floating-point exceptions are defined in the IEEE 754 standard [1]: - - - Division by zero: infinite result obtained from finite numbers. - - Overflow: result too large to be expressed. - - Underflow: result so close to zero that some precision - was lost. - - Invalid operation: result is not an expressible number, typically - indicates that a NaN was produced. - - .. 
[1] http://en.wikipedia.org/wiki/IEEE_754 - - Examples - -------- - >>> old_settings = np.seterr(all='ignore') #seterr to known value - >>> np.seterr(over='raise') - {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', - 'under': 'ignore'} - >>> np.seterr(**old_settings) # reset to default - {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'} - - >>> np.int16(32000) * np.int16(3) - 30464 - >>> old_settings = np.seterr(all='warn', over='raise') - >>> np.int16(32000) * np.int16(3) - Traceback (most recent call last): - File "", line 1, in - FloatingPointError: overflow encountered in short_scalars - - >>> old_settings = np.seterr(all='print') - >>> np.geterr() - {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'} - >>> np.int16(32000) * np.int16(3) - Warning: overflow encountered in short_scalars - 30464 - - """ - - pyvals = umath.geterrobj() - old = geterr() - - if divide is None: divide = all or old['divide'] - if over is None: over = all or old['over'] - if under is None: under = all or old['under'] - if invalid is None: invalid = all or old['invalid'] - - maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + - (_errdict[over] << SHIFT_OVERFLOW ) + - (_errdict[under] << SHIFT_UNDERFLOW) + - (_errdict[invalid] << SHIFT_INVALID)) - - pyvals[1] = maskvalue - umath.seterrobj(pyvals) - return old - - -def geterr(): - """ - Get the current way of handling floating-point errors. - - Returns - ------- - res : dict - A dictionary with keys "divide", "over", "under", and "invalid", - whose values are from the strings "ignore", "print", "log", "warn", - "raise", and "call". The keys represent possible floating-point - exceptions, and the values define how these exceptions are handled. - - See Also - -------- - geterrcall, seterr, seterrcall - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> np.geterr() - {'over': 'warn', 'divide': 'warn', 'invalid': 'warn', - 'under': 'ignore'} - >>> np.arange(3.) / np.arange(3.) - array([ NaN, 1., 1.]) - - >>> oldsettings = np.seterr(all='warn', over='raise') - >>> np.geterr() - {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'} - >>> np.arange(3.) / np.arange(3.) - __main__:1: RuntimeWarning: invalid value encountered in divide - array([ NaN, 1., 1.]) - - """ - maskvalue = umath.geterrobj()[1] - mask = 7 - res = {} - val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask - res['divide'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_OVERFLOW) & mask - res['over'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_UNDERFLOW) & mask - res['under'] = _errdict_rev[val] - val = (maskvalue >> SHIFT_INVALID) & mask - res['invalid'] = _errdict_rev[val] - return res - -def setbufsize(size): - """ - Set the size of the buffer used in ufuncs. - - Parameters - ---------- - size : int - Size of buffer. - - """ - if size > 10e6: - raise ValueError("Buffer size, %s, is too big." % size) - if size < 5: - raise ValueError("Buffer size, %s, is too small." %size) - if size % 16 != 0: - raise ValueError("Buffer size, %s, is not a multiple of 16." %size) - - pyvals = umath.geterrobj() - old = getbufsize() - pyvals[0] = size - umath.seterrobj(pyvals) - return old - -def getbufsize(): - """ - Return the size of the buffer used in ufuncs. - - Returns - ------- - getbufsize : int - Size of ufunc buffer in bytes. 
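A minimal save-and-restore sketch, illustrative only, showing that ``geterr`` decodes exactly what ``seterr`` packed into the mask:

import numpy as np

old = np.seterr(all='ignore')        # save caller's state, set a known one
try:
    np.seterr(over='raise')
    assert np.geterr()['over'] == 'raise'     # round-trips through the mask
    assert np.geterr()['divide'] == 'ignore'  # unspecified fields unchanged
finally:
    np.seterr(**old)                 # restore the caller's settings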
- - """ - return umath.geterrobj()[0] - -def seterrcall(func): - """ - Set the floating-point error callback function or log object. - - There are two ways to capture floating-point error messages. The first - is to set the error-handler to 'call', using `seterr`. Then, set - the function to call using this function. - - The second is to set the error-handler to 'log', using `seterr`. - Floating-point errors then trigger a call to the 'write' method of - the provided object. - - Parameters - ---------- - func : callable f(err, flag) or object with write method - Function to call upon floating-point errors ('call'-mode) or - object whose 'write' method is used to log such message ('log'-mode). - - The call function takes two arguments. The first is the - type of error (one of "divide", "over", "under", or "invalid"), - and the second is the status flag. The flag is a byte, whose - least-significant bits indicate the status:: - - [0 0 0 0 invalid over under invalid] - - In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. - - If an object is provided, its write method should take one argument, - a string. - - Returns - ------- - h : callable, log instance or None - The old error handler. - - See Also - -------- - seterr, geterr, geterrcall - - Examples - -------- - Callback upon error: - - >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) - ... - - >>> saved_handler = np.seterrcall(err_handler) - >>> save_err = np.seterr(all='call') - - >>> np.array([1, 2, 3]) / 0.0 - Floating point error (divide by zero), with flag 1 - array([ Inf, Inf, Inf]) - - >>> np.seterrcall(saved_handler) - - >>> np.seterr(**save_err) - {'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'} - - Log error message: - - >>> class Log(object): - ... def write(self, msg): - ... print "LOG: %s" % msg - ... - - >>> log = Log() - >>> saved_handler = np.seterrcall(log) - >>> save_err = np.seterr(all='log') - - >>> np.array([1, 2, 3]) / 0.0 - LOG: Warning: divide by zero encountered in divide - - array([ Inf, Inf, Inf]) - - >>> np.seterrcall(saved_handler) - <__main__.Log object at 0x...> - >>> np.seterr(**save_err) - {'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'} - - """ - if func is not None and not isinstance(func, collections.Callable): - if not hasattr(func, 'write') or not isinstance(func.write, collections.Callable): - raise ValueError("Only callable can be used as callback") - pyvals = umath.geterrobj() - old = geterrcall() - pyvals[2] = func - umath.seterrobj(pyvals) - return old - -def geterrcall(): - """ - Return the current callback function used on floating-point errors. - - When the error handling for a floating-point error (one of "divide", - "over", "under", or "invalid") is set to 'call' or 'log', the function - that is called or the log instance that is written to is returned by - `geterrcall`. This function or log instance has been set with - `seterrcall`. - - Returns - ------- - errobj : callable, log instance or None - The current error handler. If no handler was set through `seterrcall`, - ``None`` is returned. - - See Also - -------- - seterrcall, seterr, geterr - - Notes - ----- - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> np.geterrcall() # we did not yet set a handler, returns None - - >>> oldsettings = np.seterr(all='call') - >>> def err_handler(type, flag): - ... 
print "Floating point error (%s), with flag %s" % (type, flag) - >>> oldhandler = np.seterrcall(err_handler) - >>> np.array([1, 2, 3]) / 0.0 - Floating point error (divide by zero), with flag 1 - array([ Inf, Inf, Inf]) - - >>> cur_handler = np.geterrcall() - >>> cur_handler is err_handler - True - - """ - return umath.geterrobj()[2] - -class _unspecified(object): - pass -_Unspecified = _unspecified() - -class errstate(object): - """ - errstate(**kwargs) - - Context manager for floating-point error handling. - - Using an instance of `errstate` as a context manager allows statements in - that context to execute with a known error handling behavior. Upon entering - the context the error handling is set with `seterr` and `seterrcall`, and - upon exiting it is reset to what it was before. - - Parameters - ---------- - kwargs : {divide, over, under, invalid} - Keyword arguments. The valid keywords are the possible floating-point - exceptions. Each keyword should have a string value that defines the - treatment for the particular error. Possible values are - {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. - - See Also - -------- - seterr, geterr, seterrcall, geterrcall - - Notes - ----- - The ``with`` statement was introduced in Python 2.5, and can only be used - there by importing it: ``from __future__ import with_statement``. In - earlier Python versions the ``with`` statement is not available. - - For complete documentation of the types of floating-point exceptions and - treatment options, see `seterr`. - - Examples - -------- - >>> from __future__ import with_statement # use 'with' in Python 2.5 - >>> olderr = np.seterr(all='ignore') # Set error handling to known state. - - >>> np.arange(3) / 0. - array([ NaN, Inf, Inf]) - >>> with np.errstate(divide='warn'): - ... np.arange(3) / 0. - ... - __main__:2: RuntimeWarning: divide by zero encountered in divide - array([ NaN, Inf, Inf]) - - >>> np.sqrt(-1) - nan - >>> with np.errstate(invalid='raise'): - ... np.sqrt(-1) - Traceback (most recent call last): - File "", line 2, in - FloatingPointError: invalid value encountered in sqrt - - Outside the context the error handling behavior has not changed: - - >>> np.geterr() - {'over': 'warn', 'divide': 'warn', 'invalid': 'warn', - 'under': 'ignore'} - - """ - # Note that we don't want to run the above doctests because they will fail - # without a from __future__ import with_statement - def __init__(self, **kwargs): - self.call = kwargs.pop('call', _Unspecified) - self.kwargs = kwargs - - def __enter__(self): - self.oldstate = seterr(**self.kwargs) - if self.call is not _Unspecified: - self.oldcall = seterrcall(self.call) - - def __exit__(self, *exc_info): - seterr(**self.oldstate) - if self.call is not _Unspecified: - seterrcall(self.oldcall) - - -def _setdef(): - defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None] - umath.seterrobj(defval) - -# set the default values -_setdef() - -Inf = inf = infty = Infinity = PINF -nan = NaN = NAN -False_ = bool_(False) -True_ = bool_(True) - -from .umath import * -from .numerictypes import * -from . 
import fromnumeric -from .fromnumeric import * -extend_all(fromnumeric) -extend_all(umath) -extend_all(numerictypes) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numerictypes.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numerictypes.py deleted file mode 100644 index 1545bc7348953..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/numerictypes.py +++ /dev/null @@ -1,1042 +0,0 @@ -""" -numerictypes: Define the numeric type objects - -This module is designed so "from numerictypes import \\*" is safe. -Exported symbols include: - - Dictionary with all registered number types (including aliases): - typeDict - - Type objects (not all will be available, depends on platform): - see variable sctypes for which ones you have - - Bit-width names - - int8 int16 int32 int64 int128 - uint8 uint16 uint32 uint64 uint128 - float16 float32 float64 float96 float128 float256 - complex32 complex64 complex128 complex192 complex256 complex512 - datetime64 timedelta64 - - c-based names - - bool_ - - object_ - - void, str_, unicode_ - - byte, ubyte, - short, ushort - intc, uintc, - intp, uintp, - int_, uint, - longlong, ulonglong, - - single, csingle, - float_, complex_, - longfloat, clongfloat, - - As part of the type-hierarchy: xx -- is bit-width - - generic - +-> bool_ (kind=b) - +-> number (kind=i) - | integer - | signedinteger (intxx) - | byte - | short - | intc - | intp int0 - | int_ - | longlong - +-> unsignedinteger (uintxx) (kind=u) - | ubyte - | ushort - | uintc - | uintp uint0 - | uint_ - | ulonglong - +-> inexact - | +-> floating (floatxx) (kind=f) - | | half - | | single - | | float_ (double) - | | longfloat - | \\-> complexfloating (complexxx) (kind=c) - | csingle (singlecomplex) - | complex_ (cfloat, cdouble) - | clongfloat (longcomplex) - +-> flexible - | character - | void (kind=V) - | - | str_ (string_, bytes_) (kind=S) [Python 2] - | unicode_ (kind=U) [Python 2] - | - | bytes_ (string_) (kind=S) [Python 3] - | str_ (unicode_) (kind=U) [Python 3] - | - \\-> object_ (not used much) (kind=O) - -""" -from __future__ import division, absolute_import, print_function - -# we add more at the bottom -__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes', - 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', - 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', - 'issubdtype', 'datetime_data', 'datetime_as_string', - 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', - ] - -from numpy.core.multiarray import ( - typeinfo, ndarray, array, empty, dtype, datetime_data, - datetime_as_string, busday_offset, busday_count, is_busday, - busdaycalendar - ) -import types as _types -import sys -from numpy.compat import bytes, long -import numbers - -# we don't export these for import *, but we do want them accessible -# as numerictypes.bool, etc. -if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str -else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - -# String-handling utilities to avoid locale-dependence. - -# "import string" is costly to import! 
-# Construct the translation tables directly -# "A" = chr(65), "a" = chr(97) -_all_chars = [chr(_m) for _m in range(256)] -_ascii_upper = _all_chars[65:65+26] -_ascii_lower = _all_chars[97:97+26] -LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) -UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) - -#import string -# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \ -# LOWER_TABLE) -# assert (string.maketrnas(string_ascii_lowercase, string.ascii_uppercase) == \ -# UPPER_TABLE) -#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase) -#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase) - -def english_lower(s): - """ Apply English case rules to convert ASCII strings to all lower case. - - This is an internal utility function to replace calls to str.lower() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. - - Parameters - ---------- - s : str - - Returns - ------- - lowered : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_lower - >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' - >>> english_lower('') - '' - """ - lowered = s.translate(LOWER_TABLE) - return lowered - -def english_upper(s): - """ Apply English case rules to convert ASCII strings to all upper case. - - This is an internal utility function to replace calls to str.upper() such - that we can avoid changing behavior with changing locales. In particular, - Turkish has distinct dotted and dotless variants of the Latin letter "I" in - both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. - - Parameters - ---------- - s : str - - Returns - ------- - uppered : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_upper - >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') - 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' - >>> english_upper('') - '' - """ - uppered = s.translate(UPPER_TABLE) - return uppered - -def english_capitalize(s): - """ Apply English case rules to convert the first character of an ASCII - string to upper case. - - This is an internal utility function to replace calls to str.capitalize() - such that we can avoid changing behavior with changing locales. 
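A Python 3 sketch of the same table-driven idea; the names ``ascii_lower`` and ``ascii_lower_table`` are mine, not part of the file:

# Build an ASCII-only mapping once, then translate through it, so the
# result never depends on the process locale.
ascii_lower_table = str.maketrans(
    'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz')

def ascii_lower(s):
    # 'I' always maps to 'i', regardless of any Turkish-style locale rules.
    return s.translate(ascii_lower_table)

assert ascii_lower('NumPy INT8') == 'numpy int8'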
- - Parameters - ---------- - s : str - - Returns - ------- - capitalized : str - - Examples - -------- - >>> from numpy.core.numerictypes import english_capitalize - >>> english_capitalize('int8') - 'Int8' - >>> english_capitalize('Int8') - 'Int8' - >>> english_capitalize('') - '' - """ - if s: - return english_upper(s[0]) + s[1:] - else: - return s - - -sctypeDict = {} # Contains all leaf-node scalar types with aliases -sctypeNA = {} # Contails all leaf-node types -> numarray type equivalences -allTypes = {} # Collect the types we will add to the module here - -def _evalname(name): - k = 0 - for ch in name: - if ch in '0123456789': - break - k += 1 - try: - bits = int(name[k:]) - except ValueError: - bits = 0 - base = name[:k] - return base, bits - -def bitname(obj): - """Return a bit-width name for a given type object""" - name = obj.__name__ - base = '' - char = '' - try: - if name[-1] == '_': - newname = name[:-1] - else: - newname = name - info = typeinfo[english_upper(newname)] - assert(info[-1] == obj) # sanity check - bits = info[2] - - except KeyError: # bit-width name - base, bits = _evalname(name) - char = base[0] - - if name == 'bool_': - char = 'b' - base = 'bool' - elif name=='void': - char = 'V' - base = 'void' - elif name=='object_': - char = 'O' - base = 'object' - bits = 0 - elif name=='datetime64': - char = 'M' - elif name=='timedelta64': - char = 'm' - - if sys.version_info[0] >= 3: - if name=='bytes_': - char = 'S' - base = 'bytes' - elif name=='str_': - char = 'U' - base = 'str' - else: - if name=='string_': - char = 'S' - base = 'string' - elif name=='unicode_': - char = 'U' - base = 'unicode' - - bytes = bits // 8 - - if char != '' and bytes != 0: - char = "%s%d" % (char, bytes) - - return base, bits, char - - -def _add_types(): - for a in typeinfo.keys(): - name = english_lower(a) - if isinstance(typeinfo[a], tuple): - typeobj = typeinfo[a][-1] - - # define C-name and insert typenum and typechar references also - allTypes[name] = typeobj - sctypeDict[name] = typeobj - sctypeDict[typeinfo[a][0]] = typeobj - sctypeDict[typeinfo[a][1]] = typeobj - - else: # generic class - allTypes[name] = typeinfo[a] -_add_types() - -def _add_aliases(): - for a in typeinfo.keys(): - name = english_lower(a) - if not isinstance(typeinfo[a], tuple): - continue - typeobj = typeinfo[a][-1] - # insert bit-width version for this class (if relevant) - base, bit, char = bitname(typeobj) - if base[-3:] == 'int' or char[0] in 'ui': continue - if base != '': - myname = "%s%d" % (base, bit) - if (name != 'longdouble' and name != 'clongdouble') or \ - myname not in allTypes.keys(): - allTypes[myname] = typeobj - sctypeDict[myname] = typeobj - if base == 'complex': - na_name = '%s%d' % (english_capitalize(base), bit//2) - elif base == 'bool': - na_name = english_capitalize(base) - sctypeDict[na_name] = typeobj - else: - na_name = "%s%d" % (english_capitalize(base), bit) - sctypeDict[na_name] = typeobj - sctypeNA[na_name] = typeobj - sctypeDict[na_name] = typeobj - sctypeNA[typeobj] = na_name - sctypeNA[typeinfo[a][0]] = na_name - if char != '': - sctypeDict[char] = typeobj - sctypeNA[char] = na_name -_add_aliases() - -# Integers handled so that -# The int32, int64 types should agree exactly with -# PyArray_INT32, PyArray_INT64 in C -# We need to enforce the same checking as is done -# in arrayobject.h where the order of getting a -# bit-width match is: -# long, longlong, int, short, char -# for int8, int16, int32, int64, int128 - -def _add_integer_aliases(): - _ctypes = ['LONG', 'LONGLONG', 'INT', 
'SHORT', 'BYTE'] - for ctype in _ctypes: - val = typeinfo[ctype] - bits = val[2] - charname = 'i%d' % (bits//8,) - ucharname = 'u%d' % (bits//8,) - intname = 'int%d' % bits - UIntname = 'UInt%d' % bits - Intname = 'Int%d' % bits - uval = typeinfo['U'+ctype] - typeobj = val[-1] - utypeobj = uval[-1] - if intname not in allTypes.keys(): - uintname = 'uint%d' % bits - allTypes[intname] = typeobj - allTypes[uintname] = utypeobj - sctypeDict[intname] = typeobj - sctypeDict[uintname] = utypeobj - sctypeDict[Intname] = typeobj - sctypeDict[UIntname] = utypeobj - sctypeDict[charname] = typeobj - sctypeDict[ucharname] = utypeobj - sctypeNA[Intname] = typeobj - sctypeNA[UIntname] = utypeobj - sctypeNA[charname] = typeobj - sctypeNA[ucharname] = utypeobj - sctypeNA[typeobj] = Intname - sctypeNA[utypeobj] = UIntname - sctypeNA[val[0]] = Intname - sctypeNA[uval[0]] = UIntname -_add_integer_aliases() - -# We use these later -void = allTypes['void'] -generic = allTypes['generic'] - -# -# Rework the Python names (so that float and complex and int are consistent -# with Python usage) -# -def _set_up_aliases(): - type_pairs = [('complex_', 'cdouble'), - ('int0', 'intp'), - ('uint0', 'uintp'), - ('single', 'float'), - ('csingle', 'cfloat'), - ('singlecomplex', 'cfloat'), - ('float_', 'double'), - ('intc', 'int'), - ('uintc', 'uint'), - ('int_', 'long'), - ('uint', 'ulong'), - ('cfloat', 'cdouble'), - ('longfloat', 'longdouble'), - ('clongfloat', 'clongdouble'), - ('longcomplex', 'clongdouble'), - ('bool_', 'bool'), - ('unicode_', 'unicode'), - ('object_', 'object')] - if sys.version_info[0] >= 3: - type_pairs.extend([('bytes_', 'string'), - ('str_', 'unicode'), - ('string_', 'string')]) - else: - type_pairs.extend([('str_', 'string'), - ('string_', 'string'), - ('bytes_', 'string')]) - for alias, t in type_pairs: - allTypes[alias] = allTypes[t] - sctypeDict[alias] = sctypeDict[t] - # Remove aliases overriding python types and modules - to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float', - 'complex', 'bool', 'string', 'datetime', 'timedelta'] - if sys.version_info[0] >= 3: - # Py3K - to_remove.append('bytes') - to_remove.append('str') - to_remove.remove('unicode') - to_remove.remove('long') - for t in to_remove: - try: - del allTypes[t] - del sctypeDict[t] - except KeyError: - pass -_set_up_aliases() - -# Now, construct dictionary to lookup character codes from types -_sctype2char_dict = {} -def _construct_char_code_lookup(): - for name in typeinfo.keys(): - tup = typeinfo[name] - if isinstance(tup, tuple): - if tup[0] not in ['p', 'P']: - _sctype2char_dict[tup[-1]] = tup[0] -_construct_char_code_lookup() - - -sctypes = {'int': [], - 'uint':[], - 'float':[], - 'complex':[], - 'others':[bool, object, str, unicode, void]} - -def _add_array_type(typename, bits): - try: - t = allTypes['%s%d' % (typename, bits)] - except KeyError: - pass - else: - sctypes[typename].append(t) - -def _set_array_types(): - ibytes = [1, 2, 4, 8, 16, 32, 64] - fbytes = [2, 4, 8, 10, 12, 16, 32, 64] - for bytes in ibytes: - bits = 8*bytes - _add_array_type('int', bits) - _add_array_type('uint', bits) - for bytes in fbytes: - bits = 8*bytes - _add_array_type('float', bits) - _add_array_type('complex', 2*bits) - _gi = dtype('p') - if _gi.type not in sctypes['int']: - indx = 0 - sz = _gi.itemsize - _lst = sctypes['int'] - while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): - indx += 1 - sctypes['int'].insert(indx, _gi.type) - sctypes['uint'].insert(indx, dtype('P').type) -_set_array_types() - - -genericTypeRank = 
['bool', 'int8', 'uint8', 'int16', 'uint16', - 'int32', 'uint32', 'int64', 'uint64', 'int128', - 'uint128', 'float16', - 'float32', 'float64', 'float80', 'float96', 'float128', - 'float256', - 'complex32', 'complex64', 'complex128', 'complex160', - 'complex192', 'complex256', 'complex512', 'object'] - -def maximum_sctype(t): - """ - Return the scalar type of highest precision of the same kind as the input. - - Parameters - ---------- - t : dtype or dtype specifier - The input data type. This can be a `dtype` object or an object that - is convertible to a `dtype`. - - Returns - ------- - out : dtype - The highest precision data type of the same kind (`dtype.kind`) as `t`. - - See Also - -------- - obj2sctype, mintypecode, sctype2char - dtype - - Examples - -------- - >>> np.maximum_sctype(np.int) - - >>> np.maximum_sctype(np.uint8) - - >>> np.maximum_sctype(np.complex) - - - >>> np.maximum_sctype(str) - - - >>> np.maximum_sctype('i2') - - >>> np.maximum_sctype('f4') - - - """ - g = obj2sctype(t) - if g is None: - return t - t = g - name = t.__name__ - base, bits = _evalname(name) - if bits == 0: - return t - else: - return sctypes[base][-1] - -try: - buffer_type = _types.BufferType -except AttributeError: - # Py3K - buffer_type = memoryview - -_python_types = {int: 'int_', - float: 'float_', - complex: 'complex_', - bool: 'bool_', - bytes: 'bytes_', - unicode: 'unicode_', - buffer_type: 'void', - } - -if sys.version_info[0] >= 3: - def _python_type(t): - """returns the type corresponding to a certain Python type""" - if not isinstance(t, type): - t = type(t) - return allTypes[_python_types.get(t, 'object_')] -else: - def _python_type(t): - """returns the type corresponding to a certain Python type""" - if not isinstance(t, _types.TypeType): - t = type(t) - return allTypes[_python_types.get(t, 'object_')] - -def issctype(rep): - """ - Determines whether the given object represents a scalar data-type. - - Parameters - ---------- - rep : any - If `rep` is an instance of a scalar dtype, True is returned. If not, - False is returned. - - Returns - ------- - out : bool - Boolean result of check whether `rep` is a scalar dtype. - - See Also - -------- - issubsctype, issubdtype, obj2sctype, sctype2char - - Examples - -------- - >>> np.issctype(np.int32) - True - >>> np.issctype(list) - False - >>> np.issctype(1.1) - False - - Strings are also a scalar type: - - >>> np.issctype(np.dtype('str')) - True - - """ - if not isinstance(rep, (type, dtype)): - return False - try: - res = obj2sctype(rep) - if res and res != object_: - return True - return False - except: - return False - -def obj2sctype(rep, default=None): - """ - Return the scalar dtype or NumPy equivalent of Python type of an object. - - Parameters - ---------- - rep : any - The object of which the type is returned. - default : any, optional - If given, this is returned for objects whose types can not be - determined. If not given, None is returned for those objects. - - Returns - ------- - dtype : dtype or Python type - The data type of `rep`. 
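A small illustrative sketch of the two helpers above; the results assume a typical 64-bit build:

import numpy as np

# obj2sctype maps dtypes, arrays and Python objects to scalar type objects;
# maximum_sctype then picks the highest precision of the same kind.
assert np.obj2sctype(np.int32) is np.int32
assert np.obj2sctype(np.array([1.0])) is np.float64
assert np.maximum_sctype(np.uint8) is np.uint64   # widest unsigned kind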
- - See Also - -------- - sctype2char, issctype, issubsctype, issubdtype, maximum_sctype - - Examples - -------- - >>> np.obj2sctype(np.int32) - - >>> np.obj2sctype(np.array([1., 2.])) - - >>> np.obj2sctype(np.array([1.j])) - - - >>> np.obj2sctype(dict) - - >>> np.obj2sctype('string') - - - >>> np.obj2sctype(1, default=list) - - - """ - try: - if issubclass(rep, generic): - return rep - except TypeError: - pass - if isinstance(rep, dtype): - return rep.type - if isinstance(rep, type): - return _python_type(rep) - if isinstance(rep, ndarray): - return rep.dtype.type - try: - res = dtype(rep) - except: - return default - return res.type - - -def issubclass_(arg1, arg2): - """ - Determine if a class is a subclass of a second class. - - `issubclass_` is equivalent to the Python built-in ``issubclass``, - except that it returns False instead of raising a TypeError is one - of the arguments is not a class. - - Parameters - ---------- - arg1 : class - Input class. True is returned if `arg1` is a subclass of `arg2`. - arg2 : class or tuple of classes. - Input class. If a tuple of classes, True is returned if `arg1` is a - subclass of any of the tuple elements. - - Returns - ------- - out : bool - Whether `arg1` is a subclass of `arg2` or not. - - See Also - -------- - issubsctype, issubdtype, issctype - - Examples - -------- - >>> np.issubclass_(np.int32, np.int) - True - >>> np.issubclass_(np.int32, np.float) - False - - """ - try: - return issubclass(arg1, arg2) - except TypeError: - return False - -def issubsctype(arg1, arg2): - """ - Determine if the first argument is a subclass of the second argument. - - Parameters - ---------- - arg1, arg2 : dtype or dtype specifier - Data-types. - - Returns - ------- - out : bool - The result. - - See Also - -------- - issctype, issubdtype,obj2sctype - - Examples - -------- - >>> np.issubsctype('S8', str) - True - >>> np.issubsctype(np.array([1]), np.int) - True - >>> np.issubsctype(np.array([1]), np.float) - False - - """ - return issubclass(obj2sctype(arg1), obj2sctype(arg2)) - -def issubdtype(arg1, arg2): - """ - Returns True if first argument is a typecode lower/equal in type hierarchy. - - Parameters - ---------- - arg1, arg2 : dtype_like - dtype or string representing a typecode. - - Returns - ------- - out : bool - - See Also - -------- - issubsctype, issubclass_ - numpy.core.numerictypes : Overview of numpy type hierarchy. - - Examples - -------- - >>> np.issubdtype('S1', str) - True - >>> np.issubdtype(np.float64, np.float32) - False - - """ - if issubclass_(arg2, generic): - return issubclass(dtype(arg1).type, arg2) - mro = dtype(arg2).type.mro() - if len(mro) > 1: - val = mro[1] - else: - val = mro[0] - return issubclass(dtype(arg1).type, val) - - -# This dictionary allows look up based on any alias for an array data-type -class _typedict(dict): - """ - Base object for a dictionary for look-up with any alias for an array dtype. - - Instances of `_typedict` can not be used as dictionaries directly, - first they have to be populated. 
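An illustrative check, not part of the file, of how ``issubdtype`` walks the abstract hierarchy sketched in the module docstring:

import numpy as np

assert np.issubdtype(np.float32, np.floating)      # concrete -> abstract
assert np.issubdtype(np.int16, np.signedinteger)
assert not np.issubdtype(np.float64, np.float32)   # siblings, not ancestors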
- - """ - def __getitem__(self, obj): - return dict.__getitem__(self, obj2sctype(obj)) - -nbytes = _typedict() -_alignment = _typedict() -_maxvals = _typedict() -_minvals = _typedict() -def _construct_lookups(): - for name, val in typeinfo.items(): - if not isinstance(val, tuple): - continue - obj = val[-1] - nbytes[obj] = val[2] // 8 - _alignment[obj] = val[3] - if (len(val) > 5): - _maxvals[obj] = val[4] - _minvals[obj] = val[5] - else: - _maxvals[obj] = None - _minvals[obj] = None - -_construct_lookups() - -def sctype2char(sctype): - """ - Return the string representation of a scalar dtype. - - Parameters - ---------- - sctype : scalar dtype or object - If a scalar dtype, the corresponding string character is - returned. If an object, `sctype2char` tries to infer its scalar type - and then return the corresponding string character. - - Returns - ------- - typechar : str - The string character corresponding to the scalar type. - - Raises - ------ - ValueError - If `sctype` is an object for which the type can not be inferred. - - See Also - -------- - obj2sctype, issctype, issubsctype, mintypecode - - Examples - -------- - >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]: - ... print np.sctype2char(sctype) - l - d - D - S - O - - >>> x = np.array([1., 2-1.j]) - >>> np.sctype2char(x) - 'D' - >>> np.sctype2char(list) - 'O' - - """ - sctype = obj2sctype(sctype) - if sctype is None: - raise ValueError("unrecognized type") - return _sctype2char_dict[sctype] - -# Create dictionary of casting functions that wrap sequences -# indexed by type or type character - - -cast = _typedict() -try: - ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType, - _types.LongType, _types.BooleanType, - _types.StringType, _types.UnicodeType, _types.BufferType] -except AttributeError: - # Py3K - ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] - -ScalarType.extend(_sctype2char_dict.keys()) -ScalarType = tuple(ScalarType) -for key in _sctype2char_dict.keys(): - cast[key] = lambda x, k=key : array(x, copy=False).astype(k) - -# Create the typestring lookup dictionary -_typestr = _typedict() -for key in _sctype2char_dict.keys(): - if issubclass(key, allTypes['flexible']): - _typestr[key] = _sctype2char_dict[key] - else: - _typestr[key] = empty((1,), key).dtype.str[1:] - -# Make sure all typestrings are in sctypeDict -for key, val in _typestr.items(): - if val not in sctypeDict: - sctypeDict[val] = key - -# Add additional strings to the sctypeDict - -if sys.version_info[0] >= 3: - _toadd = ['int', 'float', 'complex', 'bool', 'object', - 'str', 'bytes', 'object', ('a', allTypes['bytes_'])] -else: - _toadd = ['int', 'float', 'complex', 'bool', 'object', 'string', - ('str', allTypes['string_']), - 'unicode', 'object', ('a', allTypes['string_'])] - -for name in _toadd: - if isinstance(name, tuple): - sctypeDict[name[0]] = name[1] - else: - sctypeDict[name] = allTypes['%s_' % name] - -del _toadd, name - -# Now add the types we've determined to this module -for key in allTypes: - globals()[key] = allTypes[key] - __all__.append(key) - -del key - -typecodes = {'Character':'c', - 'Integer':'bhilqp', - 'UnsignedInteger':'BHILQP', - 'Float':'efdg', - 'Complex':'FDG', - 'AllInteger':'bBhHiIlLqQpP', - 'AllFloat':'efdgFDG', - 'Datetime': 'Mm', - 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} - -# backwards compatibility --- deprecated name -typeDict = sctypeDict -typeNA = sctypeNA - -# b -> boolean -# u -> unsigned integer -# i -> signed integer -# f -> floating point -# c -> complex -# 
M -> datetime -# m -> timedelta -# S -> string -# U -> Unicode string -# V -> record -# O -> Python object -_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm'] - -__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O' -__len_test_types = len(__test_types) - -# Keep incrementing until a common type both can be coerced to -# is found. Otherwise, return None -def _find_common_coerce(a, b): - if a > b: - return a - try: - thisind = __test_types.index(a.char) - except ValueError: - return None - return _can_coerce_all([a, b], start=thisind) - -# Find a data-type that all data-types in a list can be coerced to -def _can_coerce_all(dtypelist, start=0): - N = len(dtypelist) - if N == 0: - return None - if N == 1: - return dtypelist[0] - thisind = start - while thisind < __len_test_types: - newdtype = dtype(__test_types[thisind]) - numcoerce = len([x for x in dtypelist if newdtype >= x]) - if numcoerce == N: - return newdtype - thisind += 1 - return None - -def _register_types(): - numbers.Integral.register(integer) - numbers.Complex.register(inexact) - numbers.Real.register(floating) -_register_types() - -def find_common_type(array_types, scalar_types): - """ - Determine common type following standard coercion rules. - - Parameters - ---------- - array_types : sequence - A list of dtypes or dtype convertible objects representing arrays. - scalar_types : sequence - A list of dtypes or dtype convertible objects representing scalars. - - Returns - ------- - datatype : dtype - The common data type, which is the maximum of `array_types` ignoring - `scalar_types`, unless the maximum of `scalar_types` is of a - different kind (`dtype.kind`). If the kind is not understood, then - None is returned. - - See Also - -------- - dtype, common_type, can_cast, mintypecode - - Examples - -------- - >>> np.find_common_type([], [np.int64, np.float32, np.complex]) - dtype('complex128') - >>> np.find_common_type([np.int64, np.float32], []) - dtype('float64') - - The standard casting rules ensure that a scalar cannot up-cast an - array unless the scalar is of a fundamentally different kind of data - (i.e. 
under a different branch of the data type hierarchy) than - the array: - - >>> np.find_common_type([np.float32], [np.int64, np.float64]) - dtype('float32') - - Complex is of a different type, so it up-casts the float in the - `array_types` argument: - - >>> np.find_common_type([np.float32], [np.complex]) - dtype('complex128') - - Type specifier strings are convertible to dtypes and can therefore - be used instead of dtypes: - - >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8']) - dtype('complex128') - - """ - array_types = [dtype(x) for x in array_types] - scalar_types = [dtype(x) for x in scalar_types] - - maxa = _can_coerce_all(array_types) - maxsc = _can_coerce_all(scalar_types) - - if maxa is None: - return maxsc - - if maxsc is None: - return maxa - - try: - index_a = _kind_list.index(maxa.kind) - index_sc = _kind_list.index(maxsc.kind) - except ValueError: - return None - - if index_sc > index_a: - return _find_common_coerce(maxsc, maxa) - else: - return maxa diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/operand_flag_tests.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/operand_flag_tests.py deleted file mode 100644 index 55dedb776b147..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/operand_flag_tests.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'operand_flag_tests.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/records.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/records.py deleted file mode 100644 index d0f82a25c6d52..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/records.py +++ /dev/null @@ -1,808 +0,0 @@ -""" -Record Arrays -============= -Record arrays expose the fields of structured arrays as properties. - -Most commonly, ndarrays contain elements of a single type, e.g. floats, integers, -bools etc. However, it is possible for elements to be combinations of these, -such as:: - - >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)]) - >>> a - array([(1, 2.0), (1, 2.0)], - dtype=[('x', '<i8'), ('y', '<f8')]) - -Here, each element consists of two fields: x (an int), and y (a float). -The fields are analogous to columns in a spreadsheet, and can be accessed -as one would a dictionary:: - - >>> a['x'] - array([1, 1]) - - >>> a['y'] - array([ 2., 2.]) - -Record arrays allow us to access fields as properties:: - - >>> ar = a.view(np.recarray) - - >>> ar.x - array([1, 1]) - - >>> ar.y - array([ 2., 2.]) - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os - -from . import numeric as sb -from .defchararray import chararray -from . 
import numerictypes as nt -from numpy.compat import isfileobj, bytes, long - -# All of the functions allow formats to be a dtype -__all__ = ['record', 'recarray', 'format_parser'] - - -ndarray = sb.ndarray - -_byteorderconv = {'b':'>', - 'l':'<', - 'n':'=', - 'B':'>', - 'L':'<', - 'N':'=', - 'S':'s', - 's':'s', - '>':'>', - '<':'<', - '=':'=', - '|':'|', - 'I':'|', - 'i':'|'} - -# formats regular expression -# allows multidimension spec with a tuple syntax in front -# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' -# are equally allowed - -numfmt = nt.typeDict -_typestr = nt._typestr - -def find_duplicate(list): - """Find duplication in a list, return a list of duplicated elements""" - dup = [] - for i in range(len(list)): - if (list[i] in list[i + 1:]): - if (list[i] not in dup): - dup.append(list[i]) - return dup - -class format_parser: - """ - Class to convert formats, names, titles description to a dtype. - - After constructing the format_parser object, the dtype attribute is - the converted data-type: - ``dtype = format_parser(formats, names, titles).dtype`` - - Attributes - ---------- - dtype : dtype - The converted data-type. - - Parameters - ---------- - formats : str or list of str - The format description, either specified as a string with - comma-separated format descriptions in the form ``'f8, i4, a5'``, or - a list of format description strings in the form - ``['f8', 'i4', 'a5']``. - names : str or list/tuple of str - The field names, either specified as a comma-separated string in the - form ``'col1, col2, col3'``, or as a list or tuple of strings in the - form ``['col1', 'col2', 'col3']``. - An empty list can be used, in that case default field names - ('f0', 'f1', ...) are used. - titles : sequence - Sequence of title strings. An empty list can be used to leave titles - out. - aligned : bool, optional - If True, align the fields by padding as the C-compiler would. - Default is False. - byteorder : str, optional - If specified, all the fields will be changed to the - provided byte-order. Otherwise, the default byte-order is - used. For all available string specifiers, see `dtype.newbyteorder`. - - See Also - -------- - dtype, typename, sctype2char - - Examples - -------- - >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], - ... ['T1', 'T2', 'T3']).dtype - dtype([(('T1', 'col1'), '>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], - ... []).dtype - dtype([('col1', '>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype - dtype([('f0', ' len(titles)): - self._titles += [None] * (self._nfields - len(titles)) - - def _createdescr(self, byteorder): - descr = sb.dtype({'names':self._names, - 'formats':self._f_formats, - 'offsets':self._offsets, - 'titles':self._titles}) - if (byteorder is not None): - byteorder = _byteorderconv[byteorder[0]] - descr = descr.newbyteorder(byteorder) - - self._descr = descr - -class record(nt.void): - """A data-type scalar that allows field access as attribute lookup. 
- """ - def __repr__(self): - return self.__str__() - - def __str__(self): - return str(self.item()) - - def __getattribute__(self, attr): - if attr in ['setfield', 'getfield', 'dtype']: - return nt.void.__getattribute__(self, attr) - try: - return nt.void.__getattribute__(self, attr) - except AttributeError: - pass - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - obj = self.getfield(*res[:2]) - # if it has fields return a recarray, - # if it's a string ('SU') return a chararray - # otherwise return the object - try: - dt = obj.dtype - except AttributeError: - return obj - if dt.fields: - return obj.view(obj.__class__) - if dt.char in 'SU': - return obj.view(chararray) - return obj - else: - raise AttributeError("'record' object has no " - "attribute '%s'" % attr) - - - def __setattr__(self, attr, val): - if attr in ['setfield', 'getfield', 'dtype']: - raise AttributeError("Cannot set '%s' attribute" % attr) - fielddict = nt.void.__getattribute__(self, 'dtype').fields - res = fielddict.get(attr, None) - if res: - return self.setfield(val, *res[:2]) - else: - if getattr(self, attr, None): - return nt.void.__setattr__(self, attr, val) - else: - raise AttributeError("'record' object has no " - "attribute '%s'" % attr) - - def pprint(self): - """Pretty-print all fields.""" - # pretty-print all fields - names = self.dtype.names - maxlen = max([len(name) for name in names]) - rows = [] - fmt = '%% %ds: %%s' % maxlen - for name in names: - rows.append(fmt % (name, getattr(self, name))) - return "\n".join(rows) - -# The recarray is almost identical to a standard array (which supports -# named fields already) The biggest difference is that it can use -# attribute-lookup to find the fields and it is constructed using -# a record. - -# If byteorder is given it forces a particular byteorder on all -# the fields (and any subfields) - -class recarray(ndarray): - """ - Construct an ndarray that allows field access using attributes. - - Arrays may have a data-types containing fields, analogous - to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, - where each entry in the array is a pair of ``(int, float)``. Normally, - these attributes are accessed using dictionary lookups such as ``arr['x']`` - and ``arr['y']``. Record arrays allow the fields to be accessed as members - of the array, using ``arr.x`` and ``arr.y``. - - Parameters - ---------- - shape : tuple - Shape of output array. - dtype : data-type, optional - The desired data-type. By default, the data-type is determined - from `formats`, `names`, `titles`, `aligned` and `byteorder`. - formats : list of data-types, optional - A list containing the data-types for the different columns, e.g. - ``['i4', 'f8', 'i4']``. `formats` does *not* support the new - convention of using types directly, i.e. ``(int, float, int)``. - Note that `formats` must be a list, not a tuple. - Given that `formats` is somewhat limited, we recommend specifying - `dtype` instead. - names : tuple of str, optional - The name of each column, e.g. ``('x', 'y', 'z')``. - buf : buffer, optional - By default, a new array is created of the given shape and data-type. - If `buf` is specified and is an object exposing the buffer interface, - the array will use the memory from the existing buffer. In this case, - the `offset` and `strides` keywords are available. - - Other Parameters - ---------------- - titles : tuple of str, optional - Aliases for column names. 
For example, if `names` were - ``('x', 'y', 'z')`` and `titles` is - ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then - ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. - byteorder : {'<', '>', '='}, optional - Byte-order for all fields. - aligned : bool, optional - Align the fields in memory as the C-compiler would. - strides : tuple of ints, optional - Buffer (`buf`) is interpreted according to these strides (strides - define how many bytes each array element, row, column, etc. - occupy in memory). - offset : int, optional - Start reading buffer (`buf`) from this offset onwards. - order : {'C', 'F'}, optional - Row-major or column-major order. - - Returns - ------- - rec : recarray - Empty array of the given shape and type. - - See Also - -------- - rec.fromrecords : Construct a record array from data. - record : fundamental data-type for `recarray`. - format_parser : determine a data-type from formats, names, titles. - - Notes - ----- - This constructor can be compared to ``empty``: it creates a new record - array but does not fill it with data. To create a record array from data, - use one of the following methods: - - 1. Create a standard ndarray and convert it to a record array, - using ``arr.view(np.recarray)`` - 2. Use the `buf` keyword. - 3. Use `np.rec.fromrecords`. - - Examples - -------- - Create an array with two fields, ``x`` and ``y``: - - >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)]) - >>> x - array([(1.0, 2), (3.0, 4)], - dtype=[('x', '>> x['x'] - array([ 1., 3.]) - - View the array as a record array: - - >>> x = x.view(np.recarray) - - >>> x.x - array([ 1., 3.]) - - >>> x.y - array([2, 4]) - - Create a new, empty record array: - - >>> np.recarray((2,), - ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP - rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), - (3471280, 1.2134086255804012e-316, 0)], - dtype=[('x', '>> x1=np.array([1,2,3,4]) - >>> x2=np.array(['a','dd','xyz','12']) - >>> x3=np.array([1.1,2,3,4]) - >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') - >>> print r[1] - (2, 'dd', 2.0) - >>> x1[1]=34 - >>> r.a - array([1, 2, 3, 4]) - """ - - arrayList = [sb.asarray(x) for x in arrayList] - - if shape is None or shape == 0: - shape = arrayList[0].shape - - if isinstance(shape, int): - shape = (shape,) - - if formats is None and dtype is None: - # go through each object in the list to see if it is an ndarray - # and determine the formats. - formats = '' - for obj in arrayList: - if not isinstance(obj, ndarray): - raise ValueError("item in the array list must be an ndarray.") - formats += _typestr[obj.dtype.type] - if issubclass(obj.dtype.type, nt.flexible): - formats += repr(obj.itemsize) - formats += ',' - formats = formats[:-1] - - if dtype is not None: - descr = sb.dtype(dtype) - _names = descr.names - else: - parsed = format_parser(formats, names, titles, aligned, byteorder) - _names = parsed._names - descr = parsed._descr - - # Determine shape from data-type. 
- if len(descr) != len(arrayList): - raise ValueError("mismatch between the number of fields " - "and the number of arrays") - - d0 = descr[0].shape - nn = len(d0) - if nn > 0: - shape = shape[:-nn] - - for k, obj in enumerate(arrayList): - nn = len(descr[k].shape) - testshape = obj.shape[:len(obj.shape) - nn] - if testshape != shape: - raise ValueError("array-shape mismatch in array %d" % k) - - _array = recarray(shape, descr) - - # populate the record array (makes a copy) - for i in range(len(arrayList)): - _array[_names[i]] = arrayList[i] - - return _array - -# shape must be 1-d if you use list of lists... -def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None): - """ create a recarray from a list of records in text form - - The data in the same field can be heterogeneous, they will be promoted - to the highest data type. This method is intended for creating - smaller record arrays. If used to create large array without formats - defined - - r=fromrecords([(2,3.,'abc')]*100000) - - it can be slow. - - If formats is None, then this will auto-detect formats. Use list of - tuples rather than list of lists for faster processing. - - >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], - ... names='col1,col2,col3') - >>> print r[0] - (456, 'dbe', 1.2) - >>> r.col1 - array([456, 2]) - >>> r.col2 - chararray(['dbe', 'de'], - dtype='|S3') - >>> import pickle - >>> print pickle.loads(pickle.dumps(r)) - [(456, 'dbe', 1.2) (2, 'de', 1.3)] - """ - - nfields = len(recList[0]) - if formats is None and dtype is None: # slower - obj = sb.array(recList, dtype=object) - arrlist = [sb.array(obj[..., i].tolist()) for i in range(nfields)] - return fromarrays(arrlist, formats=formats, shape=shape, names=names, - titles=titles, aligned=aligned, byteorder=byteorder) - - if dtype is not None: - descr = sb.dtype((record, dtype)) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - try: - retval = sb.array(recList, dtype=descr) - except TypeError: # list of lists instead of list of tuples - if (shape is None or shape == 0): - shape = len(recList) - if isinstance(shape, (int, long)): - shape = (shape,) - if len(shape) > 1: - raise ValueError("Can only deal with 1-d array.") - _array = recarray(shape, descr) - for k in range(_array.size): - _array[k] = tuple(recList[k]) - return _array - else: - if shape is not None and retval.shape != shape: - retval.shape = shape - - res = retval.view(recarray) - - return res - - -def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """ create a (read-only) record array from binary data contained in - a string""" - - - if dtype is None and formats is None: - raise ValueError("Must have dtype= or formats=") - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - if (shape is None or shape == 0 or shape == -1): - shape = (len(datastring) - offset) / itemsize - - _array = recarray(shape, descr, buf=datastring, offset=offset) - return _array - -def get_remaining_size(fd): - try: - fn = fd.fileno() - except AttributeError: - return os.path.getsize(fd.name) - fd.tell() - st = os.fstat(fn) - size = st.st_size - fd.tell() - return size - -def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """Create an array from 
binary file data - - If file is a string then that file is opened, else it is assumed - to be a file object. - - >>> from tempfile import TemporaryFile - >>> a = np.empty(10,dtype='f8,i4,a5') - >>> a[5] = (0.5,10,'abcde') - >>> - >>> fd=TemporaryFile() - >>> a = a.newbyteorder('<') - >>> a.tofile(fd) - >>> - >>> fd.seek(0) - >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, - ... byteorder='<') - >>> print r[5] - (0.5, 10, 'abcde') - >>> r.shape - (10,) - """ - - if (shape is None or shape == 0): - shape = (-1,) - elif isinstance(shape, (int, long)): - shape = (shape,) - - name = 0 - if isinstance(fd, str): - name = 1 - fd = open(fd, 'rb') - if (offset > 0): - fd.seek(offset, 1) - size = get_remaining_size(fd) - - if dtype is not None: - descr = sb.dtype(dtype) - else: - descr = format_parser(formats, names, titles, aligned, byteorder)._descr - - itemsize = descr.itemsize - - shapeprod = sb.array(shape).prod() - shapesize = shapeprod * itemsize - if shapesize < 0: - shape = list(shape) - shape[ shape.index(-1) ] = size / -shapesize - shape = tuple(shape) - shapeprod = sb.array(shape).prod() - - nbytes = shapeprod * itemsize - - if nbytes > size: - raise ValueError( - "Not enough bytes left in file for specified shape and type") - - # create the array - _array = recarray(shape, descr) - nbytesread = fd.readinto(_array.data) - if nbytesread != nbytes: - raise IOError("Didn't read as many bytes as expected") - if name: - fd.close() - - return _array - -def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None, copy=True): - """Construct a record array from a wide-variety of objects. - """ - - if (isinstance(obj, (type(None), str)) or isfileobj(obj)) \ - and (formats is None) \ - and (dtype is None): - raise ValueError("Must define formats (or dtype) if object is "\ - "None, string, or an open file") - - kwds = {} - if dtype is not None: - dtype = sb.dtype(dtype) - elif formats is not None: - dtype = format_parser(formats, names, titles, - aligned, byteorder)._descr - else: - kwds = {'formats': formats, - 'names' : names, - 'titles' : titles, - 'aligned' : aligned, - 'byteorder' : byteorder - } - - if obj is None: - if shape is None: - raise ValueError("Must define a shape if obj is None") - return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) - - elif isinstance(obj, bytes): - return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) - - elif isinstance(obj, (list, tuple)): - if isinstance(obj[0], (tuple, list)): - return fromrecords(obj, dtype=dtype, shape=shape, **kwds) - else: - return fromarrays(obj, dtype=dtype, shape=shape, **kwds) - - elif isinstance(obj, recarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - return new - - elif isfileobj(obj): - return fromfile(obj, dtype=dtype, shape=shape, offset=offset) - - elif isinstance(obj, ndarray): - if dtype is not None and (obj.dtype != dtype): - new = obj.view(dtype) - else: - new = obj - if copy: - new = new.copy() - res = new.view(recarray) - if issubclass(res.dtype.type, nt.void): - res.dtype = sb.dtype((record, res.dtype)) - return res - - else: - interface = getattr(obj, "__array_interface__", None) - if interface is None or not isinstance(interface, dict): - raise ValueError("Unknown input type") - obj = sb.array(obj) - if dtype is not None and (obj.dtype != dtype): - obj = obj.view(dtype) - res = obj.view(recarray) - if 
issubclass(res.dtype.type, nt.void): - res.dtype = sb.dtype((record, res.dtype)) - return res diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/scalarmath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/scalarmath.py deleted file mode 100644 index 0bb7dbbf6397e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/scalarmath.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'scalarmath.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup.py deleted file mode 100644 index 5da04241317eb..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup.py +++ /dev/null @@ -1,1013 +0,0 @@ -from __future__ import division, print_function - -import imp -import os -import sys -import shutil -import pickle -import copy -import warnings -import re -from os.path import join -from numpy.distutils import log -from distutils.dep_util import newer -from distutils.sysconfig import get_config_var - -from setup_common import * - -# Set to True to enable multiple file compilations (experimental) -ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0") -# Set to True to enable relaxed strides checking. This (mostly) means -# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags. -NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0") - -# XXX: ugly, we use a class to avoid calling some expensive functions in -# config.h/numpyconfig.h twice. I don't see a better way because distutils forces -# config.h generation inside an Extension class, and as such sharing -# configuration information between extensions is not easy. -# Using a pickle-based memoize does not work because config_cmd is an instance -# method, which cPickle does not like. -# -# Use pickle in all cases, as cPickle is gone in python3 and the difference -# in time is only in build. -- Charles Harris, 2013-03-30 - -class CallOnceOnly(object): - def __init__(self): - self._check_types = None - self._check_ieee_macros = None - self._check_complex = None - - def check_types(self, *a, **kw): - if self._check_types is None: - out = check_types(*a, **kw) - self._check_types = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_types)) - return out - - def check_ieee_macros(self, *a, **kw): - if self._check_ieee_macros is None: - out = check_ieee_macros(*a, **kw) - self._check_ieee_macros = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_ieee_macros)) - return out - - def check_complex(self, *a, **kw): - if self._check_complex is None: - out = check_complex(*a, **kw) - self._check_complex = pickle.dumps(out) - else: - out = copy.deepcopy(pickle.loads(self._check_complex)) - return out - -PYTHON_HAS_UNICODE_WIDE = True - -def pythonlib_dir(): - """return path where libpython* is.""" - if sys.platform == 'win32': - return os.path.join(sys.prefix, "libs") - else: - return get_config_var('LIBDIR') - -def is_npy_no_signal(): - """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration - header.""" - return sys.platform == 'win32' - -def is_npy_no_smp(): - """Return True if the NPY_NO_SMP symbol must be defined in public - header (when SMP support cannot be reliably enabled).""" - # Python 2.3 causes a segfault when - # trying to re-acquire the thread-state - # which is done in error-handling - # ufunc code. NPY_ALLOW_C_API and friends - # cause the segfault. So, we disable threading - # for now. - if sys.version[:5] < '2.4.2': - nosmp = 1 - else: - # Perhaps a fancier check is in order here, - # so that threads are only enabled if there - # are actually multiple CPUs? -- but - # threaded code can be nice even on a single - # CPU so that long-calculating code doesn't - # block. - try: - nosmp = os.environ['NPY_NOSMP'] - nosmp = 1 - except KeyError: - nosmp = 0 - return nosmp == 1 - -def win32_checks(deflist): - from numpy.distutils.misc_util import get_build_architecture - a = get_build_architecture() - - # Distutils hack on AMD64 on windows - print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % - (a, os.name, sys.platform)) - if a == 'AMD64': - deflist.append('DISTUTILS_USE_SDK') - - # On win32, force long double format string to be 'g', not - # 'Lg', since the MS runtime does not support long double whose - # size is > sizeof(double) - if a == "Intel" or a == "AMD64": - deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') - -def check_math_capabilities(config, moredefs, mathlibs): - def check_func(func_name): - return config.check_func(func_name, libraries=mathlibs, - decl=True, call=True) - - def check_funcs_once(funcs_name): - decl = dict([(f, True) for f in funcs_name]) - st = config.check_funcs_once(funcs_name, libraries=mathlibs, - decl=decl, call=decl) - if st: - moredefs.extend([(fname2def(f), 1) for f in funcs_name]) - return st - - def check_funcs(funcs_name): - # Use check_funcs_once first, and if it does not work, test func per - # func. Return success only if all the functions are available - if not check_funcs_once(funcs_name): - # Global check failed, check func per func - for f in funcs_name: - if check_func(f): - moredefs.append((fname2def(f), 1)) - return 0 - else: - return 1 - - #use_msvc = config.check_decl("_MSC_VER") - - if not check_funcs_once(MANDATORY_FUNCS): - raise SystemError("One of the functions required to build numpy is not" - " available (the list is %s)." % str(MANDATORY_FUNCS)) - - # Standard functions which may not be available and for which we have a - # replacement implementation. Note that some of these are C99 functions. - - # XXX: hack to circumvent cpp pollution from python: python puts its - # config.h in the public namespace, so we have a clash for the common - # functions we test. We remove every function tested by python's - # autoconf, hoping their own tests are correct - for f in OPTIONAL_STDFUNCS_MAYBE: - if config.check_decl(fname2def(f), - headers=["Python.h", "math.h"]): - OPTIONAL_STDFUNCS.remove(f) - - check_funcs(OPTIONAL_STDFUNCS) - - for h in OPTIONAL_HEADERS: - if config.check_func("", decl=False, call=False, headers=[h]): - moredefs.append((fname2def(h).replace(".", "_"), 1)) - - for tup in OPTIONAL_INTRINSICS: - headers = None - if len(tup) == 2: - f, args = tup - else: - f, args, headers = tup[0], tup[1], [tup[2]] - if config.check_func(f, decl=False, call=True, call_args=args, - headers=headers): - moredefs.append((fname2def(f), 1)) - - for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES: - if config.check_func(fn, decl='int %s %s(void *);' % (dec, fn), - call=False): - moredefs.append((fname2def(fn), 1)) - - for fn in OPTIONAL_VARIABLE_ATTRIBUTES: - if config.check_func(fn, decl='int %s a;' % (fn), call=False): - m = fn.replace("(", "_").replace(")", "_") - moredefs.append((fname2def(m), 1)) - - # C99 functions: float and long double versions - check_funcs(C99_FUNCS_SINGLE) - check_funcs(C99_FUNCS_EXTENDED) - -def check_complex(config, mathlibs): - priv = [] - pub = [] - - try: - if os.uname()[0] == "Interix": - warnings.warn("Disabling broken complex support. See #1365") - return priv, pub - except: - # os.uname not available on all platforms. blanket except ugly but safe - pass - - # Check for complex support - st = config.check_header('complex.h') - if st: - priv.append(('HAVE_COMPLEX_H', 1)) - pub.append(('NPY_USE_C99_COMPLEX', 1)) - - for t in C99_COMPLEX_TYPES: - st = config.check_type(t, headers=["complex.h"]) - if st: - pub.append(('NPY_HAVE_%s' % type2def(t), 1)) - - def check_prec(prec): - flist = [f + prec for f in C99_COMPLEX_FUNCS] - decl = dict([(f, True) for f in flist]) - if not config.check_funcs_once(flist, call=decl, decl=decl, - libraries=mathlibs): - for f in flist: - if config.check_func(f, call=True, decl=True, - libraries=mathlibs): - priv.append((fname2def(f), 1)) - else: - priv.extend([(fname2def(f), 1) for f in flist]) - - check_prec('') - check_prec('f') - check_prec('l') - - return priv, pub - -def check_ieee_macros(config): - priv = [] - pub = [] - - macros = [] - - def _add_decl(f): - priv.append(fname2def("decl_%s" % f)) - pub.append('NPY_%s' % fname2def("decl_%s" % f)) - - # XXX: hack to circumvent cpp pollution from python: python puts its - # config.h in the public namespace, so we have a clash for the common - # functions we test. We remove every function tested by python's - # autoconf, hoping their own tests are correct - _macros = ["isnan", "isinf", "signbit", "isfinite"] - for f in _macros: - py_symbol = fname2def("decl_%s" % f) - already_declared = config.check_decl(py_symbol, - headers=["Python.h", "math.h"]) - if already_declared: - if config.check_macro_true(py_symbol, - headers=["Python.h", "math.h"]): - pub.append('NPY_%s' % fname2def("decl_%s" % f)) - else: - macros.append(f) - # Normally, isnan and isinf are macros (C99), but some platforms only have - # the function, or both the function and macro versions. Check for the macro - # only, and define replacement ones if not found.
- # Note: including Python.h is necessary because it modifies some math.h - # definitions - for f in macros: - st = config.check_decl(f, headers = ["Python.h", "math.h"]) - if st: - _add_decl(f) - - return priv, pub - -def check_types(config_cmd, ext, build_dir): - private_defines = [] - public_defines = [] - - # Expected size (in number of bytes) for each type. This is an - # optimization: those are only hints, and an exhaustive search for the size - # is done if the hints are wrong. - expected = {} - expected['short'] = [2] - expected['int'] = [4] - expected['long'] = [8, 4] - expected['float'] = [4] - expected['double'] = [8] - expected['long double'] = [8, 12, 16] - expected['Py_intptr_t'] = [4, 8] - expected['PY_LONG_LONG'] = [8] - expected['long long'] = [8] - expected['off_t'] = [4, 8] - - # Check we have the python header (-dev* packages on Linux) - result = config_cmd.check_header('Python.h') - if not result: - raise SystemError( - "Cannot compile 'Python.h'. Perhaps you need to "\ - "install python-dev|python-devel.") - res = config_cmd.check_header("endian.h") - if res: - private_defines.append(('HAVE_ENDIAN_H', 1)) - public_defines.append(('NPY_HAVE_ENDIAN_H', 1)) - - # Check basic types sizes - for type in ('short', 'int', 'long'): - res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"]) - if res: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type))) - else: - res = config_cmd.check_type_size(type, expected=expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - for type in ('float', 'double', 'long double'): - already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), - headers = ["Python.h"]) - res = config_cmd.check_type_size(type, expected=expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - if not already_declared and not type == 'long double': - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - # Compute size of corresponding complex type: used to check that our - # definition is binary compatible with C99 complex type (check done at - # build time in npy_common.h) - complex_def = "struct {%s __x; %s __y;}" % (type, type) - res = config_cmd.check_type_size(complex_def, expected=2*expected[type]) - if res >= 0: - public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % complex_def) - - - for type in ('Py_intptr_t', 'off_t'): - res = config_cmd.check_type_size(type, headers=["Python.h"], - library_dirs=[pythonlib_dir()], - expected=expected[type]) - - if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % type) - - # We check declaration AND type because that's how distutils does it. 
- if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): - res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], - library_dirs=[pythonlib_dir()], - expected=expected['PY_LONG_LONG']) - if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') - - res = config_cmd.check_type_size('long long', - expected=expected['long long']) - if res >= 0: - #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) - else: - raise SystemError("Checking sizeof (%s) failed !" % 'long long') - - if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): - raise RuntimeError( - "Config wo CHAR_BIT is not supported"\ - ", please contact the maintainers") - - return private_defines, public_defines - -def check_mathlib(config_cmd): - # Testing the C math library - mathlibs = [] - mathlibs_choices = [[], ['m'], ['cpml']] - mathlib = os.environ.get('MATHLIB') - if mathlib: - mathlibs_choices.insert(0, mathlib.split(',')) - for libs in mathlibs_choices: - if config_cmd.check_func("exp", libraries=libs, decl=True, call=True): - mathlibs = libs - break - else: - raise EnvironmentError("math library missing; rerun " - "setup.py after setting the " - "MATHLIB env variable") - return mathlibs - -def visibility_define(config): - """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty - string).""" - if config.check_compiler_gcc4(): - return '__attribute__((visibility("hidden")))' - else: - return '' - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration, dot_join - from numpy.distutils.system_info import get_info, default_lib_dirs - - config = Configuration('core', parent_package, top_path) - local_dir = config.local_path - codegen_dir = join(local_dir, 'code_generators') - - if is_released(config): - warnings.simplefilter('error', MismatchCAPIWarning) - - # Check whether we have a mismatch between the set C API VERSION and the - # actual C API VERSION - check_api_version(C_API_VERSION, codegen_dir) - - generate_umath_py = join(codegen_dir, 'generate_umath.py') - n = dot_join(config.name, 'generate_umath') - generate_umath = imp.load_module('_'.join(n.split('.')), - open(generate_umath_py, 'U'), generate_umath_py, - ('.py', 'U', 1)) - - header_dir = 'include/numpy' # this is relative to config.path_in_package - - cocache = CallOnceOnly() - - def generate_config_h(ext, build_dir): - target = join(build_dir, header_dir, 'config.h') - d = os.path.dirname(target) - if not os.path.exists(d): - os.makedirs(d) - - if newer(__file__, target): - config_cmd = config.get_config_cmd() - log.info('Generating %s', target) - - # Check sizeof - moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) - - # Check math library and C99 math funcs availability - mathlibs = check_mathlib(config_cmd) - moredefs.append(('MATHLIB', ','.join(mathlibs))) - - check_math_capabilities(config_cmd, moredefs, mathlibs) - moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) - moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) - - # Signal check - if is_npy_no_signal(): - moredefs.append('__NPY_PRIVATE_NO_SIGNAL') - - # Windows checks - if sys.platform=='win32' or os.name=='nt': - win32_checks(moredefs) - - # Inline check - inline = 
config_cmd.check_inline() - - # Check whether we need our own wide character support - if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']): - PYTHON_HAS_UNICODE_WIDE = True - else: - PYTHON_HAS_UNICODE_WIDE = False - - if ENABLE_SEPARATE_COMPILATION: - moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1)) - - if NPY_RELAXED_STRIDES_CHECKING: - moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) - - # Get long double representation - if sys.platform != 'darwin': - rep = check_long_double_representation(config_cmd) - if rep in ['INTEL_EXTENDED_12_BYTES_LE', - 'INTEL_EXTENDED_16_BYTES_LE', - 'MOTOROLA_EXTENDED_12_BYTES_BE', - 'IEEE_QUAD_LE', 'IEEE_QUAD_BE', - 'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE', - 'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']: - moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) - else: - raise ValueError("Unrecognized long double format: %s" % rep) - - # Py3K check - if sys.version_info[0] == 3: - moredefs.append(('NPY_PY3K', 1)) - - # Generate the config.h file from moredefs - target_f = open(target, 'w') - for d in moredefs: - if isinstance(d, str): - target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0], d[1])) - - # define inline to our keyword, or nothing - target_f.write('#ifndef __cplusplus\n') - if inline == 'inline': - target_f.write('/* #undef inline */\n') - else: - target_f.write('#define inline %s\n' % inline) - target_f.write('#endif\n') - - # add the guard to make sure config.h is never included directly, - # but always through npy_config.h - target_f.write(""" -#ifndef _NPY_NPY_CONFIG_H_ -#error config.h should never be included directly, include npy_config.h instead -#endif -""") - - target_f.close() - print('File:', target) - target_f = open(target) - print(target_f.read()) - target_f.close() - print('EOF') - else: - mathlibs = [] - target_f = open(target) - for line in target_f: - s = '#define MATHLIB' - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - target_f.close() - - # Ugly: this can be called within a library and not an extension, - # in which case there is no libraries attributes (and none is - # needed). 
- if hasattr(ext, 'libraries'): - ext.libraries.extend(mathlibs) - - incl_dir = os.path.dirname(target) - if incl_dir not in config.numpy_include_dirs: - config.numpy_include_dirs.append(incl_dir) - - return target - - def generate_numpyconfig_h(ext, build_dir): - """Depends on config.h: generate_config_h has to be called before!""" - # put private include directory in build_dir on search path - # allows using code generation in headers - config.add_include_dirs(join(build_dir, "src", "private")) - - target = join(build_dir, header_dir, '_numpyconfig.h') - d = os.path.dirname(target) - if not os.path.exists(d): - os.makedirs(d) - if newer(__file__, target): - config_cmd = config.get_config_cmd() - log.info('Generating %s', target) - - # Check sizeof - ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) - - if is_npy_no_signal(): - moredefs.append(('NPY_NO_SIGNAL', 1)) - - if is_npy_no_smp(): - moredefs.append(('NPY_NO_SMP', 1)) - else: - moredefs.append(('NPY_NO_SMP', 0)) - - mathlibs = check_mathlib(config_cmd) - moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) - moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1]) - - if ENABLE_SEPARATE_COMPILATION: - moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1)) - - if NPY_RELAXED_STRIDES_CHECKING: - moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) - - # Check whether we can use inttypes (C99) formats - if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']): - moredefs.append(('NPY_USE_C99_FORMATS', 1)) - - # visibility check - hidden_visibility = visibility_define(config_cmd) - moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility)) - - # Add the C API/ABI versions - moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION)) - moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION)) - - # Add moredefs to header - target_f = open(target, 'w') - for d in moredefs: - if isinstance(d, str): - target_f.write('#define %s\n' % (d)) - else: - target_f.write('#define %s %s\n' % (d[0], d[1])) - - # Define __STDC_FORMAT_MACROS - target_f.write(""" -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS 1 -#endif -""") - target_f.close() - - # Dump the numpyconfig.h header to stdout - print('File: %s' % target) - target_f = open(target) - print(target_f.read()) - target_f.close() - print('EOF') - config.add_data_files((header_dir, target)) - return target - - def generate_api_func(module_name): - def generate_api(ext, build_dir): - script = join(codegen_dir, module_name + '.py') - sys.path.insert(0, codegen_dir) - try: - m = __import__(module_name) - log.info('executing %s', script) - h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir)) - finally: - del sys.path[0] - config.add_data_files((header_dir, h_file), - (header_dir, doc_file)) - return (h_file,) - return generate_api - - generate_numpy_api = generate_api_func('generate_numpy_api') - generate_ufunc_api = generate_api_func('generate_ufunc_api') - - config.add_include_dirs(join(local_dir, "src", "private")) - config.add_include_dirs(join(local_dir, "src")) - config.add_include_dirs(join(local_dir)) - - config.add_data_files('include/numpy/*.h') - config.add_include_dirs(join('src', 'npymath')) - config.add_include_dirs(join('src', 'multiarray')) - config.add_include_dirs(join('src', 'umath')) - config.add_include_dirs(join('src', 'npysort')) - - config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")]) - config.add_define_macros([("_FILE_OFFSET_BITS", "64")]) - config.add_define_macros([('_LARGEFILE_SOURCE', '1')]) - config.add_define_macros([('_LARGEFILE64_SOURCE', '1')]) - - config.numpy_include_dirs.extend(config.paths('include')) - - deps = [join('src', 'npymath', '_signbit.c'), - join('include', 'numpy', '*object.h'), - 'include/numpy/fenv/fenv.c', - 'include/numpy/fenv/fenv.h', - join(codegen_dir, 'genapi.py'), - ] - - # Don't install fenv unless we need it. - if sys.platform == 'cygwin': - config.add_data_dir('include/numpy/fenv') - - ####################################################################### - # dummy module # - ####################################################################### - - # npymath needs the config.h and numpyconfig.h files to be generated, but - # build_clib cannot handle generate_config_h and generate_numpyconfig_h - # (don't ask). Because clibs are generated before extensions, we have to - # explicitly add an extension which has generate_config_h and - # generate_numpyconfig_h as sources *before* adding npymath. - - config.add_extension('_dummy', - sources = [join('src', 'dummymodule.c'), - generate_config_h, - generate_numpyconfig_h, - generate_numpy_api] - ) - - ####################################################################### - # npymath library # - ####################################################################### - - subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")]) - def get_mathlib_info(*args): - # Another ugly hack: the mathlib info is known once build_src is run, - # but we cannot use add_installed_pkg_config here either, so we only - # update the substitution dictionary during npymath build - config_cmd = config.get_config_cmd() - - # Check that the toolchain works, to fail early if it doesn't - # (avoid late errors with MATHLIB which are confusing if the - # compiler does not work). 
- st = config_cmd.try_link('int main(void) { return 0;}') - if not st: - raise RuntimeError("Broken toolchain: cannot link a simple C program") - mlibs = check_mathlib(config_cmd) - - posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) - msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs]) - subst_dict["posix_mathlib"] = posix_mlib - subst_dict["msvc_mathlib"] = msvc_mlib - - npymath_sources = [join('src', 'npymath', 'npy_math.c.src'), - join('src', 'npymath', 'ieee754.c.src'), - join('src', 'npymath', 'npy_math_complex.c.src'), - join('src', 'npymath', 'halffloat.c')] - config.add_installed_library('npymath', - sources=npymath_sources + [get_mathlib_info], - install_dir='lib') - config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", - subst_dict) - config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", - subst_dict) - - ####################################################################### - # npysort library # - ####################################################################### - - # This library is created for the build but it is not installed - npysort_sources=[join('src', 'npysort', 'quicksort.c.src'), - join('src', 'npysort', 'mergesort.c.src'), - join('src', 'npysort', 'heapsort.c.src'), - join('src', 'private', 'npy_partition.h.src'), - join('src', 'npysort', 'selection.c.src'), - join('src', 'private', 'npy_binsearch.h.src'), - join('src', 'npysort', 'binsearch.c.src'), - ] - config.add_library('npysort', - sources=npysort_sources, - include_dirs=[]) - - - ####################################################################### - # multiarray module # - ####################################################################### - - # Multiarray version: this function is needed to build foo.c from foo.c.src - # when foo.c is included in another file and as such not in the src - # argument of build_ext command - def generate_multiarray_templated_sources(ext, build_dir): - from numpy.distutils.misc_util import get_cmd - - subpath = join('src', 'multiarray') - sources = [join(local_dir, subpath, 'scalartypes.c.src'), - join(local_dir, subpath, 'arraytypes.c.src'), - join(local_dir, subpath, 'nditer_templ.c.src'), - join(local_dir, subpath, 'lowlevel_strided_loops.c.src'), - join(local_dir, subpath, 'einsum.c.src')] - - # numpy.distutils generate .c from .c.src in weird directories, we have - # to add them there as they depend on the build_dir - config.add_include_dirs(join(build_dir, subpath)) - cmd = get_cmd('build_src') - cmd.ensure_finalized() - cmd.template_sources(sources, ext) - - multiarray_deps = [ - join('src', 'multiarray', 'arrayobject.h'), - join('src', 'multiarray', 'arraytypes.h'), - join('src', 'multiarray', 'array_assign.h'), - join('src', 'multiarray', 'buffer.h'), - join('src', 'multiarray', 'calculation.h'), - join('src', 'multiarray', 'common.h'), - join('src', 'multiarray', 'convert_datatype.h'), - join('src', 'multiarray', 'convert.h'), - join('src', 'multiarray', 'conversion_utils.h'), - join('src', 'multiarray', 'ctors.h'), - join('src', 'multiarray', 'descriptor.h'), - join('src', 'multiarray', 'getset.h'), - join('src', 'multiarray', 'hashdescr.h'), - join('src', 'multiarray', 'iterators.h'), - join('src', 'multiarray', 'mapping.h'), - join('src', 'multiarray', 'methods.h'), - join('src', 'multiarray', 'multiarraymodule.h'), - join('src', 'multiarray', 'nditer_impl.h'), - join('src', 'multiarray', 'numpymemoryview.h'), - join('src', 'multiarray', 'number.h'), - join('src', 'multiarray', 'numpyos.h'), - join('src', 'multiarray', 
'refcount.h'), - join('src', 'multiarray', 'scalartypes.h'), - join('src', 'multiarray', 'sequence.h'), - join('src', 'multiarray', 'shape.h'), - join('src', 'multiarray', 'ucsnarrow.h'), - join('src', 'multiarray', 'usertypes.h'), - join('src', 'private', 'lowlevel_strided_loops.h'), - join('include', 'numpy', 'arrayobject.h'), - join('include', 'numpy', '_neighborhood_iterator_imp.h'), - join('include', 'numpy', 'npy_endian.h'), - join('include', 'numpy', 'arrayscalars.h'), - join('include', 'numpy', 'noprefix.h'), - join('include', 'numpy', 'npy_interrupt.h'), - join('include', 'numpy', 'npy_3kcompat.h'), - join('include', 'numpy', 'npy_math.h'), - join('include', 'numpy', 'halffloat.h'), - join('include', 'numpy', 'npy_common.h'), - join('include', 'numpy', 'npy_os.h'), - join('include', 'numpy', 'utils.h'), - join('include', 'numpy', 'ndarrayobject.h'), - join('include', 'numpy', 'npy_cpu.h'), - join('include', 'numpy', 'numpyconfig.h'), - join('include', 'numpy', 'ndarraytypes.h'), - join('include', 'numpy', 'npy_1_7_deprecated_api.h'), - join('include', 'numpy', '_numpyconfig.h.in'), - # add library sources as distutils does not consider library - # dependencies - ] + npysort_sources + npymath_sources - - multiarray_src = [ - join('src', 'multiarray', 'alloc.c'), - join('src', 'multiarray', 'arrayobject.c'), - join('src', 'multiarray', 'arraytypes.c.src'), - join('src', 'multiarray', 'array_assign.c'), - join('src', 'multiarray', 'array_assign_scalar.c'), - join('src', 'multiarray', 'array_assign_array.c'), - join('src', 'multiarray', 'buffer.c'), - join('src', 'multiarray', 'calculation.c'), - join('src', 'multiarray', 'common.c'), - join('src', 'multiarray', 'convert.c'), - join('src', 'multiarray', 'convert_datatype.c'), - join('src', 'multiarray', 'conversion_utils.c'), - join('src', 'multiarray', 'ctors.c'), - join('src', 'multiarray', 'datetime.c'), - join('src', 'multiarray', 'datetime_strings.c'), - join('src', 'multiarray', 'datetime_busday.c'), - join('src', 'multiarray', 'datetime_busdaycal.c'), - join('src', 'multiarray', 'descriptor.c'), - join('src', 'multiarray', 'dtype_transfer.c'), - join('src', 'multiarray', 'einsum.c.src'), - join('src', 'multiarray', 'flagsobject.c'), - join('src', 'multiarray', 'getset.c'), - join('src', 'multiarray', 'hashdescr.c'), - join('src', 'multiarray', 'item_selection.c'), - join('src', 'multiarray', 'iterators.c'), - join('src', 'multiarray', 'lowlevel_strided_loops.c.src'), - join('src', 'multiarray', 'mapping.c'), - join('src', 'multiarray', 'methods.c'), - join('src', 'multiarray', 'multiarraymodule.c'), - join('src', 'multiarray', 'nditer_templ.c.src'), - join('src', 'multiarray', 'nditer_api.c'), - join('src', 'multiarray', 'nditer_constr.c'), - join('src', 'multiarray', 'nditer_pywrap.c'), - join('src', 'multiarray', 'number.c'), - join('src', 'multiarray', 'numpymemoryview.c'), - join('src', 'multiarray', 'numpyos.c'), - join('src', 'multiarray', 'refcount.c'), - join('src', 'multiarray', 'sequence.c'), - join('src', 'multiarray', 'shape.c'), - join('src', 'multiarray', 'scalarapi.c'), - join('src', 'multiarray', 'scalartypes.c.src'), - join('src', 'multiarray', 'usertypes.c'), - join('src', 'multiarray', 'ucsnarrow.c')] - - - if not ENABLE_SEPARATE_COMPILATION: - multiarray_deps.extend(multiarray_src) - multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')] - multiarray_src.append(generate_multiarray_templated_sources) - - config.add_extension('multiarray', - sources = multiarray_src + - [generate_config_h, 
- generate_numpyconfig_h, - generate_numpy_api, - join(codegen_dir, 'generate_numpy_api.py'), - join('*.py')], - depends = deps + multiarray_deps, - libraries = ['npymath', 'npysort']) - - ####################################################################### - # umath module # - ####################################################################### - - # umath version: this function is needed to build foo.c from foo.c.src - # when foo.c is included in another file and as such not in the src - # argument of build_ext command - def generate_umath_templated_sources(ext, build_dir): - from numpy.distutils.misc_util import get_cmd - - subpath = join('src', 'umath') - sources = [ - join(local_dir, subpath, 'loops.h.src'), - join(local_dir, subpath, 'loops.c.src'), - join(local_dir, subpath, 'simd.inc.src')] - - # numpy.distutils generate .c from .c.src in weird directories, we have - # to add them there as they depend on the build_dir - config.add_include_dirs(join(build_dir, subpath)) - cmd = get_cmd('build_src') - cmd.ensure_finalized() - cmd.template_sources(sources, ext) - - - def generate_umath_c(ext, build_dir): - target = join(build_dir, header_dir, '__umath_generated.c') - dir = os.path.dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) - script = generate_umath_py - if newer(script, target): - f = open(target, 'w') - f.write(generate_umath.make_code(generate_umath.defdict, - generate_umath.__file__)) - f.close() - return [] - - umath_src = [ - join('src', 'umath', 'umathmodule.c'), - join('src', 'umath', 'reduction.c'), - join('src', 'umath', 'funcs.inc.src'), - join('src', 'umath', 'simd.inc.src'), - join('src', 'umath', 'loops.h.src'), - join('src', 'umath', 'loops.c.src'), - join('src', 'umath', 'ufunc_object.c'), - join('src', 'umath', 'ufunc_type_resolution.c')] - - umath_deps = [ - generate_umath_py, - join('src', 'multiarray', 'common.h'), - join('src', 'umath', 'simd.inc.src'), - join(codegen_dir, 'generate_ufunc_api.py'), - join('src', 'private', 'ufunc_override.h')] + npymath_sources - - if not ENABLE_SEPARATE_COMPILATION: - umath_deps.extend(umath_src) - umath_src = [join('src', 'umath', 'umathmodule_onefile.c')] - umath_src.append(generate_umath_templated_sources) - umath_src.append(join('src', 'umath', 'funcs.inc.src')) - umath_src.append(join('src', 'umath', 'simd.inc.src')) - - config.add_extension('umath', - sources = umath_src + - [generate_config_h, - generate_numpyconfig_h, - generate_umath_c, - generate_ufunc_api], - depends = deps + umath_deps, - libraries = ['npymath'], - ) - - ####################################################################### - # scalarmath module # - ####################################################################### - - config.add_extension('scalarmath', - sources = [join('src', 'scalarmathmodule.c.src'), - join('src', 'private', 'scalarmathmodule.h.src'), - generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - generate_ufunc_api], - depends = deps + npymath_sources, - libraries = ['npymath'], - ) - - ####################################################################### - # _dotblas module # - ####################################################################### - - # Configure blasdot - blas_info = get_info('blas_opt', 0) - #blas_info = {} - def get_dotblas_sources(ext, build_dir): - if blas_info: - if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []): - return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient. 
- return ext.depends[:1] - return None # no extension module will be built - - config.add_extension('_dotblas', - sources = [get_dotblas_sources], - depends = [join('blasdot', '_dotblas.c'), - join('blasdot', 'cblas.h'), - ], - include_dirs = ['blasdot'], - extra_info = blas_info - ) - - ####################################################################### - # umath_tests module # - ####################################################################### - - config.add_extension('umath_tests', - sources = [join('src', 'umath', 'umath_tests.c.src')]) - - ####################################################################### - # custom rational dtype module # - ####################################################################### - - config.add_extension('test_rational', - sources = [join('src', 'umath', 'test_rational.c.src')]) - - ####################################################################### - # struct_ufunc_test module # - ####################################################################### - - config.add_extension('struct_ufunc_test', - sources = [join('src', 'umath', 'struct_ufunc_test.c.src')]) - - ####################################################################### - # multiarray_tests module # - ####################################################################### - - config.add_extension('multiarray_tests', - sources = [join('src', 'multiarray', 'multiarray_tests.c.src')]) - - ####################################################################### - # operand_flag_tests module # - ####################################################################### - - config.add_extension('operand_flag_tests', - sources = [join('src', 'umath', 'operand_flag_tests.c.src')]) - - config.add_data_dir('tests') - config.add_data_dir('tests/data') - - config.make_svn_version_py() - - return config - -if __name__=='__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup_common.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup_common.py deleted file mode 100644 index be5673a478733..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/setup_common.py +++ /dev/null @@ -1,321 +0,0 @@ -from __future__ import division, absolute_import, print_function - -# Code common to build tools -import sys -from os.path import join -import warnings -import copy -import binascii - -from distutils.ccompiler import CompileError - -#------------------- -# Versioning support -#------------------- -# How to change C_API_VERSION? -# - increase C_API_VERSION value -# - record the hash for the new C API with the script cversions.py -# and add the hash to cversions.txt -# The hash values are used to remind developers when the C API number was not -# updated; this generates a MismatchCAPIWarning, which is turned into an -# exception for released versions. - -# Binary compatibility version number. This number is increased whenever the -# C-API is changed such that binary compatibility is broken, i.e. whenever a -# recompile of extension modules is needed. -C_ABI_VERSION = 0x01000009 - -# Minor API version. This number is increased whenever a change is made to the -# C-API -- whether it breaks binary compatibility or not. Some changes, such -# as adding a function pointer to the end of the function table, can be made -# without breaking binary compatibility. In this case, only the C_API_VERSION -# (*not* C_ABI_VERSION) would be increased.
Whenever binary compatibility is -# broken, both C_API_VERSION and C_ABI_VERSION should be increased. -# -# 0x00000008 - 1.7.x -# 0x00000009 - 1.8.x -# 0x00000009 - 1.9.x -C_API_VERSION = 0x00000009 - -class MismatchCAPIWarning(Warning): - pass - -def is_released(config): - """Return True if a released version of numpy is detected.""" - from distutils.version import LooseVersion - - v = config.get_version('../version.py') - if v is None: - raise ValueError("Could not get version") - pv = LooseVersion(vstring=v).version - if len(pv) > 3: - return False - return True - -def get_api_versions(apiversion, codegen_dir): - """Return the current C API checksum and the recorded checksum for the - given C API version.""" - api_files = [join(codegen_dir, 'numpy_api_order.txt'), - join(codegen_dir, 'ufunc_api_order.txt')] - - # Compute the hash of the current API as defined in the .txt files in - # code_generators - sys.path.insert(0, codegen_dir) - try: - m = __import__('genapi') - numpy_api = __import__('numpy_api') - curapi_hash = m.fullapi_hash(numpy_api.full_api) - apis_hash = m.get_versions_hash() - finally: - del sys.path[0] - - return curapi_hash, apis_hash[apiversion] - -def check_api_version(apiversion, codegen_dir): - """Emits a MismatchCAPIWarning if the C API version needs updating.""" - curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir) - - # If different hash, it means that the api .txt files in - # codegen_dir have been updated without the API version being - # updated. Any modification in those .txt files should be reflected - # in the api and eventually abi versions. - # To compute the checksum of the current API, use - # code_generators/cversions.py script - if not curapi_hash == api_hash: - msg = "API mismatch detected, the C API version " \ - "numbers have to be updated. Current C api version is %d, " \ - "with checksum %s, but recorded checksum for C API version %d in " \ - "codegen_dir/cversions.txt is %s. If functions were added in the " \ - "C API, you have to update C_API_VERSION in %s." - warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, - __file__), - MismatchCAPIWarning) -# Mandatory functions: if not found, fail the build -MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", - "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", - "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] - -# Standard functions which may not be available and for which we have a -# replacement implementation. Note that some of these are C99 functions.
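A usage sketch (mine, not part of the patch): the hash check above is what release builds promote to a hard failure, per the "turned into an exception for released versions" comment. Assuming setup_common is importable as numpy.core.setup_common and that the second argument points at numpy's code_generators directory (both are my assumptions), the warning can be escalated like this:

    import warnings
    from numpy.core.setup_common import (C_API_VERSION, MismatchCAPIWarning,
                                         check_api_version)

    with warnings.catch_warnings():
        # Promote the mismatch warning to an exception, as done for releases.
        warnings.simplefilter('error', MismatchCAPIWarning)
        check_api_version(C_API_VERSION, 'numpy/core/code_generators')  # path is an assumption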
-OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", - "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", - "copysign", "nextafter", "ftello", "fseeko", - "strtoll", "strtoull"] - - -OPTIONAL_HEADERS = [ -# sse headers only enabled automatically on amd64/x32 builds - "xmmintrin.h", # SSE - "emmintrin.h", # SSE2 -] - -# optional gcc compiler builtins and their call arguments and optional a -# required header -# call arguments are required as the compiler will do strict signature checking -OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'), - ("__builtin_isinf", '5.'), - ("__builtin_isfinite", '5.'), - ("__builtin_bswap32", '5u'), - ("__builtin_bswap64", '5u'), - ("__builtin_expect", '5, 0'), - ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE - ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2 - ] - -# function attributes -# tested via "int %s %s(void *);" % (attribute, name) -# function name will be converted to HAVE_ preprocessor macro -OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))', - 'attribute_optimize_unroll_loops'), - ('__attribute__((optimize("O3")))', - 'attribute_optimize_opt_3'), - ('__attribute__((nonnull (1)))', - 'attribute_nonnull'), - ] - -# variable attributes tested via "int %s a" % attribute -OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"] - -# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h -OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot", - "copysign", "ftello", "fseeko"] - -# C99 functions: float and long double versions -C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", - "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", - "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", - "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', - "exp2", "log2", "copysign", "nextafter"] - -C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] -C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] - -C99_COMPLEX_TYPES = ['complex double', 'complex float', 'complex long double'] - -C99_COMPLEX_FUNCS = ['creal', 'cimag', 'cabs', 'carg', 'cexp', 'csqrt', 'clog', - 'ccos', 'csin', 'cpow'] - -def fname2def(name): - return "HAVE_%s" % name.upper() - -def sym2def(symbol): - define = symbol.replace(' ', '') - return define.upper() - -def type2def(symbol): - define = symbol.replace(' ', '_') - return define.upper() - -# Code to detect long double representation taken from MPFR m4 macro -def check_long_double_representation(cmd): - cmd._check_compiler() - body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} - - # We need to use _compile because we need the object filename - src, object = cmd._compile(body, None, None, 'c') - try: - type = long_double_representation(pyod(object)) - return type - finally: - cmd._clean() - -LONG_DOUBLE_REPRESENTATION_SRC = r""" -/* "before" is 16 bytes to ensure there's no padding between it and "x". - * We're not expecting any "long double" bigger than 16 bytes or with - * alignment requirements stricter than 16 bytes. */ -typedef %(type)s test_type; - -struct { - char before[16]; - test_type x; - char after[8]; -} foo = { - { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', - '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, - -123456789.0, - { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } -}; -""" - -def pyod(filename): - """Python implementation of the od UNIX utility (od -b, more exactly). 
- - Parameters - ---------- - filename : str - name of the file to get the dump from. - - Returns - ------- - out : seq - list of lines of od output - - Note - ---- - We only implement enough to get the necessary information for long double - representation, this is not intended as a compatible replacement for od. - """ - def _pyod2(): - out = [] - - fid = open(filename, 'rb') - try: - yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()] - for i in range(0, len(yo), 16): - line = ['%07d' % int(oct(i))] - line.extend(['%03d' % c for c in yo[i:i+16]]) - out.append(" ".join(line)) - return out - finally: - fid.close() - - def _pyod3(): - out = [] - - fid = open(filename, 'rb') - try: - yo2 = [oct(o)[2:] for o in fid.read()] - for i in range(0, len(yo2), 16): - line = ['%07d' % int(oct(i)[2:])] - line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) - out.append(" ".join(line)) - return out - finally: - fid.close() - - if sys.version_info[0] < 3: - return _pyod2() - else: - return _pyod3() - -_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', - '001', '043', '105', '147', '211', '253', '315', '357'] -_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] - -_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] -_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] -_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', - '031', '300', '000', '000'] -_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', - '031', '300', '000', '000', '000', '000', '000', '000'] -_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171', - '242', '240', '000', '000', '000', '000'] -_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', - '000', '000', '000', '000', '000', '000', '000', '000'] -_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] -_DOUBLE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] + \ - ['000'] * 8 -_DOUBLE_DOUBLE_LE = ['000', '000', '000', '124', '064', '157', '235', '301'] + \ - ['000'] * 8 - -def long_double_representation(lines): - """Given a binary dump as given by GNU od -b, look for long double - representation.""" - - # Read contains a list of 32 items, each item is a byte (in octal - # representation, as a string). 
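For illustration (my sketch, Python 3 only; od_b is a hypothetical stand-in for the pyod() helper above): the dump format is an octal offset followed by one three-digit octal value per byte, and those octal triplets are exactly what the _BEFORE_SEQ/_AFTER_SEQ marker lists are matched against. Note how the sample output reproduces the '001 043 105 147' marker bytes planted in the "before" array of the C source above:

    def od_b(data, width=16):
        # Minimal od -b style dump of a bytes object.
        out = []
        for i in range(0, len(data), width):
            line = ['%07o' % i]                                   # octal offset
            line.extend(['%03o' % b for b in data[i:i + width]])  # octal bytes
            out.append(' '.join(line))
        return out

    print(od_b(b'\x01\x23\x45\x67'))  # ['0000000 001 043 105 147']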
We 'slide' over the output until read is of - # the form before_seq + content + after_sequence, where content is the long double - # representation: - # - content is 12 bytes: 80 bits Intel representation - # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision - # - content is 8 bytes: same as double (not implemented yet) - read = [''] * 32 - saw = None - for line in lines: - # we skip the first word, as od -b output an index at the beginning of - # each line - for w in line.split()[1:]: - read.pop(0) - read.append(w) - - # If the end of read is equal to the after_sequence, read contains - # the long double - if read[-8:] == _AFTER_SEQ: - saw = copy.copy(read) - if read[:12] == _BEFORE_SEQ[4:]: - if read[12:-8] == _INTEL_EXTENDED_12B: - return 'INTEL_EXTENDED_12_BYTES_LE' - if read[12:-8] == _MOTOROLA_EXTENDED_12B: - return 'MOTOROLA_EXTENDED_12_BYTES_BE' - elif read[:8] == _BEFORE_SEQ[8:]: - if read[8:-8] == _INTEL_EXTENDED_16B: - return 'INTEL_EXTENDED_16_BYTES_LE' - elif read[8:-8] == _IEEE_QUAD_PREC_BE: - return 'IEEE_QUAD_BE' - elif read[8:-8] == _IEEE_QUAD_PREC_LE: - return 'IEEE_QUAD_LE' - elif read[8:-8] == _DOUBLE_DOUBLE_BE: - return 'DOUBLE_DOUBLE_BE' - elif read[8:-8] == _DOUBLE_DOUBLE_LE: - return 'DOUBLE_DOUBLE_LE' - elif read[:16] == _BEFORE_SEQ: - if read[16:-8] == _IEEE_DOUBLE_LE: - return 'IEEE_DOUBLE_LE' - elif read[16:-8] == _IEEE_DOUBLE_BE: - return 'IEEE_DOUBLE_BE' - - if saw is not None: - raise ValueError("Unrecognized format (%s)" % saw) - else: - # We never detected the after_sequence - raise ValueError("Could not lock sequences (%s)" % saw) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/shape_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/shape_base.py deleted file mode 100644 index ae684fb423949..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/shape_base.py +++ /dev/null @@ -1,277 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'vstack', 'hstack'] - -from . import numeric as _nx -from .numeric import array, asanyarray, newaxis - -def atleast_1d(*arys): - """ - Convert inputs to arrays with at least one dimension. - - Scalar inputs are converted to 1-dimensional arrays, whilst - higher-dimensional inputs are preserved. - - Parameters - ---------- - arys1, arys2, ... : array_like - One or more input arrays. - - Returns - ------- - ret : ndarray - An array, or sequence of arrays, each with ``a.ndim >= 1``. - Copies are made only if necessary. - - See Also - -------- - atleast_2d, atleast_3d - - Examples - -------- - >>> np.atleast_1d(1.0) - array([ 1.]) - - >>> x = np.arange(9.0).reshape(3,3) - >>> np.atleast_1d(x) - array([[ 0., 1., 2.], - [ 3., 4., 5.], - [ 6., 7., 8.]]) - >>> np.atleast_1d(x) is x - True - - >>> np.atleast_1d(1, [3, 4]) - [array([1]), array([3, 4])] - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if len(ary.shape) == 0 : - result = ary.reshape(1) - else : - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - -def atleast_2d(*arys): - """ - View inputs as arrays with at least two dimensions. - - Parameters - ---------- - arys1, arys2, ... : array_like - One or more array-like sequences. Non-array inputs are converted - to arrays. Arrays that already have two or more dimensions are - preserved. - - Returns - ------- - res, res2, ... : ndarray - An array, or tuple of arrays, each with ``a.ndim >= 2``. 
- Copies are avoided where possible, and views with two or more - dimensions are returned. - - See Also - -------- - atleast_1d, atleast_3d - - Examples - -------- - >>> np.atleast_2d(3.0) - array([[ 3.]]) - - >>> x = np.arange(3.0) - >>> np.atleast_2d(x) - array([[ 0., 1., 2.]]) - >>> np.atleast_2d(x).base is x - True - - >>> np.atleast_2d(1, [1, 2], [[1, 2]]) - [array([[1]]), array([[1, 2]]), array([[1, 2]])] - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if len(ary.shape) == 0 : - result = ary.reshape(1, 1) - elif len(ary.shape) == 1 : - result = ary[newaxis,:] - else : - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - -def atleast_3d(*arys): - """ - View inputs as arrays with at least three dimensions. - - Parameters - ---------- - arys1, arys2, ... : array_like - One or more array-like sequences. Non-array inputs are converted to - arrays. Arrays that already have three or more dimensions are - preserved. - - Returns - ------- - res1, res2, ... : ndarray - An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are - avoided where possible, and views with three or more dimensions are - returned. For example, a 1-D array of shape ``(N,)`` becomes a view - of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a - view of shape ``(M, N, 1)``. - - See Also - -------- - atleast_1d, atleast_2d - - Examples - -------- - >>> np.atleast_3d(3.0) - array([[[ 3.]]]) - - >>> x = np.arange(3.0) - >>> np.atleast_3d(x).shape - (1, 3, 1) - - >>> x = np.arange(12.0).reshape(4,3) - >>> np.atleast_3d(x).shape - (4, 3, 1) - >>> np.atleast_3d(x).base is x - True - - >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): - ... print arr, arr.shape - ... - [[[1] - [2]]] (1, 2, 1) - [[[1] - [2]]] (1, 2, 1) - [[[1 2]]] (1, 1, 2) - - """ - res = [] - for ary in arys: - ary = asanyarray(ary) - if len(ary.shape) == 0: - result = ary.reshape(1, 1, 1) - elif len(ary.shape) == 1: - result = ary[newaxis,:, newaxis] - elif len(ary.shape) == 2: - result = ary[:,:, newaxis] - else: - result = ary - res.append(result) - if len(res) == 1: - return res[0] - else: - return res - - -def vstack(tup): - """ - Stack arrays in sequence vertically (row wise). - - Take a sequence of arrays and stack them vertically to make a single - array. Rebuild arrays divided by `vsplit`. - - Parameters - ---------- - tup : sequence of ndarrays - Tuple containing arrays to be stacked. The arrays must have the same - shape along all but the first axis. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays. - - See Also - -------- - hstack : Stack arrays in sequence horizontally (column wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - concatenate : Join a sequence of arrays together. - vsplit : Split array into a list of multiple sub-arrays vertically. - - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that - are at least 2-dimensional. - - Examples - -------- - >>> a = np.array([1, 2, 3]) - >>> b = np.array([2, 3, 4]) - >>> np.vstack((a,b)) - array([[1, 2, 3], - [2, 3, 4]]) - - >>> a = np.array([[1], [2], [3]]) - >>> b = np.array([[2], [3], [4]]) - >>> np.vstack((a,b)) - array([[1], - [2], - [3], - [2], - [3], - [4]]) - - """ - return _nx.concatenate([atleast_2d(_m) for _m in tup], 0) - -def hstack(tup): - """ - Stack arrays in sequence horizontally (column wise). - - Take a sequence of arrays and stack them horizontally to make - a single array. 
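As a quick cross-check of how this family composes (my sketch; plain numpy calls, nothing beyond the functions defined in this file): vstack concatenates 2-D views along axis 0, and hstack concatenates along axis 1 except in the 1-D case, where axis 0 is the "horizontal" one:

    import numpy as np

    a, b = np.array([1, 2, 3]), np.array([4, 5, 6])
    # vstack is concatenate over atleast_2d views along axis 0
    assert (np.vstack((a, b)) ==
            np.concatenate([np.atleast_2d(a), np.atleast_2d(b)], axis=0)).all()
    # hstack on 1-D inputs falls back to axis 0
    assert (np.hstack((a, b)) == np.concatenate([a, b], axis=0)).all()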
Rebuild arrays divided by `hsplit`. - - Parameters - ---------- - tup : sequence of ndarrays - All arrays must have the same shape along all but the second axis. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays. - - See Also - -------- - vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third axis). - concatenate : Join a sequence of arrays together. - hsplit : Split array along second axis. - - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=1)`` - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.hstack((a,b)) - array([1, 2, 3, 2, 3, 4]) - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.hstack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - - """ - arrs = [atleast_1d(_m) for _m in tup] - # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" - if arrs[0].ndim == 1: - return _nx.concatenate(arrs, 0) - else: - return _nx.concatenate(arrs, 1) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/struct_ufunc_test.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/struct_ufunc_test.py deleted file mode 100644 index 7eec360249bb9..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/struct_ufunc_test.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'struct_ufunc_test.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/test_rational.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/test_rational.py deleted file mode 100644 index 1fee9627eb0da..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/test_rational.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'test_rational.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/data/astype_copy.pkl b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/data/astype_copy.pkl deleted file mode 100644 index 7397c978297b3f64c7e6540b23f448f280e30bcc..0000000000000000000000000000000000000000
[GIT binary patch data omitted: the base85-encoded deletions of the binary test data files (astype_copy.pkl and, per the diffstat, recarray_from_file.fits) were unreadable here, and the header and opening lines of the numpy/core/tests/test_api.py diff were lost with them; the text resumes mid-way through that deleted file]
(2, 7, 5): - # This test fails for earlier versions of Python. - # Evidently a bug got fixed in 2.7.5. - dat = np.array(_buffer('1.0'), dtype=np.float64) - assert_equal(dat, [49.0, 46.0, 48.0]) - assert_(dat.dtype.type is np.float64) - - dat = np.array(_buffer(b'1.0')) - assert_equal(dat, [49, 46, 48]) - assert_(dat.dtype.type is np.uint8) - - # test memoryview, new version of buffer - _memoryview = builtins.get("memoryview") - if _memoryview: - dat = np.array(_memoryview(b'1.0'), dtype=np.float64) - assert_equal(dat, [49.0, 46.0, 48.0]) - assert_(dat.dtype.type is np.float64) - - dat = np.array(_memoryview(b'1.0')) - assert_equal(dat, [49, 46, 48]) - assert_(dat.dtype.type is np.uint8) - - # test array interface - a = np.array(100.0, dtype=np.float64) - o = type("o", (object,), - dict(__array_interface__=a.__array_interface__)) - assert_equal(np.array(o, dtype=np.float64), a) - - # test array_struct interface - a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], - dtype=[('f0', int), ('f1', float), ('f2', str)]) - o = type("o", (object,), - dict(__array_struct__=a.__array_struct__)) - ## wasn't what I expected... is np.array(o) supposed to equal a ? - ## instead we get a array([...], dtype=">V18") - assert_equal(str(np.array(o).data), str(a.data)) - - # test array - o = type("o", (object,), - dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))() - assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) - - # test recursion - nested = 1.5 - for i in range(np.MAXDIMS): - nested = [nested] - - # no error - np.array(nested) - - # Exceeds recursion limit - assert_raises(ValueError, np.array, [nested], dtype=np.float64) - - # Try with lists...
- assert_equal(np.array([None] * 10, dtype=np.float64), - np.full((10,), np.nan, dtype=np.float64)) - assert_equal(np.array([[None]] * 10, dtype=np.float64), - np.full((10, 1), np.nan, dtype=np.float64)) - assert_equal(np.array([[None] * 10], dtype=np.float64), - np.full((1, 10), np.nan, dtype=np.float64)) - assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), - np.full((10, 10), np.nan, dtype=np.float64)) - - assert_equal(np.array([1.0] * 10, dtype=np.float64), - np.ones((10,), dtype=np.float64)) - assert_equal(np.array([[1.0]] * 10, dtype=np.float64), - np.ones((10, 1), dtype=np.float64)) - assert_equal(np.array([[1.0] * 10], dtype=np.float64), - np.ones((1, 10), dtype=np.float64)) - assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), - np.ones((10, 10), dtype=np.float64)) - - # Try with tuples - assert_equal(np.array((None,) * 10, dtype=np.float64), - np.full((10,), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,)] * 10, dtype=np.float64), - np.full((10, 1), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,) * 10], dtype=np.float64), - np.full((1, 10), np.nan, dtype=np.float64)) - assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), - np.full((10, 10), np.nan, dtype=np.float64)) - - assert_equal(np.array((1.0,) * 10, dtype=np.float64), - np.ones((10,), dtype=np.float64)) - assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), - np.ones((10, 1), dtype=np.float64)) - assert_equal(np.array([(1.0,) * 10], dtype=np.float64), - np.ones((1, 10), dtype=np.float64)) - assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), - np.ones((10, 10), dtype=np.float64)) - - -def test_fastCopyAndTranspose(): - # 0D array - a = np.array(2) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - - # 1D array - a = np.array([3, 2, 7, 0]) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - - # 2D array - a = np.arange(6).reshape(2, 3) - b = np.fastCopyAndTranspose(a) - assert_equal(b, a.T) - assert_(b.flags.owndata) - -def test_array_astype(): - a = np.arange(6, dtype='f4').reshape(2, 3) - # Default behavior: allows unsafe casts, keeps memory layout, - # always copies. 
- b = a.astype('i4') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('i4')) - assert_equal(a.strides, b.strides) - b = a.T.astype('i4') - assert_equal(a.T, b) - assert_equal(b.dtype, np.dtype('i4')) - assert_equal(a.T.strides, b.strides) - b = a.astype('f4') - assert_equal(a, b) - assert_(not (a is b)) - - # copy=False parameter can sometimes skip a copy - b = a.astype('f4', copy=False) - assert_(a is b) - - # order parameter allows overriding of the memory layout, - # forcing a copy if the layout is wrong - b = a.astype('f4', order='F', copy=False) - assert_equal(a, b) - assert_(not (a is b)) - assert_(b.flags.f_contiguous) - - b = a.astype('f4', order='C', copy=False) - assert_equal(a, b) - assert_(a is b) - assert_(b.flags.c_contiguous) - - # casting parameter allows catching bad casts - b = a.astype('c8', casting='safe') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('c8')) - - assert_raises(TypeError, a.astype, 'i4', casting='safe') - - # subok=False passes through a non-subclassed array - b = a.astype('f4', subok=0, copy=False) - assert_(a is b) - - a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') - - # subok=True passes through a matrix - b = a.astype('f4', subok=True, copy=False) - assert_(a is b) - - # subok=True is default, and creates a subtype on a cast - b = a.astype('i4', copy=False) - assert_equal(a, b) - assert_equal(type(b), np.matrix) - - # subok=False never returns a matrix - b = a.astype('f4', subok=False, copy=False) - assert_equal(a, b) - assert_(not (a is b)) - assert_(type(b) is not np.matrix) - - # Make sure converting from string object to fixed length string - # does not truncate. - a = np.array([b'a'*100], dtype='O') - b = a.astype('S') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('S100')) - a = np.array([sixu('a')*100], dtype='O') - b = a.astype('U') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('U100')) - - # Same test as above but for strings shorter than 64 characters - a = np.array([b'a'*10], dtype='O') - b = a.astype('S') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('S10')) - a = np.array([sixu('a')*10], dtype='O') - b = a.astype('U') - assert_equal(a, b) - assert_equal(b.dtype, np.dtype('U10')) - - a = np.array(123456789012345678901234567890, dtype='O').astype('S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array(123456789012345678901234567890, dtype='O').astype('U') - assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30')) - - a = np.array([123456789012345678901234567890], dtype='O').astype('S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array([123456789012345678901234567890], dtype='O').astype('U') - assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30')) - - a = np.array(123456789012345678901234567890, dtype='S') - assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) - a = np.array(123456789012345678901234567890, dtype='U') - assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30')) - - a = np.array(sixu('a\u0140'), dtype='U') - b = np.ndarray(buffer=a, dtype='uint32', shape=2) - assert_(b.size == 2) - - a = np.array([1000], dtype='i4') - assert_raises(TypeError, a.astype, 'S1', casting='safe') - - a = np.array(1000, dtype='i4') - assert_raises(TypeError, a.astype, 'U1', casting='safe') - -def test_copyto_fromscalar(): - a = np.arange(6, dtype='f4').reshape(2, 3) - - # Simple copy - np.copyto(a, 1.5) - assert_equal(a, 1.5) - np.copyto(a.T, 2.5) - assert_equal(a, 2.5) - - # Where-masked copy - mask = 
np.array([[0, 1, 0], [0, 0, 1]], dtype='?') - np.copyto(a, 3.5, where=mask) - assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) - mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') - np.copyto(a.T, 4.5, where=mask) - assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) - -def test_copyto(): - a = np.arange(6, dtype='i4').reshape(2, 3) - - # Simple copy - np.copyto(a, [[3, 1, 5], [6, 2, 1]]) - assert_equal(a, [[3, 1, 5], [6, 2, 1]]) - - # Overlapping copy should work - np.copyto(a[:, :2], a[::-1, 1::-1]) - assert_equal(a, [[2, 6, 5], [1, 3, 1]]) - - # Defaults to 'same_kind' casting - assert_raises(TypeError, np.copyto, a, 1.5) - - # Force a copy with 'unsafe' casting, truncating 1.5 to 1 - np.copyto(a, 1.5, casting='unsafe') - assert_equal(a, 1) - - # Copying with a mask - np.copyto(a, 3, where=[True, False, True]) - assert_equal(a, [[3, 1, 3], [3, 1, 3]]) - - # Casting rule still applies with a mask - assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) - - # Lists of integer 0's and 1's is ok too - np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) - assert_equal(a, [[3, 4, 4], [4, 1, 3]]) - - # Overlapping copy with mask should work - np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]]) - assert_equal(a, [[3, 4, 4], [4, 3, 3]]) - - # 'dst' must be an array - assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) - -def test_copyto_permut(): - # test explicit overflow case - pad = 500 - l = [True] * pad + [True, True, True, True] - r = np.zeros(len(l)-pad) - d = np.ones(len(l)-pad) - mask = np.array(l)[pad:] - np.copyto(r, d, where=mask[::-1]) - - # test all permutation of possible masks, 9 should be sufficient for - # current 4 byte unrolled code - power = 9 - d = np.ones(power) - for i in range(2**power): - r = np.zeros(power) - l = [(i & x) != 0 for x in range(power)] - mask = np.array(l) - np.copyto(r, d, where=mask) - assert_array_equal(r == 1, l) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r, d, where=mask[::-1]) - assert_array_equal(r == 1, l[::-1]) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r[::2], d[::2], where=mask[::2]) - assert_array_equal(r[::2] == 1, l[::2]) - assert_equal(r[::2].sum(), sum(l[::2])) - - r = np.zeros(power) - np.copyto(r[::2], d[::2], where=mask[::-2]) - assert_array_equal(r[::2] == 1, l[::-2]) - assert_equal(r[::2].sum(), sum(l[::-2])) - - for c in [0xFF, 0x7F, 0x02, 0x10]: - r = np.zeros(power) - mask = np.array(l) - imask = np.array(l).view(np.uint8) - imask[mask != 0] = 0xFF - np.copyto(r, d, where=mask) - assert_array_equal(r == 1, l) - assert_equal(r.sum(), sum(l)) - - r = np.zeros(power) - np.copyto(r, d, where=True) - assert_equal(r.sum(), r.size) - r = np.ones(power) - d = np.zeros(power) - np.copyto(r, d, where=False) - assert_equal(r.sum(), r.size) - -def test_copy_order(): - a = np.arange(24).reshape(2, 1, 3, 4) - b = a.copy(order='F') - c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) - - def check_copy_result(x, y, ccontig, fcontig, strides=False): - assert_(not (x is y)) - assert_equal(x, y) - assert_equal(res.flags.c_contiguous, ccontig) - assert_equal(res.flags.f_contiguous, fcontig) - # This check is impossible only because - # NPY_RELAXED_STRIDES_CHECKING changes the strides actively - if not NPY_RELAXED_STRIDES_CHECKING: - if strides: - assert_equal(x.strides, y.strides) - else: - assert_(x.strides != y.strides) - - # Validate the initial state of a, b, and c - assert_(a.flags.c_contiguous) - assert_(not a.flags.f_contiguous) - assert_(not 
b.flags.c_contiguous) - assert_(b.flags.f_contiguous) - assert_(not c.flags.c_contiguous) - assert_(not c.flags.f_contiguous) - - # Copy with order='C' - res = a.copy(order='C') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = b.copy(order='C') - check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) - res = c.copy(order='C') - check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) - res = np.copy(a, order='C') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = np.copy(b, order='C') - check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) - res = np.copy(c, order='C') - check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) - - # Copy with order='F' - res = a.copy(order='F') - check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) - res = b.copy(order='F') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = c.copy(order='F') - check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) - res = np.copy(a, order='F') - check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) - res = np.copy(b, order='F') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = np.copy(c, order='F') - check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) - - # Copy with order='K' - res = a.copy(order='K') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = b.copy(order='K') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = c.copy(order='K') - check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) - res = np.copy(a, order='K') - check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) - res = np.copy(b, order='K') - check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) - res = np.copy(c, order='K') - check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) - -def test_contiguous_flags(): - a = np.ones((4, 4, 1))[::2,:,:] - if NPY_RELAXED_STRIDES_CHECKING: - a.strides = a.strides[:2] + (-123,) - b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) - - def check_contig(a, ccontig, fcontig): - assert_(a.flags.c_contiguous == ccontig) - assert_(a.flags.f_contiguous == fcontig) - - # Check if new arrays are correct: - check_contig(a, False, False) - check_contig(b, False, False) - if NPY_RELAXED_STRIDES_CHECKING: - check_contig(np.empty((2, 2, 0, 2, 2)), True, True) - check_contig(np.array([[[1], [2]]], order='F'), True, True) - else: - check_contig(np.empty((2, 2, 0, 2, 2)), True, False) - check_contig(np.array([[[1], [2]]], order='F'), False, True) - check_contig(np.empty((2, 2)), True, False) - check_contig(np.empty((2, 2), order='F'), False, True) - - # Check that np.array creates correct contiguous flags: - check_contig(np.array(a, copy=False), False, False) - check_contig(np.array(a, copy=False, order='C'), True, False) - check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True) - - if NPY_RELAXED_STRIDES_CHECKING: - # Check slicing update of flags and : - check_contig(a[0], True, True) - check_contig(a[None, ::4, ..., None], True, True) - check_contig(b[0, 0, ...], False, True) - check_contig(b[:,:, 0:0,:,:], True, True) - else: - # Check slicing update of flags: - check_contig(a[0], True, False) - # Would be nice if this was C-Contiguous: - check_contig(a[None, 0, ..., None], False, False) - check_contig(b[0, 0, 0, ...], False, True) - - # Test ravel and squeeze. 
- check_contig(a.ravel(), True, True) - check_contig(np.ones((1, 3, 1)).squeeze(), True, True) - -def test_broadcast_arrays(): - # Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') - result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_arrayprint.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_arrayprint.py deleted file mode 100644 index 44bf5f3978ffe..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_arrayprint.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -from __future__ import division, absolute_import, print_function - -import sys -import numpy as np -from numpy.testing import * -from numpy.compat import sixu - -class TestArrayRepr(object): - def test_nan_inf(self): - x = np.array([np.nan, np.inf]) - assert_equal(repr(x), 'array([ nan, inf])') - -class TestComplexArray(TestCase): - def test_str(self): - rvals = [0, 1, -1, np.inf, -np.inf, np.nan] - cvals = [complex(rp, ip) for rp in rvals for ip in rvals] - dtypes = [np.complex64, np.cdouble, np.clongdouble] - actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] - wanted = [ - '[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]', - '[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]', - '[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]', - '[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]', - '[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]', - '[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]', - '[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]', - '[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]', - '[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]', - '[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]', - '[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]', - '[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]', - '[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]', - '[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]', - '[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]', - '[-1.+infj]', '[-1.+infj]', '[-1.0+infj]', - '[-1.-infj]', '[-1.-infj]', '[-1.0-infj]', - '[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]', - '[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]', - '[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]', - '[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]', - '[ inf+infj]', '[ inf+infj]', '[ inf+infj]', - '[ inf-infj]', '[ inf-infj]', '[ inf-infj]', - '[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]', - '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]', - '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]', - '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]', - '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', - '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', - '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', - '[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]', - '[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]', - '[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]', - '[ nan+infj]', '[ nan+infj]', '[ nan+infj]', - '[ nan-infj]', '[ nan-infj]', '[ nan-infj]', - '[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]'] - - for res, val in zip(actual, wanted): - assert_(res == val) - -class TestArray2String(TestCase): - def test_basic(self): - """Basic test of array2string.""" - a = np.arange(3) - assert_(np.array2string(a) == '[0 1 2]') - assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]') - - def test_style_keyword(self): - """This should only apply to 0-D arrays. 
See #1218.""" - stylestr = np.array2string(np.array(1.5), - style=lambda x: "Value in 0-D array: " + str(x)) - assert_(stylestr == 'Value in 0-D array: 1.5') - - def test_format_function(self): - """Test custom format function for each element in array.""" - def _format_function(x): - if np.abs(x) < 1: - return '.' - elif np.abs(x) < 2: - return 'o' - else: - return 'O' - x = np.arange(3) - if sys.version_info[0] >= 3: - x_hex = "[0x0 0x1 0x2]" - x_oct = "[0o0 0o1 0o2]" - else: - x_hex = "[0x0L 0x1L 0x2L]" - x_oct = "[0L 01L 02L]" - assert_(np.array2string(x, formatter={'all':_format_function}) == \ - "[. o O]") - assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==\ - "[. o O]") - assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == \ - "[0.0000 1.0000 2.0000]") - assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), \ - x_hex) - assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), \ - x_oct) - - x = np.arange(3.) - assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == \ - "[0.00 1.00 2.00]") - assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == \ - "[0.00 1.00 2.00]") - - s = np.array(['abc', 'def']) - assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == \ - '[abcabc defdef]') - - -class TestPrintOptions: - """Test getting and setting global print options.""" - def setUp(self): - self.oldopts = np.get_printoptions() - - def tearDown(self): - np.set_printoptions(**self.oldopts) - - def test_basic(self): - x = np.array([1.5, 0, 1.234567890]) - assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])") - np.set_printoptions(precision=4) - assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])") - - def test_formatter(self): - x = np.arange(3) - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - - def test_formatter_reset(self): - x = np.arange(3) - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'int':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - np.set_printoptions(formatter={'all':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'all':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - np.set_printoptions(formatter={'int':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1, 0, 1])") - np.set_printoptions(formatter={'int_kind':None}) - assert_equal(repr(x), "array([0, 1, 2])") - - x = np.arange(3.) 
- np.set_printoptions(formatter={'float':lambda x: str(x-1)}) - assert_equal(repr(x), "array([-1.0, 0.0, 1.0])") - np.set_printoptions(formatter={'float_kind':None}) - assert_equal(repr(x), "array([ 0., 1., 2.])") - -def test_unicode_object_array(): - import sys - if sys.version_info[0] >= 3: - expected = "array(['é'], dtype=object)" - else: - expected = "array([u'\\xe9'], dtype=object)" - x = np.array([sixu('\xe9')], dtype=object) - assert_equal(repr(x), expected) - - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_blasdot.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_blasdot.py deleted file mode 100644 index 264663835644d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_blasdot.py +++ /dev/null @@ -1,172 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -import sys -from numpy.core import zeros, float64 -from numpy.testing import dec, TestCase, assert_almost_equal, assert_, \ - assert_raises, assert_array_equal, assert_allclose, assert_equal -from numpy.core.multiarray import inner as inner_ - -DECPREC = 14 - -class TestInner(TestCase): - def test_vecself(self): - """Ticket 844.""" - # Inner product of a vector with itself segfaults or give meaningless - # result - a = zeros(shape = (1, 80), dtype = float64) - p = inner_(a, a) - assert_almost_equal(p, 0, decimal = DECPREC) - -try: - import numpy.core._dotblas as _dotblas -except ImportError: - _dotblas = None - -@dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas") -def test_blasdot_used(): - from numpy.core import dot, vdot, inner, alterdot, restoredot - assert_(dot is _dotblas.dot) - assert_(vdot is _dotblas.vdot) - assert_(inner is _dotblas.inner) - assert_(alterdot is _dotblas.alterdot) - assert_(restoredot is _dotblas.restoredot) - - -def test_dot_2args(): - from numpy.core import dot - - a = np.array([[1, 2], [3, 4]], dtype=float) - b = np.array([[1, 0], [1, 1]], dtype=float) - c = np.array([[3, 2], [7, 4]], dtype=float) - - d = dot(a, b) - assert_allclose(c, d) - -def test_dot_3args(): - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 32)) - for i in range(12): - np.dot(f, v, r) - assert_equal(sys.getrefcount(r), 2) - r2 = np.dot(f, v, out=None) - assert_array_equal(r2, r) - assert_(r is np.dot(f, v, out=r)) - - v = v[:, 0].copy() # v.shape == (16,) - r = r[:, 0].copy() # r.shape == (1024,) - r2 = np.dot(f, v) - assert_(r is np.dot(f, v, r)) - assert_array_equal(r2, r) - -def test_dot_3args_errors(): - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 31)) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((1024,)) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((32,)) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((32, 1024)) - assert_raises(ValueError, np.dot, f, v, r) - assert_raises(ValueError, np.dot, f, v, r.T) - - r = np.empty((1024, 64)) - assert_raises(ValueError, np.dot, f, v, r[:, ::2]) - assert_raises(ValueError, np.dot, f, v, r[:, :32]) - - r = np.empty((1024, 32), dtype=np.float32) - assert_raises(ValueError, np.dot, f, v, r) - - r = np.empty((1024, 32), dtype=int) - assert_raises(ValueError, np.dot, f, v, r) - -def test_dot_array_order(): - """ Test numpy dot with different order C, F - - Comparing results with multiarray dot. 
- Double and single precisions array are compared using relative - precision of 7 and 5 decimals respectively. - Use 30 decimal when comparing exact operations like: - (a.b)' = b'.a' - """ - _dot = np.core.multiarray.dot - a_dim, b_dim, c_dim = 10, 4, 7 - orders = ["C", "F"] - dtypes_prec = {np.float64: 7, np.float32: 5} - np.random.seed(7) - - for arr_type, prec in dtypes_prec.items(): - for a_order in orders: - a = np.asarray(np.random.randn(a_dim, a_dim), - dtype=arr_type, order=a_order) - assert_array_equal(np.dot(a, a), a.dot(a)) - # (a.a)' = a'.a', note that mse~=1e-31 needs almost_equal - assert_almost_equal(a.dot(a), a.T.dot(a.T).T, decimal=prec) - - # - # Check with making explicit copy - # - a_T = a.T.copy(order=a_order) - assert_almost_equal(a_T.dot(a_T), a.T.dot(a.T), decimal=prec) - assert_almost_equal(a.dot(a_T), a.dot(a.T), decimal=prec) - assert_almost_equal(a_T.dot(a), a.T.dot(a), decimal=prec) - - # - # Compare with multiarray dot - # - assert_almost_equal(a.dot(a), _dot(a, a), decimal=prec) - assert_almost_equal(a.T.dot(a), _dot(a.T, a), decimal=prec) - assert_almost_equal(a.dot(a.T), _dot(a, a.T), decimal=prec) - assert_almost_equal(a.T.dot(a.T), _dot(a.T, a.T), decimal=prec) - for res in a.dot(a), a.T.dot(a), a.dot(a.T), a.T.dot(a.T): - assert res.flags.c_contiguous - - for b_order in orders: - b = np.asarray(np.random.randn(a_dim, b_dim), - dtype=arr_type, order=b_order) - b_T = b.T.copy(order=b_order) - assert_almost_equal(a_T.dot(b), a.T.dot(b), decimal=prec) - assert_almost_equal(b_T.dot(a), b.T.dot(a), decimal=prec) - # (b'.a)' = a'.b - assert_almost_equal(b.T.dot(a), a.T.dot(b).T, decimal=prec) - assert_almost_equal(a.dot(b), _dot(a, b), decimal=prec) - assert_almost_equal(b.T.dot(a), _dot(b.T, a), decimal=prec) - - - for c_order in orders: - c = np.asarray(np.random.randn(b_dim, c_dim), - dtype=arr_type, order=c_order) - c_T = c.T.copy(order=c_order) - assert_almost_equal(c.T.dot(b.T), c_T.dot(b_T), decimal=prec) - assert_almost_equal(c.T.dot(b.T).T, b.dot(c), decimal=prec) - assert_almost_equal(b.dot(c), _dot(b, c), decimal=prec) - assert_almost_equal(c.T.dot(b.T), _dot(c.T, b.T), decimal=prec) - -@dec.skipif(True) # ufunc override disabled for 1.9 -def test_dot_override(): - class A(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return "A" - - class B(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return NotImplemented - - a = A() - b = B() - c = np.array([[1]]) - - assert_equal(np.dot(a, b), "A") - assert_equal(c.dot(a), "A") - assert_raises(TypeError, np.dot, b, c) - assert_raises(TypeError, c.dot, b) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_datetime.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_datetime.py deleted file mode 100644 index bf0ba68073940..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_datetime.py +++ /dev/null @@ -1,1771 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os, pickle -import numpy -import numpy as np -from numpy.testing import * -from numpy.compat import asbytes -import datetime - -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False - - -class TestDateTime(TestCase): - def test_datetime_dtype_creation(self): - for unit in ['Y', 'M', 'W', 'D', - 'h', 'm', 's', 'ms', 'us', - 'ns', 'ps', 'fs', 'as']: - dt1 = np.dtype('M8[750%s]'%unit) - assert_(dt1 == 
np.dtype('datetime64[750%s]' % unit)) - dt2 = np.dtype('m8[%s]' % unit) - assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) - - # Generic units shouldn't add [] to the end - assert_equal(str(np.dtype("M8")), "datetime64") - - # Should be possible to specify the endianness - assert_equal(np.dtype("=M8"), np.dtype("M8")) - assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]")) - assert_(np.dtype(">M8") == np.dtype("M8") or - np.dtype("M8[D]") == np.dtype("M8[D]") or - np.dtype("M8") != np.dtype("m8") == np.dtype("m8") or - np.dtype("m8[D]") == np.dtype("m8[D]") or - np.dtype("m8") != np.dtype(" Scalars - assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]')) - assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]')) - assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]')) - assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]')) - assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]')) - - # Arrays -> Scalars - assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]')) - assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]')) - assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]')) - assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]')) - assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]')) - - def test_days_creation(self): - assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 - 365) - assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3) - assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 + 366) - assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4) - assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4 + 365) - assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) - assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) - assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365) - assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) - assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) - assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) - assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) - assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) - assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4) - assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366) - assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3) - assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) - - assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) - assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) - assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) - assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) - assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) - - def test_days_to_pydate(self): - assert_equal(np.array('1599', dtype='M8[D]').astype('O'), - 
datetime.date(1599, 1, 1)) - assert_equal(np.array('1600', dtype='M8[D]').astype('O'), - datetime.date(1600, 1, 1)) - assert_equal(np.array('1601', dtype='M8[D]').astype('O'), - datetime.date(1601, 1, 1)) - assert_equal(np.array('1900', dtype='M8[D]').astype('O'), - datetime.date(1900, 1, 1)) - assert_equal(np.array('1901', dtype='M8[D]').astype('O'), - datetime.date(1901, 1, 1)) - assert_equal(np.array('2000', dtype='M8[D]').astype('O'), - datetime.date(2000, 1, 1)) - assert_equal(np.array('2001', dtype='M8[D]').astype('O'), - datetime.date(2001, 1, 1)) - assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), - datetime.date(1600, 2, 29)) - assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), - datetime.date(1600, 3, 1)) - assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), - datetime.date(2001, 3, 22)) - - def test_dtype_comparison(self): - assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) - assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) - assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) - assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) - - def test_pydatetime_creation(self): - a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') - assert_equal(a[0], a[1]) - a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') - assert_equal(a[0], a[1]) - a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') - assert_equal(a[0], a[1]) - # Will fail if the date changes during the exact right moment - a = np.array(['today', datetime.date.today()], dtype='M8[D]') - assert_equal(a[0], a[1]) - # datetime.datetime.now() returns local time, not UTC - #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') - #assert_equal(a[0], a[1]) - - # A datetime.date will raise if you try to give it time units - assert_raises(TypeError, np.array, datetime.date(1960, 3, 12), - dtype='M8[s]') - - def test_datetime_string_conversion(self): - a = ['2011-03-16', '1920-01-01', '2013-05-19'] - str_a = np.array(a, dtype='S') - dt_a = np.array(a, dtype='M') - str_b = np.empty_like(str_a) - dt_b = np.empty_like(dt_a) - - # String to datetime - assert_equal(dt_a, str_a.astype('M')) - assert_equal(dt_a.dtype, str_a.astype('M').dtype) - dt_b[...] = str_a - assert_equal(dt_a, dt_b) - # Datetime to string - assert_equal(str_a, dt_a.astype('S0')) - str_b[...] = dt_a - assert_equal(str_a, str_b) - - # Convert the 'S' to 'U' - str_a = str_a.astype('U') - str_b = str_b.astype('U') - - # Unicode to datetime - assert_equal(dt_a, str_a.astype('M')) - assert_equal(dt_a.dtype, str_a.astype('M').dtype) - dt_b[...] = str_a - assert_equal(dt_a, dt_b) - # Datetime to unicode - assert_equal(str_a, dt_a.astype('U')) - str_b[...] 
= dt_a - assert_equal(str_a, str_b) - - def test_datetime_array_str(self): - a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') - assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']") - - a = np.array(['2011-03-16T13:55Z', '1920-01-01T03:12Z'], dtype='M') - assert_equal(np.array2string(a, separator=', ', - formatter={'datetime': lambda x : - "'%s'" % np.datetime_as_string(x, timezone='UTC')}), - "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") - - # Check that one NaT doesn't corrupt subsequent entries - a = np.array(['2010', 'NaT', '2030']).astype('M') - assert_equal(str(a), "['2010' 'NaT' '2030']") - - def test_pickle(self): - # Check that pickle roundtripping works - dt = np.dtype('M8[7D]') - assert_equal(pickle.loads(pickle.dumps(dt)), dt) - dt = np.dtype('M8[W]') - assert_equal(pickle.loads(pickle.dumps(dt)), dt) - - # Check that loading pickles from 1.6 works - pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ - "(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \ - "I7\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(asbytes(pkl)), np.dtype('<M8[7D]')) - pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ - "(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ - "I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(asbytes(pkl)), np.dtype('>M8[us]')) - - def test_setstate(self): - "Verify that datetime dtype __setstate__ can handle bad arguments" - dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) - assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) - assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - - def test_dtype_promotion(self): - # datetime <op> datetime computes the metadata gcd - # timedelta <op> timedelta computes the metadata gcd - for mM in ['m', 'M']: - assert_equal( - np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), - np.dtype(mM+'8[2Y]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), - np.dtype(mM+'8[3Y]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), - np.dtype(mM+'8[2M]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), - np.dtype(mM+'8[1D]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), - np.dtype(mM+'8[s]')) - assert_equal( - np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), - np.dtype(mM+'8[7s]')) - # timedelta <op> timedelta raises when there is no reasonable gcd - assert_raises(TypeError, np.promote_types, - np.dtype('m8[Y]'), np.dtype('m8[D]')) - assert_raises(TypeError, np.promote_types, - np.dtype('m8[M]'), np.dtype('m8[W]')) - # timedelta <op> timedelta may overflow with big unit ranges - assert_raises(OverflowError, np.promote_types, - np.dtype('m8[W]'), np.dtype('m8[fs]')) - assert_raises(OverflowError, np.promote_types, - np.dtype('m8[s]'), np.dtype('m8[as]')) - - def test_cast_overflow(self): - # gh-4486 - def cast(): - numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]") - assert_raises(OverflowError, cast) - assert_(all(self.A >= self.B)) - assert_(all(self.A <= self.B)) - assert_(not any(self.A > self.B)) - assert_(not any(self.A < self.B)) - assert_(not any(self.A != self.B)) - -class TestChar(TestCase): - def setUp(self): - self.A = np.array('abc1', dtype='c').view(np.chararray) - - def test_it(self): - assert_equal(self.A.shape, (4,)) - assert_equal(self.A.upper()[:2].tobytes(), asbytes('AB')) - -class TestComparisons(TestCase): - def setUp(self): - self.A = np.array([['abc', 
'123'], - ['789', 'xyz']]).view(np.chararray) - self.B = np.array([['efg', '123 '], - ['051', 'tuv']]).view(np.chararray) - - def test_not_equal(self): - assert_array_equal((self.A != self.B), [[True, False], [True, True]]) - - def test_equal(self): - assert_array_equal((self.A == self.B), [[False, True], [False, False]]) - - def test_greater_equal(self): - assert_array_equal((self.A >= self.B), [[False, True], [True, True]]) - - def test_less_equal(self): - assert_array_equal((self.A <= self.B), [[True, True], [False, False]]) - - def test_greater(self): - assert_array_equal((self.A > self.B), [[False, False], [True, True]]) - - def test_less(self): - assert_array_equal((self.A < self.B), [[True, False], [False, False]]) - -class TestComparisonsMixed1(TestComparisons): - """Ticket #1276""" - - def setUp(self): - TestComparisons.setUp(self) - self.B = np.array([['efg', '123 '], - ['051', 'tuv']], np.unicode_).view(np.chararray) - -class TestComparisonsMixed2(TestComparisons): - """Ticket #1276""" - - def setUp(self): - TestComparisons.setUp(self) - self.A = np.array([['abc', '123'], - ['789', 'xyz']], np.unicode_).view(np.chararray) - -class TestInformation(TestCase): - def setUp(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) - self.B = np.array([[sixu(' \u03a3 '), sixu('')], - [sixu('12345'), sixu('MixedCase')], - [sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray) - - def test_len(self): - assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) - - def test_count(self): - assert_(issubclass(self.A.count('').dtype.type, np.integer)) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) - # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) - - def test_endswith(self): - assert_(issubclass(self.A.endswith('').dtype.type, np.bool_)) - assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) - def fail(): - self.A.endswith('3', 'fdjk') - self.assertRaises(TypeError, fail) - - def test_find(self): - assert_(issubclass(self.A.find('a').dtype.type, np.integer)) - assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]]) - assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]]) - - def test_index(self): - def fail(): - self.A.index('a') - self.assertRaises(ValueError, fail) - assert_(np.char.index('abcba', 'b') == 1) - assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) - - def test_isalnum(self): - assert_(issubclass(self.A.isalnum().dtype.type, np.bool_)) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) - - def test_isalpha(self): - assert_(issubclass(self.A.isalpha().dtype.type, np.bool_)) - assert_array_equal(self.A.isalpha(), 
[[False, False], [False, True], [False, True]]) - - def test_isdigit(self): - assert_(issubclass(self.A.isdigit().dtype.type, np.bool_)) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) - - def test_islower(self): - assert_(issubclass(self.A.islower().dtype.type, np.bool_)) - assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) - - def test_isspace(self): - assert_(issubclass(self.A.isspace().dtype.type, np.bool_)) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) - - def test_istitle(self): - assert_(issubclass(self.A.istitle().dtype.type, np.bool_)) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) - - def test_isupper(self): - assert_(issubclass(self.A.isupper().dtype.type, np.bool_)) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) - - def test_rfind(self): - assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) - - def test_rindex(self): - def fail(): - self.A.rindex('a') - self.assertRaises(ValueError, fail) - assert_(np.char.rindex('abcba', 'b') == 3) - assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) - - def test_startswith(self): - assert_(issubclass(self.A.startswith('').dtype.type, np.bool_)) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) - def fail(): - self.A.startswith('3', 'fdjk') - self.assertRaises(TypeError, fail) - - -class TestMethods(TestCase): - def setUp(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.chararray) - self.B = np.array([[sixu(' \u03a3 '), sixu('')], - [sixu('12345'), sixu('MixedCase')], - [sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray) - - def test_capitalize(self): - assert_(issubclass(self.A.capitalize().dtype.type, np.string_)) - assert_array_equal(self.A.capitalize(), asbytes_nested([ - [' abc ', ''], - ['12345', 'Mixedcase'], - ['123 \t 345 \0 ', 'Upper']])) - assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_)) - assert_array_equal(self.B.capitalize(), [ - [sixu(' \u03c3 '), ''], - ['12345', 'Mixedcase'], - ['123 \t 345 \0 ', 'Upper']]) - - def test_center(self): - assert_(issubclass(self.A.center(10).dtype.type, np.string_)) - widths = np.array([[10, 20]]) - C = self.A.center([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.center(20, asbytes('#')) - assert_(np.all(C.startswith(asbytes('#')))) - assert_(np.all(C.endswith(asbytes('#')))) - C = np.char.center(asbytes('FOO'), [[10, 20], [15, 8]]) - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, asbytes_nested([ - [' FOO ', ' FOO '], - [' FOO ', ' FOO ']])) - - def test_decode(self): - if sys.version_info[0] >= 3: - A = np.char.array([asbytes('\\u03a3')]) - assert_(A.decode('unicode-escape')[0] == '\u03a3') - else: - A = np.char.array(['736563726574206d657373616765']) - assert_(A.decode('hex_codec')[0] == 'secret message') - - def test_encode(self): - B = self.B.encode('unicode_escape') - assert_(B[0][0] == str(' \\u03a3 
').encode('latin1')) - - def test_expandtabs(self): - T = self.A.expandtabs() - assert_(T[2][0] == asbytes('123 345')) - - def test_join(self): - if sys.version_info[0] >= 3: - # NOTE: list(b'123') == [49, 50, 51] - # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') - else: - A0 = self.A - - A = np.char.join([',', '#'], A0) - if sys.version_info[0] >= 3: - assert_(issubclass(A.dtype.type, np.unicode_)) - else: - assert_(issubclass(A.dtype.type, np.string_)) - assert_array_equal(np.char.join([',', '#'], A0), - [ - [' ,a,b,c, ', ''], - ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], - ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) - - def test_ljust(self): - assert_(issubclass(self.A.ljust(10).dtype.type, np.string_)) - widths = np.array([[10, 20]]) - C = self.A.ljust([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.ljust(20, asbytes('#')) - assert_array_equal(C.startswith(asbytes('#')), [ - [False, True], [False, False], [False, False]]) - assert_(np.all(C.endswith(asbytes('#')))) - C = np.char.ljust(asbytes('FOO'), [[10, 20], [15, 8]]) - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, asbytes_nested([ - ['FOO ', 'FOO '], - ['FOO ', 'FOO ']])) - - def test_lower(self): - assert_(issubclass(self.A.lower().dtype.type, np.string_)) - assert_array_equal(self.A.lower(), asbytes_nested([ - [' abc ', ''], - ['12345', 'mixedcase'], - ['123 \t 345 \0 ', 'upper']])) - assert_(issubclass(self.B.lower().dtype.type, np.unicode_)) - assert_array_equal(self.B.lower(), [ - [sixu(' \u03c3 '), sixu('')], - [sixu('12345'), sixu('mixedcase')], - [sixu('123 \t 345 \0 '), sixu('upper')]]) - - def test_lstrip(self): - assert_(issubclass(self.A.lstrip().dtype.type, np.string_)) - assert_array_equal(self.A.lstrip(), asbytes_nested([ - ['abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']])) - assert_array_equal(self.A.lstrip(asbytes_nested(['1', 'M'])), - asbytes_nested([ - [' abc', ''], - ['2345', 'ixedCase'], - ['23 \t 345 \x00', 'UPPER']])) - assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_)) - assert_array_equal(self.B.lstrip(), [ - [sixu('\u03a3 '), ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) - - def test_partition(self): - P = self.A.partition(asbytes_nested(['3', 'M'])) - assert_(issubclass(P.dtype.type, np.string_)) - assert_array_equal(P, asbytes_nested([ - [(' abc ', '', ''), ('', '', '')], - [('12', '3', '45'), ('', 'M', 'ixedCase')], - [('12', '3', ' \t 345 \0 '), ('UPPER', '', '')]])) - - def test_replace(self): - R = self.A.replace(asbytes_nested(['3', 'a']), - asbytes_nested(['##########', '@'])) - assert_(issubclass(R.dtype.type, np.string_)) - assert_array_equal(R, asbytes_nested([ - [' abc ', ''], - ['12##########45', 'MixedC@se'], - ['12########## \t ##########45 \x00', 'UPPER']])) - - if sys.version_info[0] < 3: - # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3 - R = self.A.replace(asbytes('a'), sixu('\u03a3')) - assert_(issubclass(R.dtype.type, np.unicode_)) - assert_array_equal(R, [ - [sixu(' \u03a3bc '), ''], - ['12345', sixu('MixedC\u03a3se')], - ['123 \t 345 \x00', 'UPPER']]) - - def test_rjust(self): - assert_(issubclass(self.A.rjust(10).dtype.type, np.string_)) - widths = np.array([[10, 20]]) - C = self.A.rjust([10, 20]) - assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.rjust(20, asbytes('#')) - assert_(np.all(C.startswith(asbytes('#')))) - assert_array_equal(C.endswith(asbytes('#')), - [[False, True], [False, 
False], [False, False]]) - C = np.char.rjust(asbytes('FOO'), [[10, 20], [15, 8]]) - assert_(issubclass(C.dtype.type, np.string_)) - assert_array_equal(C, asbytes_nested([ - [' FOO', ' FOO'], - [' FOO', ' FOO']])) - - def test_rpartition(self): - P = self.A.rpartition(asbytes_nested(['3', 'M'])) - assert_(issubclass(P.dtype.type, np.string_)) - assert_array_equal(P, asbytes_nested([ - [('', '', ' abc '), ('', '', '')], - [('12', '3', '45'), ('', 'M', 'ixedCase')], - [('123 \t ', '3', '45 \0 '), ('', '', 'UPPER')]])) - - def test_rsplit(self): - A = self.A.rsplit(asbytes('3')) - assert_(issubclass(A.dtype.type, np.object_)) - assert_equal(A.tolist(), asbytes_nested([ - [[' abc '], ['']], - [['12', '45'], ['MixedCase']], - [['12', ' \t ', '45 \x00 '], ['UPPER']]])) - - def test_rstrip(self): - assert_(issubclass(self.A.rstrip().dtype.type, np.string_)) - assert_array_equal(self.A.rstrip(), asbytes_nested([ - [' abc', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']])) - assert_array_equal(self.A.rstrip(asbytes_nested(['5', 'ER'])), - asbytes_nested([ - [' abc ', ''], - ['1234', 'MixedCase'], - ['123 \t 345 \x00', 'UPP']])) - assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_)) - assert_array_equal(self.B.rstrip(), [ - [sixu(' \u03a3'), ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']]) - - def test_strip(self): - assert_(issubclass(self.A.strip().dtype.type, np.string_)) - assert_array_equal(self.A.strip(), asbytes_nested([ - ['abc', ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']])) - assert_array_equal(self.A.strip(asbytes_nested(['15', 'EReM'])), - asbytes_nested([ - [' abc ', ''], - ['234', 'ixedCas'], - ['23 \t 345 \x00', 'UPP']])) - assert_(issubclass(self.B.strip().dtype.type, np.unicode_)) - assert_array_equal(self.B.strip(), [ - [sixu('\u03a3'), ''], - ['12345', 'MixedCase'], - ['123 \t 345', 'UPPER']]) - - def test_split(self): - A = self.A.split(asbytes('3')) - assert_(issubclass(A.dtype.type, np.object_)) - assert_equal(A.tolist(), asbytes_nested([ - [[' abc '], ['']], - [['12', '45'], ['MixedCase']], - [['12', ' \t ', '45 \x00 '], ['UPPER']]])) - - def test_splitlines(self): - A = np.char.array(['abc\nfds\nwer']).splitlines() - assert_(issubclass(A.dtype.type, np.object_)) - assert_(A.shape == (1,)) - assert_(len(A[0]) == 3) - - def test_swapcase(self): - assert_(issubclass(self.A.swapcase().dtype.type, np.string_)) - assert_array_equal(self.A.swapcase(), asbytes_nested([ - [' ABC ', ''], - ['12345', 'mIXEDcASE'], - ['123 \t 345 \0 ', 'upper']])) - assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_)) - assert_array_equal(self.B.swapcase(), [ - [sixu(' \u03c3 '), sixu('')], - [sixu('12345'), sixu('mIXEDcASE')], - [sixu('123 \t 345 \0 '), sixu('upper')]]) - - def test_title(self): - assert_(issubclass(self.A.title().dtype.type, np.string_)) - assert_array_equal(self.A.title(), asbytes_nested([ - [' Abc ', ''], - ['12345', 'Mixedcase'], - ['123 \t 345 \0 ', 'Upper']])) - assert_(issubclass(self.B.title().dtype.type, np.unicode_)) - assert_array_equal(self.B.title(), [ - [sixu(' \u03a3 '), sixu('')], - [sixu('12345'), sixu('Mixedcase')], - [sixu('123 \t 345 \0 '), sixu('Upper')]]) - - def test_upper(self): - assert_(issubclass(self.A.upper().dtype.type, np.string_)) - assert_array_equal(self.A.upper(), asbytes_nested([ - [' ABC ', ''], - ['12345', 'MIXEDCASE'], - ['123 \t 345 \0 ', 'UPPER']])) - assert_(issubclass(self.B.upper().dtype.type, np.unicode_)) - assert_array_equal(self.B.upper(), [ - [sixu(' \u03a3 '), sixu('')], - [sixu('12345'), 
sixu('MIXEDCASE')], - [sixu('123 \t 345 \0 '), sixu('UPPER')]]) - - def test_isnumeric(self): - def fail(): - self.A.isnumeric() - self.assertRaises(TypeError, fail) - assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_)) - assert_array_equal(self.B.isnumeric(), [ - [False, False], [True, False], [False, False]]) - - def test_isdecimal(self): - def fail(): - self.A.isdecimal() - self.assertRaises(TypeError, fail) - assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_)) - assert_array_equal(self.B.isdecimal(), [ - [False, False], [True, False], [False, False]]) - - -class TestOperations(TestCase): - def setUp(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.chararray) - self.B = np.array([['efg', '456'], - ['051', 'tuv']]).view(np.chararray) - - def test_add(self): - AB = np.array([['abcefg', '123456'], - ['789051', 'xyztuv']]).view(np.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert_(len((self.A + self.B)[0][0]) == 6) - - def test_radd(self): - QA = np.array([['qabc', 'q123'], - ['q789', 'qxyz']]).view(np.chararray) - assert_array_equal(QA, ('q' + self.A)) - - def test_mul(self): - A = self.A - for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) - - assert_array_equal(Ar, (self.A * r)) - - for ob in [object(), 'qrs']: - try: - A * ob - except ValueError: - pass - else: - self.fail("chararray can only be multiplied by integers") - - def test_rmul(self): - A = self.A - for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) - assert_array_equal(Ar, (r * self.A)) - - for ob in [object(), 'qrs']: - try: - ob * A - except ValueError: - pass - else: - self.fail("chararray can only be multiplied by integers") - - def test_mod(self): - """Ticket #856""" - F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray) - C = np.array([[3, 7], [19, 1]]) - FC = np.array([['3', '7.000000'], - ['19', '1']]).view(np.chararray) - assert_array_equal(FC, F % C) - - A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray) - A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray) - assert_array_equal(A1, (A % 1)) - - A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray) - assert_array_equal(A2, (A % [[1, 2], [3, 4]])) - - def test_rmod(self): - assert_(("%s" % self.A) == str(self.A)) - assert_(("%r" % self.A) == repr(self.A)) - - for ob in [42, object()]: - try: - ob % self.A - except TypeError: - pass - else: - self.fail("chararray __rmod__ should fail with " \ - "non-string objects") - - -def test_empty_indexing(): - """Regression test for ticket 1948.""" - # Check that indexing a chararray with an empty list/array returns an - # empty chararray instead of a chararray with a single empty string in it. - s = np.chararray((4,)) - assert_(s[[]].size == 0) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_deprecations.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_deprecations.py deleted file mode 100644 index ef56766f5f415..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_deprecations.py +++ /dev/null @@ -1,512 +0,0 @@ -""" -Tests related to deprecation warnings. Also a convenient place -to document how deprecations should eventually be turned into errors. 
- -""" -from __future__ import division, absolute_import, print_function - -import sys -import operator -import warnings -from nose.plugins.skip import SkipTest - -import numpy as np -from numpy.testing import (dec, run_module_suite, assert_raises, - assert_warns, assert_array_equal, assert_) - - -class _DeprecationTestCase(object): - # Just as warning: warnings uses re.match, so the start of this message - # must match. - message = '' - - def setUp(self): - self.warn_ctx = warnings.catch_warnings(record=True) - self.log = self.warn_ctx.__enter__() - - # Do *not* ignore other DeprecationWarnings. Ignoring warnings - # can give very confusing results because of - # http://bugs.python.org/issue4180 and it is probably simplest to - # try to keep the tests cleanly giving only the right warning type. - # (While checking them set to "error" those are ignored anyway) - # We still have them show up, because otherwise they would be raised - warnings.filterwarnings("always", category=DeprecationWarning) - warnings.filterwarnings("always", message=self.message, - category=DeprecationWarning) - - - def tearDown(self): - self.warn_ctx.__exit__() - - - def assert_deprecated(self, function, num=1, ignore_others=False, - function_fails=False, - exceptions=(DeprecationWarning,), args=(), kwargs={}): - """Test if DeprecationWarnings are given and raised. - - This first checks if the function when called gives `num` - DeprecationWarnings, after that it tries to raise these - DeprecationWarnings and compares them with `exceptions`. - The exceptions can be different for cases where this code path - is simply not anticipated and the exception is replaced. - - Parameters - ---------- - f : callable - The function to test - num : int - Number of DeprecationWarnings to expect. This should normally be 1. - ignore_other : bool - Whether warnings of the wrong type should be ignored (note that - the message is not checked) - function_fails : bool - If the function would normally fail, setting this will check for - warnings inside a try/except block. - exceptions : Exception or tuple of Exceptions - Exception to expect when turning the warnings into an error. - The default checks for DeprecationWarnings. If exceptions is - empty the function is expected to run successfull. - args : tuple - Arguments for `f` - kwargs : dict - Keyword arguments for `f` - """ - # reset the log - self.log[:] = [] - - try: - function(*args, **kwargs) - except (Exception if function_fails else tuple()): - pass - # just in case, clear the registry - num_found = 0 - for warning in self.log: - if warning.category is DeprecationWarning: - num_found += 1 - elif not ignore_others: - raise AssertionError("expected DeprecationWarning but %s given" - % warning.category) - if num is not None and num_found != num: - raise AssertionError("%i warnings found but %i expected" - % (len(self.log), num)) - - with warnings.catch_warnings(): - warnings.filterwarnings("error", message=self.message, - category=DeprecationWarning) - - try: - function(*args, **kwargs) - if exceptions != tuple(): - raise AssertionError("No error raised during function call") - except exceptions: - if exceptions == tuple(): - raise AssertionError("Error raised during function call") - - - def assert_not_deprecated(self, function, args=(), kwargs={}): - """Test if DeprecationWarnings are given and raised. 
- - This is just a shorthand for: - - self.assert_deprecated(function, num=0, ignore_others=True, - exceptions=tuple(), args=args, kwargs=kwargs) - """ - self.assert_deprecated(function, num=0, ignore_others=True, - exceptions=tuple(), args=args, kwargs=kwargs) - - -class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): - """ - These test that ``DeprecationWarning`` is given when you try to use - non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` - and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. - - After deprecation, changes need to be done inside conversion_utils.c - in PyArray_PyIntAsIntp and possibly PyArray_IntpConverter. - In iterators.c the function slice_GetIndices could be removed in favor - of its python equivalent and in mapping.c the function _tuple_of_integers - can be simplified (if ``np.array([1]).__index__()`` is also deprecated). - - As for the deprecation time-frame: via Ralf Gommers, - - "Hard to put that as a version number, since we don't know if the - version after 1.8 will be 6 months or 2 years after. I'd say 2 - years is reasonable." - - I interpret this to mean 2 years after the 1.8 release. Possibly - giving a PendingDeprecationWarning before that (which is visible - by default) - - """ - message = "using a non-integer number instead of an integer " \ - "will result in an error in the future" - - def test_indexing(self): - a = np.array([[[5]]]) - def assert_deprecated(*args, **kwargs): - self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs) - - assert_deprecated(lambda: a[0.0]) - assert_deprecated(lambda: a[0, 0.0]) - assert_deprecated(lambda: a[0.0, 0]) - assert_deprecated(lambda: a[0.0,:]) - assert_deprecated(lambda: a[:, 0.0]) - assert_deprecated(lambda: a[:, 0.0,:]) - assert_deprecated(lambda: a[0.0,:,:]) - assert_deprecated(lambda: a[0, 0, 0.0]) - assert_deprecated(lambda: a[0.0, 0, 0]) - assert_deprecated(lambda: a[0, 0.0, 0]) - assert_deprecated(lambda: a[-1.4]) - assert_deprecated(lambda: a[0, -1.4]) - assert_deprecated(lambda: a[-1.4, 0]) - assert_deprecated(lambda: a[-1.4,:]) - assert_deprecated(lambda: a[:, -1.4]) - assert_deprecated(lambda: a[:, -1.4,:]) - assert_deprecated(lambda: a[-1.4,:,:]) - assert_deprecated(lambda: a[0, 0, -1.4]) - assert_deprecated(lambda: a[-1.4, 0, 0]) - assert_deprecated(lambda: a[0, -1.4, 0]) - - # Test that the slice parameter deprecation warning doesn't mask - # the scalar index warning. - assert_deprecated(lambda: a[0.0:, 0.0], num=2) - assert_deprecated(lambda: a[0.0:, 0.0,:], num=2) - - - def test_valid_indexing(self): - a = np.array([[[5]]]) - assert_not_deprecated = self.assert_not_deprecated - - assert_not_deprecated(lambda: a[np.array([0])]) - assert_not_deprecated(lambda: a[[0, 0]]) - assert_not_deprecated(lambda: a[:, [0, 0]]) - assert_not_deprecated(lambda: a[:, 0,:]) - assert_not_deprecated(lambda: a[:,:,:]) - - - def test_slicing(self): - a = np.array([[5]]) - def assert_deprecated(*args, **kwargs): - self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs) - - # start as float. - assert_deprecated(lambda: a[0.0:]) - assert_deprecated(lambda: a[0:, 0.0:2]) - assert_deprecated(lambda: a[0.0::2, :0]) - assert_deprecated(lambda: a[0.0:1:2,:]) - assert_deprecated(lambda: a[:, 0.0:]) - # stop as float. - assert_deprecated(lambda: a[:0.0]) - assert_deprecated(lambda: a[:0, 1:2.0]) - assert_deprecated(lambda: a[:0.0:2, :0]) - assert_deprecated(lambda: a[:0.0,:]) - assert_deprecated(lambda: a[:, 0:4.0:2]) - # step as float. 
- assert_deprecated(lambda: a[::1.0]) - assert_deprecated(lambda: a[0:, :2:2.0]) - assert_deprecated(lambda: a[1::4.0, :0]) - assert_deprecated(lambda: a[::5.0,:]) - assert_deprecated(lambda: a[:, 0:4:2.0]) - # mixed. - assert_deprecated(lambda: a[1.0:2:2.0], num=2) - assert_deprecated(lambda: a[1.0::2.0], num=2) - assert_deprecated(lambda: a[0:, :2.0:2.0], num=2) - assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2) - assert_deprecated(lambda: a[1.0:5.0:5.0,:], num=3) - assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3) - # should still get the DeprecationWarning if step = 0. - assert_deprecated(lambda: a[::0.0], function_fails=True) - - - def test_valid_slicing(self): - a = np.array([[[5]]]) - assert_not_deprecated = self.assert_not_deprecated - - assert_not_deprecated(lambda: a[::]) - assert_not_deprecated(lambda: a[0:]) - assert_not_deprecated(lambda: a[:2]) - assert_not_deprecated(lambda: a[0:2]) - assert_not_deprecated(lambda: a[::2]) - assert_not_deprecated(lambda: a[1::2]) - assert_not_deprecated(lambda: a[:2:2]) - assert_not_deprecated(lambda: a[1:2:2]) - - - def test_non_integer_argument_deprecations(self): - a = np.array([[5]]) - - self.assert_deprecated(np.reshape, args=(a, (1., 1., -1)), num=2) - self.assert_deprecated(np.reshape, args=(a, (np.array(1.), -1))) - self.assert_deprecated(np.take, args=(a, [0], 1.)) - self.assert_deprecated(np.take, args=(a, [0], np.float64(1.))) - - - def test_non_integer_sequence_multiplication(self): - # Numpy scalar sequence multiply should not work with non-integers - def mult(a, b): - return a * b - self.assert_deprecated(mult, args=([1], np.float_(3))) - self.assert_not_deprecated(mult, args=([1], np.int_(3))) - - - def test_reduce_axis_float_index(self): - d = np.zeros((3,3,3)) - self.assert_deprecated(np.min, args=(d, 0.5)) - self.assert_deprecated(np.min, num=1, args=(d, (0.5, 1))) - self.assert_deprecated(np.min, num=1, args=(d, (1, 2.2))) - self.assert_deprecated(np.min, num=2, args=(d, (.2, 1.2))) - - -class TestBooleanArgumentDeprecation(_DeprecationTestCase): - """This tests that using a boolean as integer argument/indexing is - deprecated. - - This should be kept in sync with TestFloatNonIntegerArgumentDeprecation - and like it is handled in PyArray_PyIntAsIntp. - """ - message = "using a boolean instead of an integer " \ - "will result in an error in the future" - - def test_bool_as_int_argument(self): - a = np.array([[[1]]]) - - self.assert_deprecated(np.reshape, args=(a, (True, -1))) - self.assert_deprecated(np.reshape, args=(a, (np.bool_(True), -1))) - # Note that operator.index(np.array(True)) does not work, a boolean - # array is thus also deprecated, but not with the same message: - assert_raises(TypeError, operator.index, np.array(True)) - self.assert_deprecated(np.take, args=(a, [0], False)) - self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3) - self.assert_deprecated(lambda: a[False, 0], exceptions=IndexError) - self.assert_deprecated(lambda: a[False, 0, 0], exceptions=IndexError) - - -class TestArrayToIndexDeprecation(_DeprecationTestCase): - """This tests that creating an an index from an array is deprecated - if the array is not 0d. - - This can probably be deprecated somewhat faster then the integer - deprecations. The deprecation period started with NumPy 1.8. 
- For deprecation this needs changing of array_index in number.c - """ - message = "converting an array with ndim \> 0 to an index will result " \ - "in an error in the future" - - def test_array_to_index_deprecation(self): - # This drops into the non-integer deprecation, which is ignored here, - # so no exception is expected. The raising is effectively tested above. - a = np.array([[[1]]]) - - self.assert_deprecated(operator.index, args=(np.array([1]),)) - self.assert_deprecated(np.reshape, args=(a, (a, -1)), exceptions=()) - self.assert_deprecated(np.take, args=(a, [0], a), exceptions=()) - # Check slicing. Normal indexing checks arrays specifically. - self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3) - - -class TestNonIntegerArrayLike(_DeprecationTestCase): - """Tests that array likes, i.e. lists give a deprecation warning - when they cannot be safely cast to an integer. - """ - message = "non integer \(and non boolean\) array-likes will not be " \ - "accepted as indices in the future" - - def test_basic(self): - a = np.arange(10) - self.assert_deprecated(a.__getitem__, args=([0.5, 1.5],), - exceptions=IndexError) - self.assert_deprecated(a.__getitem__, args=((['1', '2'],),), - exceptions=IndexError) - - self.assert_not_deprecated(a.__getitem__, ([],)) - - - def test_boolean_futurewarning(self): - a = np.arange(10) - with warnings.catch_warnings(): - warnings.filterwarnings('always') - assert_warns(FutureWarning, a.__getitem__, [True]) - # Unfortunatly, the deprecation warning takes precedence: - #assert_warns(FutureWarning, a.__getitem__, True) - - with warnings.catch_warnings(): - warnings.filterwarnings('error') - assert_raises(FutureWarning, a.__getitem__, [True]) - #assert_raises(FutureWarning, a.__getitem__, True) - - -class TestMultipleEllipsisDeprecation(_DeprecationTestCase): - message = "an index can only have a single Ellipsis \(`...`\); replace " \ - "all but one with slices \(`:`\)." - - def test_basic(self): - a = np.arange(10) - self.assert_deprecated(a.__getitem__, args=((Ellipsis, Ellipsis),)) - - with warnings.catch_warnings(): - warnings.filterwarnings('ignore', '', DeprecationWarning) - # Just check that this works: - b = a[...,...] - assert_array_equal(a, b) - assert_raises(IndexError, a.__getitem__, ((Ellipsis, ) * 3,)) - - -class TestBooleanSubtractDeprecations(_DeprecationTestCase): - """Test deprecation of boolean `-`. While + and * are well - defined, - is not and even a corrected form seems to have - no real uses. - - The deprecation process was started in NumPy 1.9. - """ - message = r"numpy boolean .* \(the .* `-` operator\) is deprecated, " \ - "use the bitwise" - - def test_operator_deprecation(self): - array = np.array([True]) - generic = np.bool_(True) - - # Minus operator/subtract ufunc: - self.assert_deprecated(operator.sub, args=(array, array)) - self.assert_deprecated(operator.sub, args=(generic, generic)) - - # Unary minus/negative ufunc: - self.assert_deprecated(operator.neg, args=(array,)) - self.assert_deprecated(operator.neg, args=(generic,)) - - -class TestRankDeprecation(_DeprecationTestCase): - """Test that np.rank is deprecated. The function should simply be - removed. The VisibleDeprecationWarning may become unnecessary. - """ - def test(self): - a = np.arange(10) - assert_warns(np.VisibleDeprecationWarning, np.rank, a) - - -class TestComparisonDepreactions(_DeprecationTestCase): - """This tests the deprecation, for non-elementwise comparison logic. 
- This used to mean that when an error occured during element-wise comparison - (i.e. broadcasting) NotImplemented was returned, but also in the comparison - itself, False was given instead of the error. - - Also test FutureWarning for the None comparison. - """ - - message = "elementwise comparison failed; " \ - "this will raise the error in the future." - - def test_normal_types(self): - for op in (operator.eq, operator.ne): - # Broadcasting errors: - self.assert_deprecated(op, args=(np.zeros(3), [])) - a = np.zeros(3, dtype='i,i') - # (warning is issued a couple of times here) - self.assert_deprecated(op, args=(a, a[:-1]), num=None) - - # Element comparison error (numpy array can't be compared). - a = np.array([1, np.array([1,2,3])], dtype=object) - b = np.array([1, np.array([1,2,3])], dtype=object) - self.assert_deprecated(op, args=(a, b), num=None) - - - def test_string(self): - # For two string arrays, strings always raised the broadcasting error: - a = np.array(['a', 'b']) - b = np.array(['a', 'b', 'c']) - assert_raises(ValueError, lambda x, y: x == y, a, b) - - # The empty list is not cast to string, this is only to document - # that fact (it likely should be changed). This means that the - # following works (and returns False) due to dtype mismatch: - a == [] - - - def test_none_comparison(self): - # Test comparison of None, which should result in elementwise - # comparison in the future. [1, 2] == None should be [False, False]. - with warnings.catch_warnings(): - warnings.filterwarnings('always', '', FutureWarning) - assert_warns(FutureWarning, operator.eq, np.arange(3), None) - assert_warns(FutureWarning, operator.ne, np.arange(3), None) - - with warnings.catch_warnings(): - warnings.filterwarnings('error', '', FutureWarning) - assert_raises(FutureWarning, operator.eq, np.arange(3), None) - assert_raises(FutureWarning, operator.ne, np.arange(3), None) - - def test_scalar_none_comparison(self): - # Scalars should still just return false and not give a warnings. - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', FutureWarning) - assert_(not np.float32(1) == None) - assert_(not np.str_('test') == None) - # This is dubious (see below): - assert_(not np.datetime64('NaT') == None) - - assert_(np.float32(1) != None) - assert_(np.str_('test') != None) - # This is dubious (see below): - assert_(np.datetime64('NaT') != None) - assert_(len(w) == 0) - - # For documentaiton purpose, this is why the datetime is dubious. - # At the time of deprecation this was no behaviour change, but - # it has to be considered when the deprecations is done. - assert_(np.equal(np.datetime64('NaT'), None)) - - -class TestIdentityComparisonDepreactions(_DeprecationTestCase): - """This tests the equal and not_equal object ufuncs identity check - deprecation. This was due to the usage of PyObject_RichCompareBool. - - This tests that for example for `a = np.array([np.nan], dtype=object)` - `a == a` it is warned that False and not `np.nan is np.nan` is returned. - - Should be kept in sync with TestComparisonDepreactions and new tests - added when the deprecation is over. Requires only removing of @identity@ - (and blocks) from the ufunc loops.c.src of the OBJECT comparisons. - """ - - message = "numpy .* will not check object identity in the future." 
- - def test_identity_equality_mismatch(self): - a = np.array([np.nan], dtype=object) - - with warnings.catch_warnings(): - warnings.filterwarnings('always', '', FutureWarning) - assert_warns(FutureWarning, np.equal, a, a) - assert_warns(FutureWarning, np.not_equal, a, a) - - with warnings.catch_warnings(): - warnings.filterwarnings('error', '', FutureWarning) - assert_raises(FutureWarning, np.equal, a, a) - assert_raises(FutureWarning, np.not_equal, a, a) - # And the other do not warn: - with np.errstate(invalid='ignore'): - np.less(a, a) - np.greater(a, a) - np.less_equal(a, a) - np.greater_equal(a, a) - - - def test_comparison_error(self): - class FunkyType(object): - def __eq__(self, other): - raise TypeError("I won't compare") - def __ne__(self, other): - raise TypeError("I won't compare") - - a = np.array([FunkyType()]) - self.assert_deprecated(np.equal, args=(a, a)) - self.assert_deprecated(np.not_equal, args=(a, a)) - - - def test_bool_error(self): - # The comparison result cannot be interpreted as a bool - a = np.array([np.array([1, 2, 3]), None], dtype=object) - self.assert_deprecated(np.equal, args=(a, a)) - self.assert_deprecated(np.not_equal, args=(a, a)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_dtype.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_dtype.py deleted file mode 100644 index 18660351cb6a8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_dtype.py +++ /dev/null @@ -1,542 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import numpy as np -from numpy.testing import * - -def assert_dtype_equal(a, b): - assert_equal(a, b) - assert_equal(hash(a), hash(b), - "two equivalent types do not hash to the same value !") - -def assert_dtype_not_equal(a, b): - assert_(a != b) - assert_(hash(a) != hash(b), - "two different types hash to the same value !") - -class TestBuiltin(TestCase): - def test_run(self): - """Only test hash runs at all.""" - for t in [np.int, np.float, np.complex, np.int32, np.str, np.object, - np.unicode]: - dt = np.dtype(t) - hash(dt) - - def test_dtype(self): - # Make sure equivalent byte order char hash the same (e.g. < and = on - # little endian) - for t in [np.int, np.float]: - dt = np.dtype(t) - dt2 = dt.newbyteorder("<") - dt3 = dt.newbyteorder(">") - if dt == dt2: - self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test") - assert_dtype_equal(dt, dt2) - else: - self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test") - assert_dtype_equal(dt, dt3) - - def test_equivalent_dtype_hashing(self): - # Make sure equivalent dtypes with different type num hash equal - uintp = np.dtype(np.uintp) - if uintp.itemsize == 4: - left = uintp - right = np.dtype(np.uint32) - else: - left = uintp - right = np.dtype(np.ulonglong) - self.assertTrue(left == right) - self.assertTrue(hash(left) == hash(right)) - - def test_invalid_types(self): - # Make sure invalid type strings raise a warning. - # For now, display a deprecation warning for invalid - # type sizes. In the future this should be changed - # to an exception. 
- - assert_warns(DeprecationWarning, np.dtype, 'O3') - assert_warns(DeprecationWarning, np.dtype, 'O5') - assert_warns(DeprecationWarning, np.dtype, 'O7') - assert_warns(DeprecationWarning, np.dtype, 'b3') - assert_warns(DeprecationWarning, np.dtype, 'h4') - assert_warns(DeprecationWarning, np.dtype, 'I5') - assert_warns(DeprecationWarning, np.dtype, 'e3') - assert_warns(DeprecationWarning, np.dtype, 'f5') - - if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: - assert_warns(DeprecationWarning, np.dtype, 'g12') - elif np.dtype('g').itemsize == 12: - assert_warns(DeprecationWarning, np.dtype, 'g16') - - if np.dtype('l').itemsize == 8: - assert_warns(DeprecationWarning, np.dtype, 'l4') - assert_warns(DeprecationWarning, np.dtype, 'L4') - else: - assert_warns(DeprecationWarning, np.dtype, 'l8') - assert_warns(DeprecationWarning, np.dtype, 'L8') - - if np.dtype('q').itemsize == 8: - assert_warns(DeprecationWarning, np.dtype, 'q4') - assert_warns(DeprecationWarning, np.dtype, 'Q4') - else: - assert_warns(DeprecationWarning, np.dtype, 'q8') - assert_warns(DeprecationWarning, np.dtype, 'Q8') - - def test_bad_param(self): - # Can't give a size that's too small - assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i4', 'i1'], - 'offsets':[0, 4], - 'itemsize':4}) - # If alignment is enabled, the alignment (4) must divide the itemsize - assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i4', 'i1'], - 'offsets':[0, 4], - 'itemsize':9}, align=True) - # If alignment is enabled, the individual fields must be aligned - assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i1', 'f4'], - 'offsets':[0, 2]}, align=True) - -class TestRecord(TestCase): - def test_equivalent_record(self): - """Test whether equivalent record dtypes hash the same.""" - a = np.dtype([('yo', np.int)]) - b = np.dtype([('yo', np.int)]) - assert_dtype_equal(a, b) - - def test_different_names(self): - # In theory, they may hash the same (collision) ? - a = np.dtype([('yo', np.int)]) - b = np.dtype([('ye', np.int)]) - assert_dtype_not_equal(a, b) - - def test_different_titles(self): - # In theory, they may hash the same (collision) ? - a = np.dtype({'names': ['r', 'b'], - 'formats': ['u1', 'u1'], - 'titles': ['Red pixel', 'Blue pixel']}) - b = np.dtype({'names': ['r', 'b'], - 'formats': ['u1', 'u1'], - 'titles': ['RRed pixel', 'Blue pixel']}) - assert_dtype_not_equal(a, b) - - def test_not_lists(self): - """Test if an appropriate exception is raised when passing bad values to - the dtype constructor. 
- """ - self.assertRaises(TypeError, np.dtype, - dict(names=set(['A', 'B']), formats=['f8', 'i4'])) - self.assertRaises(TypeError, np.dtype, - dict(names=['A', 'B'], formats=set(['f8', 'i4']))) - - def test_aligned_size(self): - # Check that structured dtypes get padded to an aligned size - dt = np.dtype('i4, i1', align=True) - assert_equal(dt.itemsize, 8) - dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True) - assert_equal(dt.itemsize, 8) - dt = np.dtype({'names':['f0', 'f1'], - 'formats':['i4', 'u1'], - 'offsets':[0, 4]}, align=True) - assert_equal(dt.itemsize, 8) - dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True) - assert_equal(dt.itemsize, 8) - # Nesting should preserve that alignment - dt1 = np.dtype([('f0', 'i4'), - ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), - ('f2', 'i1')], align=True) - assert_equal(dt1.itemsize, 20) - dt2 = np.dtype({'names':['f0', 'f1', 'f2'], - 'formats':['i4', - [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], - 'i1'], - 'offsets':[0, 4, 16]}, align=True) - assert_equal(dt2.itemsize, 20) - dt3 = np.dtype({'f0': ('i4', 0), - 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), - 'f2': ('i1', 16)}, align=True) - assert_equal(dt3.itemsize, 20) - assert_equal(dt1, dt2) - assert_equal(dt2, dt3) - # Nesting should preserve packing - dt1 = np.dtype([('f0', 'i4'), - ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), - ('f2', 'i1')], align=False) - assert_equal(dt1.itemsize, 11) - dt2 = np.dtype({'names':['f0', 'f1', 'f2'], - 'formats':['i4', - [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], - 'i1'], - 'offsets':[0, 4, 10]}, align=False) - assert_equal(dt2.itemsize, 11) - dt3 = np.dtype({'f0': ('i4', 0), - 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), - 'f2': ('i1', 10)}, align=False) - assert_equal(dt3.itemsize, 11) - assert_equal(dt1, dt2) - assert_equal(dt2, dt3) - - def test_union_struct(self): - # Should be able to create union dtypes - dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])]) - assert_equal(str(dt), - "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,)), " - "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))])]") - - # If the sticky aligned flag is set to True, it makes the - # str() function use a dict representation with an 'aligned' flag - dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], - (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])], - align=True) - assert_equal(str(dt), - "{'names':['top','bottom'], " - "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,))," - "[('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))]], " - "'offsets':[0,76800], " - "'itemsize':80000, " - "'aligned':True}") - assert_equal(np.dtype(eval(str(dt))), dt) - - dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], - 'offsets': [0, 1, 2], - 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) - assert_equal(str(dt), - "[(('Red pixel', 'r'), 'u1'), " - "(('Green pixel', 'g'), 'u1'), " - "(('Blue pixel', 'b'), 'u1')]") - - dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], - 'formats': ['f4', (64, 64)), (1,)), - ('rtile', '>f4', (64, 36))], (3,)), - ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), - ('bright', '>f4', (8, 36))])]) - assert_equal(repr(dt), - "dtype([('top', [('tiles', ('>f4', (64, 
64)), (1,)), " - "('rtile', '>f4', (64, 36))], (3,)), " - "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " - "('bright', '>f4', (8, 36))])])") - - dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], - 'offsets': [0, 1, 2], - 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, - align=True) - assert_equal(repr(dt), - "dtype([(('Red pixel', 'r'), 'u1'), " - "(('Green pixel', 'g'), 'u1'), " - "(('Blue pixel', 'b'), 'u1')], align=True)") - - dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], - 'formats': ['<u4', 'u1', 'u1', 'u1'], - 'offsets': [0, 0, 1, 2], - 'titles': ['Color', 'Red pixel', - 'Green pixel', 'Blue pixel']}) - - @dec.skipif(sys.version_info[0] >= 3) - def test_dtype_str_with_long_in_shape(self): - # Pull request #376 - dt = np.dtype('(1L,)i4') - - def test_base_dtype_with_object_type(self): - # Issue gh-2798 - a = np.array(['a'], dtype="O").astype(("O", [("name", "O")])) - - def test_empty_string_to_object(self): - # Pull request #4722 - np.array(["", ""]).astype(object) - -class TestDtypeAttributeDeletion(object): - - def test_dtype_non_writable_attributes_deletion(self): - dt = np.dtype(np.double) - attr = ["subdtype", "descr", "str", "name", "base", "shape", - "isbuiltin", "isnative", "isalignedstruct", "fields", - "metadata", "hasobject"] - - if sys.version[:3] == '2.4': - error = TypeError - else: - error = AttributeError - - for s in attr: - assert_raises(error, delattr, dt, s) - - - def test_dtype_writable_attributes_deletion(self): - dt = np.dtype(np.double) - attr = ["names"] - for s in attr: - assert_raises(AttributeError, delattr, dt, s) - -class TestDtypeAttributes(TestCase): - - def test_name_builtin(self): - for t in np.typeDict.values(): - name = t.__name__ - if name.endswith('_'): - name = name[:-1] - assert_equal(np.dtype(t).name, name) - - def test_name_dtype_subclass(self): - # Ticket #4357 - class user_def_subcls(np.void): pass - assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls') - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_einsum.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_einsum.py deleted file mode 100644 index 226bde0a3fadc..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_einsum.py +++ /dev/null @@ -1,573 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - import warnings -from decimal import Decimal - -import numpy as np -from numpy.testing import * - -class TestEinSum(TestCase): - def test_einsum_errors(self): - # Need enough arguments - assert_raises(ValueError, np.einsum) - assert_raises(ValueError, np.einsum, "") - - # subscripts must be a string - assert_raises(TypeError, np.einsum, 0, 0) - - # out parameter must be an array - assert_raises(TypeError, np.einsum, "", 0, out='test') - - # order parameter must be a valid order - assert_raises(TypeError, np.einsum, "", 0, order='W') - - # casting parameter must be a valid casting - assert_raises(ValueError, np.einsum, "", 0, casting='blah') - - # dtype parameter must be a valid dtype - assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type') - - # other keyword arguments are rejected - assert_raises(TypeError, np.einsum, "", 0, bad_arg=0) - - # issue 4528 revealed a segfault with this call - assert_raises(TypeError, np.einsum, *(None,)*63) - - # number of operands must match count in subscripts string - assert_raises(ValueError, np.einsum, "", 0, 0) - assert_raises(ValueError, np.einsum, ",", 0, [0], [0]) - assert_raises(ValueError, np.einsum, ",", [0]) - - # can't have more subscripts than dimensions in the operand - assert_raises(ValueError, np.einsum, 
"i", 0) - assert_raises(ValueError, np.einsum, "ij", [0, 0]) - assert_raises(ValueError, np.einsum, "...i", 0) - assert_raises(ValueError, np.einsum, "i...j", [0, 0]) - assert_raises(ValueError, np.einsum, "i...", 0) - assert_raises(ValueError, np.einsum, "ij...", [0, 0]) - - # invalid ellipsis - assert_raises(ValueError, np.einsum, "i..", [0, 0]) - assert_raises(ValueError, np.einsum, ".i...", [0, 0]) - assert_raises(ValueError, np.einsum, "j->..j", [0, 0]) - assert_raises(ValueError, np.einsum, "j->.j...", [0, 0]) - - # invalid subscript character - assert_raises(ValueError, np.einsum, "i%...", [0, 0]) - assert_raises(ValueError, np.einsum, "...j$", [0, 0]) - assert_raises(ValueError, np.einsum, "i->&", [0, 0]) - - # output subscripts must appear in input - assert_raises(ValueError, np.einsum, "i->ij", [0, 0]) - - # output subscripts may only be specified once - assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]]) - - # dimensions much match when being collapsed - assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2, 3)) - assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2, 3)) - - # broadcasting to new dimensions must be enabled explicitly - assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3)) - assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], - out=np.arange(4).reshape(2, 2)) - - def test_einsum_views(self): - # pass-through - a = np.arange(6) - a.shape = (2, 3) - - b = np.einsum("...", a) - assert_(b.base is a) - - b = np.einsum(a, [Ellipsis]) - assert_(b.base is a) - - b = np.einsum("ij", a) - assert_(b.base is a) - assert_equal(b, a) - - b = np.einsum(a, [0, 1]) - assert_(b.base is a) - assert_equal(b, a) - - # transpose - a = np.arange(6) - a.shape = (2, 3) - - b = np.einsum("ji", a) - assert_(b.base is a) - assert_equal(b, a.T) - - b = np.einsum(a, [1, 0]) - assert_(b.base is a) - assert_equal(b, a.T) - - # diagonal - a = np.arange(9) - a.shape = (3, 3) - - b = np.einsum("ii->i", a) - assert_(b.base is a) - assert_equal(b, [a[i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0], [0]) - assert_(b.base is a) - assert_equal(b, [a[i, i] for i in range(3)]) - - # diagonal with various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) - - b = np.einsum("...ii->...i", a) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) - - b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0]) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) - - b = np.einsum("ii...->...i", a) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(2, 0, 1)]) - - b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0]) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(2, 0, 1)]) - - b = np.einsum("...ii->i...", a) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis]) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum("jii->ij", a) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum(a, [1, 0, 0], [0, 1]) - assert_(b.base is a) - assert_equal(b, [a[:, i, i] for i in range(3)]) - - b = np.einsum("ii...->i...", a) - assert_(b.base is a) - assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis]) - assert_(b.base is a) - assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for 
i in range(3)]) - - b = np.einsum("i...i->i...", a) - assert_(b.base is a) - assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) - - b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis]) - assert_(b.base is a) - assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) - - b = np.einsum("i...i->...i", a) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(1, 0, 2)]) - - b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0]) - assert_(b.base is a) - assert_equal(b, [[x[i, i] for i in range(3)] - for x in a.transpose(1, 0, 2)]) - - # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) - - b = np.einsum("iii->i", a) - assert_(b.base is a) - assert_equal(b, [a[i, i, i] for i in range(3)]) - - b = np.einsum(a, [0, 0, 0], [0]) - assert_(b.base is a) - assert_equal(b, [a[i, i, i] for i in range(3)]) - - # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) - - b = np.einsum("ijk->jik", a) - assert_(b.base is a) - assert_equal(b, a.swapaxes(0, 1)) - - b = np.einsum(a, [0, 1, 2], [1, 0, 2]) - assert_(b.base is a) - assert_equal(b, a.swapaxes(0, 1)) - - def check_einsum_sums(self, dtype): - # Check various sums. Does many sizes to exercise unrolled loops. - - # sum(a, axis=-1) - for n in range(1, 17): - a = np.arange(n, dtype=dtype) - assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [0], []), - np.sum(a, axis=-1).astype(dtype)) - - for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("...i->...", a), - np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis]), - np.sum(a, axis=-1).astype(dtype)) - - # sum(a, axis=0) - for n in range(1, 17): - a = np.arange(2*n, dtype=dtype).reshape(2, n) - assert_equal(np.einsum("i...->...", a), - np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]), - np.sum(a, axis=0).astype(dtype)) - - for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("i...->...", a), - np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]), - np.sum(a, axis=0).astype(dtype)) - - # trace(a) - for n in range(1, 17): - a = np.arange(n*n, dtype=dtype).reshape(n, n) - assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype)) - assert_equal(np.einsum(a, [0, 0]), np.trace(a).astype(dtype)) - - # multiply(a, b) - assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case - for n in range(1, 17): - a = np.arange(3*n, dtype=dtype).reshape(3, n) - b = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) - assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b)) - assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]), - np.multiply(a, b)) - - # inner(a,b) - for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b)) - assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0]), - np.inner(a, b)) - - for n in range(1, 11): - a = np.arange(n*3*2, dtype=dtype).reshape(n, 3, 2) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T) - assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis]), - np.inner(a.T, b.T).T) - - # outer(a,b) - for n in range(1, 17): - a = np.arange(3, dtype=dtype)+1 - b = np.arange(n, dtype=dtype)+1 - assert_equal(np.einsum("i,j", a, b), np.outer(a, b)) - assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b)) - - # Suppress the 
complex warnings for the 'as f8' tests - with warnings.catch_warnings(): - warnings.simplefilter('ignore', np.ComplexWarning) - - # matvec(a,b) / a.dot(b) where a is matrix, b is vector - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("ij, j", a, b), np.dot(a, b)) - assert_equal(np.einsum(a, [0, 1], b, [1]), np.dot(a, b)) - - c = np.arange(4, dtype=dtype) - np.einsum("ij,j", a, b, out=c, - dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a, [0, 1], b, [1], out=c, - dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n, dtype=dtype) - assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T)) - assert_equal(np.einsum(a.T, [1, 0], b.T, [1]), np.dot(b.T, a.T)) - - c = np.arange(4, dtype=dtype) - np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(b.T.astype('f8'), - a.T.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a.T, [1, 0], b.T, [1], out=c, - dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(b.T.astype('f8'), - a.T.astype('f8')).astype(dtype)) - - # matmat(a,b) / a.dot(b) where a is matrix, b is matrix - for n in range(1, 17): - if n < 8 or dtype != 'f2': - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) - assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b)) - assert_equal(np.einsum(a, [0, 1], b, [1, 2]), np.dot(a, b)) - - for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) - c = np.arange(24, dtype=dtype).reshape(4, 6) - np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - c[...] = 0 - np.einsum(a, [0, 1], b, [1, 2], out=c, - dtype='f8', casting='unsafe') - assert_equal(c, - np.dot(a.astype('f8'), - b.astype('f8')).astype(dtype)) - - # matrix triple product (note this is not currently an efficient - # way to multiply 3 matrices) - a = np.arange(12, dtype=dtype).reshape(3, 4) - b = np.arange(20, dtype=dtype).reshape(4, 5) - c = np.arange(30, dtype=dtype).reshape(5, 6) - if dtype != 'f2': - assert_equal(np.einsum("ij,jk,kl", a, b, c), - a.dot(b).dot(c)) - assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3]), - a.dot(b).dot(c)) - - d = np.arange(18, dtype=dtype).reshape(3, 6) - np.einsum("ij,jk,kl", a, b, c, out=d, - dtype='f8', casting='unsafe') - assert_equal(d, a.astype('f8').dot(b.astype('f8') - ).dot(c.astype('f8')).astype(dtype)) - d[...] = 0 - np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, - dtype='f8', casting='unsafe') - assert_equal(d, a.astype('f8').dot(b.astype('f8') - ).dot(c.astype('f8')).astype(dtype)) - - # tensordot(a, b) - if np.dtype(dtype) != np.dtype('f2'): - a = np.arange(60, dtype=dtype).reshape(3, 4, 5) - b = np.arange(24, dtype=dtype).reshape(4, 3, 2) - assert_equal(np.einsum("ijk, jil -> kl", a, b), - np.tensordot(a, b, axes=([1, 0], [0, 1]))) - assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), - np.tensordot(a, b, axes=([1, 0], [0, 1]))) - - c = np.arange(10, dtype=dtype).reshape(5, 2) - np.einsum("ijk,jil->kl", a, b, out=c, - dtype='f8', casting='unsafe') - assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1, 0], [0, 1])).astype(dtype)) - c[...] 
= 0 - np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, - dtype='f8', casting='unsafe') - assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1, 0], [0, 1])).astype(dtype)) - - # logical_and(logical_and(a!=0, b!=0), c!=0) - a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) - b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) - c = np.array([True, True, False, True, True, False, True, True]) - assert_equal(np.einsum("i,i,i->i", a, b, c, - dtype='?', casting='unsafe'), - np.logical_and(np.logical_and(a!=0, b!=0), c!=0)) - assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], - dtype='?', casting='unsafe'), - np.logical_and(np.logical_and(a!=0, b!=0), c!=0)) - - a = np.arange(9, dtype=dtype) - assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) - assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) - assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) - assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) - - # Various stride0, contiguous, and SSE aligned variants - for n in range(1, 25): - a = np.arange(n, dtype=dtype) - if np.dtype(dtype).itemsize > 1: - assert_equal(np.einsum("...,...", a, a), np.multiply(a, a)) - assert_equal(np.einsum("i,i", a, a), np.dot(a, a)) - assert_equal(np.einsum("i,->i", a, 2), 2*a) - assert_equal(np.einsum(",i->i", 2, a), 2*a) - assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a)) - assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a)) - - assert_equal(np.einsum("...,...", a[1:], a[:-1]), - np.multiply(a[1:], a[:-1])) - assert_equal(np.einsum("i,i", a[1:], a[:-1]), - np.dot(a[1:], a[:-1])) - assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:]) - assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:]) - assert_equal(np.einsum("i,->", a[1:], 2), 2*np.sum(a[1:])) - assert_equal(np.einsum(",i->", 2, a[1:]), 2*np.sum(a[1:])) - - # An object array, summed as the data type - a = np.arange(9, dtype=object) - - b = np.einsum("i->", a, dtype=dtype, casting='unsafe') - assert_equal(b, np.sum(a)) - assert_equal(b.dtype, np.dtype(dtype)) - - b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') - assert_equal(b, np.sum(a)) - assert_equal(b.dtype, np.dtype(dtype)) - - # A case which was failing (ticket #1885) - p = np.arange(2) + 1 - q = np.arange(4).reshape(2, 2) + 3 - r = np.arange(4).reshape(2, 2) + 7 - assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) - - def test_einsum_sums_int8(self): - self.check_einsum_sums('i1'); - - def test_einsum_sums_uint8(self): - self.check_einsum_sums('u1'); - - def test_einsum_sums_int16(self): - self.check_einsum_sums('i2'); - - def test_einsum_sums_uint16(self): - self.check_einsum_sums('u2'); - - def test_einsum_sums_int32(self): - self.check_einsum_sums('i4'); - - def test_einsum_sums_uint32(self): - self.check_einsum_sums('u4'); - - def test_einsum_sums_int64(self): - self.check_einsum_sums('i8'); - - def test_einsum_sums_uint64(self): - self.check_einsum_sums('u8'); - - def test_einsum_sums_float16(self): - self.check_einsum_sums('f2'); - - def test_einsum_sums_float32(self): - self.check_einsum_sums('f4'); - - def test_einsum_sums_float64(self): - self.check_einsum_sums('f8'); - - def test_einsum_sums_longdouble(self): - self.check_einsum_sums(np.longdouble); - - def test_einsum_sums_cfloat64(self): - self.check_einsum_sums('c8'); - - def test_einsum_sums_cfloat128(self): - self.check_einsum_sums('c16'); - - def test_einsum_sums_clongdouble(self): - self.check_einsum_sums(np.clongdouble); - - def test_einsum_misc(self): - # This call used to crash because of a bug in - # PyArray_AssignZero - a = np.ones((1, 
2)) - b = np.ones((2, 2, 1)) - assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) - - # The iterator had an issue with buffering this reduction - a = np.ones((5, 12, 4, 2, 3), np.int64) - b = np.ones((5, 12, 11), np.int64) - assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), - np.einsum('ijklm,ijn->', a, b)) - - # Issue #2027, was a problem in the contiguous 3-argument - # inner loop implementation - a = np.arange(1, 3) - b = np.arange(1, 5).reshape(2, 2) - c = np.arange(1, 9).reshape(4, 2) - assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), - [[[1, 3], [3, 9], [5, 15], [7, 21]], - [[8, 16], [16, 32], [24, 48], [32, 64]]]) - - def test_einsum_broadcast(self): - # Issue #2455 change in handling ellipsis - # remove the 'middle broadcast' error - # only use the 'RIGHT' iteration in prepare_op_axes - # adds auto broadcast on left where it belongs - # broadcast on right has to be explicit - - A = np.arange(2*3*4).reshape(2,3,4) - B = np.arange(3) - ref = np.einsum('ijk,j->ijk',A, B) - assert_equal(np.einsum('ij...,j...->ij...',A, B), ref) - assert_equal(np.einsum('ij...,...j->ij...',A, B), ref) - assert_equal(np.einsum('ij...,j->ij...',A, B), ref) # used to raise error - - A = np.arange(12).reshape((4,3)) - B = np.arange(6).reshape((3,2)) - ref = np.einsum('ik,kj->ij', A, B) - assert_equal(np.einsum('ik...,k...->i...', A, B), ref) - assert_equal(np.einsum('ik...,...kj->i...j', A, B), ref) - assert_equal(np.einsum('...k,kj', A, B), ref) # used to raise error - assert_equal(np.einsum('ik,k...->i...', A, B), ref) # used to raise error - - dims=[2,3,4,5]; - a = np.arange(np.prod(dims)).reshape(dims) - v = np.arange(dims[2]) - ref = np.einsum('ijkl,k->ijl', a, v) - assert_equal(np.einsum('ijkl,k', a, v), ref) - assert_equal(np.einsum('...kl,k', a, v), ref) # used to raise error - assert_equal(np.einsum('...kl,k...', a, v), ref) - # no real diff from 1st - - J,K,M=160,160,120; - A=np.arange(J*K*M).reshape(1,1,1,J,K,M) - B=np.arange(J*K*M*3).reshape(J,K,M,3) - ref = np.einsum('...lmn,...lmno->...o', A, B) - assert_equal(np.einsum('...lmn,lmno->...o', A, B), ref) # used to raise error - - def test_einsum_fixedstridebug(self): - # Issue #4485 obscure einsum bug - # This case revealed a bug in nditer where it reported a stride - # as 'fixed' (0) when it was in fact not fixed during processing - # (0 or 4). The reason for the bug was that the check for a fixed - # stride was using the information from the 2D inner loop reuse - # to restrict the iteration dimensions it had to validate to be - # the same, but that 2D inner loop reuse logic is only triggered - # during the buffer copying step, and hence it was invalid to - # rely on those values. The fix is to check all the dimensions - # of the stride in question, which in the test case reveals that - # the stride is not fixed. - # - # NOTE: This test is triggered by the fact that the default buffersize, - # used by einsum, is 8192, and 3*2731 = 8193, is larger than that - # and results in a mismatch between the buffering and the - # striding for operand A. - A = np.arange(2*3).reshape(2,3).astype(np.float32) - B = np.arange(2*3*2731).reshape(2,3,2731).astype(np.int16) - es = np.einsum('cl,cpx->lpx', A, B) - tp = np.tensordot(A, B, axes=(0, 0)) - assert_equal(es, tp) - # The following is the original test case from the bug report, - # made repeatable by changing random arrays to aranges. 
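# --- Illustrative sketch (not from the patched numpy sources; the original
# repro case from the bug report continues right below). A minimal, hedged
# restatement of the equivalence the test relies on: einsum('cl,cpx->lpx')
# contracts the shared first axis of A and B, which should match
# tensordot(A, B, axes=(0, 0)). Shapes and dtypes here are made up.
import numpy as np

A = np.arange(2 * 3, dtype=np.float32).reshape(2, 3)       # (c, l)
B = np.arange(2 * 4 * 5, dtype=np.int16).reshape(2, 4, 5)  # (c, p, x)
es = np.einsum('cl,cpx->lpx', A, B)
tp = np.tensordot(A, B, axes=(0, 0))
assert np.allclose(es, tp)                                 # both (3, 4, 5)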
- A = np.arange(3*3).reshape(3,3).astype(np.float64) - B = np.arange(3*3*64*64).reshape(3,3,64,64).astype(np.float32) - es = np.einsum ('cl,cpxy->lpxy', A,B) - tp = np.tensordot(A,B, axes=(0,0)) - assert_equal(es, tp) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_errstate.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_errstate.py deleted file mode 100644 index 7eb0aba2ef767..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_errstate.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform - -import numpy as np -from numpy.testing import TestCase, assert_, run_module_suite, dec - - -class TestErrstate(TestCase): - @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") - def test_invalid(self): - with np.errstate(all='raise', under='ignore'): - a = -np.arange(3) - # This should work - with np.errstate(invalid='ignore'): - np.sqrt(a) - # While this should fail! - try: - np.sqrt(a) - except FloatingPointError: - pass - else: - self.fail("Did not raise an invalid error") - - def test_divide(self): - with np.errstate(all='raise', under='ignore'): - a = -np.arange(3) - # This should work - with np.errstate(divide='ignore'): - a // 0 - # While this should fail! - try: - a // 0 - except FloatingPointError: - pass - else: - self.fail("Did not raise divide by zero error") - - def test_errcall(self): - def foo(*args): - print(args) - olderrcall = np.geterrcall() - with np.errstate(call=foo): - assert_(np.geterrcall() is foo, 'call is not foo') - with np.errstate(call=None): - assert_(np.geterrcall() is None, 'call is not None') - assert_(np.geterrcall() is olderrcall, 'call is not olderrcall') - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_function_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_function_base.py deleted file mode 100644 index f6ffd5a1048af..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_function_base.py +++ /dev/null @@ -1,111 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy import logspace, linspace, dtype, array - -class TestLogspace(TestCase): - - def test_basic(self): - y = logspace(0, 6) - assert_(len(y) == 50) - y = logspace(0, 6, num=100) - assert_(y[-1] == 10 ** 6) - y = logspace(0, 6, endpoint=0) - assert_(y[-1] < 10 ** 6) - y = logspace(0, 6, num=7) - assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) - - def test_dtype(self): - y = logspace(0, 6, dtype='float32') - assert_equal(y.dtype, dtype('float32')) - y = logspace(0, 6, dtype='float64') - assert_equal(y.dtype, dtype('float64')) - y = logspace(0, 6, dtype='int32') - assert_equal(y.dtype, dtype('int32')) - - -class TestLinspace(TestCase): - - def test_basic(self): - y = linspace(0, 10) - assert_(len(y) == 50) - y = linspace(2, 10, num=100) - assert_(y[-1] == 10) - y = linspace(2, 10, endpoint=0) - assert_(y[-1] < 10) - - def test_corner(self): - y = list(linspace(0, 1, 1)) - assert_(y == [0.0], y) - y = list(linspace(0, 1, 2.5)) - assert_(y == [0.0, 1.0]) - - def test_type(self): - t1 = linspace(0, 1, 0).dtype - t2 = linspace(0, 1, 1).dtype - t3 = linspace(0, 1, 2).dtype - assert_equal(t1, t2) - assert_equal(t2, t3) - - def test_dtype(self): - y = linspace(0, 6, dtype='float32') - assert_equal(y.dtype, dtype('float32')) - y = linspace(0, 6, 
dtype='float64') - assert_equal(y.dtype, dtype('float64')) - y = linspace(0, 6, dtype='int32') - assert_equal(y.dtype, dtype('int32')) - - def test_array_scalar(self): - lim1 = array([-120, 100], dtype="int8") - lim2 = array([120, -100], dtype="int8") - lim3 = array([1200, 1000], dtype="uint16") - t1 = linspace(lim1[0], lim1[1], 5) - t2 = linspace(lim2[0], lim2[1], 5) - t3 = linspace(lim3[0], lim3[1], 5) - t4 = linspace(-120.0, 100.0, 5) - t5 = linspace(120.0, -100.0, 5) - t6 = linspace(1200.0, 1000.0, 5) - assert_equal(t1, t4) - assert_equal(t2, t5) - assert_equal(t3, t6) - - def test_complex(self): - lim1 = linspace(1 + 2j, 3 + 4j, 5) - t1 = array([ 1.0+2.j , 1.5+2.5j, 2.0+3.j , 2.5+3.5j, 3.0+4.j]) - lim2 = linspace(1j, 10, 5) - t2 = array([ 0.0+1.j , 2.5+0.75j, 5.0+0.5j , 7.5+0.25j, 10.0+0.j]) - assert_equal(lim1, t1) - assert_equal(lim2, t2) - - def test_physical_quantities(self): - class PhysicalQuantity(float): - def __new__(cls, value): - return float.__new__(cls, value) - - def __add__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(x) + float(self)) - __radd__ = __add__ - - def __sub__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(self) - float(x)) - - def __rsub__(self, x): - assert_(isinstance(x, PhysicalQuantity)) - return PhysicalQuantity(float(x) - float(self)) - - def __mul__(self, x): - return PhysicalQuantity(float(x) * float(self)) - __rmul__ = __mul__ - - def __div__(self, x): - return PhysicalQuantity(float(self) / float(x)) - - def __rdiv__(self, x): - return PhysicalQuantity(float(x) / float(self)) - - - a = PhysicalQuantity(0.0) - b = PhysicalQuantity(1.0) - assert_equal(linspace(a, b), linspace(0.0, 1.0)) \ No newline at end of file diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_getlimits.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_getlimits.py deleted file mode 100644 index 6ccdbd5ded129..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_getlimits.py +++ /dev/null @@ -1,86 +0,0 @@ -""" Test functions for limits module. 
- -""" -from __future__ import division, absolute_import, print_function - -from numpy.testing import * - -from numpy.core import finfo, iinfo -from numpy import half, single, double, longdouble -import numpy as np - -################################################## - -class TestPythonFloat(TestCase): - def test_singleton(self): - ftype = finfo(float) - ftype2 = finfo(float) - assert_equal(id(ftype), id(ftype2)) - -class TestHalf(TestCase): - def test_singleton(self): - ftype = finfo(half) - ftype2 = finfo(half) - assert_equal(id(ftype), id(ftype2)) - -class TestSingle(TestCase): - def test_singleton(self): - ftype = finfo(single) - ftype2 = finfo(single) - assert_equal(id(ftype), id(ftype2)) - -class TestDouble(TestCase): - def test_singleton(self): - ftype = finfo(double) - ftype2 = finfo(double) - assert_equal(id(ftype), id(ftype2)) - -class TestLongdouble(TestCase): - def test_singleton(self,level=2): - ftype = finfo(longdouble) - ftype2 = finfo(longdouble) - assert_equal(id(ftype), id(ftype2)) - -class TestIinfo(TestCase): - def test_basic(self): - dts = list(zip(['i1', 'i2', 'i4', 'i8', - 'u1', 'u2', 'u4', 'u8'], - [np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64])) - for dt1, dt2 in dts: - assert_equal(iinfo(dt1).min, iinfo(dt2).min) - assert_equal(iinfo(dt1).max, iinfo(dt2).max) - self.assertRaises(ValueError, iinfo, 'f4') - - def test_unsigned_max(self): - types = np.sctypes['uint'] - for T in types: - assert_equal(iinfo(T).max, T(-1)) - -class TestRepr(TestCase): - def test_iinfo_repr(self): - expected = "iinfo(min=-32768, max=32767, dtype=int16)" - assert_equal(repr(np.iinfo(np.int16)), expected) - - def test_finfo_repr(self): - expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \ - " max=3.4028235e+38, dtype=float32)" - # Python 2.5 float formatting on Windows adds an extra 0 to the - # exponent. So test for both. Once 2.5 compatibility is dropped, this - # can simply use `assert_equal(repr(np.finfo(np.float32)), expected)`. 
- expected_win25 = "finfo(resolution=1e-006, min=-3.4028235e+038," + \ - " max=3.4028235e+038, dtype=float32)" - - actual = repr(np.finfo(np.float32)) - if not actual == expected: - if not actual == expected_win25: - msg = build_err_msg([actual, desired], verbose=True) - raise AssertionError(msg) - - -def test_instances(): - iinfo(10) - finfo(3.0) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_half.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_half.py deleted file mode 100644 index 928db48b70834..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_half.py +++ /dev/null @@ -1,439 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import platform - -import numpy as np -from numpy import uint16, float16, float32, float64 -from numpy.testing import TestCase, run_module_suite, assert_, assert_equal, \ - dec - - -def assert_raises_fpe(strmatch, callable, *args, **kwargs): - try: - callable(*args, **kwargs) - except FloatingPointError as exc: - assert_(str(exc).find(strmatch) >= 0, - "Did not raise floating point %s error" % strmatch) - else: - assert_(False, - "Did not raise floating point %s error" % strmatch) - -class TestHalf(TestCase): - def setUp(self): - # An array of all possible float16 values - self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 - self.all_f32 = np.array(self.all_f16, dtype=float32) - self.all_f64 = np.array(self.all_f16, dtype=float64) - - # An array of all non-NaN float16 values, in sorted order - self.nonan_f16 = np.concatenate( - (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), - np.arange(0x0000, 0x7c01, 1, dtype=uint16)) - ) - self.nonan_f16.dtype = float16 - self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) - self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) - - # An array of all finite float16 values, in sorted order - self.finite_f16 = self.nonan_f16[1:-1] - self.finite_f32 = self.nonan_f32[1:-1] - self.finite_f64 = self.nonan_f64[1:-1] - - def test_half_conversions(self): - """Checks that all 16-bit values survive conversion - to/from 32-bit and 64-bit float""" - # Because the underlying routines preserve the NaN bits, every - # value is preserved when converting to/from other floats. - - # Convert from float32 back to float16 - b = np.array(self.all_f32, dtype=float16) - assert_equal(self.all_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Convert from float64 back to float16 - b = np.array(self.all_f64, dtype=float16) - assert_equal(self.all_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Convert float16 to longdouble and back - # This doesn't necessarily preserve the extra NaN bits, - # so exclude NaNs. 
- a_ld = np.array(self.nonan_f16, dtype=np.longdouble) - b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), - b.view(dtype=uint16)) - - # Check the range for which all integers can be represented - i_int = np.arange(-2048, 2049) - i_f16 = np.array(i_int, dtype=float16) - j = np.array(i_f16, dtype=np.int) - assert_equal(i_int, j) - - def test_nans_infs(self): - with np.errstate(all='ignore'): - # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) - assert_equal(np.spacing(float16(65504)), np.inf) - - # Check comparisons of all values with NaN - nan = float16(np.nan) - - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) - - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) - - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) - - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) - - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > self.all_f16).any()) - - assert_(not (self.all_f16 >= nan).any()) - assert_(not (nan >= self.all_f16).any()) - - def test_half_values(self): - """Confirms a small number of known half values""" - a = np.array([1.0, -1.0, - 2.0, -2.0, - 0.0999755859375, 0.333251953125, # 1/10, 1/3 - 65504, -65504, # Maximum magnitude - 2.0**(-14), -2.0**(-14), # Minimum normal - 2.0**(-24), -2.0**(-24), # Minimum subnormal - 0, -1/1e1000, # Signed zeros - np.inf, -np.inf]) - b = np.array([0x3c00, 0xbc00, - 0x4000, 0xc000, - 0x2e66, 0x3555, - 0x7bff, 0xfbff, - 0x0400, 0x8400, - 0x0001, 0x8001, - 0x0000, 0x8000, - 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 - assert_equal(a, b) - - def test_half_rounding(self): - """Checks that rounding when converting to half is correct""" - a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal - 2.0**-25, # Underflows to zero (nearest even mode) - 2.0**-26, # Underflows to zero - 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) - 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) - 1.0+2.0**-12, # rounds to 1.0 - 65519, # rounds to 65504 - 65520], # rounds to inf - dtype=float64) - rounded = [2.0**-24, - 0.0, - 0.0, - 1.0+2.0**(-10), - 1.0, - 1.0, - 65504, - np.inf] - - # Check float64->float16 rounding - b = np.array(a, dtype=float16) - assert_equal(b, rounded) - - # Check float32->float16 rounding - a = np.array(a, dtype=float32) - b = np.array(a, dtype=float16) - assert_equal(b, rounded) - - def test_half_correctness(self): - """Take every finite float16, and check the casting functions with - a manual conversion.""" - - # Create an array of all finite float16s - a_f16 = self.finite_f16 - a_bits = a_f16.view(dtype=uint16) - - # Convert to 64-bit float manually - a_sgn = (-1.0)**((a_bits&0x8000) >> 15) - a_exp = np.array((a_bits&0x7c00) >> 10, dtype=np.int32) - 15 - a_man = (a_bits&0x03ff) * 2.0**(-10) - # Implicit bit of normalized floats - a_man[a_exp!=-15] += 1 - # Denormalized exponent is -14 - a_exp[a_exp==-15] = -14 - - a_manual = a_sgn * a_man * 2.0**a_exp - - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] - if len(a32_fail) != 0: - bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, - "First non-equal is half value %x -> %g != %g" % - (a[bad_index], - self.finite_f32[bad_index], - a_manual[bad_index])) 
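# --- Illustrative sketch (not from the patched numpy sources). The manual
# decoding used above, applied to one bit pattern: sign * mantissa * 2**exp,
# adding the implicit leading bit for normalized values (exponent field != 0)
# and bumping subnormals to exponent -14.
import numpy as np

bits = np.uint16(0x3555)               # the float16 closest to 1/3
sign = (-1.0) ** ((bits & 0x8000) >> 15)
exp = int((bits & 0x7c00) >> 10) - 15  # remove the exponent bias
man = (bits & 0x03ff) * 2.0 ** -10
if exp != -15:
    man += 1.0                         # implicit bit of a normalized value
else:
    exp = -14                          # subnormal exponent
value = sign * man * 2.0 ** exp
assert value == 0.333251953125         # float16(1/3), cf. test_half_values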
- - a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] - if len(a64_fail) != 0: - bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, - "First non-equal is half value %x -> %g != %g" % - (a[bad_index], - self.finite_f64[bad_index], - a_manual[bad_index])) - - def test_half_ordering(self): - """Make sure comparisons are working right""" - - # All non-NaN float16 values in reverse order - a = self.nonan_f16[::-1].copy() - - # 32-bit float copy - b = np.array(a, dtype=float32) - - # Should sort the same - a.sort() - b.sort() - assert_equal(a, b) - - # Comparisons should work - assert_((a[:-1] <= a[1:]).all()) - assert_(not (a[:-1] > a[1:]).any()) - assert_((a[1:] >= a[:-1]).all()) - assert_(not (a[1:] < a[:-1]).any()) - # All != except for +/-0 - assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) - assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) - - def test_half_funcs(self): - """Test the various ArrFuncs""" - - # fill - assert_equal(np.arange(10, dtype=float16), - np.arange(10, dtype=float32)) - - # fillwithscalar - a = np.zeros((5,), dtype=float16) - a.fill(1) - assert_equal(a, np.ones((5,), dtype=float16)) - - # nonzero and copyswap - a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) - assert_equal(a.nonzero()[0], - [2, 5, 6]) - a = a.byteswap().newbyteorder() - assert_equal(a.nonzero()[0], - [2, 5, 6]) - - # dot - a = np.arange(0, 10, 0.5, dtype=float16) - b = np.ones((20,), dtype=float16) - assert_equal(np.dot(a, b), - 95) - - # argmax - a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) - assert_equal(a.argmax(), - 4) - a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) - assert_equal(a.argmax(), - 5) - - # getitem - a = np.arange(10, dtype=float16) - for i in range(10): - assert_equal(a.item(i), i) - - def test_spacing_nextafter(self): - """Test np.spacing and np.nextafter""" - # All non-negative finite #'s - a = np.arange(0x7c00, dtype=uint16) - hinf = np.array((np.inf,), dtype=float16) - a_f16 = a.view(dtype=float16) - - assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) - - assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) - assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) - assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) - - # switch to negatives - a |= 0x8000 - - assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) - assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) - - assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) - assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) - assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) - - - def test_half_ufuncs(self): - """Test the various ufuncs""" - - a = np.array([0, 1, 2, 4, 2], dtype=float16) - b = np.array([-2, 5, 1, 4, 3], dtype=float16) - c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) - - assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) - assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) - assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) - assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) - - assert_equal(np.equal(a, b), [False, False, False, True, False]) - assert_equal(np.not_equal(a, b), [True, True, True, False, True]) - assert_equal(np.less(a, b), [False, True, False, False, True]) - assert_equal(np.less_equal(a, b), [False, True, False, True, True]) - assert_equal(np.greater(a, b), [True, False, True, False, False]) - assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) - assert_equal(np.logical_and(a, b), [False, 
True, True, True, True]) - assert_equal(np.logical_or(a, b), [True, True, True, True, True]) - assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) - assert_equal(np.logical_not(a), [True, False, False, False, False]) - - assert_equal(np.isnan(c), [False, False, False, True, False]) - assert_equal(np.isinf(c), [False, False, True, False, False]) - assert_equal(np.isfinite(c), [True, True, False, False, True]) - assert_equal(np.signbit(b), [True, False, False, False, False]) - - assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) - - assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) - x = np.maximum(b, c) - assert_(np.isnan(x[3])) - x[3] = 0 - assert_equal(x, [0, 5, 1, 0, 6]) - assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) - x = np.minimum(b, c) - assert_(np.isnan(x[3])) - x[3] = 0 - assert_equal(x, [-2, -1, -np.inf, 0, 3]) - assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) - assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) - assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) - assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) - - assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) - assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) - assert_equal(np.square(b), [4, 25, 1, 16, 9]) - assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) - assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) - assert_equal(np.conjugate(b), b) - assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) - assert_equal(np.negative(b), [2, -5, -1, -4, -3]) - assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) - assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) - assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) - assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) - - def test_half_coercion(self): - """Test that half gets coerced properly with the other types""" - a16 = np.array((1,), dtype=float16) - a32 = np.array((1,), dtype=float32) - b16 = float16(1) - b32 = float32(1) - - assert_equal(np.power(a16, 2).dtype, float16) - assert_equal(np.power(a16, 2.0).dtype, float16) - assert_equal(np.power(a16, b16).dtype, float16) - assert_equal(np.power(a16, b32).dtype, float16) - assert_equal(np.power(a16, a16).dtype, float16) - assert_equal(np.power(a16, a32).dtype, float32) - - assert_equal(np.power(b16, 2).dtype, float64) - assert_equal(np.power(b16, 2.0).dtype, float64) - assert_equal(np.power(b16, b16).dtype, float16) - assert_equal(np.power(b16, b32).dtype, float32) - assert_equal(np.power(b16, a16).dtype, float16) - assert_equal(np.power(b16, a32).dtype, float32) - - assert_equal(np.power(a32, a16).dtype, float32) - assert_equal(np.power(a32, b16).dtype, float32) - assert_equal(np.power(b32, a16).dtype, float16) - assert_equal(np.power(b32, b16).dtype, float32) - - @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") - def test_half_fpe(self): - with np.errstate(all='raise'): - sx16 = np.array((1e-4,), dtype=float16) - bx16 = np.array((1e4,), dtype=float16) - sy16 = float16(1e-4) - by16 = float16(1e4) - - # Underflow errors - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, - 
float16(2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(-2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(-2.**-14-2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-23), float16(4)) - - # Overflow errors - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) - assert_raises_fpe('overflow', lambda a, b:a+b, - float16(65504), float16(17)) - assert_raises_fpe('overflow', lambda a, b:a-b, - float16(-65504), float16(17)) - assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) - assert_raises_fpe('overflow', np.spacing, float16(65504)) - - # Invalid value errors - assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) - assert_raises_fpe('invalid', np.spacing, float16(np.inf)) - assert_raises_fpe('invalid', np.spacing, float16(np.nan)) - assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0)) - assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0)) - assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan)) - - # These should not raise - float16(65472)+float16(32) - float16(2**-13)/float16(2) - float16(2**-14)/float16(2**10) - np.spacing(float16(-65504)) - np.nextafter(float16(65504), float16(-np.inf)) - np.nextafter(float16(-65504), float16(np.inf)) - float16(2**-14)/float16(2**10) - float16(-2**-14)/float16(2**10) - float16(2**-14+2**-23)/float16(2) - float16(-2**-14-2**-23)/float16(2) - - def test_half_array_interface(self): - """Test that half is compatible with __array_interface__""" - class Dummy: - pass - - a = np.ones((1,), dtype=float16) - b = Dummy() - b.__array_interface__ = a.__array_interface__ - c = np.array(b) - assert_(c.dtype == float16) - assert_equal(a, c) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexerrors.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexerrors.py deleted file mode 100644 index e5dc9dbab6d1e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexerrors.py +++ /dev/null @@ -1,127 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_raises, assert_equal, assert_ -import sys - -class TestIndexErrors(TestCase): - '''Tests to exercise indexerrors not covered by other tests.''' - - def test_arraytypes_fasttake(self): - 'take from a 0-length dimension' - x = np.empty((2, 3, 0, 4)) - assert_raises(IndexError, x.take, [0], axis=2) - assert_raises(IndexError, x.take, [1], axis=2) - assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') - assert_raises(IndexError, x.take, [0], axis=2, mode='clip') - - def test_take_from_object(self): - # Check exception taking from object array - d = np.zeros(5, dtype=object) - assert_raises(IndexError, 
d.take, [6]) - - # Check exception taking from 0-d array - d = np.zeros((5, 0), dtype=object) - assert_raises(IndexError, d.take, [1], axis=1) - assert_raises(IndexError, d.take, [0], axis=1) - assert_raises(IndexError, d.take, [0]) - assert_raises(IndexError, d.take, [0], mode='wrap') - assert_raises(IndexError, d.take, [0], mode='clip') - - def test_multiindex_exceptions(self): - a = np.empty(5, dtype=object) - assert_raises(IndexError, a.item, 20) - a = np.empty((5, 0), dtype=object) - assert_raises(IndexError, a.item, (0, 0)) - - a = np.empty(5, dtype=object) - assert_raises(IndexError, a.itemset, 20, 0) - a = np.empty((5, 0), dtype=object) - assert_raises(IndexError, a.itemset, (0, 0), 0) - - def test_put_exceptions(self): - a = np.zeros((5, 5)) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5), dtype=object) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5, 0)) - assert_raises(IndexError, a.put, 100, 0) - a = np.zeros((5, 5, 0), dtype=object) - assert_raises(IndexError, a.put, 100, 0) - - def test_iterators_exceptions(self): - "cases in iterators.c" - def assign(obj, ind, val): - obj[ind] = val - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a[0, 5, None, 2]) - assert_raises(IndexError, lambda: a[0, 5, 0, 2]) - assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) - assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) - - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a[0, 0, None, 2]) - assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[10]) - assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[10]) - assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[np.array(10)]) - assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[np.array(10)]) - assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) - - a = np.zeros([1, 2, 3]) - assert_raises(IndexError, lambda: a.flat[np.array([10])]) - assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) - a = np.zeros([1, 0, 3]) - assert_raises(IndexError, lambda: a.flat[np.array([10])]) - assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) - - def test_mapping(self): - "cases from mapping.c" - - def assign(obj, ind, val): - obj[ind] = val - - a = np.zeros((0, 10)) - assert_raises(IndexError, lambda: a[12]) - - a = np.zeros((3, 5)) - assert_raises(IndexError, lambda: a[(10, 20)]) - assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) - a = np.zeros((3, 0)) - assert_raises(IndexError, lambda: a[(1, 0)]) - assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) - - a = np.zeros((10,)) - assert_raises(IndexError, lambda: assign(a, 10, 1)) - a = np.zeros((0,)) - assert_raises(IndexError, lambda: assign(a, 10, 1)) - - a = np.zeros((3, 5)) - assert_raises(IndexError, lambda: a[(1, [1, 20])]) - assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) - a = np.zeros((3, 0)) - assert_raises(IndexError, lambda: a[(1, [0, 1])]) - assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) - - def test_methods(self): - "cases from methods.c" - - a = np.zeros((3, 3)) - assert_raises(IndexError, lambda: a.item(100)) - assert_raises(IndexError, lambda: a.itemset(100, 1)) - a = np.zeros((0, 3)) - assert_raises(IndexError, 
lambda: a.item(100)) - assert_raises(IndexError, lambda: a.itemset(100, 1)) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexing.py deleted file mode 100644 index 7f6fab72e5579..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_indexing.py +++ /dev/null @@ -1,983 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import warnings -import functools - -import numpy as np -from numpy.core.multiarray_tests import array_indexing -from itertools import product -from numpy.testing import * - - -try: - cdll = np.ctypeslib.load_library('multiarray', np.core.multiarray.__file__) - _HAS_CTYPE = True -except ImportError: - _HAS_CTYPE = False - - -class TestIndexing(TestCase): - def test_none_index(self): - # `None` index adds newaxis - a = np.array([1, 2, 3]) - assert_equal(a[None], a[np.newaxis]) - assert_equal(a[None].ndim, a.ndim + 1) - - def test_empty_tuple_index(self): - # Empty tuple index creates a view - a = np.array([1, 2, 3]) - assert_equal(a[()], a) - assert_(a[()].base is a) - a = np.array(0) - assert_(isinstance(a[()], np.int_)) - - # Regression, it needs to fall through integer and fancy indexing - # cases, so need the with statement to ignore the non-integer error. - with warnings.catch_warnings(): - warnings.filterwarnings('ignore', '', DeprecationWarning) - a = np.array([1.]) - assert_(isinstance(a[0.], np.float_)) - - a = np.array([np.array(1)], dtype=object) - assert_(isinstance(a[0.], np.ndarray)) - - def test_same_kind_index_casting(self): - # Indexes should be cast with same-kind and not safe, even if - # that is somewhat unsafe. So test various different code paths. - index = np.arange(5) - u_index = index.astype(np.uintp) - arr = np.arange(10) - - assert_array_equal(arr[index], arr[u_index]) - arr[u_index] = np.arange(5) - assert_array_equal(arr, np.arange(10)) - - arr = np.arange(10).reshape(5, 2) - assert_array_equal(arr[index], arr[u_index]) - - arr[u_index] = np.arange(5)[:,None] - assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) - - arr = np.arange(25).reshape(5, 5) - assert_array_equal(arr[u_index, u_index], arr[index, index]) - - def test_empty_fancy_index(self): - # Empty list index creates an empty array - # with the same dtype (but with weird shape) - a = np.array([1, 2, 3]) - assert_equal(a[[]], []) - assert_equal(a[[]].dtype, a.dtype) - - b = np.array([], dtype=np.intp) - assert_equal(a[[]], []) - assert_equal(a[[]].dtype, a.dtype) - - b = np.array([]) - assert_raises(IndexError, a.__getitem__, b) - - def test_ellipsis_index(self): - # Ellipsis index does not create a view - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - assert_equal(a[...], a) - assert_(a[...].base is a) # `a[...]` was `a` in numpy <1.9.) 
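# --- Illustrative sketch (not from the patched numpy sources; the
# ellipsis-slicing checks continue right below). The view semantics being
# asserted: since numpy 1.9, `a[...]` is a new array object that shares
# memory with `a` rather than being `a` itself.
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
v = a[...]
assert v is not a and v.base is a   # distinct object, shared buffer
v[0, 0] = 99                        # writes through to `a`
assert a[0, 0] == 99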
- - # Slicing with ellipsis can skip an - # arbitrary number of dimensions - assert_equal(a[0, ...], a[0]) - assert_equal(a[0, ...], a[0,:]) - assert_equal(a[..., 0], a[:, 0]) - - # Slicing with ellipsis always results - # in an array, not a scalar - assert_equal(a[0, ..., 1], np.array(2)) - - # Assignment with `(Ellipsis,)` on 0-d arrays - b = np.array(1) - b[(Ellipsis,)] = 2 - assert_equal(b, 2) - - def test_single_int_index(self): - # Single integer index selects one row - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - - assert_equal(a[0], [1, 2, 3]) - assert_equal(a[-1], [7, 8, 9]) - - # Index out of bounds produces IndexError - assert_raises(IndexError, a.__getitem__, 1<<30) - # Index overflow produces IndexError - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', '', DeprecationWarning) - assert_raises(IndexError, a.__getitem__, 1<<64) - - def test_single_bool_index(self): - # Single boolean index - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - - # Python boolean converts to integer - # These are being deprecated (and test in test_deprecations) - #assert_equal(a[True], a[1]) - #assert_equal(a[False], a[0]) - - # Same with NumPy boolean scalar - # Before DEPRECATE, this is an error (as always, but telling about - # future change): - assert_raises(IndexError, a.__getitem__, np.array(True)) - assert_raises(IndexError, a.__getitem__, np.array(False)) - # After DEPRECATE, this behaviour can be enabled: - #assert_equal(a[np.array(True)], a[None]) - #assert_equal(a[np.array(False), a[None][0:0]]) - - - def test_boolean_indexing_onedim(self): - # Indexing a 2-dimensional array with - # boolean array of length one - a = np.array([[ 0., 0., 0.]]) - b = np.array([ True], dtype=bool) - assert_equal(a[b], a) - # boolean assignment - a[b] = 1. - assert_equal(a, [[1., 1., 1.]]) - - - def test_boolean_assignment_value_mismatch(self): - # A boolean assignment should fail when the shape of the values - # cannot be broadcast to the subscription. (see also gh-3458) - a = np.arange(4) - def f(a, v): - a[a > -1] = v - - assert_raises(ValueError, f, a, []) - assert_raises(ValueError, f, a, [1, 2, 3]) - assert_raises(ValueError, f, a[:1], [1, 2, 3]) - - - def test_boolean_indexing_twodim(self): - # Indexing a 2-dimensional array with - # 2-dimensional boolean array - a = np.array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - b = np.array([[ True, False, True], - [False, True, False], - [ True, False, True]]) - assert_equal(a[b], [1, 3, 5, 7, 9]) - assert_equal(a[b[1]], [[4, 5, 6]]) - assert_equal(a[b[0]], a[b[2]]) - - # boolean assignment - a[b] = 0 - assert_equal(a, [[0, 2, 0], - [4, 0, 6], - [0, 8, 0]]) - - - def test_reverse_strides_and_subspace_bufferinit(self): - # This tests that the strides are not reversed for simple and - # subspace fancy indexing. - a = np.ones(5) - b = np.zeros(5, dtype=np.intp)[::-1] - c = np.arange(5)[::-1] - - a[b] = c - # If the strides are not reversed, the 0 in the arange comes last. 
- assert_equal(a[0], 0) - - # This also tests that the subspace buffer is initialized: - a = np.ones((5, 2)) - c = np.arange(10).reshape(5, 2)[::-1] - a[b, :] = c - assert_equal(a[0], [0, 1]) - - def test_reversed_strides_result_allocation(self): - # Test a bug when calculating the output strides for a result array - # when the subspace size was 1 (and test other cases as well) - a = np.arange(10)[:, None] - i = np.arange(10)[::-1] - assert_array_equal(a[i], a[i.copy('C')]) - - a = np.arange(20).reshape(-1, 2) - - - def test_uncontiguous_subspace_assignment(self): - # During development there was a bug activating a skip logic - # based on ndim instead of size. - a = np.full((3, 4, 2), -1) - b = np.full((3, 4, 2), -1) - - a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T - b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() - - assert_equal(a, b) - - - def test_too_many_fancy_indices_special_case(self): - # Just documents behaviour, this is a small limitation. - a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS - assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32) - - - def test_scalar_array_bool(self): - # Numpy bools can be used as boolean index (python ones as of yet not) - a = np.array(1) - assert_equal(a[np.bool_(True)], a[np.array(True)]) - assert_equal(a[np.bool_(False)], a[np.array(False)]) - - # After deprecating bools as integers: - #a = np.array([0,1,2]) - #assert_equal(a[True, :], a[None, :]) - #assert_equal(a[:, True], a[:, None]) - # - #assert_(not np.may_share_memory(a, a[True, :])) - - - def test_everything_returns_views(self): - # Before `...` would return a itself. - a = np.arange(5) - - assert_(a is not a[()]) - assert_(a is not a[...]) - assert_(a is not a[:]) - - - def test_broaderrors_indexing(self): - a = np.zeros((5, 5)) - assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) - assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) - - - def test_trivial_fancy_out_of_bounds(self): - a = np.zeros(5) - ind = np.ones(20, dtype=np.intp) - ind[-1] = 10 - assert_raises(IndexError, a.__getitem__, ind) - assert_raises(IndexError, a.__setitem__, ind, 0) - ind = np.ones(20, dtype=np.intp) - ind[0] = 11 - assert_raises(IndexError, a.__getitem__, ind) - assert_raises(IndexError, a.__setitem__, ind, 0) - - - def test_nonbaseclass_values(self): - class SubClass(np.ndarray): - def __array_finalize__(self, old): - # Have array finalize do funny things - self.fill(99) - - a = np.zeros((5, 5)) - s = a.copy().view(type=SubClass) - s.fill(1) - - a[[0, 1, 2, 3, 4], :] = s - assert_((a == 1).all()) - - # Subspace is last, so transposing might want to finalize - a[:, [0, 1, 2, 3, 4]] = s - assert_((a == 1).all()) - - a.fill(0) - a[...] = s - assert_((a == 1).all()) - - - def test_subclass_writeable(self): - d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], - dtype=[('target', 'S20'), ('V_mag', '>f4')]) - ind = np.array([False, True, True], dtype=bool) - assert_(d[ind].flags.writeable) - ind = np.array([0, 1]) - assert_(d[ind].flags.writeable) - assert_(d[...].flags.writeable) - assert_(d[0].flags.writeable) - - - def test_memory_order(self): - # This is not necessary to preserve. Memory layouts for - # more complex indices are not as simple. 
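# --- Illustrative sketch (not from the patched numpy sources; the test
# body continues right below). What test_memory_order checks: indexing a
# 1-d array with a Fortran-ordered integer array yields a result in the
# same (Fortran) memory order.
import numpy as np

idx = np.arange(10).reshape(5, 2).T    # transposed => F-contiguous
out = np.arange(10)[idx]               # trivial fancy indexing
assert out.flags.f_contiguous and not out.flags.c_contiguous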
- a = np.arange(10) - b = np.arange(10).reshape(5,2).T - assert_(a[b].flags.f_contiguous) - - # Takes a different implementation branch: - a = a.reshape(-1, 1) - assert_(a[b, 0].flags.f_contiguous) - - - def test_scalar_return_type(self): - # Full scalar indices should return scalars and object - # arrays should not call PyArray_Return on their items - class Zero(object): - # The most basic valid indexing - def __index__(self): - return 0 - z = Zero() - - class ArrayLike(object): - # Simple array, should behave like the array - def __array__(self): - return np.array(0) - - a = np.zeros(()) - assert_(isinstance(a[()], np.float_)) - a = np.zeros(1) - assert_(isinstance(a[z], np.float_)) - a = np.zeros((1, 1)) - assert_(isinstance(a[z, np.array(0)], np.float_)) - assert_(isinstance(a[z, ArrayLike()], np.float_)) - - # And object arrays do not call it too often: - b = np.array(0) - a = np.array(0, dtype=object) - a[()] = b - assert_(isinstance(a[()], np.ndarray)) - a = np.array([b, None]) - assert_(isinstance(a[z], np.ndarray)) - a = np.array([[b, None]]) - assert_(isinstance(a[z, np.array(0)], np.ndarray)) - assert_(isinstance(a[z, ArrayLike()], np.ndarray)) - - - def test_small_regressions(self): - # Reference count of intp for index checks - a = np.array([0]) - refcount = sys.getrefcount(np.dtype(np.intp)) - # item setting always checks indices in separate function: - a[np.array([0], dtype=np.intp)] = 1 - a[np.array([0], dtype=np.uint8)] = 1 - assert_raises(IndexError, a.__setitem__, - np.array([1], dtype=np.intp), 1) - assert_raises(IndexError, a.__setitem__, - np.array([1], dtype=np.uint8), 1) - - assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) - - def test_unaligned(self): - v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] - d = v.view(np.dtype("S8")) - # unaligned source - x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] - x = x.view(np.dtype("S8")) - x[...] = np.array("b" * 8, dtype="S") - b = np.arange(d.size) - #trivial - assert_equal(d[b], d) - d[b] = x - # nontrivial - # unaligned index array - b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] - b = b.view(np.intp)[:d.size] - b[...] = np.arange(d.size) - assert_equal(d[b.astype(np.int16)], d) - d[b.astype(np.int16)] = x - # boolean - d[b % 2 == 0] - d[b % 2 == 0] = x[::2] - - -class TestFieldIndexing(TestCase): - def test_scalar_return_type(self): - # Field access on an array should return an array, even if it - # is 0-d. - a = np.zeros((), [('a','f8')]) - assert_(isinstance(a['a'], np.ndarray)) - assert_(isinstance(a[['a']], np.ndarray)) - - -class TestBroadcastedAssignments(TestCase): - def assign(self, a, ind, val): - a[ind] = val - return a - - - def test_prepending_ones(self): - a = np.zeros((3, 2)) - - a[...] = np.ones((1, 3, 2)) - # Fancy with subspace with and without transpose - a[[0, 1, 2], :] = np.ones((1, 3, 2)) - a[:, [0, 1]] = np.ones((1, 3, 2)) - # Fancy without subspace (with broadcasting) - a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) - - - def test_prepend_not_one(self): - assign = self.assign - s_ = np.s_ - - a = np.zeros(5) - - # Too large and not only ones. - assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) - - with warnings.catch_warnings(): - # Will be a ValueError as well. 
- warnings.simplefilter("error", DeprecationWarning) - assert_raises(DeprecationWarning, assign, a, s_[[1, 2, 3],], - np.ones((2, 1))) - assert_raises(DeprecationWarning, assign, a, s_[[[1], [2]],], - np.ones((2,2,1))) - - - def test_simple_broadcasting_errors(self): - assign = self.assign - s_ = np.s_ - - a = np.zeros((5, 1)) - assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) - assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) - - assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) - assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) - - assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) - - - def test_index_is_larger(self): - # Simple case of fancy index broadcasting of the index. - a = np.zeros((5, 5)) - a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] - - assert_((a[:3, :3] == [2, 3, 4]).all()) - - - def test_broadcast_subspace(self): - a = np.zeros((100, 100)) - v = np.arange(100)[:,None] - b = np.arange(100)[::-1] - a[b] = v - assert_((a[::-1] == v).all()) - - -class TestSubclasses(TestCase): - def test_basic(self): - class SubClass(np.ndarray): - pass - - s = np.arange(5).view(SubClass) - assert_(isinstance(s[:3], SubClass)) - assert_(s[:3].base is s) - - assert_(isinstance(s[[0, 1, 2]], SubClass)) - assert_(isinstance(s[s > 0], SubClass)) - - - def test_matrix_fancy(self): - # The matrix class messes with the shape. While this is always - # weird (getitem is not used, it does not have setitem nor knows - # about fancy indexing), this tests gh-3110 - m = np.matrix([[1, 2], [3, 4]]) - - assert_(isinstance(m[[0,1,0], :], np.matrix)) - - # gh-3110. Note the transpose currently because matrices do *not* - # support dimension fixing for fancy indexing correctly. - x = np.asmatrix(np.arange(50).reshape(5,10)) - assert_equal(x[:2, np.array(-1)], x[:2, -1].T) - - - def test_finalize_gets_full_info(self): - # Array finalize should be called on the filled array. - class SubClass(np.ndarray): - def __array_finalize__(self, old): - self.finalize_status = np.array(self) - self.old = old - - s = np.arange(10).view(SubClass) - new_s = s[:3] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - new_s = s[[0,1,2,3]] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - new_s = s[s > 0] - assert_array_equal(new_s.finalize_status, new_s) - assert_array_equal(new_s.old, s) - - -class TestFancyIndexingEquivalence(TestCase): - def test_object_assign(self): - # Check that the field and object special case using copyto is active. - # The right hand side cannot be converted to an array here. - a = np.arange(5, dtype=object) - b = a.copy() - a[:3] = [1, (1,2), 3] - b[[0, 1, 2]] = [1, (1,2), 3] - assert_array_equal(a, b) - - # test same for subspace fancy indexing - b = np.arange(5, dtype=object)[None, :] - b[[0], :3] = [[1, (1,2), 3]] - assert_array_equal(a, b[0]) - - - def test_cast_equivalence(self): - # Yes, normal slicing uses unsafe casting. - a = np.arange(5) - b = a.copy() - - a[:3] = np.array(['2', '-3', '-1']) - b[[0, 2, 1]] = np.array(['2', '-1', '-3']) - assert_array_equal(a, b) - - # test the same for subspace fancy indexing - b = np.arange(5)[None, :] - b[[0], :3] = np.array([['2', '-3', '-1']]) - assert_array_equal(a, b[0]) - - -class TestMultiIndexingAutomated(TestCase): - """ - These test use code to mimic the C-Code indexing for selection. - - NOTE: * This still lacks tests for complex item setting. 
-          * If you change behavior of indexing, you might want to modify
-            these tests to try more combinations.
-          * Behavior was written to match numpy version 1.8. (though a
-            first version matched 1.7.)
-          * Only tuple indices are supported by the mimicking code.
-            (and tested as of writing this)
-          * Error types should match most of the time as long as there
-            is only one error. For multiple errors, what gets raised
-            will usually not be the same one. They are *not* tested.
-    """
-    def setUp(self):
-        self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
-        self.b = np.empty((3, 0, 5, 6))
-        self.complex_indices = ['skip', Ellipsis,
-            0,
-            # Boolean indices, up to 3-d for some special cases of eating up
-            # dimensions, also need to test all False
-            np.array(False),
-            np.array([True, False, False]),
-            np.array([[True, False], [False, True]]),
-            np.array([[[False, False], [False, False]]]),
-            # Some slices:
-            slice(-5, 5, 2),
-            slice(1, 1, 100),
-            slice(4, -1, -2),
-            slice(None, None, -3),
-            # Some fancy indexes:
-            np.empty((0, 1, 1), dtype=np.intp),  # empty and can be broadcast
-            np.array([0, 1, -2]),
-            np.array([[2], [0], [1]]),
-            np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
-            np.array([2, -1], dtype=np.int8),
-            np.zeros([1]*31, dtype=int),  # trigger too large array.
-            np.array([0., 1.])]  # invalid datatype
-        # Some simpler indices that still cover a bit more
-        self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip']
-        # Very simple ones to fill the rest:
-        self.fill_indices = [slice(None, None), 0]
-
-
-    def _get_multi_index(self, arr, indices):
-        """Mimic multi-dimensional indexing.
-
-        Parameters
-        ----------
-        arr : ndarray
-            Array to be indexed.
-        indices : tuple of index objects
-
-        Returns
-        -------
-        out : ndarray
-            An array equivalent to the indexing operation (but always a copy).
-            `arr[indices]` should be identical.
-        no_copy : bool
-            Whether the indexing operation can return a view. If this is
-            `True`, `np.may_share_memory(arr, arr[indices])` should be `True`
-            (with some exceptions for scalars and possibly 0-d arrays).
-
-        Notes
-        -----
-        The function mostly matches the errors of normal indexing, but an
-        exact match is not guaranteed.
-        """
-        in_indices = list(indices)
-        indices = []
-        # set to False as soon as a fancy or boolean index forces a copy
-        no_copy = True
-        # number of fancy/scalar indexes that are not consecutive
-        num_fancy = 0
-        # number of dimensions indexed by a "fancy" index
-        fancy_dim = 0
-        # NOTE: This is a funny twist (and probably OK to change).
-        # The boolean array has illegal indexes, but this is
-        # allowed if the broadcast fancy-indices are 0-sized.
-        # This variable is to catch that case.
-        error_unless_broadcast_to_empty = False
-
-        # We need to handle Ellipsis and make arrays from indices, also
-        # check if this is fancy indexing (set no_copy).
-        ndim = 0
-        ellipsis_pos = None  # defined here mostly to replace all but the first.
-        for i, indx in enumerate(in_indices):
-            if indx is None:
-                continue
-            if isinstance(indx, np.ndarray) and indx.dtype == bool:
-                no_copy = False
-                if indx.ndim == 0:
-                    raise IndexError
-                # boolean indices can have higher dimensions
-                ndim += indx.ndim
-                fancy_dim += indx.ndim
-                continue
-            if indx is Ellipsis:
-                if ellipsis_pos is None:
-                    ellipsis_pos = i
-                    continue  # do not increment ndim counter
-                raise IndexError
-            if isinstance(indx, slice):
-                ndim += 1
-                continue
-            if not isinstance(indx, np.ndarray):
-                # This could be open for changes in numpy.
-                # numpy should maybe raise an error if casting to intp
-                # is not safe. It rejects np.array([1., 2.]) but not
-                # [1., 2.] as an index (same for e.g. np.take).
-                # (Note the importance of empty lists if changing this here)
-                indx = np.array(indx, dtype=np.intp)
-                in_indices[i] = indx
-            elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
-                raise IndexError('arrays used as indices must be of integer (or boolean) type')
-            if indx.ndim != 0:
-                no_copy = False
-            ndim += 1
-            fancy_dim += 1
-
-        if arr.ndim - ndim < 0:
-            # we can't take more dimensions than we have, not even for 0-d
-            # arrays, since a[()] makes sense, but a[(),] does not. We will
-            # raise an error later on, unless a broadcasting error occurs first.
-            raise IndexError
-
-        if ndim == 0 and None not in in_indices:
-            # Well, we have no indexes or one Ellipsis. This is legal.
-            return arr.copy(), no_copy
-
-        if ellipsis_pos is not None:
-            in_indices[ellipsis_pos:ellipsis_pos+1] = [slice(None, None)] * (arr.ndim - ndim)
-
-        for ax, indx in enumerate(in_indices):
-            if isinstance(indx, slice):
-                # convert to an index array
-                indx = np.arange(*indx.indices(arr.shape[ax]))
-                indices.append(['s', indx])
-                continue
-            elif indx is None:
-                # this is like taking a slice with one element from a new axis:
-                indices.append(['n', np.array([0], dtype=np.intp)])
-                arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
-                continue
-            if isinstance(indx, np.ndarray) and indx.dtype == bool:
-                # This may be open for improvement in numpy.
-                # numpy should probably cast boolean lists to boolean indices
-                # instead of intp!
-
-                # Numpy supports a boolean index with a non-matching shape
-                # as long as the True values are not out of bounds. Numpy
-                # maybe should not allow this (at least not for arrays that
-                # are larger than the original one).
-                try:
-                    flat_indx = np.ravel_multi_index(np.nonzero(indx),
-                                    arr.shape[ax:ax+indx.ndim], mode='raise')
-                except:
-                    error_unless_broadcast_to_empty = True
-                    # fill with 0s instead, and raise error later
-                    flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
-                # concatenate axis into a single one:
-                if indx.ndim != 0:
-                    arr = arr.reshape((arr.shape[:ax]
-                                  + (np.prod(arr.shape[ax:ax+indx.ndim]),)
-                                  + arr.shape[ax+indx.ndim:]))
-                    indx = flat_indx
-                else:
-                    # This could be changed: a 0-d boolean index can
-                    # make sense (even outside the 0-d indexed array case).
-                    # Note that originally this could be interpreted as an
-                    # integer in the full integer special case.
-                    raise IndexError
-            else:
-                # If the index is a singleton, the bounds check is done
-                # before the broadcasting. This used to be different in <1.9
-                if indx.ndim == 0:
-                    if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
-                        raise IndexError
-            if indx.ndim == 0:
-                # The index is a scalar. This used to be twofold, but if fancy
-                # indexing was active, the check was done later, possibly
-                # after broadcasting it away (1.7. or earlier). Now it is
-                # always done.
-                if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
-                    raise IndexError
-            if len(indices) > 0 and indices[-1][0] == 'f' and ax != ellipsis_pos:
-                # NOTE: There could still have been a 0-sized Ellipsis
-                # between them. Checked that with ellipsis_pos.
-                indices[-1].append(indx)
-            else:
-                # We have a fancy index that is not after an existing one.
-                # NOTE: A 0-d array triggers this as well, while one may
-                # expect it not to, since a scalar would not be considered
-                # fancy indexing.
- num_fancy += 1 - indices.append(['f', indx]) - - if num_fancy > 1 and not no_copy: - # We have to flush the fancy indexes left - new_indices = indices[:] - axes = list(range(arr.ndim)) - fancy_axes = [] - new_indices.insert(0, ['f']) - ni = 0 - ai = 0 - for indx in indices: - ni += 1 - if indx[0] == 'f': - new_indices[0].extend(indx[1:]) - del new_indices[ni] - ni -= 1 - for ax in range(ai, ai + len(indx[1:])): - fancy_axes.append(ax) - axes.remove(ax) - ai += len(indx) - 1 # axis we are at - indices = new_indices - # and now we need to transpose arr: - arr = arr.transpose(*(fancy_axes + axes)) - - # We only have one 'f' index now and arr is transposed accordingly. - # Now handle newaxis by reshaping... - ax = 0 - for indx in indices: - if indx[0] == 'f': - if len(indx) == 1: - continue - # First of all, reshape arr to combine fancy axes into one: - orig_shape = arr.shape - orig_slice = orig_shape[ax:ax + len(indx[1:])] - arr = arr.reshape((arr.shape[:ax] - + (np.prod(orig_slice).astype(int),) - + arr.shape[ax + len(indx[1:]):])) - - # Check if broadcasting works - if len(indx[1:]) != 1: - res = np.broadcast(*indx[1:]) # raises ValueError... - else: - res = indx[1] - # unfortunately the indices might be out of bounds. So check - # that first, and use mode='wrap' then. However only if - # there are any indices... - if res.size != 0: - if error_unless_broadcast_to_empty: - raise IndexError - for _indx, _size in zip(indx[1:], orig_slice): - if _indx.size == 0: - continue - if np.any(_indx >= _size) or np.any(_indx < -_size): - raise IndexError - if len(indx[1:]) == len(orig_slice): - if np.product(orig_slice) == 0: - # Work around for a crash or IndexError with 'wrap' - # in some 0-sized cases. - try: - mi = np.ravel_multi_index(indx[1:], orig_slice, mode='raise') - except: - # This happens with 0-sized orig_slice (sometimes?) - # here it is a ValueError, but indexing gives a: - raise IndexError('invalid index into 0-sized') - else: - mi = np.ravel_multi_index(indx[1:], orig_slice, mode='wrap') - else: - # Maybe never happens... - raise ValueError - arr = arr.take(mi.ravel(), axis=ax) - arr = arr.reshape((arr.shape[:ax] - + mi.shape - + arr.shape[ax+1:])) - ax += mi.ndim - continue - - # If we are here, we have a 1D array for take: - arr = arr.take(indx[1], axis=ax) - ax += 1 - - return arr, no_copy - - - def _check_multi_index(self, arr, index): - """Check a multi index item getting and simple setting. - - Parameters - ---------- - arr : ndarray - Array to be indexed, must be a reshaped arange. - index : tuple of indexing objects - Index being tested. - """ - # Test item getting - try: - mimic_get, no_copy = self._get_multi_index(arr, index) - except Exception as e: - prev_refcount = sys.getrefcount(arr) - assert_raises(Exception, arr.__getitem__, index) - assert_raises(Exception, arr.__setitem__, index, 0) - assert_equal(prev_refcount, sys.getrefcount(arr)) - return - - self._compare_index_result(arr, index, mimic_get, no_copy) - - - def _check_single_index(self, arr, index): - """Check a single index item getting and simple setting. - - Parameters - ---------- - arr : ndarray - Array to be indexed, must be an arange. - index : indexing object - Index being tested. Must be a single index and not a tuple - of indexing objects (see also `_check_multi_index`). 
- """ - try: - mimic_get, no_copy = self._get_multi_index(arr, (index,)) - except Exception as e: - prev_refcount = sys.getrefcount(arr) - assert_raises(Exception, arr.__getitem__, index) - assert_raises(Exception, arr.__setitem__, index, 0) - assert_equal(prev_refcount, sys.getrefcount(arr)) - return - - self._compare_index_result(arr, index, mimic_get, no_copy) - - - def _compare_index_result(self, arr, index, mimic_get, no_copy): - """Compare mimicked result to indexing result. - """ - arr = arr.copy() - indexed_arr = arr[index] - assert_array_equal(indexed_arr, mimic_get) - # Check if we got a view, unless its a 0-sized or 0-d array. - # (then its not a view, and that does not matter) - if indexed_arr.size != 0 and indexed_arr.ndim != 0: - assert_(np.may_share_memory(indexed_arr, arr) == no_copy) - # Check reference count of the original array - if no_copy: - # refcount increases by one: - assert_equal(sys.getrefcount(arr), 3) - else: - assert_equal(sys.getrefcount(arr), 2) - - # Test non-broadcast setitem: - b = arr.copy() - b[index] = mimic_get + 1000 - if b.size == 0: - return # nothing to compare here... - if no_copy and indexed_arr.ndim != 0: - # change indexed_arr in-place to manipulate original: - indexed_arr += 1000 - assert_array_equal(arr, b) - return - # Use the fact that the array is originally an arange: - arr.flat[indexed_arr.ravel()] += 1000 - assert_array_equal(arr, b) - - - def test_boolean(self): - a = np.array(5) - assert_equal(a[np.array(True)], 5) - a[np.array(True)] = 1 - assert_equal(a, 1) - # NOTE: This is different from normal broadcasting, as - # arr[boolean_array] works like in a multi index. Which means - # it is aligned to the left. This is probably correct for - # consistency with arr[boolean_array,] also no broadcasting - # is done at all - self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool),)) - self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) - self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) - - - def test_multidim(self): - # Automatically test combinations with complex indexes on 2nd (or 1st) - # spot and the simple ones in one other spot. - with warnings.catch_warnings(): - # This is so that np.array(True) is not accepted in a full integer - # index, when running the file separately. 
- warnings.filterwarnings('error', '', DeprecationWarning) - for simple_pos in [0, 2, 3]: - tocheck = [self.fill_indices, self.complex_indices, - self.fill_indices, self.fill_indices] - tocheck[simple_pos] = self.simple_indices - for index in product(*tocheck): - index = tuple(i for i in index if i != 'skip') - self._check_multi_index(self.a, index) - self._check_multi_index(self.b, index) - - # Check very simple item getting: - self._check_multi_index(self.a, (0, 0, 0, 0)) - self._check_multi_index(self.b, (0, 0, 0, 0)) - # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) - assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) - - - def test_1d(self): - a = np.arange(10) - with warnings.catch_warnings(): - warnings.filterwarnings('error', '', DeprecationWarning) - for index in self.complex_indices: - self._check_single_index(a, index) - - -class TestCApiAccess(TestCase): - def test_getitem(self): - subscript = functools.partial(array_indexing, 0) - - # 0-d arrays don't work: - assert_raises(IndexError, subscript, np.ones(()), 0) - # Out of bound values: - assert_raises(IndexError, subscript, np.ones(10), 11) - assert_raises(IndexError, subscript, np.ones(10), -11) - assert_raises(IndexError, subscript, np.ones((10, 10)), 11) - assert_raises(IndexError, subscript, np.ones((10, 10)), -11) - - a = np.arange(10) - assert_array_equal(a[4], subscript(a, 4)) - a = a.reshape(5, 2) - assert_array_equal(a[-4], subscript(a, -4)) - - def test_setitem(self): - assign = functools.partial(array_indexing, 1) - - # Deletion is impossible: - assert_raises(ValueError, assign, np.ones(10), 0) - # 0-d arrays don't work: - assert_raises(IndexError, assign, np.ones(()), 0, 0) - # Out of bound values: - assert_raises(IndexError, assign, np.ones(10), 11, 0) - assert_raises(IndexError, assign, np.ones(10), -11, 0) - assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) - assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) - - a = np.arange(10) - assign(a, 4, 10) - assert_(a[4] == 10) - - a = a.reshape(5, 2) - assign(a, 4, 10) - assert_array_equal(a[-1], [10, 10]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_item_selection.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_item_selection.py deleted file mode 100644 index d8e9e6fd0faf5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_item_selection.py +++ /dev/null @@ -1,70 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import * -import sys, warnings - - -class TestTake(TestCase): - def test_simple(self): - a = [[1, 2], [3, 4]] - a_str = [[b'1', b'2'], [b'3', b'4']] - modes = ['raise', 'wrap', 'clip'] - indices = [-1, 4] - index_arrays = [np.empty(0, dtype=np.intp), - np.empty(tuple(), dtype=np.intp), - np.empty((1, 1), dtype=np.intp)] - real_indices = {} - real_indices['raise'] = {-1:1, 4:IndexError} - real_indices['wrap'] = {-1:1, 4:0} - real_indices['clip'] = {-1:0, 4:1} - # Currently all types but object, use the same function generation. - # So it should not be necessary to test all. However test also a non - # refcounted struct on top of object. 
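# A minimal standalone sketch (not from the deleted file) of the three
# `take` modes this test exercises; the array and indices are made up:
import numpy as np

a = np.array([10, 20])
print(a.take([-1, 4], mode='wrap'))  # [20 10]: indices wrap modulo len(a)
print(a.take([-1, 4], mode='clip'))  # [10 20]: indices clip into [0, len(a) - 1]
print(a.take([-1], mode='raise'))    # [20]: -1 is a valid index under 'raise'
# a.take([4], mode='raise') raises IndexError: 4 is out of bounds for size 2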
-        types = np.int, np.object, np.dtype([('', 'i', 2)])
-        for t in types:
-            # ta works, even if the array may be odd if the buffer
-            # interface is used
-            ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
-            tresult = list(ta.T.copy())
-            for index_array in index_arrays:
-                if index_array.size != 0:
-                    tresult[0].shape = (2,) + index_array.shape
-                    tresult[1].shape = (2,) + index_array.shape
-                for mode in modes:
-                    for index in indices:
-                        real_index = real_indices[mode][index]
-                        if real_index is IndexError and index_array.size != 0:
-                            index_array.put(0, index)
-                            assert_raises(IndexError, ta.take, index_array,
-                                          mode=mode, axis=1)
-                        elif index_array.size != 0:
-                            index_array.put(0, index)
-                            res = ta.take(index_array, mode=mode, axis=1)
-                            assert_array_equal(res, tresult[real_index])
-                        else:
-                            res = ta.take(index_array, mode=mode, axis=1)
-                            assert_(res.shape == (2,) + index_array.shape)
-
-
-    def test_refcounting(self):
-        objects = [object() for i in range(10)]
-        for mode in ('raise', 'clip', 'wrap'):
-            a = np.array(objects)
-            b = np.array([2, 2, 4, 5, 3, 5])
-            a.take(b, out=a[:6])
-            del a
-            assert_(all(sys.getrefcount(o) == 3 for o in objects))
-            # not contiguous, example:
-            a = np.array(objects * 2)[::2]
-            a.take(b, out=a[:6])
-            del a
-            assert_(all(sys.getrefcount(o) == 3 for o in objects))
-
-    def test_unicode_mode(self):
-        d = np.arange(10)
-        k = b'\xc3\xa4'.decode("UTF8")
-        assert_raises(ValueError, d.take, 5, mode=k)
-
-
-if __name__ == "__main__":
-    run_module_suite()
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_machar.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_machar.py
deleted file mode 100644
index 8d858c28b83bf..0000000000000
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_machar.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from numpy.testing import *
-
-from numpy.core.machar import MachAr
-import numpy.core.numerictypes as ntypes
-from numpy import errstate, array
-
-class TestMachAr(TestCase):
-    def _run_machar_highprec(self):
-        # Instantiate a MachAr instance with high enough precision to
-        # cause underflow
-        try:
-            hiprec = ntypes.float96
-            machar = MachAr(lambda v: array([v], hiprec))
-        except AttributeError:
-            # Skipping test: no ntypes.float96 available on this platform.
-            pass
-
-    def test_underflow(self):
-        """Regression test for #759: instantiating MachAr for dtype =
-        np.float96 raises a spurious warning."""
-        with errstate(all='raise'):
-            try:
-                self._run_machar_highprec()
-            except FloatingPointError as e:
-                self.fail("Caught %s exception, should not have been raised."
% e) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_memmap.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_memmap.py deleted file mode 100644 index b364f5eb990f4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_memmap.py +++ /dev/null @@ -1,127 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp -import os -import shutil - -from numpy import memmap -from numpy import arange, allclose, asarray -from numpy.testing import * - -class TestMemmap(TestCase): - def setUp(self): - self.tmpfp = NamedTemporaryFile(prefix='mmap') - self.tempdir = mkdtemp() - self.shape = (3, 4) - self.dtype = 'float32' - self.data = arange(12, dtype=self.dtype) - self.data.resize(self.shape) - - def tearDown(self): - self.tmpfp.close() - shutil.rmtree(self.tempdir) - - def test_roundtrip(self): - # Write data to file - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - del fp # Test __del__ machinery, which handles cleanup - - # Read data back from file - newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', - shape=self.shape) - assert_(allclose(self.data, newfp)) - assert_array_equal(self.data, newfp) - - def test_open_with_filename(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) - fp = memmap(tmpname, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - del fp - - def test_unnamed_file(self): - with TemporaryFile() as f: - fp = memmap(f, dtype=self.dtype, shape=self.shape) - del fp - - def test_attributes(self): - offset = 1 - mode = "w+" - fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, - shape=self.shape, offset=offset) - self.assertEqual(offset, fp.offset) - self.assertEqual(mode, fp.mode) - del fp - - def test_filename(self): - tmpname = mktemp('', 'mmap', dir=self.tempdir) - fp = memmap(tmpname, dtype=self.dtype, mode='w+', - shape=self.shape) - abspath = os.path.abspath(tmpname) - fp[:] = self.data[:] - self.assertEqual(abspath, fp.filename) - b = fp[:1] - self.assertEqual(abspath, b.filename) - del b - del fp - - def test_filename_fileobj(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", - shape=self.shape) - self.assertEqual(fp.filename, self.tmpfp.name) - - @dec.knownfailureif(sys.platform=='gnu0', "This test is known to fail on hurd") - def test_flush(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp[:] = self.data[:] - assert_equal(fp[0], self.data[0]) - fp.flush() - - def test_del(self): - # Make sure a view does not delete the underlying mmap - fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - fp_base[0] = 5 - fp_view = fp_base[0:1] - assert_equal(fp_view[0], 5) - del fp_view - # Should still be able to access and assign values after - # deleting the view - assert_equal(fp_base[0], 5) - fp_base[0] = 6 - assert_equal(fp_base[0], 6) - - def test_arithmetic_drops_references(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - tmp = (fp + 10) - if isinstance(tmp, memmap): - assert tmp._mmap is not fp._mmap - - def test_indexing_drops_references(self): - fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', - shape=self.shape) - tmp = fp[[(1, 2), (2, 3)]] - if isinstance(tmp, memmap): - assert tmp._mmap is not fp._mmap - - def test_slicing_keeps_references(self): - fp = memmap(self.tmpfp, 
dtype=self.dtype, mode='w+', - shape=self.shape) - assert fp[:2, :2]._mmap is fp._mmap - - def test_view(self): - fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) - new1 = fp.view() - new2 = new1.view() - assert(new1.base is fp) - assert(new2.base is fp) - new_array = asarray(fp) - assert(new_array.base is fp) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray.py deleted file mode 100644 index 68e1c11a0711d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray.py +++ /dev/null @@ -1,4482 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import tempfile -import sys -import os -import shutil -import warnings -import operator -import io -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins -from decimal import Decimal - - -import numpy as np -from nose import SkipTest -from numpy.core import * -from numpy.compat import asbytes, getexception, strchar, sixu -from test_print import in_foreign_locale -from numpy.core.multiarray_tests import ( - test_neighborhood_iterator, test_neighborhood_iterator_oob, - test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end, - test_inplace_increment, get_buffer_info - ) -from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_raises, - assert_equal, assert_almost_equal, assert_array_equal, - assert_array_almost_equal, assert_allclose, - assert_array_less, runstring, dec - ) - -# Need to test an object that does not fully implement math interface -from datetime import timedelta - - -if sys.version_info[:2] > (3, 2): - # In Python 3.3 the representation of empty shape, strides and suboffsets - # is an empty tuple instead of None. 
- # http://docs.python.org/dev/whatsnew/3.3.html#api-changes - EMPTY = () -else: - EMPTY = None - - -class TestFlags(TestCase): - def setUp(self): - self.a = arange(10) - - def test_writeable(self): - mydict = locals() - self.a.flags.writeable = False - self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict) - self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 - - def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.updateifcopy, False) - -class TestHash(TestCase): - # see #3793 - def test_int(self): - for st, ut, s in [(np.int8, np.uint8, 8), - (np.int16, np.uint16, 16), - (np.int32, np.uint32, 32), - (np.int64, np.uint64, 64)]: - for i in range(1, s): - assert_equal(hash(st(-2**i)), hash(-2**i), - err_msg="%r: -2**%d" % (st, i)) - assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (st, i - 1)) - assert_equal(hash(st(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (st, i)) - - i = max(i - 1, 1) - assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (ut, i - 1)) - assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (ut, i)) - -class TestAttributes(TestCase): - def setUp(self): - self.one = arange(10) - self.two = arange(20).reshape(4, 5) - self.three = arange(60, dtype=float64).reshape(2, 5, 6) - - def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4, 5)) - assert_equal(self.three.shape, (2, 5, 6)) - self.three.shape = (10, 3, 2) - assert_equal(self.three.shape, (10, 3, 2)) - self.three.shape = (2, 5, 6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5*num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30*num, 6*num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20*num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, arange(20)) - - def test_dtypeattr(self): - assert_equal(self.one.dtype, dtype(int_)) - assert_equal(self.three.dtype, dtype(float_)) - assert_equal(self.one.dtype.char, 'l') - assert_equal(self.three.dtype.char, 'd') - self.assertTrue(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') - - def test_int_subclassing(self): - # Regression test for https://github.com/numpy/numpy/pull/3526 - - numpy_int = np.int_(0) - - if sys.version_info[0] >= 3: - # On Py3k int_ should not inherit from int, because it's not fixed-width anymore - assert_equal(isinstance(numpy_int, int), False) - else: - # Otherwise, it should inherit from int... - assert_equal(isinstance(numpy_int, int), True) - - # ... 
and fast-path checks on C-API level should also work - from numpy.core.multiarray_tests import test_int_subclass - assert_equal(test_int_subclass(numpy_int), True) - - def test_stridesattr(self): - x = self.one - def make_array(size, offset, strides): - return ndarray(size, buffer=x, dtype=int, - offset=offset*x.itemsize, - strides=strides*x.itemsize) - assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) - self.assertRaises(ValueError, make_array, 4, 4, -2) - self.assertRaises(ValueError, make_array, 4, 2, -1) - self.assertRaises(ValueError, make_array, 8, 3, 1) - assert_equal(make_array(8, 3, 0), np.array([3]*8)) - # Check behavior reported in gh-2503: - self.assertRaises(ValueError, make_array, (2, 3), 5, array([-2, -3])) - make_array(0, 0, 10) - - def test_set_stridesattr(self): - x = self.one - def make_array(size, offset, strides): - try: - r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) - except: - raise RuntimeError(getexception()) - r.strides = strides=strides*x.itemsize - return r - assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) - assert_equal(make_array(7, 3, 1), array([3, 4, 5, 6, 7, 8, 9])) - self.assertRaises(ValueError, make_array, 4, 4, -2) - self.assertRaises(ValueError, make_array, 4, 2, -1) - self.assertRaises(RuntimeError, make_array, 8, 3, 1) - # Check that the true extent of the array is used. - # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(arange(1), (10, 10), (0, 0)) - def set_strides(arr, strides): - arr.strides = strides - self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) - - # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], - shape=(10,), strides=(-1,)) - self.assertRaises(ValueError, set_strides, x[::-1], -1) - a = x[::-1] - a.strides = 1 - a[::2].strides = 2 - - def test_fill(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = empty((3, 2, 1), t) - y = empty((3, 2, 1), t) - x.fill(1) - y[...] = 1 - assert_equal(x, y) - - def test_fill_struct_array(self): - # Filling from a scalar - x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8') - x.fill(x[0]) - assert_equal(x['f1'][1], x['f1'][0]) - # Filling from a tuple that can be converted - # to a scalar - x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) - x.fill((3.5, -2)) - assert_array_equal(x['a'], [3.5, 3.5]) - assert_array_equal(x['b'], [-2, -2]) - - -class TestArrayConstruction(TestCase): - def test_array(self): - d = np.ones(6) - r = np.array([d, d]) - assert_equal(r, np.ones((2, 6))) - - d = np.ones(6) - tgt = np.ones((2, 6)) - r = np.array([d, d]) - assert_equal(r, tgt) - tgt[1] = 2 - r = np.array([d, d + 1]) - assert_equal(r, tgt) - - d = np.ones(6) - r = np.array([[d, d]]) - assert_equal(r, np.ones((1, 2, 6))) - - d = np.ones(6) - r = np.array([[d, d], [d, d]]) - assert_equal(r, np.ones((2, 2, 6))) - - d = np.ones((6, 6)) - r = np.array([d, d]) - assert_equal(r, np.ones((2, 6, 6))) - - d = np.ones((6, )) - r = np.array([[d, d + 1], d + 2]) - assert_equal(len(r), 2) - assert_equal(r[0], [d, d + 1]) - assert_equal(r[1], d + 2) - - tgt = np.ones((2, 3), dtype=np.bool) - tgt[0, 2] = False - tgt[1, 0:2] = False - r = np.array([[True, True, False], [False, False, True]]) - assert_equal(r, tgt) - r = np.array([[True, False], [True, False], [False, True]]) - assert_equal(r, tgt.T) - - -class TestAssignment(TestCase): - def test_assignment_broadcasting(self): - a = np.arange(6).reshape(2, 3) - - # Broadcasting the input to the output - a[...] 
= np.arange(3) - assert_equal(a, [[0, 1, 2], [0, 1, 2]]) - a[...] = np.arange(2).reshape(2, 1) - assert_equal(a, [[0, 0, 0], [1, 1, 1]]) - - # For compatibility with <= 1.5, a limited version of broadcasting - # the output to the input. - # - # This behavior is inconsistent with NumPy broadcasting - # in general, because it only uses one of the two broadcasting - # rules (adding a new "1" dimension to the left of the shape), - # applied to the output instead of an input. In NumPy 2.0, this kind - # of broadcasting assignment will likely be disallowed. - a[...] = np.arange(6)[::-1].reshape(1, 2, 3) - assert_equal(a, [[5, 4, 3], [2, 1, 0]]) - # The other type of broadcasting would require a reduction operation. - def assign(a, b): - a[...] = b - assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) - - def test_assignment_errors(self): - # Address issue #2276 - class C: - pass - a = np.zeros(1) - def assign(v): - a[0] = v - assert_raises((AttributeError, TypeError), assign, C()) - assert_raises(ValueError, assign, [1]) - -class TestDtypedescr(TestCase): - def test_construction(self): - d1 = dtype('i4') - assert_equal(d1, dtype(int32)) - d2 = dtype('f8') - assert_equal(d2, dtype(float64)) - -class TestZeroRank(TestCase): - def setUp(self): - self.d = array(0), array('x', object) - - def test_ellipsis_subscript(self): - a, b = self.d - self.assertEqual(a[...], 0) - self.assertEqual(b[...], 'x') - self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9. - self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9. - - def test_empty_subscript(self): - a, b = self.d - self.assertEqual(a[()], 0) - self.assertEqual(b[()], 'x') - self.assertTrue(type(a[()]) is a.dtype.type) - self.assertTrue(type(b[()]) is str) - - def test_invalid_subscript(self): - a, b = self.d - self.assertRaises(IndexError, lambda x: x[0], a) - self.assertRaises(IndexError, lambda x: x[0], b) - self.assertRaises(IndexError, lambda x: x[array([], int)], a) - self.assertRaises(IndexError, lambda x: x[array([], int)], b) - - def test_ellipsis_subscript_assignment(self): - a, b = self.d - a[...] = 42 - self.assertEqual(a, 42) - b[...] 
= '' - self.assertEqual(b.item(), '') - - def test_empty_subscript_assignment(self): - a, b = self.d - a[()] = 42 - self.assertEqual(a, 42) - b[()] = '' - self.assertEqual(b.item(), '') - - def test_invalid_subscript_assignment(self): - a, b = self.d - def assign(x, i, v): - x[i] = v - self.assertRaises(IndexError, assign, a, 0, 42) - self.assertRaises(IndexError, assign, b, 0, '') - self.assertRaises(ValueError, assign, a, (), '') - - def test_newaxis(self): - a, b = self.d - self.assertEqual(a[newaxis].shape, (1,)) - self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ...].shape, (1,)) - self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1)) - self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1)) - self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1)) - self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) - - def test_invalid_newaxis(self): - a, b = self.d - def subscript(x, i): x[i] - self.assertRaises(IndexError, subscript, a, (newaxis, 0)) - self.assertRaises(IndexError, subscript, a, (newaxis,)*50) - - def test_constructor(self): - x = ndarray(()) - x[()] = 5 - self.assertEqual(x[()], 5) - y = ndarray((), buffer=x) - y[()] = 6 - self.assertEqual(x[()], 6) - - def test_output(self): - x = array(2) - self.assertRaises(ValueError, add, x, [1], x) - - -class TestScalarIndexing(TestCase): - def setUp(self): - self.d = array([0, 1])[0] - - def test_ellipsis_subscript(self): - a = self.d - self.assertEqual(a[...], 0) - self.assertEqual(a[...].shape, ()) - - def test_empty_subscript(self): - a = self.d - self.assertEqual(a[()], 0) - self.assertEqual(a[()].shape, ()) - - def test_invalid_subscript(self): - a = self.d - self.assertRaises(IndexError, lambda x: x[0], a) - self.assertRaises(IndexError, lambda x: x[array([], int)], a) - - def test_invalid_subscript_assignment(self): - a = self.d - def assign(x, i, v): - x[i] = v - self.assertRaises(TypeError, assign, a, 0, 42) - - def test_newaxis(self): - a = self.d - self.assertEqual(a[newaxis].shape, (1,)) - self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ...].shape, (1,)) - self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1)) - self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1)) - self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1)) - self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) - - def test_invalid_newaxis(self): - a = self.d - def subscript(x, i): x[i] - self.assertRaises(IndexError, subscript, a, (newaxis, 0)) - self.assertRaises(IndexError, subscript, a, (newaxis,)*50) - - def test_overlapping_assignment(self): - # With positive strides - a = np.arange(4) - a[:-1] = a[1:] - assert_equal(a, [1, 2, 3, 3]) - - a = np.arange(4) - a[1:] = a[:-1] - assert_equal(a, [0, 0, 1, 2]) - - # With positive and negative strides - a = np.arange(4) - a[:] = a[::-1] - assert_equal(a, [3, 2, 1, 0]) - - a = np.arange(6).reshape(2, 3) - a[::-1,:] = a[:, ::-1] - assert_equal(a, [[5, 4, 3], [2, 1, 0]]) - - a = np.arange(6).reshape(2, 3) - a[::-1, ::-1] = a[:, ::-1] - assert_equal(a, [[3, 4, 5], [0, 1, 2]]) - - # With just one element overlapping - a = np.arange(5) - a[:3] = a[2:] - assert_equal(a, [2, 3, 4, 3, 4]) - - a = np.arange(5) - a[2:] = a[:3] - assert_equal(a, [0, 1, 0, 1, 2]) - - a = np.arange(5) - a[2::-1] = a[2:] - assert_equal(a, [4, 3, 2, 3, 4]) - - a = np.arange(5) - a[2:] = a[2::-1] - assert_equal(a, [0, 1, 2, 1, 0]) - - a = np.arange(5) - a[2::-1] = a[:1:-1] - assert_equal(a, [2, 
3, 4, 3, 4]) - - a = np.arange(5) - a[:1:-1] = a[2::-1] - assert_equal(a, [0, 1, 0, 1, 2]) - -class TestCreation(TestCase): - def test_from_attribute(self): - class x(object): - def __array__(self, dtype=None): - pass - self.assertRaises(ValueError, array, x()) - - def test_from_string(self) : - types = np.typecodes['AllInteger'] + np.typecodes['Float'] - nstr = ['123', '123'] - result = array([123, 123], dtype=int) - for type in types : - msg = 'String conversion for %s' % type - assert_equal(array(nstr, dtype=type), result, err_msg=msg) - - def test_void(self): - arr = np.array([], dtype='V') - assert_equal(arr.dtype.kind, 'V') - - def test_zeros(self): - types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - for dt in types: - d = np.zeros((13,), dtype=dt) - assert_equal(np.count_nonzero(d), 0) - # true for ieee floats - assert_equal(d.sum(), 0) - assert_(not d.any()) - - d = np.zeros(2, dtype='(2,4)i4') - assert_equal(np.count_nonzero(d), 0) - assert_equal(d.sum(), 0) - assert_(not d.any()) - - d = np.zeros(2, dtype='4i4') - assert_equal(np.count_nonzero(d), 0) - assert_equal(d.sum(), 0) - assert_(not d.any()) - - d = np.zeros(2, dtype='(2,4)i4, (2,4)i4') - assert_equal(np.count_nonzero(d), 0) - - @dec.slow - def test_zeros_big(self): - # test big array as they might be allocated different by the sytem - types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - for dt in types: - d = np.zeros((30 * 1024**2,), dtype=dt) - assert_(not d.any()) - - def test_zeros_obj(self): - # test initialization from PyLong(0) - d = np.zeros((13,), dtype=object) - assert_array_equal(d, [0] * 13) - assert_equal(np.count_nonzero(d), 0) - - def test_zeros_obj_obj(self): - d = zeros(10, dtype=[('k', object, 2)]) - assert_array_equal(d['k'], 0) - - def test_zeros_like_like_zeros(self): - # test zeros_like returns the same as zeros - for c in np.typecodes['All']: - if c == 'V': - continue - d = zeros((3,3), dtype=c) - assert_array_equal(zeros_like(d), d) - assert_equal(zeros_like(d).dtype, d.dtype) - # explicitly check some special cases - d = zeros((3,3), dtype='S5') - assert_array_equal(zeros_like(d), d) - assert_equal(zeros_like(d).dtype, d.dtype) - d = zeros((3,3), dtype='U5') - assert_array_equal(zeros_like(d), d) - assert_equal(zeros_like(d).dtype, d.dtype) - - d = zeros((3,3), dtype='= 3) - def test_sequence_long(self): - assert_equal(np.array([long(4), long(4)]).dtype, np.long) - assert_equal(np.array([long(4), 2**80]).dtype, np.object) - assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object) - assert_equal(np.array([2**80, long(4)]).dtype, np.object) - - def test_non_sequence_sequence(self): - """Should not segfault. - - Class Fail breaks the sequence protocol for new style classes, i.e., - those derived from object. Class Map is a mapping type indicated by - raising a ValueError. At some point we may raise a warning instead - of an error in the Fail case. 
- - """ - class Fail(object): - def __len__(self): - return 1 - - def __getitem__(self, index): - raise ValueError() - - class Map(object): - def __len__(self): - return 1 - - def __getitem__(self, index): - raise KeyError() - - a = np.array([Map()]) - assert_(a.shape == (1,)) - assert_(a.dtype == np.dtype(object)) - assert_raises(ValueError, np.array, [Fail()]) - - -class TestStructured(TestCase): - def test_subarray_field_access(self): - a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) - a['a'] = np.arange(60).reshape(3, 5, 2, 2) - - # Since the subarray is always in C-order, a transpose - # does not swap the subarray: - assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) - - # In Fortran order, the subarray gets appended - # like in all other cases, not prepended as a special case - b = a.copy(order='F') - assert_equal(a['a'].shape, b['a'].shape) - assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) - - def test_subarray_comparison(self): - # Check that comparisons between record arrays with - # multi-dimensional field types work properly - a = np.rec.fromrecords( - [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], - dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))]) - b = a.copy() - assert_equal(a==b, [True, True]) - assert_equal(a!=b, [False, False]) - b[1].b = 'c' - assert_equal(a==b, [True, False]) - assert_equal(a!=b, [False, True]) - for i in range(3): - b[0].a = a[0].a - b[0].a[i] = 5 - assert_equal(a==b, [False, False]) - assert_equal(a!=b, [True, True]) - for i in range(2): - for j in range(2): - b = a.copy() - b[0].c[i, j] = 10 - assert_equal(a==b, [False, True]) - assert_equal(a!=b, [True, False]) - - # Check that broadcasting with a subarray works - a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) - b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) - assert_equal(a==b, [[True, True, False], [False, False, True]]) - assert_equal(b==a, [[True, True, False], [False, False, True]]) - a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) - b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) - assert_equal(a==b, [[True, True, False], [False, False, True]]) - assert_equal(b==a, [[True, True, False], [False, False, True]]) - a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) - b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) - assert_equal(a==b, [[True, False, False], [False, False, True]]) - assert_equal(b==a, [[True, False, False], [False, False, True]]) - - # Check that broadcasting Fortran-style arrays with a subarray work - a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') - b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) - assert_equal(a==b, [[True, False, False], [False, False, True]]) - assert_equal(b==a, [[True, False, False], [False, False, True]]) - - # Check that incompatible sub-array shapes don't result to broadcasting - x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) - y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) - assert_equal(x == y, False) - - x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) - y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) - assert_equal(x == y, False) - - # Check that structured arrays that are different only in - # byte-order work - a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', 'f8')]) - assert_equal(a == b, [False, True]) - - -class TestBool(TestCase): - def test_test_interning(self): - a0 = bool_(0) - b0 = 
bool_(False) - self.assertTrue(a0 is b0) - a1 = bool_(1) - b1 = bool_(True) - self.assertTrue(a1 is b1) - self.assertTrue(array([True])[0] is a1) - self.assertTrue(array(True)[()] is a1) - - def test_sum(self): - d = np.ones(101, dtype=np.bool); - assert_equal(d.sum(), d.size) - assert_equal(d[::2].sum(), d[::2].size) - assert_equal(d[::-2].sum(), d[::-2].size) - - d = np.frombuffer(b'\xff\xff' * 100, dtype=bool) - assert_equal(d.sum(), d.size) - assert_equal(d[::2].sum(), d[::2].size) - assert_equal(d[::-2].sum(), d[::-2].size) - - def check_count_nonzero(self, power, length): - powers = [2 ** i for i in range(length)] - for i in range(2**power): - l = [(i & x) != 0 for x in powers] - a = np.array(l, dtype=np.bool) - c = builtins.sum(l) - self.assertEqual(np.count_nonzero(a), c) - av = a.view(np.uint8) - av *= 3 - self.assertEqual(np.count_nonzero(a), c) - av *= 4 - self.assertEqual(np.count_nonzero(a), c) - av[av != 0] = 0xFF - self.assertEqual(np.count_nonzero(a), c) - - def test_count_nonzero(self): - # check all 12 bit combinations in a length 17 array - # covers most cases of the 16 byte unrolled code - self.check_count_nonzero(12, 17) - - @dec.slow - def test_count_nonzero_all(self): - # check all combinations in a length 17 array - # covers all cases of the 16 byte unrolled code - self.check_count_nonzero(17, 17) - - def test_count_nonzero_unaligned(self): - # prevent mistakes as e.g. gh-4060 - for o in range(7): - a = np.zeros((18,), dtype=np.bool)[o+1:] - a[:o] = True - self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) - a = np.ones((18,), dtype=np.bool)[o+1:] - a[:o] = False - self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) - -class TestMethods(TestCase): - def test_test_round(self): - assert_equal(array([1.2, 1.5]).round(), [1, 2]) - assert_equal(array(1.5).round(), 2) - assert_equal(array([12.2, 15.5]).round(-1), [10, 20]) - assert_equal(array([12.15, 15.51]).round(1), [12.2, 15.5]) - - def test_transpose(self): - a = array([[1, 2], [3, 4]]) - assert_equal(a.transpose(), [[1, 3], [2, 4]]) - self.assertRaises(ValueError, lambda: a.transpose(0)) - self.assertRaises(ValueError, lambda: a.transpose(0, 0)) - self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2)) - - def test_sort(self): - # test ordering for floats and complex containing nans. It is only - # necessary to check the lessthan comparison, so sorts that - # only follow the insertion sort path are sufficient. We only - # test doubles and complex doubles as the logic is the same. - - # check doubles - msg = "Test real sort order with nans" - a = np.array([np.nan, 1, 0]) - b = sort(a) - assert_equal(b, a[::-1], msg) - # check complex - msg = "Test complex sort order with nans" - a = np.zeros(9, dtype=np.complex128) - a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] - a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] - b = sort(a) - assert_equal(b, a[::-1], msg) - - # all c scalar sorts use the same code with different types - # so it suffices to run a quick check with one type. The number - # of sorted items must be greater than ~50 to check the actual - # algorithm because quick and merge sort fall over to insertion - # sort for small arrays. - a = np.arange(101) - b = a[::-1].copy() - for kind in ['q', 'm', 'h'] : - msg = "scalar sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test complex sorts. These use the same code as the scalars - # but the compare fuction differs. 
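# A standalone sketch (not from the deleted file) of the complex compare
# function these sorts rely on: complex values order lexicographically,
# real part first and imaginary part second, with nans pushed to the end,
# which is what the assertions above check:
import numpy as np

z = np.array([1 + 0j, 0 + 2j, 0 + 1j, 1 - 1j])
print(np.sort(z))  # [0.+1.j 0.+2.j 1.-1.j 1.+0.j]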
- ai = a*1j + 1 - bi = b*1j + 1 - for kind in ['q', 'm', 'h'] : - msg = "complex sort, real part == 1, kind=%s" % kind - c = ai.copy(); - c.sort(kind=kind) - assert_equal(c, ai, msg) - c = bi.copy(); - c.sort(kind=kind) - assert_equal(c, ai, msg) - ai = a + 1j - bi = b + 1j - for kind in ['q', 'm', 'h'] : - msg = "complex sort, imag part == 1, kind=%s" % kind - c = ai.copy(); - c.sort(kind=kind) - assert_equal(c, ai, msg) - c = bi.copy(); - c.sort(kind=kind) - assert_equal(c, ai, msg) - - # test string sorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)]) - b = a[::-1].copy() - for kind in ['q', 'm', 'h'] : - msg = "string sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test unicode sorts. - s = 'aaaaaaaa' - a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) - b = a[::-1].copy() - for kind in ['q', 'm', 'h'] : - msg = "unicode sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test object array sorts. - a = np.empty((101,), dtype=np.object) - a[:] = list(range(101)) - b = a[::-1] - for kind in ['q', 'h', 'm'] : - msg = "object sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test record array sorts. - dt = np.dtype([('f', float), ('i', int)]) - a = array([(i, i) for i in range(101)], dtype = dt) - b = a[::-1] - for kind in ['q', 'h', 'm'] : - msg = "object sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test datetime64 sorts. - a = np.arange(0, 101, dtype='datetime64[D]') - b = a[::-1] - for kind in ['q', 'h', 'm'] : - msg = "datetime64 sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # test timedelta64 sorts. - a = np.arange(0, 101, dtype='timedelta64[D]') - b = a[::-1] - for kind in ['q', 'h', 'm'] : - msg = "timedelta64 sort, kind=%s" % kind - c = a.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - c = b.copy(); - c.sort(kind=kind) - assert_equal(c, a, msg) - - # check axis handling. This should be the same for all type - # specific sorts, so we only check it for one type and one kind - a = np.array([[3, 2], [1, 0]]) - b = np.array([[1, 0], [3, 2]]) - c = np.array([[2, 3], [0, 1]]) - d = a.copy() - d.sort(axis=0) - assert_equal(d, b, "test sort with axis=0") - d = a.copy() - d.sort(axis=1) - assert_equal(d, c, "test sort with axis=1") - d = a.copy() - d.sort() - assert_equal(d, c, "test sort with default axis") - - def test_copy(self): - def assert_fortran(arr): - assert_(arr.flags.fortran) - assert_(arr.flags.f_contiguous) - assert_(not arr.flags.c_contiguous) - - def assert_c(arr): - assert_(not arr.flags.fortran) - assert_(not arr.flags.f_contiguous) - assert_(arr.flags.c_contiguous) - - a = np.empty((2, 2), order='F') - # Test copying a Fortran array - assert_c(a.copy()) - assert_c(a.copy('C')) - assert_fortran(a.copy('F')) - assert_fortran(a.copy('A')) - - # Now test starting with a C array. 
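# A standalone sketch (not from the deleted file) of the copy-order rules
# asserted here: 'C' and 'F' force a layout, the default is 'C', and 'A'
# follows the layout of the source array:
import numpy as np

f = np.empty((2, 2), order='F')
print(f.copy().flags.c_contiguous)     # True: default copy order is 'C'
print(f.copy('A').flags.f_contiguous)  # True: 'A' keeps the Fortran layout
print(np.empty((2, 2)).copy('A').flags.c_contiguous)  # True for a C source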
- a = np.empty((2, 2), order='C') - assert_c(a.copy()) - assert_c(a.copy('C')) - assert_fortran(a.copy('F')) - assert_c(a.copy('A')) - - def test_sort_order(self): - # Test sorting an array with fields - x1=np.array([21, 32, 14]) - x2=np.array(['my', 'first', 'name']) - x3=np.array([3.1, 4.5, 6.2]) - r=np.rec.fromarrays([x1, x2, x3], names='id,word,number') - - r.sort(order=['id']) - assert_equal(r.id, array([14, 21, 32])) - assert_equal(r.word, array(['name', 'my', 'first'])) - assert_equal(r.number, array([6.2, 3.1, 4.5])) - - r.sort(order=['word']) - assert_equal(r.id, array([32, 21, 14])) - assert_equal(r.word, array(['first', 'my', 'name'])) - assert_equal(r.number, array([4.5, 3.1, 6.2])) - - r.sort(order=['number']) - assert_equal(r.id, array([21, 32, 14])) - assert_equal(r.word, array(['my', 'first', 'name'])) - assert_equal(r.number, array([3.1, 4.5, 6.2])) - - if sys.byteorder == 'little': - strtype = '>i2' - else: - strtype = ' p[:, i]).all(), - msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) - aae(p, d1[np.arange(d1.shape[0])[:, None], - np.argpartition(d1, i, axis=1, kind=k)]) - - p = np.partition(d0, i, axis=0, kind=k) - aae(p[i,:], np.array([i] * d1.shape[0], - dtype=dt)) - # array_less does not seem to work right - at((p[:i,:] <= p[i,:]).all(), - msg="%d: %r <= %r" % (i, p[i,:], p[:i,:])) - at((p[i + 1:,:] > p[i,:]).all(), - msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:])) - aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), - np.arange(d0.shape[1])[None,:]]) - - # check inplace - dc = d.copy() - dc.partition(i, kind=k) - assert_equal(dc, np.partition(d, i, kind=k)) - dc = d0.copy() - dc.partition(i, axis=0, kind=k) - assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) - dc = d1.copy() - dc.partition(i, axis=1, kind=k) - assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) - - - def assert_partitioned(self, d, kth): - prev = 0 - for k in np.sort(kth): - assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) - assert_((d[k:] >= d[k]).all(), - msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) - prev = k + 1 - - - def test_partition_iterative(self): - d = np.arange(17) - kth = (0, 1, 2, 429, 231) - assert_raises(ValueError, d.partition, kth) - assert_raises(ValueError, d.argpartition, kth) - d = np.arange(10).reshape((2, 5)) - assert_raises(ValueError, d.partition, kth, axis=0) - assert_raises(ValueError, d.partition, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=None) - - d = np.array([3, 4, 2, 1]) - p = np.partition(d, (0, 3)) - self.assert_partitioned(p, (0, 3)) - self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) - - assert_array_equal(p, np.partition(d, (-3, -1))) - assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) - - d = np.arange(17) - np.random.shuffle(d) - d.partition(range(d.size)) - assert_array_equal(np.arange(17), d) - np.random.shuffle(d) - assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) - - # test unsorted kth - d = np.arange(17) - np.random.shuffle(d) - keys = np.array([1, 3, 8, -2]) - np.random.shuffle(d) - p = np.partition(d, keys) - self.assert_partitioned(p, keys) - p = d[np.argpartition(d, keys)] - self.assert_partitioned(p, keys) - np.random.shuffle(keys) - assert_array_equal(np.partition(d, keys), p) - assert_array_equal(d[np.argpartition(d, keys)], p) - - # equal kth - d = np.arange(20)[::-1] - self.assert_partitioned(np.partition(d, [5]*4), [5]) - self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), - [5]*4 + [6, 13]) - 
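# A standalone sketch (not from the deleted file) of the multiple-kth
# contract that assert_partitioned checks: each kth element lands at its
# sorted position and bounds its neighbours; the data below is made up:
import numpy as np

d = np.array([9, 1, 8, 2, 7, 3])
p = np.partition(d, (1, 4))
print(p[1], p[4])  # 2 8: the values sorted(d) has at positions 1 and 4
print(np.all(p[:1] <= p[1]), np.all(p[4:] >= p[4]))  # True True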
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) - self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], - [5]*4 + [6, 13]) - - d = np.arange(12) - np.random.shuffle(d) - d1 = np.tile(np.arange(12), (4, 1)) - map(np.random.shuffle, d1) - d0 = np.transpose(d1) - - kth = (1, 6, 7, -1) - p = np.partition(d1, kth, axis=1) - pa = d1[np.arange(d1.shape[0])[:, None], - d1.argpartition(kth, axis=1)] - assert_array_equal(p, pa) - for i in range(d1.shape[0]): - self.assert_partitioned(p[i,:], kth) - p = np.partition(d0, kth, axis=0) - pa = d0[np.argpartition(d0, kth, axis=0), - np.arange(d0.shape[1])[None,:]] - assert_array_equal(p, pa) - for i in range(d0.shape[1]): - self.assert_partitioned(p[:, i], kth) - - - def test_partition_cdtype(self): - d = array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), - ('Lancelot', 1.9, 38)], - dtype=[('name', '|S10'), ('height', ' obj, "nope") - assert_equal(arr < obj, "yep") - assert_equal(np.multiply(arr, obj), "ufunc") - arr *= obj - assert_equal(arr, 321) - - assert_equal(obj2 * arr, 123) - assert_equal(arr * obj2, 321) - assert_equal(arr > obj2, "nope") - assert_equal(arr < obj2, "yep") - assert_equal(np.multiply(arr, obj2), "ufunc") - arr *= obj2 - assert_equal(arr, 321) - - obj2 += 33 - assert_equal(obj2[0], 42) - assert_equal(obj2.sum(), 42) - assert_(isinstance(obj2, SomeClass2)) - - -class TestSubscripting(TestCase): - def test_test_zero_rank(self): - x = array([1, 2, 3]) - self.assertTrue(isinstance(x[0], np.int_)) - if sys.version_info[0] < 3: - self.assertTrue(isinstance(x[0], int)) - self.assertTrue(type(x[0, ...]) is ndarray) - - -class TestPickling(TestCase): - def test_roundtrip(self): - import pickle - carray = array([[2, 9], [7, 0], [3, 8]]) - DATA = [ - carray, - transpose(carray), - array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), - ('c', float)]) - ] - - for a in DATA: - assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a) - - def _loads(self, obj): - if sys.version_info[0] >= 3: - return loads(obj, encoding='latin1') - else: - return loads(obj) - - # version 0 pickles, using protocol=2 to pickle - # version 0 doesn't have a version field - def test_version0_int8(self): - s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' 
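# A standalone sketch (not from the deleted file): the version-0 strings
# below pin the historical pickle wire format, while a plain roundtrip
# with today's pickler looks like this:
import pickle

import numpy as np

arr = np.array([1, 2, 3, 4], dtype=np.int8)
assert np.array_equal(pickle.loads(pickle.dumps(arr, protocol=2)), arr)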
- a = array([1, 2, 3, 4], dtype=int8) - p = self._loads(asbytes(s)) - assert_equal(a, p) - - def test_version0_float32(self): - s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) - - def test_mixed(self): - g1 = array(["spam", "spa", "spammer", "and eggs"]) - g2 = "spam" - assert_array_equal(g1 == g2, [x == g2 for x in g1]) - assert_array_equal(g1 != g2, [x != g2 for x in g1]) - assert_array_equal(g1 < g2, [x < g2 for x in g1]) - assert_array_equal(g1 > g2, [x > g2 for x in g1]) - assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) - assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) - - - def test_unicode(self): - g1 = array([sixu("This"), sixu("is"), sixu("example")]) - g2 = array([sixu("This"), sixu("was"), sixu("example")]) - assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) - - -class TestArgmax(TestCase): - - nan_arr = [ - ([0, 1, 2, 3, np.nan], 4), - ([0, 1, 2, np.nan, 3], 3), - ([np.nan, 0, 1, 2, 3], 0), - ([np.nan, 0, np.nan, 2, 3], 0), - ([0, 1, 2, 3, complex(0, np.nan)], 4), - ([0, 1, 2, 3, complex(np.nan, 0)], 4), - ([0, 1, 2, complex(np.nan, 0), 3], 3), - ([0, 1, 2, complex(0, np.nan), 3], 3), - ([complex(0, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), - - ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), - ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), - ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), - - ([np.datetime64('1923-04-14T12:43:12'), - np.datetime64('1994-06-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('1995-11-25T16:02:16'), - np.datetime64('2005-01-04T03:14:12'), - np.datetime64('2041-12-03T14:05:03')], 5), - ([np.datetime64('1935-09-14T04:40:11'), - np.datetime64('1949-10-12T12:32:11'), - np.datetime64('2010-01-03T05:14:12'), - np.datetime64('2015-11-20T12:20:59'), - np.datetime64('1932-09-23T10:10:13'), - np.datetime64('2014-10-10T03:50:30')], 3), - ([np.datetime64('2059-03-14T12:43:12'), - np.datetime64('1996-09-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('2022-12-25T16:02:16'), - np.datetime64('1963-10-04T03:14:12'), - np.datetime64('2013-05-08T18:15:23')], 0), - - ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), - timedelta(days=-1, seconds=23)], 0), - ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), - timedelta(days=5, seconds=14)], 1), - ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), - timedelta(days=10, seconds=43)], 2), - - ([False, False, False, False, True], 4), - ([False, False, False, True, False], 3), - ([True, False, False, False, False], 0), - ([True, False, True, False, False], 0), - - # Can't reduce a "flexible type" - #(['a', 'z', 'aa', 'zz'], 3), - #(['zz', 'a', 
'aa', 'a'], 0), - #(['aa', 'z', 'zz', 'a'], 2), - ] - - def test_all(self): - a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) - for i in range(a.ndim): - amax = a.max(i) - aargmax = a.argmax(i) - axes = list(range(a.ndim)) - axes.remove(i) - assert_(all(amax == aargmax.choose(*a.transpose(i,*axes)))) - - def test_combinations(self): - for arr, pos in self.nan_arr: - assert_equal(np.argmax(arr), pos, err_msg="%r"%arr) - assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr) - - def test_output_shape(self): - # see also gh-616 - a = np.ones((10, 5)) - # Check some simple shape mismatches - out = np.ones(11, dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, out) - - out = np.ones((2, 5), dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, out) - - # these could be relaxed possibly (used to allow even the previous) - out = np.ones((1, 10), dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, np.ones((1, 10))) - - out = np.ones(10, dtype=np.int_) - a.argmax(-1, out=out) - assert_equal(out, a.argmax(-1)) - - -class TestArgmin(TestCase): - - nan_arr = [ - ([0, 1, 2, 3, np.nan], 4), - ([0, 1, 2, np.nan, 3], 3), - ([np.nan, 0, 1, 2, 3], 0), - ([np.nan, 0, np.nan, 2, 3], 0), - ([0, 1, 2, 3, complex(0, np.nan)], 4), - ([0, 1, 2, 3, complex(np.nan, 0)], 4), - ([0, 1, 2, complex(np.nan, 0), 3], 3), - ([0, 1, 2, complex(0, np.nan), 3], 3), - ([complex(0, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), - ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), - - ([complex(0, 0), complex(0, 2), complex(0, 1)], 0), - ([complex(1, 0), complex(0, 2), complex(0, 1)], 2), - ([complex(1, 0), complex(0, 2), complex(1, 1)], 1), - - ([np.datetime64('1923-04-14T12:43:12'), - np.datetime64('1994-06-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('1995-11-25T16:02:16'), - np.datetime64('2005-01-04T03:14:12'), - np.datetime64('2041-12-03T14:05:03')], 0), - ([np.datetime64('1935-09-14T04:40:11'), - np.datetime64('1949-10-12T12:32:11'), - np.datetime64('2010-01-03T05:14:12'), - np.datetime64('2014-11-20T12:20:59'), - np.datetime64('2015-09-23T10:10:13'), - np.datetime64('1932-10-10T03:50:30')], 5), - ([np.datetime64('2059-03-14T12:43:12'), - np.datetime64('1996-09-21T14:43:15'), - np.datetime64('2001-10-15T04:10:32'), - np.datetime64('2022-12-25T16:02:16'), - np.datetime64('1963-10-04T03:14:12'), - np.datetime64('2013-05-08T18:15:23')], 4), - - ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), - timedelta(days=-1, seconds=23)], 2), - ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), - timedelta(days=5, seconds=14)], 0), - ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), - timedelta(days=10, seconds=43)], 1), - - ([True, True, True, True, False], 4), - ([True, True, True, False, True], 3), - ([False, True, True, True, True], 0), - ([False, True, False, True, True], 0), - - # Can't reduce a "flexible type" - #(['a', 'z', 'aa', 'zz'], 0), - #(['zz', 'a', 'aa', 'a'], 1), - #(['aa', 'z', 'zz', 'a'], 3), - ] - - def test_all(self): - a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) - for i in range(a.ndim): - amin = a.min(i) - aargmin = a.argmin(i) - axes = list(range(a.ndim)) - axes.remove(i) - assert_(all(amin == aargmin.choose(*a.transpose(i,*axes)))) - - def test_combinations(self): - for arr, pos in self.nan_arr: - assert_equal(np.argmin(arr), pos, 
err_msg="%r"%arr) - assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr) - - def test_minimum_signed_integers(self): - - a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8) - assert_equal(np.argmin(a), 1) - - a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16) - assert_equal(np.argmin(a), 1) - - a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32) - assert_equal(np.argmin(a), 1) - - a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64) - assert_equal(np.argmin(a), 1) - - def test_output_shape(self): - # see also gh-616 - a = np.ones((10, 5)) - # Check some simple shape mismatches - out = np.ones(11, dtype=np.int_) - assert_raises(ValueError, a.argmin, -1, out) - - out = np.ones((2, 5), dtype=np.int_) - assert_raises(ValueError, a.argmin, -1, out) - - # these could be relaxed possibly (used to allow even the previous) - out = np.ones((1, 10), dtype=np.int_) - assert_raises(ValueError, a.argmin, -1, np.ones((1, 10))) - - out = np.ones(10, dtype=np.int_) - a.argmin(-1, out=out) - assert_equal(out, a.argmin(-1)) - - -class TestMinMax(TestCase): - def test_scalar(self): - assert_raises(ValueError, np.amax, 1, 1) - assert_raises(ValueError, np.amin, 1, 1) - - assert_equal(np.amax(1, axis=0), 1) - assert_equal(np.amin(1, axis=0), 1) - assert_equal(np.amax(1, axis=None), 1) - assert_equal(np.amin(1, axis=None), 1) - - def test_axis(self): - assert_raises(ValueError, np.amax, [1, 2, 3], 1000) - assert_equal(np.amax([[1, 2, 3]], axis=1), 3) - -class TestNewaxis(TestCase): - def test_basic(self): - sk = array([0, -0.1, 0.1]) - res = 250*sk[:, newaxis] - assert_almost_equal(res.ravel(), 250*sk) - - -class TestClip(TestCase): - def _check_range(self, x, cmin, cmax): - assert_(np.all(x >= cmin)) - assert_(np.all(x <= cmax)) - - def _clip_type(self,type_group,array_max, - clip_min,clip_max,inplace=False, - expected_min=None,expected_max=None): - if expected_min is None: - expected_min = clip_min - if expected_max is None: - expected_max = clip_max - - for T in np.sctypes[type_group]: - if sys.byteorder == 'little': - byte_orders = ['=', '>'] - else: - byte_orders = ['<', '='] - - for byteorder in byte_orders: - dtype = np.dtype(T).newbyteorder(byteorder) - - x = (np.random.random(1000) * array_max).astype(dtype) - if inplace: - x.clip(clip_min, clip_max, x) - else: - x = x.clip(clip_min, clip_max) - byteorder = '=' - - if x.dtype.byteorder == '|': byteorder = '|' - assert_equal(x.dtype.byteorder, byteorder) - self._check_range(x, expected_min, expected_max) - return x - - def test_basic(self): - for inplace in [False, True]: - self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace) - self._clip_type('float', 1024, 0, 0, inplace=inplace) - - self._clip_type('int', 1024, -120, 100.5, inplace=inplace) - self._clip_type('int', 1024, 0, 0, inplace=inplace) - - x = self._clip_type('uint', 1024, -120, 100, expected_min=0, - inplace=inplace) - x = self._clip_type('uint', 1024, 0, 0, inplace=inplace) - - def test_record_array(self): - rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], - dtype=[('x', '= 3)) - x = val.clip(min=3) - assert_(np.all(x >= 3)) - x = val.clip(max=4) - assert_(np.all(x <= 4)) - - -class TestPutmask(object): - def tst_basic(self, x, T, mask, val): - np.putmask(x, mask, val) - assert_(np.all(x[mask] == T(val))) - assert_(x.dtype == T) - - def test_ip_types(self): - unchecked_types = [str, unicode, np.void, object] - - x = np.random.random(1000)*100 - mask = x < 40 - - for val in [-100, 0, 15]: - for types in np.sctypes.values(): - for T in types: - if T not in 
unchecked_types: - yield self.tst_basic, x.copy().astype(T), T, mask, val - - def test_mask_size(self): - assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) - - def tst_byteorder(self, dtype): - x = np.array([1, 2, 3], dtype) - np.putmask(x, [True, False, True], -1) - assert_array_equal(x, [-1, 2, -1]) - - def test_ip_byteorder(self): - for dtype in ('>i4', 'f8'), ('z', 'i4', 'f8'), ('z', ' 1 minute on mechanical hard drive - def test_big_binary(self): - """Test workarounds for 32-bit limited fwrite, fseek, and ftell - calls in windows. These normally would hang doing something like this. - See http://projects.scipy.org/numpy/ticket/1660""" - if sys.platform != 'win32': - return - try: - # before workarounds, only up to 2**32-1 worked - fourgbplus = 2**32 + 2**16 - testbytes = np.arange(8, dtype=np.int8) - n = len(testbytes) - flike = tempfile.NamedTemporaryFile() - f = flike.file - np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) - flike.seek(0) - a = np.fromfile(f, dtype=np.int8) - flike.close() - assert_(len(a) == fourgbplus) - # check only start and end for speed: - assert_((a[:n] == testbytes).all()) - assert_((a[-n:] == testbytes).all()) - except (MemoryError, ValueError): - pass - - def test_string(self): - self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',') - - def test_counted_string(self): - self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') - self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',') - self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') - - def test_string_with_ws(self): - self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') - - def test_counted_string_with_ws(self): - self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int, - sep=' ') - - def test_ascii(self): - self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') - self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') - - def test_malformed(self): - self._check_from('1.234 1,234', [1.234, 1.], sep=' ') - - def test_long_sep(self): - self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') - - def test_dtype(self): - v = np.array([1, 2, 3, 4], dtype=np.int_) - self._check_from('1,2,3,4', v, sep=',', dtype=np.int_) - - def test_dtype_bool(self): - # can't use _check_from because fromstring can't handle True/False - v = np.array([True, False, True, False], dtype=np.bool_) - s = '1,0,-2.3,0' - f = open(self.filename, 'wb') - f.write(asbytes(s)) - f.close() - y = np.fromfile(self.filename, sep=',', dtype=np.bool_) - assert_(y.dtype == '?') - assert_array_equal(y, v) - - def test_tofile_sep(self): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - f = open(self.filename, 'w') - x.tofile(f, sep=',') - f.close() - f = open(self.filename, 'r') - s = f.read() - f.close() - assert_equal(s, '1.51,2.0,3.51,4.0') - - def test_tofile_format(self): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - f = open(self.filename, 'w') - x.tofile(f, sep=',', format='%.2f') - f.close() - f = open(self.filename, 'r') - s = f.read() - f.close() - assert_equal(s, '1.51,2.00,3.51,4.00') - - def test_locale(self): - in_foreign_locale(self.test_numbers)() - in_foreign_locale(self.test_nan)() - in_foreign_locale(self.test_inf)() - in_foreign_locale(self.test_counted_string)() - in_foreign_locale(self.test_ascii)() - in_foreign_locale(self.test_malformed)() - in_foreign_locale(self.test_tofile_sep)() - in_foreign_locale(self.test_tofile_format)() - - -class TestFromBuffer(object): - def tst_basic(self, buffer, expected, kwargs): - 
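# An illustrative, standalone sketch (assumes NumPy >= 1.9; not part of the
# deleted test file) of the frombuffer round trip driven below: tobytes()
# serializes the array's buffer, and np.frombuffer reinterprets those bytes
# under a matching dtype without copying.
import numpy as np
x = (np.random.random((4, 7)) * 5).astype('<f8')
buf = x.tobytes()                       # raw little-endian float64 bytes
y = np.frombuffer(buf, dtype='<f8')     # read-only view over the bytes
assert (y == x.ravel()).all()           # flat contents round-trip exactly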
assert_array_equal(np.frombuffer(buffer,**kwargs), expected) - - def test_ip_basic(self): - for byteorder in ['<', '>']: - for dtype in [float, int, np.complex]: - dt = np.dtype(dtype).newbyteorder(byteorder) - x = (np.random.random((4, 7))*5).astype(dt) - buf = x.tobytes() - yield self.tst_basic, buf, x.flat, {'dtype':dt} - - def test_empty(self): - yield self.tst_basic, asbytes(''), np.array([]), {} - - -class TestFlat(TestCase): - def setUp(self): - a0 = arange(20.0) - a = a0.reshape(4, 5) - a0.shape = (4, 5) - a.flags.writeable = False - self.a = a - self.b = a[::2, ::2] - self.a0 = a0 - self.b0 = a0[::2, ::2] - - def test_contiguous(self): - testpassed = False - try: - self.a.flat[12] = 100.0 - except ValueError: - testpassed = True - assert testpassed - assert self.a.flat[12] == 12.0 - - def test_discontiguous(self): - testpassed = False - try: - self.b.flat[4] = 100.0 - except ValueError: - testpassed = True - assert testpassed - assert self.b.flat[4] == 12.0 - - def test___array__(self): - c = self.a.flat.__array__() - d = self.b.flat.__array__() - e = self.a0.flat.__array__() - f = self.b0.flat.__array__() - - assert c.flags.writeable is False - assert d.flags.writeable is False - assert e.flags.writeable is True - assert f.flags.writeable is True - - assert c.flags.updateifcopy is False - assert d.flags.updateifcopy is False - assert e.flags.updateifcopy is False - assert f.flags.updateifcopy is True - assert f.base is self.b0 - -class TestResize(TestCase): - def test_basic(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - x.resize((5, 5)) - assert_array_equal(x.flat[:9], - np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) - assert_array_equal(x[9:].flat, 0) - - def test_check_reference(self): - x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - y = x - self.assertRaises(ValueError, x.resize, (5, 1)) - - def test_int_shape(self): - x = np.eye(3) - x.resize(3) - assert_array_equal(x, np.eye(3)[0,:]) - - def test_none_shape(self): - x = np.eye(3) - x.resize(None) - assert_array_equal(x, np.eye(3)) - x.resize() - assert_array_equal(x, np.eye(3)) - - def test_invalid_arguements(self): - self.assertRaises(TypeError, np.eye(3).resize, 'hi') - self.assertRaises(ValueError, np.eye(3).resize, -1) - self.assertRaises(TypeError, np.eye(3).resize, order=1) - self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi') - - def test_freeform_shape(self): - x = np.eye(3) - x.resize(3, 2, 1) - assert_(x.shape == (3, 2, 1)) - - def test_zeros_appended(self): - x = np.eye(3) - x.resize(2, 3, 3) - assert_array_equal(x[0], np.eye(3)) - assert_array_equal(x[1], np.zeros((3, 3))) - - def test_obj_obj(self): - # check memory is initialized on resize, gh-4857 - a = ones(10, dtype=[('k', object, 2)]) - a.resize(15,) - assert_equal(a.shape, (15,)) - assert_array_equal(a['k'][-5:], 0) - assert_array_equal(a['k'][:-5], 1) - - -class TestRecord(TestCase): - def test_field_rename(self): - dt = np.dtype([('f', float), ('i', int)]) - dt.names = ['p', 'q'] - assert_equal(dt.names, ['p', 'q']) - - if sys.version_info[0] >= 3: - def test_bytes_fields(self): - # Bytes are not allowed in field names and not recognized in titles - # on Py3 - assert_raises(TypeError, np.dtype, [(asbytes('a'), int)]) - assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)]) - - dt = np.dtype([((asbytes('a'), 'b'), int)]) - assert_raises(ValueError, dt.__getitem__, asbytes('a')) - - x = np.array([(1,), (2,), (3,)], dtype=dt) - assert_raises(ValueError, x.__getitem__, asbytes('a')) - - y = x[0] - 
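# An illustrative, standalone sketch (assumes NumPy >= 1.7; not part of the
# deleted test file) of the field-name indexing the surrounding TestRecord
# cases pin down: fields are addressed by (str) name, and an unknown name
# raises rather than failing silently.
import numpy as np
rec = np.zeros(3, dtype=[('f1', 'i4'), ('f2', 'f8')])
rec['f1'] = [1, 2, 3]                   # assign a whole field at once
assert rec['f1'][1] == 2                # read back one element of the field
try:
    rec['missing']                      # no such field
except (KeyError, ValueError):          # NumPy 1.x raised ValueError here
    pass
else:
    raise AssertionError("lookup of an unknown field must raise")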
assert_raises(IndexError, y.__getitem__, asbytes('a')) - else: - def test_unicode_field_titles(self): - # Unicode field titles are added to field dict on Py2 - title = unicode('b') - dt = np.dtype([((title, 'a'), int)]) - dt[title] - dt['a'] - x = np.array([(1,), (2,), (3,)], dtype=dt) - x[title] - x['a'] - y = x[0] - y[title] - y['a'] - - def test_unicode_field_names(self): - # Unicode field names are not allowed on Py2 - title = unicode('b') - assert_raises(TypeError, np.dtype, [(title, int)]) - assert_raises(TypeError, np.dtype, [(('a', title), int)]) - - def test_field_names(self): - # Test unicode and 8-bit / byte strings can be used - a = np.zeros((1,), dtype=[('f1', 'i4'), - ('f2', 'i4'), - ('f3', [('sf1', 'i4')])]) - is_py3 = sys.version_info[0] >= 3 - if is_py3: - funcs = (str,) - # byte string indexing fails gracefully - assert_raises(ValueError, a.__setitem__, asbytes('f1'), 1) - assert_raises(ValueError, a.__getitem__, asbytes('f1')) - assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1) - assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1')) - else: - funcs = (str, unicode) - for func in funcs: - b = a.copy() - fn1 = func('f1') - b[fn1] = 1 - assert_equal(b[fn1], 1) - fnn = func('not at all') - assert_raises(ValueError, b.__setitem__, fnn, 1) - assert_raises(ValueError, b.__getitem__, fnn) - b[0][fn1] = 2 - assert_equal(b[fn1], 2) - # Subfield - assert_raises(IndexError, b[0].__setitem__, fnn, 1) - assert_raises(IndexError, b[0].__getitem__, fnn) - # Subfield - fn3 = func('f3') - sfn1 = func('sf1') - b[fn3][sfn1] = 1 - assert_equal(b[fn3][sfn1], 1) - assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) - assert_raises(ValueError, b[fn3].__getitem__, fnn) - # multiple Subfields - fn2 = func('f2') - b[fn2] = 3 - assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) - assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) - assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) - # view of subfield view/copy - assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3)) - assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2)) - view_dtype=[('f1', 'i4'), ('f3', [('', 'i4')])] - assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,))) - # non-ascii unicode field indexing is well behaved - if not is_py3: - raise SkipTest('non ascii unicode field indexing skipped; ' - 'raises segfault on python 2.x') - else: - assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1) - assert_raises(ValueError, a.__getitem__, sixu('\u03e0')) - - def test_field_names_deprecation(self): - - def collect_warning_types(f, *args, **kwargs): - with warnings.catch_warnings(record=True) as log: - warnings.simplefilter("always") - f(*args, **kwargs) - return [w.category for w in log] - - a = np.zeros((1,), dtype=[('f1', 'i4'), - ('f2', 'i4'), - ('f3', [('sf1', 'i4')])]) - a['f1'][0] = 1 - a['f2'][0] = 2 - a['f3'][0] = (3,) - b = np.zeros((1,), dtype=[('f1', 'i4'), - ('f2', 'i4'), - ('f3', [('sf1', 'i4')])]) - b['f1'][0] = 1 - b['f2'][0] = 2 - b['f3'][0] = (3,) - - # All the different functions raise a warning, but not an error, and - # 'a' is not modified: - assert_equal(collect_warning_types(a[['f1', 'f2']].__setitem__, 0, (10, 20)), - [FutureWarning]) - assert_equal(a, b) - # Views also warn - subset = a[['f1', 'f2']] - subset_view = subset.view() - assert_equal(collect_warning_types(subset_view['f1'].__setitem__, 0, 10), - [FutureWarning]) - # But the write goes through: - assert_equal(subset['f1'][0], 10) - # Only one warning per multiple field indexing, though 
(even if there are - # multiple views involved): - assert_equal(collect_warning_types(subset['f1'].__setitem__, 0, 10), - []) - - def test_record_hash(self): - a = np.array([(1, 2), (1, 2)], dtype='i1,i2') - a.flags.writeable = False - b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) - b.flags.writeable = False - c = np.array([(1, 2), (3, 4)], dtype='i1,i2') - c.flags.writeable = False - self.assertTrue(hash(a[0]) == hash(a[1])) - self.assertTrue(hash(a[0]) == hash(b[0])) - self.assertTrue(hash(a[0]) != hash(b[1])) - self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0]) - - def test_record_no_hash(self): - a = np.array([(1, 2), (1, 2)], dtype='i1,i2') - self.assertRaises(TypeError, hash, a[0]) - -class TestView(TestCase): - def test_basic(self): - x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8), - ('b', np.int8), ('a', np.int8)]) - # We must be specific about the endianness here: - y = x.view(dtype=' 0) - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_empty(self): - A = np.zeros((0, 3)) - for f in self.funcs: - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(A, axis=axis)).all()) - assert_(len(w) > 0) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(f(A, axis=axis), np.zeros([])) - - def test_mean_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1]: - tgt = mat.sum(axis=axis) - res = _mean(mat, axis=axis) * mat.shape[axis] - assert_almost_equal(res, tgt) - for axis in [None]: - tgt = mat.sum(axis=axis) - res = _mean(mat, axis=axis) * np.prod(mat.shape) - assert_almost_equal(res, tgt) - - def test_var_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1, None]: - msqr = _mean(mat * mat.conj(), axis=axis) - mean = _mean(mat, axis=axis) - tgt = msqr - mean * mean.conjugate() - res = _var(mat, axis=axis) - assert_almost_equal(res, tgt) - - def test_std_values(self): - for mat in [self.rmat, self.cmat, self.omat]: - for axis in [0, 1, None]: - tgt = np.sqrt(_var(mat, axis=axis)) - res = _std(mat, axis=axis) - assert_almost_equal(res, tgt) - - - def test_subclass(self): - class TestArray(np.ndarray): - def __new__(cls, data, info): - result = np.array(data) - result = result.view(cls) - result.info = info - return result - def __array_finalize__(self, obj): - self.info = getattr(obj, "info", '') - - dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') - res = dat.mean(1) - assert_(res.info == dat.info) - res = dat.std(1) - assert_(res.info == dat.info) - res = dat.var(1) - assert_(res.info == dat.info) - -class TestDot(TestCase): - def test_dot_2args(self): - from numpy.core.multiarray import dot - - a = np.array([[1, 2], [3, 4]], dtype=float) - b = np.array([[1, 0], [1, 1]], dtype=float) - c = np.array([[3, 2], [7, 4]], dtype=float) - - d = dot(a, b) - assert_allclose(c, d) - - def test_dot_3args(self): - from numpy.core.multiarray import dot - - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 32)) - for i in range(12): - dot(f, v, r) - assert_equal(sys.getrefcount(r), 2) - r2 = dot(f, v, out=None) - assert_array_equal(r2, r) - assert_(r is dot(f, v, out=r)) - - v = v[:, 0].copy() # v.shape == (16,) - r = r[:, 0].copy() # r.shape == (1024,) - r2 = dot(f, v) - assert_(r is dot(f, v, 
r)) - assert_array_equal(r2, r) - - def test_dot_3args_errors(self): - from numpy.core.multiarray import dot - - np.random.seed(22) - f = np.random.random_sample((1024, 16)) - v = np.random.random_sample((16, 32)) - - r = np.empty((1024, 31)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((1024,)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((32,)) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((32, 1024)) - assert_raises(ValueError, dot, f, v, r) - assert_raises(ValueError, dot, f, v, r.T) - - r = np.empty((1024, 64)) - assert_raises(ValueError, dot, f, v, r[:, ::2]) - assert_raises(ValueError, dot, f, v, r[:, :32]) - - r = np.empty((1024, 32), dtype=np.float32) - assert_raises(ValueError, dot, f, v, r) - - r = np.empty((1024, 32), dtype=int) - assert_raises(ValueError, dot, f, v, r) - - def test_dot_scalar_and_matrix_of_objects(self): - # Ticket #2469 - arr = np.matrix([1, 2], dtype=object) - desired = np.matrix([[3, 6]], dtype=object) - assert_equal(np.dot(arr, 3), desired) - assert_equal(np.dot(3, arr), desired) - - -class TestInner(TestCase): - - def test_inner_scalar_and_matrix_of_objects(self): - # Ticket #4482 - arr = np.matrix([1, 2], dtype=object) - desired = np.matrix([[3, 6]], dtype=object) - assert_equal(np.inner(arr, 3), desired) - assert_equal(np.inner(3, arr), desired) - - -class TestSummarization(TestCase): - def test_1d(self): - A = np.arange(1001) - strA = '[ 0 1 2 ..., 998 999 1000]' - assert_(str(A) == strA) - - reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' - assert_(repr(A) == reprA) - - def test_2d(self): - A = np.arange(1002).reshape(2, 501) - strA = '[[ 0 1 2 ..., 498 499 500]\n' \ - ' [ 501 502 503 ..., 999 1000 1001]]' - assert_(str(A) == strA) - - reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ - ' [ 501, 502, 503, ..., 999, 1000, 1001]])' - assert_(repr(A) == reprA) - - -class TestChoose(TestCase): - def setUp(self): - self.x = 2*ones((3,), dtype=int) - self.y = 3*ones((3,), dtype=int) - self.x2 = 2*ones((2, 3), dtype=int) - self.y2 = 3*ones((2, 3), dtype=int) - self.ind = [0, 0, 1] - - def test_basic(self): - A = np.choose(self.ind, (self.x, self.y)) - assert_equal(A, [2, 2, 3]) - - def test_broadcast1(self): - A = np.choose(self.ind, (self.x2, self.y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - def test_broadcast2(self): - A = np.choose(self.ind, (self.x, self.y2)) - assert_equal(A, [[2, 2, 3], [2, 2, 3]]) - - -# TODO: test for multidimensional -NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} -class TestNeighborhoodIter(TestCase): - # Simple, 2d tests - def _test_simple2d(self, dt): - # Test zero and one padding for simple data type - x = np.array([[0, 1], [2, 3]], dtype=dt) - r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), - np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), - np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), - np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] - l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], - NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), - np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), - np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), - np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] - l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], - NEIGH_MODE['one']) - assert_array_equal(l, r) - - r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), - np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), - np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), - np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] - l = test_neighborhood_iterator(x, 
[-1, 0, -1, 1], 4, - NEIGH_MODE['constant']) - assert_array_equal(l, r) - - def test_simple2d(self): - self._test_simple2d(np.float) - - def test_simple2d_object(self): - self._test_simple2d(Decimal) - - def _test_mirror2d(self, dt): - x = np.array([[0, 1], [2, 3]], dtype=dt) - r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), - np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), - np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), - np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] - l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], - NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - def test_mirror2d(self): - self._test_mirror2d(np.float) - - def test_mirror2d_object(self): - self._test_mirror2d(Decimal) - - # Simple, 1d tests - def _test_simple(self, dt): - # Test padding with constant values - x = np.linspace(1, 5, 5).astype(dt) - r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] - l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] - l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one']) - assert_array_equal(l, r) - - r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] - l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant']) - assert_array_equal(l, r) - - def test_simple_float(self): - self._test_simple(np.float) - - def test_simple_object(self): - self._test_simple(Decimal) - - # Test mirror modes - def _test_mirror(self, dt): - x = np.linspace(1, 5, 5).astype(dt) - r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], - [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) - l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror']) - self.assertTrue([i.dtype == dt for i in l]) - assert_array_equal(l, r) - - def test_mirror(self): - self._test_mirror(np.float) - - def test_mirror_object(self): - self._test_mirror(Decimal) - - # Circular mode - def _test_circular(self, dt): - x = np.linspace(1, 5, 5).astype(dt) - r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], - [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) - l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - def test_circular(self): - self._test_circular(np.float) - - def test_circular_object(self): - self._test_circular(Decimal) - -# Test stacking neighborhood iterators -class TestStackedNeighborhoodIter(TestCase): - # Simple, 1d test: stacking 2 constant-padded neigh iterators - def test_simple_const(self): - dt = np.float64 - # Test zero and one padding for simple data type - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0], dtype=dt), - np.array([0], dtype=dt), - np.array([1], dtype=dt), - np.array([2], dtype=dt), - np.array([3], dtype=dt), - np.array([0], dtype=dt), - np.array([0], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'], - [0, 0], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - r = [np.array([1, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 1], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], - [-1, 1], NEIGH_MODE['one']) - assert_array_equal(l, r) - - # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and - # mirror padding - def test_simple_mirror(self): - dt = np.float64 - # Stacking zero on top of mirror - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 1], dtype=dt), - np.array([1, 1, 2], dtype=dt), 
- np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 3], dtype=dt), - np.array([3, 3, 0], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'], - [-1, 1], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], - [-2, 0], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 2nd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 3], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], - [0, 2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 3rd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 0, 0, 1, 2], dtype=dt), - np.array([0, 0, 1, 2, 3], dtype=dt), - np.array([0, 1, 2, 3, 0], dtype=dt), - np.array([1, 2, 3, 0, 0], dtype=dt), - np.array([2, 3, 0, 0, 3], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], - [-2, 2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and - # circular padding - def test_simple_circular(self): - dt = np.float64 - # Stacking zero on top of mirror - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 3, 1], dtype=dt), - np.array([3, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 1], dtype=dt), - np.array([3, 1, 0], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'], - [-1, 1], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt), - np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], - [-2, 0], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 2nd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([0, 1, 2], dtype=dt), - np.array([1, 2, 3], dtype=dt), - np.array([2, 3, 0], dtype=dt), - np.array([3, 0, 0], dtype=dt), - np.array([0, 0, 1], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], - [0, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero: 3rd - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([3, 0, 0, 1, 2], dtype=dt), - np.array([0, 0, 1, 2, 3], dtype=dt), - np.array([0, 1, 2, 3, 0], dtype=dt), - np.array([1, 2, 3, 0, 0], dtype=dt), - np.array([2, 3, 0, 0, 1], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], - [-2, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - - # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator - # being strictly within the array - def test_simple_strict_within(self): - dt = np.float64 - # Stacking zero on top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 2, 3, 0], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], - [-1, 2], NEIGH_MODE['zero']) - assert_array_equal(l, r) - - # Stacking mirror on 
top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 2, 3, 3], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], - [-1, 2], NEIGH_MODE['mirror']) - assert_array_equal(l, r) - - # Stacking mirror on top of zero, first neighborhood strictly inside the - # array - x = np.array([1, 2, 3], dtype=dt) - r = [np.array([1, 2, 3, 1], dtype=dt)] - l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], - [-1, 2], NEIGH_MODE['circular']) - assert_array_equal(l, r) - -class TestWarnings(object): - - def test_complex_warning(self): - x = np.array([1, 2]) - y = np.array([1-2j, 1+2j]) - - with warnings.catch_warnings(): - warnings.simplefilter("error", np.ComplexWarning) - assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) - assert_equal(x, [1, 2]) - -class TestMinScalarType(object): - - def test_usigned_shortshort(self): - dt = np.min_scalar_type(2**8-1) - wanted = np.dtype('uint8') - assert_equal(wanted, dt) - - def test_usigned_short(self): - dt = np.min_scalar_type(2**16-1) - wanted = np.dtype('uint16') - assert_equal(wanted, dt) - - def test_usigned_int(self): - dt = np.min_scalar_type(2**32-1) - wanted = np.dtype('uint32') - assert_equal(wanted, dt) - - def test_usigned_longlong(self): - dt = np.min_scalar_type(2**63-1) - wanted = np.dtype('uint64') - assert_equal(wanted, dt) - - def test_object(self): - dt = np.min_scalar_type(2**64) - wanted = np.dtype('O') - assert_equal(wanted, dt) - - -if sys.version_info[:2] == (2, 6): - from numpy.core.multiarray import memorysimpleview as memoryview - -from numpy.core._internal import _dtype_from_pep3118 - -class TestPEP3118Dtype(object): - def _check(self, spec, wanted): - dt = np.dtype(wanted) - if isinstance(wanted, list) and isinstance(wanted[-1], tuple): - if wanted[-1][0] == '': - names = list(dt.names) - names[-1] = '' - dt.names = tuple(names) - assert_equal(_dtype_from_pep3118(spec), dt, - err_msg="spec %r != dtype %r" % (spec, wanted)) - - def test_native_padding(self): - align = np.dtype('i').alignment - for j in range(8): - if j == 0: - s = 'bi' - else: - s = 'b%dxi' % j - self._check('@'+s, {'f0': ('i1', 0), - 'f1': ('i', align*(1 + j//align))}) - self._check('='+s, {'f0': ('i1', 0), - 'f1': ('i', 1+j)}) - - def test_native_padding_2(self): - # Native padding should work also for structs and sub-arrays - self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) - self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) - - def test_trailing_padding(self): - # Trailing padding should be included, *and*, the item size - # should match the alignment if in aligned mode - align = np.dtype('i').alignment - def VV(n): - return 'V%d' % (align*(1 + (n-1)//align)) - - self._check('ix', [('f0', 'i'), ('', VV(1))]) - self._check('ixx', [('f0', 'i'), ('', VV(2))]) - self._check('ixxx', [('f0', 'i'), ('', VV(3))]) - self._check('ixxxx', [('f0', 'i'), ('', VV(4))]) - self._check('i7x', [('f0', 'i'), ('', VV(7))]) - - self._check('^ix', [('f0', 'i'), ('', 'V1')]) - self._check('^ixx', [('f0', 'i'), ('', 'V2')]) - self._check('^ixxx', [('f0', 'i'), ('', 'V3')]) - self._check('^ixxxx', [('f0', 'i'), ('', 'V4')]) - self._check('^i7x', [('f0', 'i'), ('', 'V7')]) - - def test_native_padding_3(self): - dt = np.dtype( - [('a', 'b'), ('b', 'i'), - ('sub', np.dtype('b,i')), ('c', 'i')], - align=True) - self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) - - dt = np.dtype( - [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), - ('e', 'b'), 
('sub', np.dtype('b,i', align=True))]) - self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) - - def test_padding_with_array_inside_struct(self): - dt = np.dtype( - [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), - ('d', 'i')], - align=True) - self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) - - def test_byteorder_inside_struct(self): - # The byte order after @T{=i} should be '=', not '@'. - # Check this by noting the absence of native alignment. - self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), - 'f1': ('i', 5)}) - - def test_intra_padding(self): - # Natively aligned sub-arrays may require some internal padding - align = np.dtype('i').alignment - def VV(n): - return 'V%d' % (align*(1 + (n-1)//align)) - - self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,))) - -class TestNewBufferProtocol(object): - def _check_roundtrip(self, obj): - obj = np.asarray(obj) - x = memoryview(obj) - y = np.asarray(x) - y2 = np.array(x) - assert_(not y.flags.owndata) - assert_(y2.flags.owndata) - - assert_equal(y.dtype, obj.dtype) - assert_equal(y.shape, obj.shape) - assert_array_equal(obj, y) - - assert_equal(y2.dtype, obj.dtype) - assert_equal(y2.shape, obj.shape) - assert_array_equal(obj, y2) - - def test_roundtrip(self): - x = np.array([1, 2, 3, 4, 5], dtype='i4') - self._check_roundtrip(x) - - x = np.array([[1, 2], [3, 4]], dtype=np.float64) - self._check_roundtrip(x) - - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] - self._check_roundtrip(x) - - dt = [('a', 'b'), - ('b', 'h'), - ('c', 'i'), - ('d', 'l'), - ('dx', 'q'), - ('e', 'B'), - ('f', 'H'), - ('g', 'I'), - ('h', 'L'), - ('hx', 'Q'), - ('i', np.single), - ('j', np.double), - ('k', np.longdouble), - ('ix', np.csingle), - ('jx', np.cdouble), - ('kx', np.clongdouble), - ('l', 'S4'), - ('m', 'U4'), - ('n', 'V3'), - ('o', '?'), - ('p', np.half), - ] - x = np.array( - [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)], - dtype=dt) - self._check_roundtrip(x) - - x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) - self._check_roundtrip(x) - - x = np.array([1, 2, 3], dtype='>i2') - self._check_roundtrip(x) - - x = np.array([1, 2, 3], dtype='') - x = np.zeros(4, dtype=dt) - self._check_roundtrip(x) - - def test_roundtrip_scalar(self): - # Issue #4015. 
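# An illustrative, standalone sketch (assumes NumPy >= 1.5 on Python 3; not
# part of the deleted test file) of the PEP 3118 round trip _check_roundtrip
# performs above: exporting through memoryview and re-wrapping preserves
# dtype, shape and data, with np.asarray reusing the buffer and np.array
# copying it.
import numpy as np
x = np.arange(6, dtype='i4').reshape(2, 3)
m = memoryview(x)                # export the array buffer via PEP 3118
y = np.asarray(m)                # wraps the exported buffer: no copy
assert not y.flags.owndata
y2 = np.array(m)                 # np.array makes an owning copy instead
assert y2.flags.owndata
assert y.dtype == x.dtype and y.shape == x.shape and (y == x).all()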
- self._check_roundtrip(0) - - def test_export_simple_1d(self): - x = np.array([1, 2, 3, 4, 5], dtype='i') - y = memoryview(x) - assert_equal(y.format, 'i') - assert_equal(y.shape, (5,)) - assert_equal(y.ndim, 1) - assert_equal(y.strides, (4,)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 4) - - def test_export_simple_nd(self): - x = np.array([[1, 2], [3, 4]], dtype=np.float64) - y = memoryview(x) - assert_equal(y.format, 'd') - assert_equal(y.shape, (2, 2)) - assert_equal(y.ndim, 2) - assert_equal(y.strides, (16, 8)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 8) - - def test_export_discontiguous(self): - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] - y = memoryview(x) - assert_equal(y.format, 'f') - assert_equal(y.shape, (3, 3)) - assert_equal(y.ndim, 2) - assert_equal(y.strides, (36, 4)) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 4) - - def test_export_record(self): - dt = [('a', 'b'), - ('b', 'h'), - ('c', 'i'), - ('d', 'l'), - ('dx', 'q'), - ('e', 'B'), - ('f', 'H'), - ('g', 'I'), - ('h', 'L'), - ('hx', 'Q'), - ('i', np.single), - ('j', np.double), - ('k', np.longdouble), - ('ix', np.csingle), - ('jx', np.cdouble), - ('kx', np.clongdouble), - ('l', 'S4'), - ('m', 'U4'), - ('n', 'V3'), - ('o', '?'), - ('p', np.half), - ] - x = np.array( - [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)], - dtype=dt) - y = memoryview(x) - assert_equal(y.shape, (1,)) - assert_equal(y.ndim, 1) - assert_equal(y.suboffsets, EMPTY) - - sz = sum([dtype(b).itemsize for a, b in dt]) - if dtype('l').itemsize == 4: - assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') - else: - assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides - if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): - assert_equal(y.strides, (sz,)) - assert_equal(y.itemsize, sz) - - def test_export_subarray(self): - x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) - y = memoryview(x) - assert_equal(y.format, 'T{(2,2)i:a:}') - assert_equal(y.shape, EMPTY) - assert_equal(y.ndim, 0) - assert_equal(y.strides, EMPTY) - assert_equal(y.suboffsets, EMPTY) - assert_equal(y.itemsize, 16) - - def test_export_endian(self): - x = np.array([1, 2, 3], dtype='>i') - y = memoryview(x) - if sys.byteorder == 'little': - assert_equal(y.format, '>i') - else: - assert_equal(y.format, 'i') - - x = np.array([1, 2, 3], dtype=' array) - - def __le__(self, array): - if isinstance(array, PriorityNdarray): - array = array.array - return PriorityNdarray(self.array <= array) - - def __ge__(self, array): - if isinstance(array, PriorityNdarray): - array = array.array - return PriorityNdarray(self.array >= array) - - def __eq__(self, array): - if isinstance(array, PriorityNdarray): - array = array.array - return PriorityNdarray(self.array == array) - - def __ne__(self, array): - if isinstance(array, PriorityNdarray): - array = array.array - return PriorityNdarray(self.array != array) - - -class TestArrayPriority(TestCase): - def test_lt(self): - l = np.asarray([0., -1., 1.], dtype=dtype) - r = np.asarray([0., 1., -1.], dtype=dtype) - lp = PriorityNdarray(l) - rp = PriorityNdarray(r) - res1 = l < r - res2 = l < rp - res3 = lp < r - res4 = lp < rp - - assert_array_equal(res1, res2.array) - assert_array_equal(res1, 
res3.array) - assert_array_equal(res1, res4.array) - assert_(isinstance(res1, np.ndarray)) - assert_(isinstance(res2, PriorityNdarray)) - assert_(isinstance(res3, PriorityNdarray)) - assert_(isinstance(res4, PriorityNdarray)) - - def test_gt(self): - l = np.asarray([0., -1., 1.], dtype=dtype) - r = np.asarray([0., 1., -1.], dtype=dtype) - lp = PriorityNdarray(l) - rp = PriorityNdarray(r) - res1 = l > r - res2 = l > rp - res3 = lp > r - res4 = lp > rp - - assert_array_equal(res1, res2.array) - assert_array_equal(res1, res3.array) - assert_array_equal(res1, res4.array) - assert_(isinstance(res1, np.ndarray)) - assert_(isinstance(res2, PriorityNdarray)) - assert_(isinstance(res3, PriorityNdarray)) - assert_(isinstance(res4, PriorityNdarray)) - - def test_le(self): - l = np.asarray([0., -1., 1.], dtype=dtype) - r = np.asarray([0., 1., -1.], dtype=dtype) - lp = PriorityNdarray(l) - rp = PriorityNdarray(r) - res1 = l <= r - res2 = l <= rp - res3 = lp <= r - res4 = lp <= rp - - assert_array_equal(res1, res2.array) - assert_array_equal(res1, res3.array) - assert_array_equal(res1, res4.array) - assert_(isinstance(res1, np.ndarray)) - assert_(isinstance(res2, PriorityNdarray)) - assert_(isinstance(res3, PriorityNdarray)) - assert_(isinstance(res4, PriorityNdarray)) - - def test_ge(self): - l = np.asarray([0., -1., 1.], dtype=dtype) - r = np.asarray([0., 1., -1.], dtype=dtype) - lp = PriorityNdarray(l) - rp = PriorityNdarray(r) - res1 = l >= r - res2 = l >= rp - res3 = lp >= r - res4 = lp >= rp - - assert_array_equal(res1, res2.array) - assert_array_equal(res1, res3.array) - assert_array_equal(res1, res4.array) - assert_(isinstance(res1, np.ndarray)) - assert_(isinstance(res2, PriorityNdarray)) - assert_(isinstance(res3, PriorityNdarray)) - assert_(isinstance(res4, PriorityNdarray)) - - def test_eq(self): - l = np.asarray([0., -1., 1.], dtype=dtype) - r = np.asarray([0., 1., -1.], dtype=dtype) - lp = PriorityNdarray(l) - rp = PriorityNdarray(r) - res1 = l == r - res2 = l == rp - res3 = lp == r - res4 = lp == rp - - assert_array_equal(res1, res2.array) - assert_array_equal(res1, res3.array) - assert_array_equal(res1, res4.array) - assert_(isinstance(res1, np.ndarray)) - assert_(isinstance(res2, PriorityNdarray)) - assert_(isinstance(res3, PriorityNdarray)) - assert_(isinstance(res4, PriorityNdarray)) - - def test_ne(self): - l = np.asarray([0., -1., 1.], dtype=dtype) - r = np.asarray([0., 1., -1.], dtype=dtype) - lp = PriorityNdarray(l) - rp = PriorityNdarray(r) - res1 = l != r - res2 = l != rp - res3 = lp != r - res4 = lp != rp - - assert_array_equal(res1, res2.array) - assert_array_equal(res1, res3.array) - assert_array_equal(res1, res4.array) - assert_(isinstance(res1, np.ndarray)) - assert_(isinstance(res2, PriorityNdarray)) - assert_(isinstance(res3, PriorityNdarray)) - assert_(isinstance(res4, PriorityNdarray)) - - -class TestConversion(TestCase): - def test_array_scalar_relational_operation(self): - #All integer - for dt1 in np.typecodes['AllInteger']: - assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) - - for dt2 in np.typecodes['AllInteger']: - assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - #Unsigned integers - for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % 
(dt1,)) - assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - - #unsigned vs signed - for dt2 in 'bhilqp': - assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - #Signed integers and floats - for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - - for dt2 in 'bhlqp' + np.typecodes['Float']: - assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) - - -class TestWhere(TestCase): - def test_basic(self): - dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128, - np.longdouble, np.clongdouble] - for dt in dts: - c = np.ones(53, dtype=np.bool) - assert_equal(np.where( c, dt(0), dt(1)), dt(0)) - assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) - assert_equal(np.where(True, dt(0), dt(1)), dt(0)) - assert_equal(np.where(False, dt(0), dt(1)), dt(1)) - d = np.ones_like(c).astype(dt) - e = np.zeros_like(d) - r = d.astype(dt) - c[7] = False - r[7] = e[7] - assert_equal(np.where(c, e, e), e) - assert_equal(np.where(c, d, e), r) - assert_equal(np.where(c, d, e[0]), r) - assert_equal(np.where(c, d[0], e), r) - assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) - assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) - assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) - assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) - assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) - assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) - assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) - - def test_exotic(self): - # object - assert_array_equal(np.where(True, None, None), np.array(None)) - # zero sized - m = np.array([], dtype=bool).reshape(0, 3) - b = np.array([], dtype=np.float64).reshape(0, 3) - assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) - - # object cast - d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, - 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, - 1.267, 0.229, -1.39, 0.487]) - nan = float('NaN') - e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, - 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], - dtype=object); - m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool) - - r = e[:] - r[np.where(m)] = d[np.where(m)] - assert_array_equal(np.where(m, d, e), r) - - r = e[:] - r[np.where(~m)] = d[np.where(~m)] - assert_array_equal(np.where(m, e, d), r) - - assert_array_equal(np.where(m, e, e), e) - - # minimal dtype result with NaN scalar (e.g required by pandas) - d = np.array([1., 2.], dtype=np.float32) - e = float('NaN') - assert_equal(np.where(True, d, e).dtype, np.float32) - e = float('Infinity') - assert_equal(np.where(True, d, e).dtype, np.float32) - e = float('-Infinity') - assert_equal(np.where(True, d, e).dtype, np.float32) - # also check upcast - e = float(1e150) - assert_equal(np.where(True, d, e).dtype, np.float64) - - def test_ndim(self): - 
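# An illustrative, standalone sketch (assumes NumPy >= 1.7; not part of the
# deleted test file) of the broadcasting behaviour test_ndim checks below:
# a 1-d condition lifted to a column selects whole rows from the two
# operands at a time.
import numpy as np
c = np.array([True, False])
a = np.zeros((2, 4))
b = np.ones((2, 4))
r = np.where(c[:, np.newaxis], a, b)    # condition broadcast across columns
assert (r[0] == a[0]).all() and (r[1] == b[1]).all()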
c = [True, False] - a = np.zeros((2, 25)) - b = np.ones((2, 25)) - r = np.where(np.array(c)[:,np.newaxis], a, b) - assert_array_equal(r[0], a[0]) - assert_array_equal(r[1], b[0]) - - a = a.T - b = b.T - r = np.where(c, a, b) - assert_array_equal(r[:,0], a[:,0]) - assert_array_equal(r[:,1], b[:,0]) - - def test_dtype_mix(self): - c = np.array([False, True, False, False, False, False, True, False, - False, False, True, False]) - a = np.uint32(1) - b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], - dtype=np.float64) - r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], - dtype=np.float64) - assert_equal(np.where(c, a, b), r) - - a = a.astype(np.float32) - b = b.astype(np.int64) - assert_equal(np.where(c, a, b), r) - - # non bool mask - c = c.astype(np.int) - c[c != 0] = 34242324 - assert_equal(np.where(c, a, b), r) - # invert - tmpmask = c != 0 - c[c == 0] = 41247212 - c[tmpmask] = 0 - assert_equal(np.where(c, b, a), r) - - def test_foreign(self): - c = np.array([False, True, False, False, False, False, True, False, - False, False, True, False]) - r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], - dtype=np.float64) - a = np.ones(1, dtype='>i4') - b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], - dtype=np.float64) - assert_equal(np.where(c, a, b), r) - - b = b.astype('>f8') - assert_equal(np.where(c, a, b), r) - - a = a.astype('i4') - assert_equal(np.where(c, a, b), r) - - def test_error(self): - c = [True, True] - a = np.ones((4, 5)) - b = np.ones((5, 5)) - assert_raises(ValueError, np.where, c, a, a) - assert_raises(ValueError, np.where, c[0], a, b) - - def test_string(self): - # gh-4778 check strings are properly filled with nulls - a = np.array("abc") - b = np.array("x" * 753) - assert_equal(np.where(True, a, b), "abc") - assert_equal(np.where(False, b, a), "abc") - - # check native datatype sized strings - a = np.array("abcd") - b = np.array("x" * 8) - assert_equal(np.where(True, a, b), "abcd") - assert_equal(np.where(False, b, a), "abcd") - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray_assignment.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray_assignment.py deleted file mode 100644 index 65a09086bc2a5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_multiarray_assignment.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import TestCase - -ndims = 2 -size = 10 -shape = tuple([size] * ndims) - - -def _indices_for_nelems(nelems): - """Returns slices of length nelems, from start onwards, in direction sign.""" - - if nelems == 0: - return [size // 2] # int index - - res = [] - for step in (1, 2): - for sign in (-1, 1): - start = size // 2 - nelems * step * sign // 2 - stop = start + nelems * step * sign - res.append(slice(start, stop, step * sign)) - - return res - - -def _indices_for_axis(): - """Returns (src, dst) pairs of indices.""" - - res = [] - for nelems in (0, 2, 3): - ind = _indices_for_nelems(nelems) - - # no itertools.product available in Py2.4 - res.extend([(a, b) for a in ind for b in ind]) # all assignments of size "nelems" - - return res - - -def _indices(ndims): - """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... 
) index pairs.""" - - ind = _indices_for_axis() - - # no itertools.product available in Py2.4 - - res = [[]] - for i in range(ndims): - newres = [] - for elem in ind: - for others in res: - newres.append([elem] + others) - res = newres - - return res - - -def _check_assignment(srcidx, dstidx): - """Check assignment arr[dstidx] = arr[srcidx] works.""" - - arr = np.arange(np.product(shape)).reshape(shape) - - cpy = arr.copy() - - cpy[dstidx] = arr[srcidx] - arr[dstidx] = arr[srcidx] - - assert np.all(arr == cpy), 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx) - - -def test_overlapping_assignments(): - """Test automatically generated assignments which overlap in memory.""" - - inds = _indices(ndims) - - for ind in inds: - srcidx = tuple([a[0] for a in ind]) - dstidx = tuple([a[1] for a in ind]) - - yield _check_assignment, srcidx, dstidx diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_nditer.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_nditer.py deleted file mode 100644 index 0055c038b7494..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_nditer.py +++ /dev/null @@ -1,2630 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys, warnings - -import numpy as np -from numpy import array, arange, nditer, all -from numpy.compat import asbytes, sixu -from numpy.testing import * -from numpy.core.multiarray_tests import test_nditer_too_large - - -def iter_multi_index(i): - ret = [] - while not i.finished: - ret.append(i.multi_index) - i.iternext() - return ret - -def iter_indices(i): - ret = [] - while not i.finished: - ret.append(i.index) - i.iternext() - return ret - -def iter_iterindices(i): - ret = [] - while not i.finished: - ret.append(i.iterindex) - i.iternext() - return ret - -def test_iter_refcount(): - # Make sure the iterator doesn't leak - - # Basic - a = arange(6) - dt = np.dtype('f4').newbyteorder() - rc_a = sys.getrefcount(a) - rc_dt = sys.getrefcount(dt) - it = nditer(a, [], - [['readwrite', 'updateifcopy']], - casting='unsafe', - op_dtypes=[dt]) - assert_(not it.iterationneedsapi) - assert_(sys.getrefcount(a) > rc_a) - assert_(sys.getrefcount(dt) > rc_dt) - it = None - assert_equal(sys.getrefcount(a), rc_a) - assert_equal(sys.getrefcount(dt), rc_dt) - - # With a copy - a = arange(6, dtype='f4') - dt = np.dtype('f4') - rc_a = sys.getrefcount(a) - rc_dt = sys.getrefcount(dt) - it = nditer(a, [], - [['readwrite']], - op_dtypes=[dt]) - rc2_a = sys.getrefcount(a) - rc2_dt = sys.getrefcount(dt) - it2 = it.copy() - assert_(sys.getrefcount(a) > rc2_a) - assert_(sys.getrefcount(dt) > rc2_dt) - it = None - assert_equal(sys.getrefcount(a), rc2_a) - assert_equal(sys.getrefcount(dt), rc2_dt) - it2 = None - assert_equal(sys.getrefcount(a), rc_a) - assert_equal(sys.getrefcount(dt), rc_dt) - -def test_iter_best_order(): - # The iterator should always find the iteration order - # with increasing memory addresses - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, [], [['readonly']]) - assert_equal([x for x in i], a) - # Fortran-order - i = nditer(aview.T, [], [['readonly']]) - 
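# An illustrative, standalone sketch (assumes NumPy >= 1.6; not part of the
# deleted test file) of the ordering contract test_iter_best_order asserts:
# without an explicit order, nditer walks elements in increasing memory
# address, so an array and its transpose (same buffer) are visited
# identically, while order='C' follows the logical shape instead.
import numpy as np
a = np.arange(6).reshape(2, 3)          # C-contiguous buffer holding 0..5
assert [int(v) for v in np.nditer(a)] == [0, 1, 2, 3, 4, 5]
assert [int(v) for v in np.nditer(a.T)] == [0, 1, 2, 3, 4, 5]
assert [int(v) for v in np.nditer(a.T, order='C')] == [0, 3, 1, 4, 2, 5]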
assert_equal([x for x in i], a) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) - assert_equal([x for x in i], a) - -def test_iter_c_order(): - # Test forcing C order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='C') - assert_equal([x for x in i], aview.ravel(order='C')) - # Fortran-order - i = nditer(aview.T, order='C') - assert_equal([x for x in i], aview.T.ravel(order='C')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='C') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='C')) - -def test_iter_f_order(): - # Test forcing F order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='F') - assert_equal([x for x in i], aview.ravel(order='F')) - # Fortran-order - i = nditer(aview.T, order='F') - assert_equal([x for x in i], aview.T.ravel(order='F')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='F') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='F')) - -def test_iter_c_or_f_order(): - # Test forcing any contiguous (C or F) order - - # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - a = arange(np.prod(shape)) - # Test each combination of positive and negative strides - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, order='A') - assert_equal([x for x in i], aview.ravel(order='A')) - # Fortran-order - i = nditer(aview.T, order='A') - assert_equal([x for x in i], aview.T.ravel(order='A')) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), order='A') - assert_equal([x for x in i], - aview.swapaxes(0, 1).ravel(order='A')) - -def test_iter_best_order_multi_index_1d(): - # The multi-indices should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a, ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) - # 1D reversed order - i = nditer(a[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) - -def test_iter_best_order_multi_index_2d(): - # The multi-indices should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], 
[['readonly']]) - assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) - i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) - -def test_iter_best_order_multi_index_3d(): - # The multi-indices should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), - (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), - (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), - (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), - (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), - (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), - (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), - (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), - (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) - -def test_iter_best_order_c_index_1d(): - # The C index should be correct with any reordering - - a = arange(4) - # 1D order - i = 
nditer(a, ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3]) - # 1D reversed order - i = nditer(a[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 2, 1, 0]) - -def test_iter_best_order_c_index_2d(): - # The C index should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) - i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) - -def test_iter_best_order_c_index_3d(): - # The C index should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['c_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) - -def test_iter_best_order_f_index_1d(): - # The Fortran index should be correct with any reordering - - a = arange(4) - # 1D order - i = nditer(a, ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 1, 2, 3]) - # 1D reversed order - i = nditer(a[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [3, 2, 1, 0]) - -def test_iter_best_order_f_index_2d(): - # The Fortran index should be correct with any reordering - - a = arange(6) - # 2D C-order - i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) - # 2D Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F'), - ['f_index'], [['readonly']]) - 
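As a side note on what these index-tracking tests exercise: nditer walks the array in whatever order is most efficient for its memory layout, while a tracked index reports each element's flat position in the requested (C or Fortran) layout. A minimal sketch of that behaviour, using only the public numpy API shown in these tests (not part of the patch):

    import numpy as np
    a = np.arange(6).reshape(2, 3).copy(order='F')
    it = np.nditer(a, ['c_index'], [['readonly']])
    indices = []
    while not it.finished:
        indices.append(it.index)   # flat C-order position of the current element
        it.iternext()
    # Iteration follows the Fortran memory layout, so the C indices
    # come out permuted: indices == [0, 3, 1, 4, 2, 5]
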
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) - # 2D reversed C-order - i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) - i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) - i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) - # 2D reversed Fortran-order - i = nditer(a.reshape(2, 3).copy(order='F')[::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) - i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) - i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) - -def test_iter_best_order_f_index_3d(): - # The Fortran index should be correct with any reordering - - a = arange(12) - # 3D C-order - i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) - # 3D Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F'), - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - # 3D reversed C-order - i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) - i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) - # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['f_index'], [['readonly']]) - assert_equal(iter_indices(i), - [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) - -def test_iter_no_inner_full_coalesce(): - # Check no_inner iterators which coalesce into a single inner loop - - for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: - size = np.prod(shape) - a = arange(size) - # Test each combination of forward and backwards indexing - for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) - for bit in range(len(shape)): - if ((2**bit)&dirs): - dirs_index[bit] = slice(None, None, -1) - dirs_index = tuple(dirs_index) - - aview = a.reshape(shape)[dirs_index] - # C-order - i = nditer(aview, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - # Fortran-order - i = nditer(aview.T, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - # Other order - if len(shape) > 2: - i = nditer(aview.swapaxes(0, 1), - ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (size,)) - -def test_iter_no_inner_dim_coalescing(): - # Check no_inner iterators whose dimensions may not coalesce completely - - # Skipping the last element in a dimension prevents coalescing - # with the next-bigger dimension - a = arange(24).reshape(2, 3, 4)[:,:, :-1] - i = nditer(a, 
['external_loop'], [['readonly']]) - assert_equal(i.ndim, 2) - assert_equal(i[0].shape, (3,)) - a = arange(24).reshape(2, 3, 4)[:, :-1,:] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 2) - assert_equal(i[0].shape, (8,)) - a = arange(24).reshape(2, 3, 4)[:-1,:,:] - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (12,)) - - # Even with lots of 1-sized dimensions, should still coalesce - a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) - i = nditer(a, ['external_loop'], [['readonly']]) - assert_equal(i.ndim, 1) - assert_equal(i[0].shape, (24,)) - -def test_iter_dim_coalescing(): - # Check that the correct number of dimensions are coalesced - - # Tracking a multi-index disables coalescing - a = arange(24).reshape(2, 3, 4) - i = nditer(a, ['multi_index'], [['readonly']]) - assert_equal(i.ndim, 3) - - # A tracked index can allow coalescing if it's compatible with the array - a3d = arange(24).reshape(2, 3, 4) - i = nditer(a3d, ['c_index'], [['readonly']]) - assert_equal(i.ndim, 1) - i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) - assert_equal(i.ndim, 3) - i = nditer(a3d.T, ['c_index'], [['readonly']]) - assert_equal(i.ndim, 3) - i = nditer(a3d.T, ['f_index'], [['readonly']]) - assert_equal(i.ndim, 1) - i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) - assert_equal(i.ndim, 3) - - # When C or F order is forced, coalescing may still occur - a3d = arange(24).reshape(2, 3, 4) - i = nditer(a3d, order='C') - assert_equal(i.ndim, 1) - i = nditer(a3d.T, order='C') - assert_equal(i.ndim, 3) - i = nditer(a3d, order='F') - assert_equal(i.ndim, 3) - i = nditer(a3d.T, order='F') - assert_equal(i.ndim, 1) - i = nditer(a3d, order='A') - assert_equal(i.ndim, 1) - i = nditer(a3d.T, order='A') - assert_equal(i.ndim, 1) - -def test_iter_broadcasting(): - # Standard NumPy broadcasting rules - - # 1D with scalar - i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (6,)) - - # 2D with scalar - i = nditer([arange(6).reshape(2, 3), np.int32(2)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - # 2D with 1D - i = nditer([arange(6).reshape(2, 3), arange(3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - i = nditer([arange(2).reshape(2, 1), arange(3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - # 2D with 2D - i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 6) - assert_equal(i.shape, (2, 3)) - - # 3D with scalar - i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 1D - i = nditer([arange(3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 2D - i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) - 
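The broadcasting checks here follow the standard ufunc rules: operand shapes are aligned from the right and size-1 or missing axes are stretched. A small self-contained example of what nditer does with mismatched shapes (plain numpy, not taken from the patch):

    import numpy as np
    a = np.arange(6).reshape(2, 3)
    b = np.arange(3)
    it = np.nditer([a, b], ['multi_index'], [['readonly']]*2)
    print(it.shape)   # (2, 3): b is broadcast along the first axis
    print([int(x) + int(y) for x, y in it])   # [0, 2, 4, 3, 5, 7]
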
assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - # 3D with 3D - i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), - arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*3) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], - ['multi_index'], [['readonly']]*2) - assert_equal(i.itersize, 24) - assert_equal(i.shape, (4, 2, 3)) - -def test_iter_itershape(): - # Check that allocated outputs work with a specified shape - a = np.arange(6, dtype='i2').reshape(2, 3) - i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (2, 3, 4)) - assert_equal(i.operands[1].strides, (24, 8, 2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (3, 2, 4)) - assert_equal(i.operands[1].strides, (8, 24, 2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], - order='F', - op_axes=[[0, 1, None], None], - itershape=(-1, -1, 4)) - assert_equal(i.operands[1].shape, (3, 2, 4)) - assert_equal(i.operands[1].strides, (2, 6, 12)) - - # If we specify 1 in the itershape, it shouldn't allow broadcasting - # of that dimension to a bigger value - assert_raises(ValueError, nditer, [a, None], [], - [['readonly'], ['writeonly', 'allocate']], - op_axes=[[0, 1, None], None], - itershape=(-1, 1, 4)) - # Test bug that for no op_axes but itershape, they are NULLed correctly - i = np.nditer([np.ones(2), None, None], itershape=(2,)) - -def test_iter_broadcasting_errors(): - # Check that errors are thrown for bad broadcasting shapes - - # 1D with 1D - assert_raises(ValueError, nditer, [arange(2), arange(3)], - [], [['readonly']]*2) - # 2D with 1D - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(2)], - [], [['readonly']]*2) - # 2D with 2D - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], - [], [['readonly']]*2) - assert_raises(ValueError, nditer, - [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], - [], [['readonly']]*2) - # 3D with 3D - assert_raises(ValueError, nditer, - [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) - assert_raises(ValueError, nditer, - [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) - - # Verify that the error message mentions the right shapes - try: - i = nditer([arange(2).reshape(1, 2, 1), - arange(3).reshape(1, 3), - arange(6).reshape(2, 3)], - [], - [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) - assert_(False, 'Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain the shape of the 3rd operand - assert_(msg.find('(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) - # The message should contain the broadcast shape - assert_(msg.find('(1,2,3)') >= 0, - 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) - - try: - i = nditer([arange(6).reshape(2, 3), arange(2)], [], 
- [['readonly'], ['readonly']], - op_axes=[[0, 1], [0, np.newaxis]], - itershape=(4, 3)) - assert_(False, 'Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain "shape->remappedshape" for each operand - assert_(msg.find('(2,3)->(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) - assert_(msg.find('(2,)->(2,newaxis)') >= 0, - ('Message "%s" doesn\'t contain remapped operand shape' + - '(2,)->(2,newaxis)') % msg) - # The message should contain the itershape parameter - assert_(msg.find('(4,3)') >= 0, - 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) - - try: - i = nditer([np.zeros((2, 1, 1)), np.zeros((2,))], - [], - [['writeonly', 'no_broadcast'], ['readonly']]) - assert_(False, 'Should have raised a broadcast error') - except ValueError as e: - msg = str(e) - # The message should contain the shape of the bad operand - assert_(msg.find('(2,1,1)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) - # The message should contain the broadcast shape - assert_(msg.find('(2,1,2)') >= 0, - 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) - -def test_iter_flags_errors(): - # Check that bad combinations of flags produce errors - - a = arange(6) - - # Not enough operands - assert_raises(ValueError, nditer, [], [], []) - # Too many operands - assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) - # Bad global flag - assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) - # Bad op flag - assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) - # Bad order parameter - assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') - # Bad casting parameter - assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') - # op_flags must match ops - assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) - # Cannot track both a C and an F index - assert_raises(ValueError, nditer, a, - ['c_index', 'f_index'], [['readonly']]) - # Inner iteration and multi-indices/indices are incompatible - assert_raises(ValueError, nditer, a, - ['external_loop', 'multi_index'], [['readonly']]) - assert_raises(ValueError, nditer, a, - ['external_loop', 'c_index'], [['readonly']]) - assert_raises(ValueError, nditer, a, - ['external_loop', 'f_index'], [['readonly']]) - # Must specify exactly one of readwrite/readonly/writeonly per operand - assert_raises(ValueError, nditer, a, [], [[]]) - assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) - assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) - assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) - assert_raises(ValueError, nditer, a, - [], [['readonly', 'writeonly', 'readwrite']]) - # Python scalars are always readonly - assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) - assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) - # Array scalars are always readonly - assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) - assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) - # Check readonly array - a.flags.writeable = False - assert_raises(ValueError, nditer, a, [], [['writeonly']]) - assert_raises(ValueError, nditer, a, [], [['readwrite']]) - a.flags.writeable = True - # Multi-indices available only with the multi_index flag - i = nditer(arange(6), [], [['readonly']]) - assert_raises(ValueError, lambda i:i.multi_index, i) - # Index available only with an index flag - 
assert_raises(ValueError, lambda i:i.index, i)
-    # Assigning multi_index/index is incompatible with buffering or external_loop
-    def assign_multi_index(i):
-        i.multi_index = (0,)
-    def assign_index(i):
-        i.index = 0
-    def assign_iterindex(i):
-        i.iterindex = 0
-    def assign_iterrange(i):
-        i.iterrange = (0, 1)
-    i = nditer(arange(6), ['external_loop'])
-    assert_raises(ValueError, assign_multi_index, i)
-    assert_raises(ValueError, assign_index, i)
-    assert_raises(ValueError, assign_iterindex, i)
-    assert_raises(ValueError, assign_iterrange, i)
-    i = nditer(arange(6), ['buffered'])
-    assert_raises(ValueError, assign_multi_index, i)
-    assert_raises(ValueError, assign_index, i)
-    assert_raises(ValueError, assign_iterrange, i)
-    # Can't iterate if size is zero
-    assert_raises(ValueError, nditer, np.array([]))
-
-def test_iter_slice():
-    a, b, c = np.arange(3), np.arange(3), np.arange(3.)
-    i = nditer([a, b, c], [], ['readwrite'])
-    i[0:2] = (3, 3)
-    assert_equal(a, [3, 1, 2])
-    assert_equal(b, [3, 1, 2])
-    assert_equal(c, [0, 1, 2])
-    i[1] = 12
-    assert_equal(i[0:2], [3, 12])
-
-def test_iter_nbo_align_contig():
-    # Check that byte order, alignment, and contig changes work
-
-    # Byte order change by requesting a specific dtype
-    a = np.arange(6, dtype='f4')
-    au = a.byteswap().newbyteorder()
-    assert_(a.dtype.byteorder != au.dtype.byteorder)
-    i = nditer(au, [], [['readwrite', 'updateifcopy']],
-               casting='equiv',
-               op_dtypes=[np.dtype('f4')])
-    assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
-    assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
-    assert_equal(i.operands[0], a)
-    i.operands[0][:] = 2
-    i = None
-    assert_equal(au, [2]*6)
-
-    # Byte order change by requesting NBO
-    a = np.arange(6, dtype='f4')
-    au = a.byteswap().newbyteorder()
-    assert_(a.dtype.byteorder != au.dtype.byteorder)
-    i = nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], casting='equiv')
-    assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
-    assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
-    assert_equal(i.operands[0], a)
-    i.operands[0][:] = 2
-    i = None
-    assert_equal(au, [2]*6)
-
-    # Unaligned input
-    a = np.zeros((6*4+1,), dtype='i1')[1:]
-    a.dtype = 'f4'
-    a[:] = np.arange(6, dtype='f4')
-    assert_(not a.flags.aligned)
-    # Without 'aligned', shouldn't copy
-    i = nditer(a, [], [['readonly']])
-    assert_(not i.operands[0].flags.aligned)
-    assert_equal(i.operands[0], a)
-    # With 'aligned', should make a copy
-    i = nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']])
-    assert_(i.operands[0].flags.aligned)
-    assert_equal(i.operands[0], a)
-    i.operands[0][:] = 3
-    i = None
-    assert_equal(a, [3]*6)
-
-    # Discontiguous input
-    a = arange(12)
-    # If it is contiguous, shouldn't copy
-    i = nditer(a[:6], [], [['readonly']])
-    assert_(i.operands[0].flags.contiguous)
-    assert_equal(i.operands[0], a[:6])
-    # If it isn't contiguous, should buffer
-    i = nditer(a[::2], ['buffered', 'external_loop'],
-               [['readonly', 'contig']],
-               buffersize=10)
-    assert_(i[0].flags.contiguous)
-    assert_equal(i[0], a[::2])
-
-def test_iter_array_cast():
-    # Check that arrays are cast as requested
-
-    # No cast 'f4' -> 'f4'
-    a = np.arange(6, dtype='f4').reshape(2, 3)
-    i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
-    assert_equal(i.operands[0], a)
-    assert_equal(i.operands[0].dtype, np.dtype('f4'))
-
-    # Byte-order cast '<f4' -> '>f4'
-    a = np.arange(6, dtype='<f4').reshape(2, 3)
-    i = nditer(a, [], [['readwrite', 'updateifcopy']],
-               casting='equiv',
-               op_dtypes=[np.dtype('>f4')])
-    assert_equal(i.operands[0], a)
-    assert_equal(i.operands[0].dtype, np.dtype('>f4'))
-
-    # Safe case 'f4' -> 'f8'
-    a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
-    i = nditer(a, [], [['readonly', 'copy']],
-               casting='safe',
-               op_dtypes=[np.dtype('f8')])
-    assert_equal(i.operands[0], a)
-    assert_equal(i.operands[0].dtype, np.dtype('f8'))
-    # The memory layout of the temporary should match a (a is (48,4,16))
-    # except negative strides get flipped to positive strides.
-    assert_equal(i.operands[0].strides, (96, 8, 32))
-    a = a[::-1,:, ::-1]
-    i = nditer(a, [], [['readonly', 'copy']],
-               casting='safe',
-               op_dtypes=[np.dtype('f8')])
-    assert_equal(i.operands[0], a)
-    assert_equal(i.operands[0].dtype, np.dtype('f8'))
-    assert_equal(i.operands[0].strides, (96, 8, 32))
-
-    # Same-kind cast 'f8' -> 'f4' -> 'f8'
-    a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
-    i = nditer(a, [],
-               [['readwrite', 'updateifcopy']],
-               casting='same_kind',
-               op_dtypes=[np.dtype('f4')])
-    assert_equal(i.operands[0], a)
-    assert_equal(i.operands[0].dtype, np.dtype('f4'))
-    assert_equal(i.operands[0].strides, (4, 16, 48))
-    # Check that UPDATEIFCOPY is activated
-    i.operands[0][2, 1, 1] = -12.5
-    assert_(a[2, 1, 1] != -12.5)
-    i = None
-    assert_equal(a[2, 1, 1], -12.5)
-
-    a = np.arange(6, dtype='i4')[::-2]
-    i = nditer(a, [],
-               [['writeonly', 'updateifcopy']],
-               casting='unsafe',
-               op_dtypes=[np.dtype('f4')])
-    assert_equal(i.operands[0].dtype, np.dtype('f4'))
-    # Even though the stride was negative in 'a', it
-    # becomes positive in the temporary
-    assert_equal(i.operands[0].strides, (4,))
-    i.operands[0][:] = [1, 2, 3]
-    i = None
-    assert_equal(a, [1, 2, 3])
-
-def test_iter_array_cast_errors():
-    # Check that invalid casts are caught
-
-    # Need to enable copying for casts to occur
-    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
-                  [['readonly']], op_dtypes=[np.dtype('f8')])
-    # Also need to allow casting for casts to occur
-    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
-                  [['readonly', 'copy']], casting='no',
-                  op_dtypes=[np.dtype('f8')])
-    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
-                  [['readonly', 'copy']], casting='equiv',
-                  op_dtypes=[np.dtype('f8')])
-    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
-                  [['writeonly', 'updateifcopy']],
-                  casting='no',
-                  op_dtypes=[np.dtype('f4')])
-    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
-                  [['writeonly', 'updateifcopy']],
-                  casting='equiv',
-                  op_dtypes=[np.dtype('f4')])
-    # '<f4' -> '>f4' should not work with casting='no'
-    assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
-                  [['readonly', 'copy']], casting='no',
-                  op_dtypes=[np.dtype('>f4')])
-    # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
-    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
-                  [['readwrite', 'updateifcopy']],
-                  casting='safe',
-                  op_dtypes=[np.dtype('f8')])
-    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
-                  [['readwrite', 'updateifcopy']],
-                  casting='safe',
-                  op_dtypes=[np.dtype('f4')])
-    # 'f4' -> 'i4' is neither a safe nor a same-kind cast
-    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
-                  [['readonly', 'copy']],
-                  casting='same_kind',
-                  op_dtypes=[np.dtype('i4')])
-    assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
-                  [['writeonly', 'updateifcopy']],
-                  casting='same_kind',
-                  op_dtypes=[np.dtype('f4')])
-
-def test_iter_scalar_cast():
-    # Check that scalars are cast as requested
-
-    # No cast 'f4' -> 'f4'
-    i = nditer(np.float32(2.5), [], [['readonly']],
-               op_dtypes=[np.dtype('f4')])
-    assert_equal(i.dtypes[0], np.dtype('f4'))
-    assert_equal(i.value.dtype, np.dtype('f4'))
-    assert_equal(i.value, 2.5)
-    # Safe cast 'f4' -> 'f8'
-    i = nditer(np.float32(2.5), [], -
[['readonly', 'copy']], - casting='safe', - op_dtypes=[np.dtype('f8')]) - assert_equal(i.dtypes[0], np.dtype('f8')) - assert_equal(i.value.dtype, np.dtype('f8')) - assert_equal(i.value, 2.5) - # Same-kind cast 'f8' -> 'f4' - i = nditer(np.float64(2.5), [], - [['readonly', 'copy']], - casting='same_kind', - op_dtypes=[np.dtype('f4')]) - assert_equal(i.dtypes[0], np.dtype('f4')) - assert_equal(i.value.dtype, np.dtype('f4')) - assert_equal(i.value, 2.5) - # Unsafe cast 'f8' -> 'i4' - i = nditer(np.float64(3.0), [], - [['readonly', 'copy']], - casting='unsafe', - op_dtypes=[np.dtype('i4')]) - assert_equal(i.dtypes[0], np.dtype('i4')) - assert_equal(i.value.dtype, np.dtype('i4')) - assert_equal(i.value, 3) - # Readonly scalars may be cast even without setting COPY or BUFFERED - i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) - assert_equal(i[0].dtype, np.dtype('f8')) - assert_equal(i[0], 3.) - -def test_iter_scalar_cast_errors(): - # Check that invalid casts are caught - - # Need to allow copying/buffering for write casts of scalars to occur - assert_raises(TypeError, nditer, np.float32(2), [], - [['readwrite']], op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, 2.5, [], - [['readwrite']], op_dtypes=[np.dtype('f4')]) - # 'f8' -> 'f4' isn't a safe cast if the value would overflow - assert_raises(TypeError, nditer, np.float64(1e60), [], - [['readonly']], - casting='safe', - op_dtypes=[np.dtype('f4')]) - # 'f4' -> 'i4' is neither a safe nor a same-kind cast - assert_raises(TypeError, nditer, np.float32(2), [], - [['readonly']], - casting='same_kind', - op_dtypes=[np.dtype('i4')]) - -def test_iter_object_arrays_basic(): - # Check that object arrays work - - obj = {'a':3,'b':'d'} - a = np.array([[1, 2, 3], None, obj, None], dtype='O') - rc = sys.getrefcount(obj) - - # Need to allow references for object arrays - assert_raises(TypeError, nditer, a) - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a, ['refs_ok'], ['readonly']) - vals = [x[()] for x in i] - assert_equal(np.array(vals, dtype='O'), a) - vals, i, x = [None]*3 - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], - ['readonly'], order='C') - assert_(i.iterationneedsapi) - vals = [x[()] for x in i] - assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) - vals, i, x = [None]*3 - assert_equal(sys.getrefcount(obj), rc) - - i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], - ['readwrite'], order='C') - for x in i: - x[...] = None - vals, i, x = [None]*3 - assert_equal(sys.getrefcount(obj), rc-1) - assert_equal(a, np.array([None]*4, dtype='O')) - -def test_iter_object_arrays_conversions(): - # Conversions to/from objects - a = np.arange(6, dtype='O') - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='i4') - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - a = np.arange(6, dtype='i4') - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='O') - for x in i: - x[...] += 1 - assert_equal(a, np.arange(6)+1) - - # Non-contiguous object array - a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) - a = a['a'] - a[:] = np.arange(6) - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='i4') - for x in i: - x[...] 
+= 1 - assert_equal(a, np.arange(6)+1) - - #Non-contiguous value array - a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) - a = a['a'] - a[:] = np.arange(6) + 98172488 - i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], - casting='unsafe', op_dtypes='O') - ob = i[0][()] - rc = sys.getrefcount(ob) - for x in i: - x[...] += 1 - assert_equal(sys.getrefcount(ob), rc-1) - assert_equal(a, np.arange(6)+98172489) - -def test_iter_common_dtype(): - # Check that the iterator finds a common data type correctly - - i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('f8')); - assert_equal(i.dtypes[1], np.dtype('f8')); - i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('f8')); - assert_equal(i.dtypes[1], np.dtype('f8')); - i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='same_kind') - assert_equal(i.dtypes[0], np.dtype('f4')); - assert_equal(i.dtypes[1], np.dtype('f4')); - i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('u4')); - assert_equal(i.dtypes[1], np.dtype('u4')); - i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], - ['common_dtype'], - [['readonly', 'copy']]*2, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('i8')); - assert_equal(i.dtypes[1], np.dtype('i8')); - i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), - array([2j], dtype='c8'), array([9], dtype='f8')], - ['common_dtype'], - [['readonly', 'copy']]*4, - casting='safe') - assert_equal(i.dtypes[0], np.dtype('c16')); - assert_equal(i.dtypes[1], np.dtype('c16')); - assert_equal(i.dtypes[2], np.dtype('c16')); - assert_equal(i.dtypes[3], np.dtype('c16')); - assert_equal(i.value, (3, -12, 2j, 9)) - - # When allocating outputs, other outputs aren't factored in - i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], - [['readonly', 'copy'], - ['writeonly', 'allocate'], - ['writeonly']], - casting='safe') - assert_equal(i.dtypes[0], np.dtype('i4')); - assert_equal(i.dtypes[1], np.dtype('i4')); - assert_equal(i.dtypes[2], np.dtype('c16')); - # But, if common data types are requested, they are - i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], - ['common_dtype'], - [['readonly', 'copy'], - ['writeonly', 'allocate'], - ['writeonly']], - casting='safe') - assert_equal(i.dtypes[0], np.dtype('c16')); - assert_equal(i.dtypes[1], np.dtype('c16')); - assert_equal(i.dtypes[2], np.dtype('c16')); - -def test_iter_op_axes(): - # Check that custom axes work - - # Reverse the axes - a = arange(6).reshape(2, 3) - i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) - assert_(all([x==y for (x, y) in i])) - a = arange(24).reshape(2, 3, 4) - i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) - assert_(all([x==y for (x, y) in i])) - - # Broadcast 1D to any dimension - a = arange(1, 31).reshape(2, 3, 5) - b = arange(1, 3) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) - b = arange(1, 4) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) - b = arange(1, 6) - i = nditer([a, 
b], [], [['readonly']]*2,
-               op_axes=[None, [np.newaxis, np.newaxis, 0]])
-    assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
-
-    # Inner product-style broadcasting
-    a = arange(24).reshape(2, 3, 4)
-    b = arange(40).reshape(5, 2, 4)
-    i = nditer([a, b], ['multi_index'], [['readonly']]*2,
-               op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
-    assert_equal(i.shape, (2, 3, 5, 2))
-
-    # Matrix product-style broadcasting
-    a = arange(12).reshape(3, 4)
-    b = arange(20).reshape(4, 5)
-    i = nditer([a, b], ['multi_index'], [['readonly']]*2,
-               op_axes=[[0, -1], [-1, 1]])
-    assert_equal(i.shape, (3, 5))
-
-def test_iter_op_axes_errors():
-    # Check that custom axes throw errors for bad inputs
-
-    # Wrong number of items in op_axes
-    a = arange(6).reshape(2, 3)
-    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
-                  op_axes=[[0], [1], [0]])
-    # Out of bounds items in op_axes
-    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
-                  op_axes=[[2, 1], [0, 1]])
-    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
-                  op_axes=[[0, 1], [2, -1]])
-    # Duplicate items in op_axes
-    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
-                  op_axes=[[0, 0], [0, 1]])
-    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
-                  op_axes=[[0, 1], [1, 1]])
-
-    # Different sized arrays in op_axes
-    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
-                  op_axes=[[0, 1], [0, 1, 0]])
-
-    # Non-broadcastable dimensions in the result
-    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
-                  op_axes=[[0, 1], [1, 0]])
-
-def test_iter_copy():
-    # Check that copying the iterator works correctly
-    a = arange(24).reshape(2, 3, 4)
-
-    # Simple iterator
-    i = nditer(a)
-    j = i.copy()
-    assert_equal([x[()] for x in i], [x[()] for x in j])
-
-    i.iterindex = 3
-    j = i.copy()
-    assert_equal([x[()] for x in i], [x[()] for x in j])
-
-    # Buffered iterator
-    i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
-    j = i.copy()
-    assert_equal([x[()] for x in i], [x[()] for x in j])
-
-    i.iterindex = 3
-    j = i.copy()
-    assert_equal([x[()] for x in i], [x[()] for x in j])
-
-    i.iterrange = (3, 9)
-    j = i.copy()
-    assert_equal([x[()] for x in i], [x[()] for x in j])
-
-    i.iterrange = (2, 18)
-    next(i)
-    next(i)
-    j = i.copy()
-    assert_equal([x[()] for x in i], [x[()] for x in j])
-
-    # Casting iterator
-    i = nditer(a, ['buffered'], order='F', casting='unsafe',
-               op_dtypes='f8', buffersize=5)
-    j = i.copy()
-    i = None
-    assert_equal([x[()] for x in j], a.ravel(order='F'))
-
-    a = arange(24, dtype='<i4').reshape(2, 3, 4).T
-    i = nditer(a, ['buffered'], order='F', casting='unsafe',
-               op_dtypes='>f8', buffersize=5)
-    j = i.copy()
-    i = None
-    assert_equal([x[()] for x in j], a.ravel(order='F'))
-
-def test_iter_buffered_cast_byteswapped():
-    # Test that buffering can handle a cast which requires swap->cast->swap
-
-    a = np.arange(10, dtype='f4').newbyteorder().byteswap()
-    i = nditer(a, ['buffered', 'external_loop'],
-               [['readwrite', 'nbo', 'aligned']],
-               casting='same_kind',
-               op_dtypes=[np.dtype('f8').newbyteorder()],
-               buffersize=3)
-    for v in i:
-        v[...] *= 2
-
-    assert_equal(a, 2*np.arange(10, dtype='f4'))
-
-    try:
-        warnings.simplefilter("ignore", np.ComplexWarning)
-
-        a = np.arange(10, dtype='f8').newbyteorder().byteswap()
-        i = nditer(a, ['buffered', 'external_loop'],
-                   [['readwrite', 'nbo', 'aligned']],
-                   casting='unsafe',
-                   op_dtypes=[np.dtype('c8').newbyteorder()],
-                   buffersize=3)
-        for v in i:
-            v[...]
*= 2 - - assert_equal(a, 2*np.arange(10, dtype='f8')) - finally: - warnings.simplefilter("default", np.ComplexWarning) - -def test_iter_buffered_cast_byteswapped_complex(): - # Test that buffering can handle a cast which requires swap->cast->copy - - a = np.arange(10, dtype='c8').newbyteorder().byteswap() - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16')], - buffersize=3) - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) - - a = np.arange(10, dtype='c8') - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16').newbyteorder()], - buffersize=3) - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) - - a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() - a += 2j - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('c16')], - buffersize=3) - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) - - a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() - i = nditer(a, ['buffered', 'external_loop'], - [['readwrite', 'nbo', 'aligned']], - casting='same_kind', - op_dtypes=[np.dtype('f4')], - buffersize=7) - for v in i: - v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) - -def test_iter_buffered_cast_structured_type(): - # Tests buffering of structured types - - # simple -> struct type (duplicates the value) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.arange(3, dtype='f4') + 0.5 - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt) - vals = [np.array(x) for x in i] - assert_equal(vals[0]['a'], 0.5) - assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) - assert_equal(vals[0]['d'], 0.5) - assert_equal(vals[1]['a'], 1.5) - assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) - assert_equal(vals[1]['d'], 1.5) - assert_equal(vals[0].dtype, np.dtype(sdt)) - - # object -> struct type - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.zeros((3,), dtype='O') - a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) - a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) - a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) - rc = sys.getrefcount(a[0]) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt) - vals = [x.copy() for x in i] - assert_equal(vals[0]['a'], 0.5) - assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) - assert_equal(vals[0]['d'], 0.5) - assert_equal(vals[1]['a'], 1.5) - assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) - assert_equal(vals[1]['d'], 1.5) - assert_equal(vals[0].dtype, np.dtype(sdt)) - vals, i, x = [None]*3 - assert_equal(sys.getrefcount(a[0]), rc) - - # struct type -> simple (takes the first value) - sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes='i4') - assert_equal([x[()] for x in i], [5, 8]) - - # struct type -> struct type (field-wise copy) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] - a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - i = nditer(a, 
['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - assert_equal([np.array(x) for x in i], - [np.array((3, 1, 2), dtype=sdt2), - np.array((6, 4, 5), dtype=sdt2)]) - - # struct type -> struct type (field gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2, 1), dtype=sdt2), - np.array((5, 4), dtype=sdt2)]) - assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1)) - - # struct type -> struct type (structured field gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2, 1), dtype=sdt2), - np.array((5, 4), dtype=sdt2)]) - assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1)) - - # struct type -> struct type (structured field w/ ref gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2, 1), dtype=sdt2), - np.array((5, 4), dtype=sdt2)]) - assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))], dtype=sdt1)) - - # struct type -> struct type back (structured field w/ ref gets discarded) - sdt1 = [('b', 'O'), ('a', 'f8')] - sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])] - a = np.array([(1, 2), (4, 5)], dtype=sdt1) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - assert_equal(x['d'], np.array((0, None), dtype=[('a', 'i2'), ('b', 'O')])) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2), - np.array((5, 4, (0, None)), dtype=sdt2)]) - assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1)) - -def test_iter_buffered_cast_subarray(): - # Tests buffering of subarrays - - # one element -> many (copies it to all) - sdt1 = [('a', 'f4')] - sdt2 = [('a', 'f8', (3, 2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - for x, count in zip(i, list(range(6))): - assert_(np.all(x['a'] == count)) - - # one element -> many -> back (copies it to all) - sdt1 = [('a', 'O', (1, 1))] - sdt2 = [('a', 'O', (3, 2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_(np.all(x['a'] == count)) - x['a'][0] += 2 - count += 1 - 
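These buffered-cast tests all rely on the same mechanism: passing an op_dtypes request together with the 'buffered' flag makes nditer cast each chunk through a temporary buffer instead of copying the whole operand. A minimal sketch of that mechanism with a plain (non-structured) dtype, using only public numpy calls and not taken from the patch:

    import numpy as np
    a = np.arange(6, dtype='f4')
    it = np.nditer(a, ['buffered'], [['readwrite']],
                   casting='same_kind', op_dtypes=['f8'])
    for x in it:
        x[...] = x / 2.0   # computed in float64, written back into the float32 array
    # a now holds arange(6) / 2, still with dtype float32
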
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) - - # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'O', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - x['a'] += 2 - count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) - - # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'f8', (3, 2, 2))] - sdt2 = [('a', 'O', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - count += 1 - - # many -> one element (copies just element 0) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'f4', (1,))] - a = np.zeros((6,), dtype=sdt1) - a['a'][:, 0, 0, 0] = np.arange(6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], count) - count += 1 - - # many -> matching shape (straightforward copy) - sdt1 = [('a', 'O', (3, 2, 2))] - sdt2 = [('a', 'f4', (3, 2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], a[count]['a']) - count += 1 - - # vector -> smaller vector (truncates) - sdt1 = [('a', 'f8', (6,))] - sdt2 = [('a', 'f4', (2,))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*6).reshape(6, 6) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'], a[count]['a'][:2]) - count += 1 - - # vector -> bigger vector (pads with zeros) - sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (6,))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2], a[count]['a']) - assert_equal(x['a'][2:], [0, 0, 0, 0]) - count += 1 - - # vector -> matrix (broadcasts) - sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (2, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][0], a[count]['a']) - assert_equal(x['a'][1], a[count]['a']) - count += 1 - - # vector -> matrix (broadcasts and zero-pads) - sdt1 = [('a', 'f8', (2, 1))] - sdt2 = [('a', 'f4', (3, 2))] - a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2, 1) - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - count = 0 - for x in i: - assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) - assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) - assert_equal(x['a'][2,:], [0, 0]) - count += 1 - - # matrix -> matrix (truncates and zero-pads) - sdt1 = 
[('a', 'f8', (2, 3))]
-    sdt2 = [('a', 'f4', (3, 2))]
-    a = np.zeros((6,), dtype=sdt1)
-    a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
-    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
-               casting='unsafe',
-               op_dtypes=sdt2)
-    assert_equal(i[0].dtype, np.dtype(sdt2))
-    count = 0
-    for x in i:
-        assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
-        assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
-        assert_equal(x['a'][2,:], [0, 0])
-        count += 1
-
-def test_iter_buffering_badwriteback():
-    # Writing back from a buffer cannot combine elements
-
-    # a needs write buffering, but has a broadcast dimension
-    a = np.arange(6).reshape(2, 3, 1)
-    b = np.arange(12).reshape(2, 3, 2)
-    assert_raises(ValueError, nditer, [a, b],
-                  ['buffered', 'external_loop'],
-                  [['readwrite'], ['writeonly']],
-                  order='C')
-
-    # But if a is readonly, it's fine
-    i = nditer([a, b], ['buffered', 'external_loop'],
-               [['readonly'], ['writeonly']],
-               order='C')
-
-    # If a has just one element, it's fine too (constant 0 stride, a reduction)
-    a = np.arange(1).reshape(1, 1, 1)
-    i = nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'],
-               [['readwrite'], ['writeonly']],
-               order='C')
-
-    # check that it fails on other dimensions too
-    a = np.arange(6).reshape(1, 3, 2)
-    assert_raises(ValueError, nditer, [a, b],
-                  ['buffered', 'external_loop'],
-                  [['readwrite'], ['writeonly']],
-                  order='C')
-    a = np.arange(4).reshape(2, 1, 2)
-    assert_raises(ValueError, nditer, [a, b],
-                  ['buffered', 'external_loop'],
-                  [['readwrite'], ['writeonly']],
-                  order='C')
-
-def test_iter_buffering_string():
-    # Safe casting disallows shrinking strings
-    a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
-    assert_equal(a.dtype, np.dtype('S4'))
-    assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
-                  op_dtypes='S2')
-    i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
-    assert_equal(i[0], asbytes('abc'))
-    assert_equal(i[0].dtype, np.dtype('S6'))
-
-    a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode)
-    assert_equal(a.dtype, np.dtype('U4'))
-    assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
-                  op_dtypes='U2')
-    i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
-    assert_equal(i[0], sixu('abc'))
-    assert_equal(i[0].dtype, np.dtype('U6'))
-
-def test_iter_buffering_growinner():
-    # Test that the inner loop grows when no buffering is needed
-    a = np.arange(30)
-    i = nditer(a, ['buffered', 'growinner', 'external_loop'],
-               buffersize=5)
-    # Should end up with just one inner loop here
-    assert_equal(i[0].size, a.size)
-
-
-@dec.slow
-def test_iter_buffered_reduce_reuse():
-    # large enough array for all views, including negative strides.
-    a = np.arange(2*3**5)[3**5:3**5+1]
-    flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
-    op_flags = [('readonly',), ('readwrite', 'allocate')]
-    op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
-    # wrong dtype to force buffering
-    op_dtypes = [np.float, a.dtype]
-
-    def get_params():
-        for xs in range(-3**2, 3**2 + 1):
-            for ys in range(xs, 3**2 + 1):
-                for op_axes in op_axes_list:
-                    # the last stride is reduced and, as the inner stride,
-                    # is not important for this test.
- strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) - arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) - - for skip in [0, 1]: - yield arr, op_axes, skip - - for arr, op_axes, skip in get_params(): - nditer2 = np.nditer([arr.copy(), None], - op_axes=op_axes, flags=flags, op_flags=op_flags, - op_dtypes=op_dtypes) - nditer2.operands[-1][...] = 0 - nditer2.reset() - nditer2.iterindex = skip - - for (a2_in, b2_in) in nditer2: - b2_in += a2_in.astype(np.int_) - - comp_res = nditer2.operands[-1] - - for bufsize in range(0, 3**3): - nditer1 = np.nditer([arr, None], - op_axes=op_axes, flags=flags, op_flags=op_flags, - buffersize=bufsize, op_dtypes=op_dtypes) - nditer1.operands[-1][...] = 0 - nditer1.reset() - nditer1.iterindex = skip - - for (a1_in, b1_in) in nditer1: - b1_in += a1_in.astype(np.int_) - - res = nditer1.operands[-1] - assert_array_equal(res, comp_res) - - -def test_iter_no_broadcast(): - # Test that the no_broadcast flag works - a = np.arange(24).reshape(2, 3, 4) - b = np.arange(6).reshape(2, 3, 1) - c = np.arange(12).reshape(3, 4) - - i = nditer([a, b, c], [], - [['readonly', 'no_broadcast'], ['readonly'], ['readonly']]) - assert_raises(ValueError, nditer, [a, b, c], [], - [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) - assert_raises(ValueError, nditer, [a, b, c], [], - [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) - -def test_iter_nested_iters_basic(): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - -def test_iter_nested_iters_reorder(): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - - # In 'K' order (default), it gets reordered - i, j = np.nested_iters(a, [[0], [2, 1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[2, 0], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - # In 'C' order, it doesn't - i, j = np.nested_iters(a, [[0], [2, 1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) - - i, j = np.nested_iters(a, [[1, 0], [2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) - - i, j = np.nested_iters(a, [[2, 0], [1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) - -def test_iter_nested_iters_flip_axes(): - # Test nested iteration with negative axes - a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] - - # In 'K' order (default), the axes all get flipped - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - 
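The nested_iters tests split one array's axes across two (or more) coupled iterators: each step of the outer iterator re-runs the inner one over the remaining axes. A small sketch of the basic usage, mirroring the expected values asserted in these tests (plain numpy, not part of the patch):

    import numpy as np
    a = np.arange(12).reshape(2, 3, 2)
    i, j = np.nested_iters(a, [[0], [1, 2]])   # outer over axis 0, inner over axes 1 and 2
    groups = [[int(y) for y in j] for x in i]
    # groups == [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]
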
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - # In 'C' order, flipping axes is disabled - i, j = np.nested_iters(a, [[0], [1, 2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) - - i, j = np.nested_iters(a, [[0, 1], [2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) - - i, j = np.nested_iters(a, [[0, 2], [1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) - -def test_iter_nested_iters_broadcast(): - # Test nested iteration with broadcasting - a = arange(2).reshape(2, 1) - b = arange(3).reshape(1, 3) - - i, j = np.nested_iters([a, b], [[0], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) - - i, j = np.nested_iters([a, b], [[1], [0]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) - -def test_iter_nested_iters_dtype_copy(): - # Test nested iteration with a copy to change dtype - - # copy - a = arange(6, dtype='i4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readonly', 'copy'], - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) - vals = None - - # updateifcopy - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readwrite', 'updateifcopy'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[0, 1, 2], [3, 4, 5]]) - i, j, x, y = (None,)*4 # force the updateifcopy - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - -def test_iter_nested_iters_dtype_buffered(): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - -def test_iter_reduction_error(): - - a = np.arange(6) - assert_raises(ValueError, nditer, [a, None], [], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0], [-1]]) - - a = np.arange(6).reshape(2, 3) - assert_raises(ValueError, nditer, [a, None], ['external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0, 1], [-1, -1]]) - -def test_iter_reduction(): - # Test doing reductions with the iterator - - a = np.arange(6) - i = nditer([a, None], ['reduce_ok'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0], [-1]]) - # Need to initialize the output operand to the addition unit - i.operands[1][...] = 0 - # Do the reduction - for x, y in i: - y[...] 
+= x - # Since no axes were specified, should have allocated a scalar - assert_equal(i.operands[1].ndim, 0) - assert_equal(i.operands[1], np.sum(a)) - - a = np.arange(6).reshape(2, 3) - i = nditer([a, None], ['reduce_ok', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[0, 1], [-1, -1]]) - # Need to initialize the output operand to the addition unit - i.operands[1][...] = 0 - # Reduction shape/strides for the output - assert_equal(i[1].shape, (6,)) - assert_equal(i[1].strides, (0,)) - # Do the reduction - for x, y in i: - y[...] += x - # Since no axes were specified, should have allocated a scalar - assert_equal(i.operands[1].ndim, 0) - assert_equal(i.operands[1], np.sum(a)) - - # This is a tricky reduction case for the buffering double loop - # to handle - a = np.ones((2, 3, 5)) - it1 = nditer([a, None], ['reduce_ok', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[None, [0, -1, 1]]) - it2 = nditer([a, None], ['reduce_ok', 'external_loop', - 'buffered', 'delay_bufalloc'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[None, [0, -1, 1]], buffersize=10) - it1.operands[1].fill(0) - it2.operands[1].fill(0) - it2.reset() - for x in it1: - x[1][...] += x[0] - for x in it2: - x[1][...] += x[0] - assert_equal(it1.operands[1], it2.operands[1]) - assert_equal(it2.operands[1].sum(), a.size) - -def test_iter_buffering_reduction(): - # Test doing buffered reductions with the iterator - - a = np.arange(6) - b = np.array(0., dtype='f8').byteswap().newbyteorder() - i = nditer([a, b], ['reduce_ok', 'buffered'], - [['readonly'], ['readwrite', 'nbo']], - op_axes=[[0], [-1]]) - assert_equal(i[1].dtype, np.dtype('f8')) - assert_(i[1].dtype != b.dtype) - # Do the reduction - for x, y in i: - y[...] += x - # Since no axes were specified, should have allocated a scalar - assert_equal(b, np.sum(a)) - - a = np.arange(6).reshape(2, 3) - b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() - i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], - [['readonly'], ['readwrite', 'nbo']], - op_axes=[[0, 1], [0, -1]]) - # Reduction shape/strides for the output - assert_equal(i[1].shape, (3,)) - assert_equal(i[1].strides, (0,)) - # Do the reduction - for x, y in i: - y[...] += x - assert_equal(b, np.sum(a, axis=1)) - - # Iterator inner double loop was wrong on this one - p = np.arange(2) + 1 - it = np.nditer([p, None], - ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], - [['readonly'], ['readwrite', 'allocate']], - op_axes=[[-1, 0], [-1, -1]], - itershape=(2, 2)) - it.operands[1].fill(0) - it.reset() - assert_equal(it[0], [1, 2, 1, 2]) - -def test_iter_buffering_reduction_reuse_reduce_loops(): - # There was a bug triggering reuse of the reduce loop inappropriately, - # which caused processing to happen in unnecessarily small chunks - # and overran the buffer. 
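The reduction tests above share one pattern: 'reduce_ok' plus an allocated output whose op_axes entry maps the reduced input axes to -1, with the caller initializing the accumulator before iterating. A compact sketch of that pattern under those same public APIs (not taken from the patch):

    import numpy as np
    a = np.arange(6).reshape(2, 3)
    it = np.nditer([a, None],
                   ['reduce_ok', 'buffered', 'delay_bufalloc'],
                   [['readonly'], ['readwrite', 'allocate']],
                   op_axes=[[0, 1], [0, -1]])   # second axis of a is reduced away
    it.operands[1][...] = 0   # initialize the accumulator to the addition unit
    it.reset()                # required after delay_bufalloc
    for x, y in it:
        y[...] += x
    # it.operands[1] == a.sum(axis=1) == [3, 12]
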
- - a = np.zeros((2, 7)) - b = np.zeros((1, 7)) - it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], - op_flags=[['readonly'], ['readwrite']], - buffersize = 5) - - bufsizes = [] - for x, y in it: - bufsizes.append(x.shape[0]) - assert_equal(bufsizes, [5, 2, 5, 2]) - assert_equal(sum(bufsizes), a.size) - -def test_iter_writemasked_badinput(): - a = np.zeros((2, 3)) - b = np.zeros((3,)) - m = np.array([[True, True, False], [False, True, False]]) - m2 = np.array([True, True, False]) - m3 = np.array([0, 1, 1], dtype='u1') - mbad1 = np.array([0, 1, 1], dtype='i1') - mbad2 = np.array([0, 1, 1], dtype='f4') - - # Need an 'arraymask' if any operand is 'writemasked' - assert_raises(ValueError, nditer, [a, m], [], - [['readwrite', 'writemasked'], ['readonly']]) - - # A 'writemasked' operand must not be readonly - assert_raises(ValueError, nditer, [a, m], [], - [['readonly', 'writemasked'], ['readonly', 'arraymask']]) - - # 'writemasked' and 'arraymask' may not be used together - assert_raises(ValueError, nditer, [a, m], [], - [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) - - # 'arraymask' may only be specified once - assert_raises(ValueError, nditer, [a, m, m2], [], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask'], - ['readonly', 'arraymask']]) - - # An 'arraymask' with nothing 'writemasked' also doesn't make sense - assert_raises(ValueError, nditer, [a, m], [], - [['readwrite'], ['readonly', 'arraymask']]) - - # A writemasked reduction requires a similarly smaller mask - assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], - [['readonly'], - ['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - # But this should work with a smaller/equal mask to the reduction operand - np.nditer([a, b, m2], ['reduce_ok'], - [['readonly'], - ['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - # The arraymask itself cannot be a reduction - assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], - [['readonly'], - ['readwrite', 'writemasked'], - ['readwrite', 'arraymask']]) - - # A uint8 mask is ok too - np.nditer([a, m3], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - # An int8 mask isn't ok - assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - # A float32 mask isn't ok - assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['f4', None], - casting='same_kind') - -def test_iter_writemasked(): - a = np.zeros((3,), dtype='f8') - msk = np.array([True, True, False]) - - # When buffering is unused, 'writemasked' effectively does nothing. - # It's up to the user of the iterator to obey the requested semantics. - it = np.nditer([a, msk], [], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - for x, m in it: - x[...] = 1 - # Because we violated the semantics, all the values became 1 - assert_equal(a, [1, 1, 1]) - - # Even if buffering is enabled, we still may be accessing the array - # directly. - it = np.nditer([a, msk], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']]) - for x, m in it: - x[...] 
= 2.5 - # Because we violated the semantics, all the values became 2.5 - assert_equal(a, [2.5, 2.5, 2.5]) - - # If buffering is definitely happening, for instance because of - # a cast, only the items selected by the mask will be copied back from - # the buffer. - it = np.nditer([a, msk], ['buffered'], - [['readwrite', 'writemasked'], - ['readonly', 'arraymask']], - op_dtypes=['i8', None], - casting='unsafe') - for x, m in it: - x[...] = 3 - # Even though we violated the semantics, only the selected values - # were copied back - assert_equal(a, [3, 3, 2.5]) - -def test_iter_non_writable_attribute_deletion(): - it = np.nditer(np.ones(2)) - attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc", - "iterationneedsapi", "has_multi_index", "has_index", "dtypes", - "ndim", "nop", "itersize", "finished"] - - if sys.version[:3] == '2.4': - error = TypeError - else: - error = AttributeError - - for s in attr: - assert_raises(error, delattr, it, s) - - -def test_iter_writable_attribute_deletion(): - it = np.nditer(np.ones(2)) - attr = ["multi_index", "index", "iterrange", "iterindex"] - for s in attr: - assert_raises(AttributeError, delattr, it, s) - - -def test_iter_element_deletion(): - it = np.nditer(np.ones(3)) - try: - del it[1] - del it[1:2] - except TypeError: - pass - except: - raise AssertionError - -def test_iter_allocated_array_dtypes(): - # If the dtype of an allocated output has a shape, the shape gets - # tacked onto the end of the result. - it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))]) - for a, b in it: - b[0] = a - 1 - b[1] = a + 1 - assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) - - # Make sure this works for scalars too - it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))]) - for a, b, c in it: - c[0, 0] = a - b - c[0, 1] = a + b - c[1, 0] = a * b - c[1, 1] = a / b - assert_equal(it.operands[2], [[8, 12], [20, 5]]) - - -def test_0d_iter(): - # Basic test for iteration of 0-d arrays: - i = nditer([2, 3], ['multi_index'], [['readonly']]*2) - assert_equal(i.ndim, 0) - assert_equal(next(i), (2, 3)) - assert_equal(i.multi_index, ()) - assert_equal(i.iterindex, 0) - assert_raises(StopIteration, next, i) - # test reset: - i.reset() - assert_equal(next(i), (2, 3)) - assert_raises(StopIteration, next, i) - - # test forcing to 0-d - i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()]) - assert_equal(i.ndim, 0) - assert_equal(len(i), 1) - # note that itershape=() still behaves like None due to the conversions - - # Test a more complex buffered casting case (same as another test above) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] - a = np.array(0.5, dtype='f4') - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', op_dtypes=sdt) - vals = next(i) - assert_equal(vals['a'], 0.5) - assert_equal(vals['b'], 0) - assert_equal(vals['c'], [[(0.5)]*3]*2) - assert_equal(vals['d'], 0.5) - - -def test_0d_nested_iter(): - a = np.arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[], [1, 0, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0, 2], []]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) - - i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) - vals = [] - for x in i: - for y in j: - vals.append([z for z in k]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10],
[7, 9, 11]]) - - -def test_iter_too_large(): - # The total size of the iterator must not exceed the maximum intp due - # to broadcasting. Dividing by 1024 will keep it small enough to - # give a legal array. - size = np.iinfo(np.intp).max // 1024 - arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,)) - assert_raises(ValueError, nditer, (arr, arr[:, None])) - # Test the same for multi_index. That may get more interesting when - # removing a 0-dimensional axis is allowed (since the iterator can then grow). - assert_raises(ValueError, nditer, - (arr, arr[:, None]), flags=['multi_index']) - - -def test_iter_too_large_with_multiindex(): - # When a multi index is being tracked, the error is delayed; this - # checks the delayed error messages and checks getting below the limit - # by removing an axis. - base_size = 2**10 - num = 1 - while base_size**num < np.iinfo(np.intp).max: - num += 1 - - shape_template = [1, 1] * num - arrays = [] - for i in range(num): - shape = shape_template[:] - shape[i * 2] = 2**10 - arrays.append(np.empty(shape)) - arrays = tuple(arrays) - - # The arrays are now too large to be broadcast. The different modes test - # different nditer functionality with or without the GIL. - for mode in range(6): - assert_raises(ValueError, test_nditer_too_large, arrays, -1, mode) - # but if we do nothing with the nditer, it can be constructed: - test_nditer_too_large(arrays, -1, 7) - - # When an axis is removed, things should work again (half the time): - for i in range(num): - for mode in range(6): - # an axis with size 1024 is removed: - test_nditer_too_large(arrays, i*2, mode) - # an axis with size 1 is removed: - assert_raises(ValueError, test_nditer_too_large, - arrays, i*2 + 1, mode) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numeric.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numeric.py deleted file mode 100644 index b9c05e456c62d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numeric.py +++ /dev/null @@ -1,2091 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import platform -from decimal import Decimal -import warnings -import itertools - -import numpy as np -from numpy.core import * -from numpy.core import umath -from numpy.random import rand, randint, randn -from numpy.testing import * -from numpy.core.multiarray import dot as dot_ - - -class Vec(object): - def __init__(self, sequence=None): - if sequence is None: - sequence = [] - self.array = array(sequence) - def __add__(self, other): - out = Vec() - out.array = self.array + other.array - return out - def __sub__(self, other): - out = Vec() - out.array = self.array - other.array - return out - def __mul__(self, other): # with scalar - out = Vec(self.array.copy()) - out.array *= other - return out - def __rmul__(self, other): - return self * other - - -class TestDot(TestCase): - def setUp(self): - self.A = rand(10, 8) - self.b1 = rand(8, 1) - self.b2 = rand(8) - self.b3 = rand(1, 8) - self.b4 = rand(10) - self.N = 14 - - def test_matmat(self): - A = self.A - c1 = dot(A.transpose(), A) - c2 = dot_(A.transpose(), A) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_matvec(self): - A, b1 = self.A, self.b1 - c1 = dot(A, b1) - c2 = dot_(A, b1) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_matvec2(self): - A, b2 = self.A, self.b2 - c1 = dot(A, b2) - c2 = dot_(A, b2) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_vecmat(self): - A, b4 = self.A,
self.b4 - c1 = dot(b4, A) - c2 = dot_(b4, A) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_vecmat2(self): - b3, A = self.b3, self.A - c1 = dot(b3, A.transpose()) - c2 = dot_(b3, A.transpose()) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_vecmat3(self): - A, b4 = self.A, self.b4 - c1 = dot(A.transpose(), b4) - c2 = dot_(A.transpose(), b4) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_vecvecouter(self): - b1, b3 = self.b1, self.b3 - c1 = dot(b1, b3) - c2 = dot_(b1, b3) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_vecvecinner(self): - b1, b3 = self.b1, self.b3 - c1 = dot(b3, b1) - c2 = dot_(b3, b1) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_columnvect1(self): - b1 = ones((3, 1)) - b2 = [5.3] - c1 = dot(b1, b2) - c2 = dot_(b1, b2) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_columnvect2(self): - b1 = ones((3, 1)).transpose() - b2 = [6.2] - c1 = dot(b2, b1) - c2 = dot_(b2, b1) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_vecscalar(self): - b1 = rand(1, 1) - b2 = rand(1, 8) - c1 = dot(b1, b2) - c2 = dot_(b1, b2) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_vecscalar2(self): - b1 = rand(8, 1) - b2 = rand(1, 1) - c1 = dot(b1, b2) - c2 = dot_(b1, b2) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_all(self): - dims = [(), (1,), (1, 1)] - for dim1 in dims: - for dim2 in dims: - arg1 = rand(*dim1) - arg2 = rand(*dim2) - c1 = dot(arg1, arg2) - c2 = dot_(arg1, arg2) - assert_(c1.shape == c2.shape) - assert_almost_equal(c1, c2, decimal=self.N) - - def test_vecobject(self): - U_non_cont = transpose([[1., 1.], [1., 2.]]) - U_cont = ascontiguousarray(U_non_cont) - x = array([Vec([1., 0.]), Vec([0., 1.])]) - zeros = array([Vec([0., 0.]), Vec([0., 0.])]) - zeros_test = dot(U_cont, x) - dot(U_non_cont, x) - assert_equal(zeros[0].array, zeros_test[0].array) - assert_equal(zeros[1].array, zeros_test[1].array) - - -class TestResize(TestCase): - def test_copies(self): - A = array([[1, 2], [3, 4]]) - Ar1 = array([[1, 2, 3, 4], [1, 2, 3, 4]]) - assert_equal(resize(A, (2, 4)), Ar1) - - Ar2 = array([[1, 2], [3, 4], [1, 2], [3, 4]]) - assert_equal(resize(A, (4, 2)), Ar2) - - Ar3 = array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) - assert_equal(resize(A, (4, 3)), Ar3) - - def test_zeroresize(self): - A = array([[1, 2], [3, 4]]) - Ar = resize(A, (0,)) - assert_equal(Ar, array([])) - -class TestNonarrayArgs(TestCase): - # check that non-array arguments to functions wrap them in arrays - def test_squeeze(self): - A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] - assert_(squeeze(A).shape == (3, 3)) - - def test_cumproduct(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(all(cumproduct(A) == array([1, 2, 6, 24, 120, 720]))) - - def test_size(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(size(A) == 6) - assert_(size(A, 0) == 2) - assert_(size(A, 1) == 3) - - def test_mean(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(mean(A) == 3.5) - assert_(all(mean(A, 0) == array([2.5, 3.5, 4.5]))) - assert_(all(mean(A, 1) == array([2., 5.]))) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(isnan(mean([]))) - assert_(w[0].category is RuntimeWarning) - - def test_std(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_almost_equal(std(A), 1.707825127659933) - assert_almost_equal(std(A, 0), array([1.5, 1.5, 1.5])) - assert_almost_equal(std(A, 1), array([0.81649658, 0.81649658])) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', 
'', RuntimeWarning) - assert_(isnan(std([]))) - assert_(w[0].category is RuntimeWarning) - - def test_var(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_almost_equal(var(A), 2.9166666666666665) - assert_almost_equal(var(A, 0), array([2.25, 2.25, 2.25])) - assert_almost_equal(var(A, 1), array([0.66666667, 0.66666667])) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(isnan(var([]))) - assert_(w[0].category is RuntimeWarning) - - -class TestBoolScalar(TestCase): - def test_logical(self): - f = False_ - t = True_ - s = "xyz" - self.assertTrue((t and s) is s) - self.assertTrue((f and s) is f) - - def test_bitwise_or(self): - f = False_ - t = True_ - self.assertTrue((t | t) is t) - self.assertTrue((f | t) is t) - self.assertTrue((t | f) is t) - self.assertTrue((f | f) is f) - - def test_bitwise_and(self): - f = False_ - t = True_ - self.assertTrue((t & t) is t) - self.assertTrue((f & t) is f) - self.assertTrue((t & f) is f) - self.assertTrue((f & f) is f) - - def test_bitwise_xor(self): - f = False_ - t = True_ - self.assertTrue((t ^ t) is f) - self.assertTrue((f ^ t) is t) - self.assertTrue((t ^ f) is t) - self.assertTrue((f ^ f) is f) - - -class TestBoolArray(TestCase): - def setUp(self): - # offset for simd tests - self.t = array([True] * 41, dtype=np.bool)[1::] - self.f = array([False] * 41, dtype=np.bool)[1::] - self.o = array([False] * 42, dtype=np.bool)[2::] - self.nm = self.f.copy() - self.im = self.t.copy() - self.nm[3] = True - self.nm[-2] = True - self.im[3] = False - self.im[-2] = False - - def test_all_any(self): - self.assertTrue(self.t.all()) - self.assertTrue(self.t.any()) - self.assertFalse(self.f.all()) - self.assertFalse(self.f.any()) - self.assertTrue(self.nm.any()) - self.assertTrue(self.im.any()) - self.assertFalse(self.nm.all()) - self.assertFalse(self.im.all()) - # check bad element in all positions - for i in range(256 - 7): - d = array([False] * 256, dtype=np.bool)[7::] - d[i] = True - self.assertTrue(np.any(d)) - e = array([True] * 256, dtype=np.bool)[7::] - e[i] = False - self.assertFalse(np.all(e)) - assert_array_equal(e, ~d) - # big array test for blocked libc loops - for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: - d = array([False] * 100043, dtype=np.bool) - d[i] = True - self.assertTrue(np.any(d), msg="%r" % i) - e = array([True] * 100043, dtype=np.bool) - e[i] = False - self.assertFalse(np.all(e), msg="%r" % i) - - def test_logical_not_abs(self): - assert_array_equal(~self.t, self.f) - assert_array_equal(np.abs(~self.t), self.f) - assert_array_equal(np.abs(~self.f), self.t) - assert_array_equal(np.abs(self.f), self.f) - assert_array_equal(~np.abs(self.f), self.t) - assert_array_equal(~np.abs(self.t), self.f) - assert_array_equal(np.abs(~self.nm), self.im) - np.logical_not(self.t, out=self.o) - assert_array_equal(self.o, self.f) - np.abs(self.t, out=self.o) - assert_array_equal(self.o, self.t) - - def test_logical_and_or_xor(self): - assert_array_equal(self.t | self.t, self.t) - assert_array_equal(self.f | self.f, self.f) - assert_array_equal(self.t | self.f, self.t) - assert_array_equal(self.f | self.t, self.t) - np.logical_or(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t & self.t, self.t) - assert_array_equal(self.f & self.f, self.f) - assert_array_equal(self.t & self.f, self.f) - assert_array_equal(self.f & self.t, self.f) - np.logical_and(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t ^ 
self.t, self.f) - assert_array_equal(self.f ^ self.f, self.f) - assert_array_equal(self.t ^ self.f, self.t) - assert_array_equal(self.f ^ self.t, self.t) - np.logical_xor(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.f) - - assert_array_equal(self.nm & self.t, self.nm) - assert_array_equal(self.im & self.f, False) - assert_array_equal(self.nm & True, self.nm) - assert_array_equal(self.im & False, self.f) - assert_array_equal(self.nm | self.t, self.t) - assert_array_equal(self.im | self.f, self.im) - assert_array_equal(self.nm | True, self.t) - assert_array_equal(self.im | False, self.im) - assert_array_equal(self.nm ^ self.t, self.im) - assert_array_equal(self.im ^ self.f, self.im) - assert_array_equal(self.nm ^ True, self.im) - assert_array_equal(self.im ^ False, self.im) - - -class TestBoolCmp(TestCase): - def setUp(self): - self.f = ones(256, dtype=np.float32) - self.ef = ones(self.f.size, dtype=np.bool) - self.d = ones(128, dtype=np.float64) - self.ed = ones(self.d.size, dtype=np.bool) - # generate values for all permutation of 256bit simd vectors - s = 0 - for i in range(32): - self.f[s:s+8] = [i & 2**x for x in range(8)] - self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] - s += 8 - s = 0 - for i in range(16): - self.d[s:s+4] = [i & 2**x for x in range(4)] - self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)] - s += 4 - - self.nf = self.f.copy() - self.nd = self.d.copy() - self.nf[self.ef] = np.nan - self.nd[self.ed] = np.nan - - def test_float(self): - # offset for alignment test - for i in range(4): - assert_array_equal(self.f[i:] > 0, self.ef[i:]) - assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) - assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) - assert_array_equal(-self.f[i:] < 0, self.ef[i:]) - assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) - r = self.f[i:] != 0 - assert_array_equal(r, self.ef[i:]) - r2 = self.f[i:] != np.zeros_like(self.f[i:]) - r3 = 0 != self.f[i:] - assert_array_equal(r, r2) - assert_array_equal(r, r3) - # check bool == 0x1 - assert_array_equal(r.view(np.int8), r.astype(np.int8)) - assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) - assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) - - # isnan on amd64 takes the same codepath - assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) - - def test_double(self): - # offset for alignment test - for i in range(2): - assert_array_equal(self.d[i:] > 0, self.ed[i:]) - assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) - assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) - assert_array_equal(-self.d[i:] < 0, self.ed[i:]) - assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) - r = self.d[i:] != 0 - assert_array_equal(r, self.ed[i:]) - r2 = self.d[i:] != np.zeros_like(self.d[i:]) - r3 = 0 != self.d[i:] - assert_array_equal(r, r2) - assert_array_equal(r, r3) - # check bool == 0x1 - assert_array_equal(r.view(np.int8), r.astype(np.int8)) - assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) - assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) - - # isnan on amd64 takes the same codepath - assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) - - -class TestSeterr(TestCase): - def test_default(self): - err = geterr() - self.assertEqual(err, dict( - divide='warn', - invalid='warn', - over='warn', - under='ignore', - )) - - def test_set(self): - with np.errstate(): - err = seterr() - old = seterr(divide='print') - self.assertTrue(err == old) - new = seterr() - self.assertTrue(new['divide'] == 'print') - seterr(over='raise') - self.assertTrue(geterr()['over'] == 
'raise') - self.assertTrue(new['divide'] == 'print') - seterr(**old) - self.assertTrue(geterr() == old) - - @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") - def test_divide_err(self): - with errstate(divide='raise'): - try: - array([1.]) / array([0.]) - except FloatingPointError: - pass - else: - self.fail() - seterr(divide='ignore') - array([1.]) / array([0.]) - - def test_errobj(self): - olderrobj = np.geterrobj() - self.called = 0 - try: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - with errstate(divide='warn'): - np.seterrobj([20000, 1, None]) - array([1.]) / array([0.]) - self.assertEqual(len(w), 1) - - def log_err(*args): - self.called += 1 - extobj_err = args - assert (len(extobj_err) == 2) - assert ("divide" in extobj_err[0]) - - with errstate(divide='ignore'): - np.seterrobj([20000, 3, log_err]) - array([1.]) / array([0.]) - self.assertEqual(self.called, 1) - - np.seterrobj(olderrobj) - with errstate(divide='ignore'): - np.divide(1., 0., extobj=[20000, 3, log_err]) - self.assertEqual(self.called, 2) - finally: - np.seterrobj(olderrobj) - del self.called - - def test_errobj_noerrmask(self): - # errmask = 0 has a special code path for the default - olderrobj = np.geterrobj() - try: - # set errobj to something non default - np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, - umath.ERR_DEFAULT + 1, None]) - #call a ufunc - np.isnan(np.array([6])) - # same with the default, lots of times to get rid of possible - # pre-existing stack in the code - for i in range(10000): - np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT, - None]) - np.isnan(np.array([6])) - finally: - np.seterrobj(olderrobj) - - -class TestFloatExceptions(TestCase): - def assert_raises_fpe(self, fpeerr, flop, x, y): - ftype = type(x) - try: - flop(x, y) - assert_(False, - "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) - except FloatingPointError as exc: - assert_(str(exc).find(fpeerr) >= 0, - "Type %s raised wrong fpe error '%s'." % (ftype, exc)) - - def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): - # Check that fpe exception is raised. - # - # Given a floating operation `flop` and two scalar values, check that - # the operation raises the floating point exception specified by - #`fpeerr`. Tests all variants with 0-d array scalars as well. 
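The helper's body follows; for context, the np.errstate machinery that all of these exception tests drive can be shown in isolation (an illustrative sketch, not a line of the deleted file):

import numpy as np

# np.errstate swaps the error mask in and restores it on exit, so a
# masked operation raises FloatingPointError instead of warning.
with np.errstate(divide='raise'):
    try:
        np.array([1.0]) / np.array([0.0])
    except FloatingPointError as exc:
        assert 'divide by zero' in str(exc)

# Outside the block the default divide='warn' behaviour is back.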
- - self.assert_raises_fpe(fpeerr, flop, sc1, sc2) - self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) - self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) - self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) - - @dec.knownfailureif(True, "See ticket #2350") - def test_floating_exceptions(self): - # Test basic arithmetic function errors - with np.errstate(all='raise'): - # Test for all real and complex float types - for typecode in np.typecodes['AllFloat']: - ftype = np.obj2sctype(typecode) - if np.dtype(ftype).kind == 'f': - # Get some extreme values for the type - fi = np.finfo(ftype) - ft_tiny = fi.tiny - ft_max = fi.max - ft_eps = fi.eps - underflow = 'underflow' - divbyzero = 'divide by zero' - else: - # 'c', complex, corresponding real dtype - rtype = type(ftype(0).real) - fi = np.finfo(rtype) - ft_tiny = ftype(fi.tiny) - ft_max = ftype(fi.max) - ft_eps = ftype(fi.eps) - # The complex types raise different exceptions - underflow = '' - divbyzero = '' - overflow = 'overflow' - invalid = 'invalid' - - self.assert_raises_fpe(underflow, - lambda a, b: a/b, ft_tiny, ft_max) - self.assert_raises_fpe(underflow, - lambda a, b: a*b, ft_tiny, ft_tiny) - self.assert_raises_fpe(overflow, - lambda a, b: a*b, ft_max, ftype(2)) - self.assert_raises_fpe(overflow, - lambda a, b: a/b, ft_max, ftype(0.5)) - self.assert_raises_fpe(overflow, - lambda a, b: a+b, ft_max, ft_max*ft_eps) - self.assert_raises_fpe(overflow, - lambda a, b: a-b, -ft_max, ft_max*ft_eps) - self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) - self.assert_raises_fpe(divbyzero, - lambda a, b: a/b, ftype(1), ftype(0)) - self.assert_raises_fpe(invalid, - lambda a, b: a/b, ftype(np.inf), ftype(np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a/b, ftype(0), ftype(0)) - self.assert_raises_fpe(invalid, - lambda a, b: a-b, ftype(np.inf), ftype(np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)) - self.assert_raises_fpe(invalid, - lambda a, b: a*b, ftype(0), ftype(np.inf)) - - def test_warnings(self): - # test warning code path - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - with np.errstate(all="warn"): - np.divide(1, 0.) - self.assertEqual(len(w), 1) - self.assertTrue("divide by zero" in str(w[0].message)) - np.array(1e300) * np.array(1e300) - self.assertEqual(len(w), 2) - self.assertTrue("overflow" in str(w[-1].message)) - np.array(np.inf) - np.array(np.inf) - self.assertEqual(len(w), 3) - self.assertTrue("invalid value" in str(w[-1].message)) - np.array(1e-300) * np.array(1e-300) - self.assertEqual(len(w), 4) - self.assertTrue("underflow" in str(w[-1].message)) - - -class TestTypes(TestCase): - def check_promotion_cases(self, promote_func): - # Tests that the scalars get coerced correctly.
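These promotion cases are exercised with several promote_func implementations (np.add's result dtype in test_coercion, np.result_type in test_result_type below). The rules they encode can be probed directly (an illustrative sketch, not a line of the deleted file):

import numpy as np

# Within a kind the wider type wins; mixing signed and unsigned picks
# a signed type large enough for both, escaping to float64 when none is.
assert np.promote_types(np.int8, np.int16) == np.dtype(np.int16)
assert np.promote_types(np.int8, np.uint8) == np.dtype(np.int16)
assert np.promote_types(np.uint64, np.int64) == np.dtype(np.float64)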
- b = np.bool_(0) - i8, i16, i32, i64 = int8(0), int16(0), int32(0), int64(0) - u8, u16, u32, u64 = uint8(0), uint16(0), uint32(0), uint64(0) - f32, f64, fld = float32(0), float64(0), longdouble(0) - c64, c128, cld = complex64(0), complex128(0), clongdouble(0) - - # coercion within the same kind - assert_equal(promote_func(i8, i16), np.dtype(int16)) - assert_equal(promote_func(i32, i8), np.dtype(int32)) - assert_equal(promote_func(i16, i64), np.dtype(int64)) - assert_equal(promote_func(u8, u32), np.dtype(uint32)) - assert_equal(promote_func(f32, f64), np.dtype(float64)) - assert_equal(promote_func(fld, f32), np.dtype(longdouble)) - assert_equal(promote_func(f64, fld), np.dtype(longdouble)) - assert_equal(promote_func(c128, c64), np.dtype(complex128)) - assert_equal(promote_func(cld, c128), np.dtype(clongdouble)) - assert_equal(promote_func(c64, fld), np.dtype(clongdouble)) - - # coercion between kinds - assert_equal(promote_func(b, i32), np.dtype(int32)) - assert_equal(promote_func(b, u8), np.dtype(uint8)) - assert_equal(promote_func(i8, u8), np.dtype(int16)) - assert_equal(promote_func(u8, i32), np.dtype(int32)) - assert_equal(promote_func(i64, u32), np.dtype(int64)) - assert_equal(promote_func(u64, i32), np.dtype(float64)) - assert_equal(promote_func(i32, f32), np.dtype(float64)) - assert_equal(promote_func(i64, f32), np.dtype(float64)) - assert_equal(promote_func(f32, i16), np.dtype(float32)) - assert_equal(promote_func(f32, u32), np.dtype(float64)) - assert_equal(promote_func(f32, c64), np.dtype(complex64)) - assert_equal(promote_func(c128, f32), np.dtype(complex128)) - assert_equal(promote_func(cld, f64), np.dtype(clongdouble)) - - # coercion between scalars and 1-D arrays - assert_equal(promote_func(array([b]), i8), np.dtype(int8)) - assert_equal(promote_func(array([b]), u8), np.dtype(uint8)) - assert_equal(promote_func(array([b]), i32), np.dtype(int32)) - assert_equal(promote_func(array([b]), u32), np.dtype(uint32)) - assert_equal(promote_func(array([i8]), i64), np.dtype(int8)) - assert_equal(promote_func(u64, array([i32])), np.dtype(int32)) - assert_equal(promote_func(i64, array([u32])), np.dtype(uint32)) - assert_equal(promote_func(int32(-1), array([u64])), np.dtype(float64)) - assert_equal(promote_func(f64, array([f32])), np.dtype(float32)) - assert_equal(promote_func(fld, array([f32])), np.dtype(float32)) - assert_equal(promote_func(array([f64]), fld), np.dtype(float64)) - assert_equal(promote_func(fld, array([c64])), np.dtype(complex64)) - assert_equal(promote_func(c64, array([f64])), np.dtype(complex128)) - assert_equal(promote_func(complex64(3j), array([f64])), - np.dtype(complex128)) - - # coercion between scalars and 1-D arrays, where - # the scalar has greater kind than the array - assert_equal(promote_func(array([b]), f64), np.dtype(float64)) - assert_equal(promote_func(array([b]), i64), np.dtype(int64)) - assert_equal(promote_func(array([b]), u64), np.dtype(uint64)) - assert_equal(promote_func(array([i8]), f64), np.dtype(float64)) - assert_equal(promote_func(array([u16]), f64), np.dtype(float64)) - - # uint and int are treated as the same "kind" for - # the purposes of array-scalar promotion. - assert_equal(promote_func(array([u16]), i32), np.dtype(uint16)) - - # float and complex are treated as the same "kind" for - # the purposes of array-scalar promotion, so that you can do - # (0j + float32array) to get a complex64 array instead of - # a complex128 array. 
- assert_equal(promote_func(array([f32]), c128), np.dtype(complex64)) - - def test_coercion(self): - def res_type(a, b): - return np.add(a, b).dtype - self.check_promotion_cases(res_type) - - # Use-case: float/complex scalar * bool/int8 array - # shouldn't narrow the float/complex type - for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: - b = 1.234 * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) - b = np.longdouble(1.234) * a - assert_equal(b.dtype, np.dtype(np.longdouble), - "array type %s" % a.dtype) - b = np.float64(1.234) * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) - b = np.float32(1.234) * a - assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) - b = np.float16(1.234) * a - assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) - - b = 1.234j * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) - b = np.clongdouble(1.234j) * a - assert_equal(b.dtype, np.dtype(np.clongdouble), - "array type %s" % a.dtype) - b = np.complex128(1.234j) * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) - b = np.complex64(1.234j) * a - assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) - - # The following use-case is problematic, and to resolve its - # tricky side-effects requires more changes. - # - ## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is - ## a float32, shouldn't promote to float64 - #a = np.array([1.0, 1.5], dtype=np.float32) - #t = np.array([True, False]) - #b = t*a - #assert_equal(b, [1.0, 0.0]) - #assert_equal(b.dtype, np.dtype('f4')) - #b = (1-t)*a - #assert_equal(b, [0.0, 1.5]) - #assert_equal(b.dtype, np.dtype('f4')) - ## Probably ~t (bitwise negation) is more proper to use here, - ## but this is arguably less intuitive to understand at a glance, and - ## would fail if 't' is actually an integer array instead of boolean: - #b = (~t)*a - #assert_equal(b, [0.0, 1.5]) - #assert_equal(b.dtype, np.dtype('f4')) - - def test_result_type(self): - self.check_promotion_cases(np.result_type) - assert_(np.result_type(None) == np.dtype(None)) - - def test_promote_types_endian(self): - # promote_types should always return native-endian types - assert_equal(np.promote_types('i8', '>i8'), np.dtype('i8')) - - assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) - assert_equal(np.promote_types('U16', '>i8'), np.dtype('U21')) - assert_equal(np.promote_types('S5', '>U8'), np.dtype('U8')) - assert_equal(np.promote_types('U8', '>S5'), np.dtype('U8')) - assert_equal(np.promote_types('U8', '>U5'), np.dtype('U8')) - - assert_equal(np.promote_types('M8', '>M8'), np.dtype('M8')) - assert_equal(np.promote_types('m8', '>m8'), np.dtype('m8')) - - def test_promote_types_strings(self): - assert_equal(np.promote_types('bool', 'S'), np.dtype('S5')) - assert_equal(np.promote_types('b', 'S'), np.dtype('S4')) - assert_equal(np.promote_types('u1', 'S'), np.dtype('S3')) - assert_equal(np.promote_types('u2', 'S'), np.dtype('S5')) - assert_equal(np.promote_types('u4', 'S'), np.dtype('S10')) - assert_equal(np.promote_types('u8', 'S'), np.dtype('S20')) - assert_equal(np.promote_types('i1', 'S'), np.dtype('S4')) - assert_equal(np.promote_types('i2', 'S'), np.dtype('S6')) - assert_equal(np.promote_types('i4', 'S'), np.dtype('S11')) - assert_equal(np.promote_types('i8', 'S'), np.dtype('S21')) - assert_equal(np.promote_types('bool', 'U'), np.dtype('U5')) - assert_equal(np.promote_types('b', 'U'), np.dtype('U4')) - assert_equal(np.promote_types('u1', 'U'), 
np.dtype('U3')) - assert_equal(np.promote_types('u2', 'U'), np.dtype('U5')) - assert_equal(np.promote_types('u4', 'U'), np.dtype('U10')) - assert_equal(np.promote_types('u8', 'U'), np.dtype('U20')) - assert_equal(np.promote_types('i1', 'U'), np.dtype('U4')) - assert_equal(np.promote_types('i2', 'U'), np.dtype('U6')) - assert_equal(np.promote_types('i4', 'U'), np.dtype('U11')) - assert_equal(np.promote_types('i8', 'U'), np.dtype('U21')) - assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5')) - assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('b', 'S1'), np.dtype('S4')) - assert_equal(np.promote_types('b', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3')) - assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5')) - assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10')) - assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30')) - assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20')) - assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30')) - - def test_can_cast(self): - assert_(np.can_cast(np.int32, np.int64)) - assert_(np.can_cast(np.float64, np.complex)) - assert_(not np.can_cast(np.complex, np.float)) - - assert_(np.can_cast('i8', 'f8')) - assert_(not np.can_cast('i8', 'f4')) - assert_(np.can_cast('i4', 'S11')) - - assert_(np.can_cast('i8', 'i8', 'no')) - assert_(not np.can_cast('<i8', '>i8', 'no')) - - assert_(np.can_cast('<i8', '>i8', 'equiv')) - assert_(not np.can_cast('<i4', '>i8', 'equiv')) - - assert_(np.can_cast('<i4', '>i8', 'safe')) - assert_(not np.can_cast('<i8', '>i4', 'safe')) - - assert_(np.can_cast('<i8', '>i4', 'same_kind')) - assert_(not np.can_cast('<i8', '>u4', 'same_kind')) - - assert_(np.can_cast('<i8', '>u4', 'unsafe')) - - assert_(np.can_cast('bool', 'S5')) - assert_(not np.can_cast('bool', 'S4')) - - assert_(np.can_cast('b', 'S4')) - assert_(not np.can_cast('b', 'S3')) - - assert_(np.can_cast('u1', 'S3')) - assert_(not np.can_cast('u1', 'S2')) - assert_(np.can_cast('u2', 'S5')) - assert_(not np.can_cast('u2', 'S4')) - assert_(np.can_cast('u4', 'S10')) - assert_(not np.can_cast('u4', 'S9')) - assert_(np.can_cast('u8', 'S20')) - assert_(not np.can_cast('u8', 'S19')) - - assert_(np.can_cast('i1', 'S4')) - assert_(not np.can_cast('i1', 'S3')) - assert_(np.can_cast('i2', 'S6')) - assert_(not np.can_cast('i2', 'S5')) - assert_(np.can_cast('i4', 'S11')) - assert_(not np.can_cast('i4', 'S10')) - assert_(np.can_cast('i8', 'S21')) - assert_(not np.can_cast('i8', 'S20')) - - assert_(np.can_cast('bool', 'S5')) - assert_(not np.can_cast('bool', 'S4')) - - assert_(np.can_cast('b', 'U4')) - assert_(not np.can_cast('b', 'U3')) - - assert_(np.can_cast('u1', 'U3')) - assert_(not np.can_cast('u1', 'U2')) - assert_(np.can_cast('u2', 'U5')) - assert_(not np.can_cast('u2', 'U4')) - assert_(np.can_cast('u4', 'U10')) - assert_(not np.can_cast('u4', 'U9')) - assert_(np.can_cast('u8', 'U20')) - assert_(not np.can_cast('u8', 'U19')) - - assert_(np.can_cast('i1', 'U4')) - assert_(not np.can_cast('i1', 'U3')) - assert_(np.can_cast('i2', 'U6')) - assert_(not np.can_cast('i2', 'U5')) - assert_(np.can_cast('i4', 'U11')) - assert_(not np.can_cast('i4', 'U10')) - assert_(np.can_cast('i8', 'U21')) - assert_(not np.can_cast('i8', 'U20')) - - assert_raises(TypeError, np.can_cast, 'i4', None) - assert_raises(TypeError, np.can_cast, None, 'i4') - - -# Custom exception class to test exception propagation in fromiter -class
NIterError(Exception): pass - - -class TestFromiter(TestCase): - def makegen(self): - for x in range(24): - yield x**2 - - def test_types(self): - ai32 = fromiter(self.makegen(), int32) - ai64 = fromiter(self.makegen(), int64) - af = fromiter(self.makegen(), float) - self.assertTrue(ai32.dtype == dtype(int32)) - self.assertTrue(ai64.dtype == dtype(int64)) - self.assertTrue(af.dtype == dtype(float)) - - def test_lengths(self): - expected = array(list(self.makegen())) - a = fromiter(self.makegen(), int) - a20 = fromiter(self.makegen(), int, 20) - self.assertTrue(len(a) == len(expected)) - self.assertTrue(len(a20) == 20) - self.assertRaises(ValueError, fromiter, - self.makegen(), int, len(expected) + 10) - - def test_values(self): - expected = array(list(self.makegen())) - a = fromiter(self.makegen(), int) - a20 = fromiter(self.makegen(), int, 20) - self.assertTrue(alltrue(a == expected, axis=0)) - self.assertTrue(alltrue(a20 == expected[:20], axis=0)) - - def load_data(self, n, eindex): - # Utility method for the issue 2592 tests. - # Raise an exception at the desired index in the iterator. - for e in range(n): - if e == eindex: - raise NIterError('error at index %s' % eindex) - yield e - - def test_2592(self): - # Test iteration exceptions are correctly raised. - count, eindex = 10, 5 - self.assertRaises(NIterError, np.fromiter, - self.load_data(count, eindex), dtype=int, count=count) - - def test_2592_edge(self): - # Test iter. exceptions, edge case (exception at end of iterator). - count = 10 - eindex = count-1 - self.assertRaises(NIterError, np.fromiter, - self.load_data(count, eindex), dtype=int, count=count) - - -class TestNonzero(TestCase): - def test_nonzero_trivial(self): - assert_equal(np.count_nonzero(array([])), 0) - assert_equal(np.count_nonzero(array([], dtype='?')), 0) - assert_equal(np.nonzero(array([])), ([],)) - - assert_equal(np.count_nonzero(array(0)), 0) - assert_equal(np.count_nonzero(array(0, dtype='?')), 0) - assert_equal(np.nonzero(array(0)), ([],)) - assert_equal(np.count_nonzero(array(1)), 1) - assert_equal(np.count_nonzero(array(1, dtype='?')), 1) - assert_equal(np.nonzero(array(1)), ([0],)) - - def test_nonzero_onedim(self): - x = array([1, 0, 2, -1, 0, 0, 8]) - assert_equal(np.count_nonzero(x), 4) - assert_equal(np.count_nonzero(x), 4) - assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) - - x = array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], - dtype=[('a', 'i4'), ('b', 'i2')]) - assert_equal(np.count_nonzero(x['a']), 3) - assert_equal(np.count_nonzero(x['b']), 4) - assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) - assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) - - def test_nonzero_twodim(self): - x = array([[0, 1, 0], [2, 0, 3]]) - assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) - - x = np.eye(3) - assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) - - x = array([[(0, 1), (0, 0), (1, 11)], - [(1, 1), (1, 0), (0, 0)], - [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) - assert_equal(np.count_nonzero(x['a']), 4) - assert_equal(np.count_nonzero(x['b']), 5) - assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) - assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) - - assert_(not x['a'].T.flags.aligned) - assert_equal(np.count_nonzero(x['a'].T), 4) - assert_equal(np.count_nonzero(x['b'].T), 5) - assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) - assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) - - def 
test_sparse(self): - # test special sparse condition boolean code path - for i in range(20): - c = np.zeros(200, dtype=np.bool) - c[i::20] = True - assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) - - c = np.zeros(400, dtype=np.bool) - c[10 + i:20 + i] = True - c[20 + i*2] = True - assert_equal(np.nonzero(c)[0], - np.concatenate((np.arange(10 +i, 20 + i), [20 +i*2]))) - - -class TestIndex(TestCase): - def test_boolean(self): - a = rand(3, 5, 8) - V = rand(5, 8) - g1 = randint(0, 5, size=15) - g2 = randint(0, 8, size=15) - V[g1, g2] = -V[g1, g2] - assert_((array([a[0][V>0], a[1][V>0], a[2][V>0]]) == a[:, V>0]).all()) - - def test_boolean_edgecase(self): - a = np.array([], dtype='int32') - b = np.array([], dtype='bool') - c = a[b] - assert_equal(c, []) - assert_equal(c.dtype, np.dtype('int32')) - - -class TestBinaryRepr(TestCase): - def test_zero(self): - assert_equal(binary_repr(0), '0') - - def test_large(self): - assert_equal(binary_repr(10736848), '101000111101010011010000') - - def test_negative(self): - assert_equal(binary_repr(-1), '-1') - assert_equal(binary_repr(-1, width=8), '11111111') - -class TestBaseRepr(TestCase): - def test_base3(self): - assert_equal(base_repr(3**5, 3), '100000') - - def test_positive(self): - assert_equal(base_repr(12, 10), '12') - assert_equal(base_repr(12, 10, 4), '000012') - assert_equal(base_repr(12, 4), '30') - assert_equal(base_repr(3731624803700888, 36), '10QR0ROFCEW') - - def test_negative(self): - assert_equal(base_repr(-12, 10), '-12') - assert_equal(base_repr(-12, 10, 4), '-000012') - assert_equal(base_repr(-12, 4), '-30') - -class TestArrayComparisons(TestCase): - def test_array_equal(self): - res = array_equal(array([1, 2]), array([1, 2])) - assert_(res) - assert_(type(res) is bool) - res = array_equal(array([1, 2]), array([1, 2, 3])) - assert_(not res) - assert_(type(res) is bool) - res = array_equal(array([1, 2]), array([3, 4])) - assert_(not res) - assert_(type(res) is bool) - res = array_equal(array([1, 2]), array([1, 3])) - assert_(not res) - assert_(type(res) is bool) - res = array_equal(array(['a'], dtype='S1'), array(['a'], dtype='S1')) - assert_(res) - assert_(type(res) is bool) - res = array_equal(array([('a', 1)], dtype='S1,u4'), array([('a', 1)], dtype='S1,u4')) - assert_(res) - assert_(type(res) is bool) - - def test_array_equiv(self): - res = array_equiv(array([1, 2]), array([1, 2])) - assert_(res) - assert_(type(res) is bool) - res = array_equiv(array([1, 2]), array([1, 2, 3])) - assert_(not res) - assert_(type(res) is bool) - res = array_equiv(array([1, 2]), array([3, 4])) - assert_(not res) - assert_(type(res) is bool) - res = array_equiv(array([1, 2]), array([1, 3])) - assert_(not res) - assert_(type(res) is bool) - - res = array_equiv(array([1, 1]), array([1])) - assert_(res) - assert_(type(res) is bool) - res = array_equiv(array([1, 1]), array([[1], [1]])) - assert_(res) - assert_(type(res) is bool) - res = array_equiv(array([1, 2]), array([2])) - assert_(not res) - assert_(type(res) is bool) - res = array_equiv(array([1, 2]), array([[1], [2]])) - assert_(not res) - assert_(type(res) is bool) - res = array_equiv(array([1, 2]), array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) - assert_(not res) - assert_(type(res) is bool) - - -def assert_array_strict_equal(x, y): - assert_array_equal(x, y) - # Check flags, 32 bit arches typically don't provide 16 byte alignment - if ((x.dtype.alignment <= 8 or - np.intp().dtype.itemsize != 4) and - sys.platform != 'win32'): - assert_(x.flags == y.flags) - else: - assert_(x.flags.owndata == 
y.flags.owndata) - assert_(x.flags.writeable == y.flags.writeable) - assert_(x.flags.c_contiguous == y.flags.c_contiguous) - assert_(x.flags.f_contiguous == y.flags.f_contiguous) - assert_(x.flags.updateifcopy == y.flags.updateifcopy) - # check endianness - assert_(x.dtype.isnative == y.dtype.isnative) - - -class TestClip(TestCase): - def setUp(self): - self.nr = 5 - self.nc = 3 - - def fastclip(self, a, m, M, out=None): - if out is None: - return a.clip(m, M) - else: - return a.clip(m, M, out) - - def clip(self, a, m, M, out=None): - # use slow-clip - selector = less(a, m)+2*greater(a, M) - return selector.choose((a, m, M), out=out) - - # Handy functions - def _generate_data(self, n, m): - return randn(n, m) - - def _generate_data_complex(self, n, m): - return randn(n, m) + 1.j *rand(n, m) - - def _generate_flt_data(self, n, m): - return (randn(n, m)).astype(float32) - - def _neg_byteorder(self, a): - a = asarray(a) - if sys.byteorder == 'little': - a = a.astype(a.dtype.newbyteorder('>')) - else: - a = a.astype(a.dtype.newbyteorder('<')) - return a - - def _generate_non_native_data(self, n, m): - data = randn(n, m) - data = self._neg_byteorder(data) - assert_(not data.dtype.isnative) - return data - - def _generate_int_data(self, n, m): - return (10 * rand(n, m)).astype(int64) - - def _generate_int32_data(self, n, m): - return (10 * rand(n, m)).astype(int32) - - # Now the real test cases - def test_simple_double(self): - #Test native double input with scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = 0.1 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_int(self): - #Test native int input with scalar min/max. - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(int) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_array_double(self): - #Test native double input with array min/max. - a = self._generate_data(self.nr, self.nc) - m = zeros(a.shape) - M = m + 0.5 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_simple_nonnative(self): - #Test non native double input with scalar min/max. - #Test native double input with non native double scalar min/max. - a = self._generate_non_native_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - #Test native double input with non native double scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = self._neg_byteorder(0.6) - assert_(not M.dtype.isnative) - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_equal(ac, act) - - def test_simple_complex(self): - #Test native complex input with native double scalar min/max. - #Test native input with complex double scalar min/max. - a = 3 * self._generate_data_complex(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - #Test native input with complex double scalar min/max. - a = 3 * self._generate_data(self.nr, self.nc) - m = -0.5 + 1.j - M = 1. + 2.j - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_clip_non_contig(self): - #Test clip for non contiguous native input and native scalar min/max. 
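The slow-path clip above encodes each element's case as 0 (in range), 1 (below m) or 2 (above M) and lets choose pick from (a, m, M); in isolation (an illustrative sketch, not a line of the deleted file):

import numpy as np

a = np.array([-1.0, 0.3, 2.0])
m, M = 0.0, 1.0

# selector: 0 keeps a, 1 substitutes m, 2 substitutes M
selector = np.less(a, m) + 2 * np.greater(a, M)      # [1, 0, 2]
assert np.array_equal(selector.choose((a, m, M)),    # [0.0, 0.3, 1.0]
                      a.clip(m, M))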
- a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert_(not a.flags['F_CONTIGUOUS']) - assert_(not a.flags['C_CONTIGUOUS']) - ac = self.fastclip(a, -1.6, 1.7) - act = self.clip(a, -1.6, 1.7) - assert_array_strict_equal(ac, act) - - def test_simple_out(self): - #Test native double input with scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = zeros(a.shape) - act = zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int32_inout(self): - #Test native int32 input with double min/max and int32 out. - a = self._generate_int32_data(self.nr, self.nc) - m = float64(0) - M = float64(2) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_out(self): - #Test native int32 input with int32 scalar min/max and int64 out. - a = self._generate_int32_data(self.nr, self.nc) - m = int32(-1) - M = int32(1) - ac = zeros(a.shape, dtype = int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int64_inout(self): - #Test native int32 input with double array min/max and int32 out. - a = self._generate_int32_data(self.nr, self.nc) - m = zeros(a.shape, float64) - M = float64(1) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_int32_out(self): - #Test native double input with scalar min/max and int out. - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_simple_inplace_01(self): - #Test native double input with array min/max in-place. - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_simple_inplace_02(self): - #Test native double input with scalar min/max in-place. - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_noncontig_inplace(self): - #Test non contiguous double input with double scalar min/max in-place. - a = self._generate_data(self.nr * 2, self.nc * 3) - a = a[::2, ::3] - assert_(not a.flags['F_CONTIGUOUS']) - assert_(not a.flags['C_CONTIGUOUS']) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_equal(a, ac) - - def test_type_cast_01(self): - #Test native double input with scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_02(self): - #Test native int32 input with int32 scalar min/max. - a = self._generate_int_data(self.nr, self.nc) - a = a.astype(int32) - m = -2 - M = 4 - ac = self.fastclip(a, m, M) - act = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_03(self): - #Test native int32 input with float64 scalar min/max. 
- a = self._generate_int32_data(self.nr, self.nc) - m = -2 - M = 4 - ac = self.fastclip(a, float64(m), float64(M)) - act = self.clip(a, float64(m), float64(M)) - assert_array_strict_equal(ac, act) - - def test_type_cast_04(self): - #Test native int32 input with float32 scalar min/max. - a = self._generate_int32_data(self.nr, self.nc) - m = float32(-2) - M = float32(4) - act = self.fastclip(a, m, M) - ac = self.clip(a, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_05(self): - #Test native int32 with double arrays min/max. - a = self._generate_int_data(self.nr, self.nc) - m = -0.5 - M = 1. - ac = self.fastclip(a, m * zeros(a.shape), M) - act = self.clip(a, m * zeros(a.shape), M) - assert_array_strict_equal(ac, act) - - def test_type_cast_06(self): - #Test native with NON native scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = 0.5 - m_s = self._neg_byteorder(m) - M = 1. - act = self.clip(a, m_s, M) - ac = self.fastclip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_07(self): - #Test NON native with native array min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 * ones(a.shape) - M = 1. - a_s = self._neg_byteorder(a) - assert_(not a_s.dtype.isnative) - act = a_s.clip(m, M) - ac = self.fastclip(a_s, m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_08(self): - #Test NON native with native scalar min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 1. - a_s = self._neg_byteorder(a) - assert_(not a_s.dtype.isnative) - ac = self.fastclip(a_s, m, M) - act = a_s.clip(m, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_09(self): - #Test native with NON native array min/max. - a = self._generate_data(self.nr, self.nc) - m = -0.5 * ones(a.shape) - M = 1. - m_s = self._neg_byteorder(m) - assert_(not m_s.dtype.isnative) - ac = self.fastclip(a, m_s, M) - act = self.clip(a, m_s, M) - assert_array_strict_equal(ac, act) - - def test_type_cast_10(self): - #Test native int32 with float min/max and float out for output argument. - a = self._generate_int_data(self.nr, self.nc) - b = zeros(a.shape, dtype = float32) - m = float32(-0.5) - M = float32(1) - act = self.clip(a, m, M, out = b) - ac = self.fastclip(a, m, M, out = b) - assert_array_strict_equal(ac, act) - - def test_type_cast_11(self): - #Test non native with native scalar, min/max, out non native - a = self._generate_non_native_data(self.nr, self.nc) - b = a.copy() - b = b.astype(b.dtype.newbyteorder('>')) - bt = b.copy() - m = -0.5 - M = 1. 
- self.fastclip(a, m, M, out = b) - self.clip(a, m, M, out = bt) - assert_array_strict_equal(b, bt) - - def test_type_cast_12(self): - #Test native int32 input and min/max and float out - a = self._generate_int_data(self.nr, self.nc) - b = zeros(a.shape, dtype = float32) - m = int32(0) - M = int32(1) - act = self.clip(a, m, M, out = b) - ac = self.fastclip(a, m, M, out = b) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple(self): - #Test native double input with scalar min/max - a = self._generate_data(self.nr, self.nc) - m = -0.5 - M = 0.6 - ac = zeros(a.shape) - act = zeros(a.shape) - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple2(self): - #Test native int32 input with double min/max and int32 out - a = self._generate_int32_data(self.nr, self.nc) - m = float64(0) - M = float64(2) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_simple_int32(self): - #Test native int32 input with int32 scalar min/max and int64 out - a = self._generate_int32_data(self.nr, self.nc) - m = int32(-1) - M = int32(1) - ac = zeros(a.shape, dtype = int64) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_array_int32(self): - #Test native int32 input with double array min/max and int32 out - a = self._generate_int32_data(self.nr, self.nc) - m = zeros(a.shape, float64) - M = float64(1) - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_with_out_array_outint32(self): - #Test native double input with scalar min/max and int out - a = self._generate_data(self.nr, self.nc) - m = -1.0 - M = 2.0 - ac = zeros(a.shape, dtype = int32) - act = ac.copy() - self.fastclip(a, m, M, ac) - self.clip(a, m, M, act) - assert_array_strict_equal(ac, act) - - def test_clip_inplace_array(self): - #Test native double input with array min/max - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = zeros(a.shape) - M = 1.0 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_clip_inplace_simple(self): - #Test native double input with scalar min/max - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - self.fastclip(a, m, M, a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a, ac) - - def test_clip_func_takes_out(self): - # Ensure that the clip() function takes an out= argument. - a = self._generate_data(self.nr, self.nc) - ac = a.copy() - m = -0.5 - M = 0.6 - a2 = clip(a, m, M, out=a) - self.clip(a, m, M, ac) - assert_array_strict_equal(a2, ac) - self.assertTrue(a2 is a) - - -class TestAllclose(object): - rtol = 1e-5 - atol = 1e-8 - - def setUp(self): - self.olderr = np.seterr(invalid='ignore') - - def tearDown(self): - np.seterr(**self.olderr) - - def tst_allclose(self, x, y): - assert_(allclose(x, y), "%s and %s not close" % (x, y)) - - def tst_not_allclose(self, x, y): - assert_(not allclose(x, y), "%s and %s shouldn't be close" % (x, y)) - - def test_ip_allclose(self): - #Parametric test factory. 
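The parametric data below probe numpy's closeness criterion, elementwise |x - y| <= atol + rtol * |y| (note it is not symmetric in x and y). The boundary pairs in the data behave as follows (an illustrative sketch, not a line of the deleted file):

import numpy as np

rtol, atol = 1e-5, 1e-8

# [1] vs [1 + rtol + atol] still satisfies |x - y| <= atol + rtol*|y| ...
assert np.allclose([1.0], [1.0 + rtol + atol], rtol=rtol, atol=atol)
# ... while doubling the atol term pushes it just outside the bound.
assert not np.allclose([1.0], [1.0 + rtol + 2*atol], rtol=rtol, atol=atol)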
- arr = array([100, 1000]) - aran = arange(125).reshape((5, 5, 5)) - - atol = self.atol - rtol = self.rtol - - data = [([1, 0], [1, 0]), - ([atol], [0]), - ([1], [1+rtol+atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol*2), - (aran, aran + aran*rtol), - (inf, inf), - (inf, [inf])] - - for (x, y) in data: - yield (self.tst_allclose, x, y) - - def test_ip_not_allclose(self): - #Parametric test factory. - aran = arange(125).reshape((5, 5, 5)) - - atol = self.atol - rtol = self.rtol - - data = [([inf, 0], [1, inf]), - ([inf, 0], [1, 0]), - ([inf, inf], [1, inf]), - ([inf, inf], [1, 0]), - ([-inf, 0], [inf, 0]), - ([nan, 0], [nan, 0]), - ([atol*2], [0]), - ([1], [1+rtol+atol*2]), - (aran, aran + aran*atol + atol*2), - (array([inf, 1]), array([0, inf]))] - - for (x, y) in data: - yield (self.tst_not_allclose, x, y) - - def test_no_parameter_modification(self): - x = array([inf, 1]) - y = array([0, inf]) - allclose(x, y) - assert_array_equal(x, array([inf, 1])) - assert_array_equal(y, array([0, inf])) - - - def test_min_int(self): - # Could make problems because of abs(min_int) == min_int - min_int = np.iinfo(np.int_).min - a = np.array([min_int], dtype=np.int_) - assert_(allclose(a, a)) - - -class TestIsclose(object): - rtol = 1e-5 - atol = 1e-8 - - def setup(self): - atol = self.atol - rtol = self.rtol - arr = array([100, 1000]) - aran = arange(125).reshape((5, 5, 5)) - - self.all_close_tests = [ - ([1, 0], [1, 0]), - ([atol], [0]), - ([1], [1 + rtol + atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol), - (aran, aran + aran*rtol), - (inf, inf), - (inf, [inf]), - ([inf, -inf], [inf, -inf]), - ] - self.none_close_tests = [ - ([inf, 0], [1, inf]), - ([inf, -inf], [1, 0]), - ([inf, inf], [1, -inf]), - ([inf, inf], [1, 0]), - ([nan, 0], [nan, -inf]), - ([atol*2], [0]), - ([1], [1 + rtol + atol*2]), - (aran, aran + rtol*1.1*aran + atol*1.1), - (array([inf, 1]), array([0, inf])), - ] - self.some_close_tests = [ - ([inf, 0], [inf, atol*2]), - ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, nan, 1e6]), - (arange(3), [0, 1, 2.1]), - (nan, [nan, nan, nan]), - ([0], [atol, inf, -inf, nan]), - (0, [atol, inf, -inf, nan]), - ] - self.some_close_results = [ - [True, False], - [True, False, False], - [True, True, False], - [False, False, False], - [True, False, False, False], - [True, False, False, False], - ] - - def test_ip_isclose(self): - self.setup() - tests = self.some_close_tests - results = self.some_close_results - for (x, y), result in zip(tests, results): - yield (assert_array_equal, isclose(x, y), result) - - def tst_all_isclose(self, x, y): - assert_(all(isclose(x, y)), "%s and %s not close" % (x, y)) - - def tst_none_isclose(self, x, y): - msg = "%s and %s shouldn't be close" - assert_(not any(isclose(x, y)), msg % (x, y)) - - def tst_isclose_allclose(self, x, y): - msg = "isclose.all() and allclose aren't same for %s and %s" - assert_array_equal(isclose(x, y).all(), allclose(x, y), msg % (x, y)) - - def test_ip_all_isclose(self): - self.setup() - for (x, y) in self.all_close_tests: - yield (self.tst_all_isclose, x, y) - - def test_ip_none_isclose(self): - self.setup() - for (x, y) in self.none_close_tests: - yield (self.tst_none_isclose, x, y) - - def test_ip_isclose_allclose(self): - self.setup() - tests = (self.all_close_tests + self.none_close_tests + - self.some_close_tests) - for (x, y) in tests: - yield (self.tst_isclose_allclose, x, y) - - def test_equal_nan(self): - assert_array_equal(isclose(nan, nan, equal_nan=True), [True]) - arr = array([1.0, nan]) - 
assert_array_equal(isclose(arr, arr, equal_nan=True), [True, True])
-
-    def test_masked_arrays(self):
-        x = np.ma.masked_where([True, True, False], np.arange(3))
-        assert_(type(x) is type(isclose(2, x)))
-
-        x = np.ma.masked_where([True, True, False], [nan, inf, nan])
-        assert_(type(x) is type(isclose(inf, x)))
-
-        x = np.ma.masked_where([True, True, False], [nan, nan, nan])
-        y = isclose(nan, x, equal_nan=True)
-        assert_(type(x) is type(y))
-        # Ensure that the mask isn't modified...
-        assert_array_equal([True, True, False], y.mask)
-
-        x = np.ma.masked_where([True, True, False], [nan, nan, nan])
-        y = isclose(x, x, equal_nan=True)
-        assert_(type(x) is type(y))
-        # Ensure that the mask isn't modified...
-        assert_array_equal([True, True, False], y.mask)
-
-    def test_scalar_return(self):
-        assert_(isscalar(isclose(1, 1)))
-
-    def test_no_parameter_modification(self):
-        x = array([inf, 1])
-        y = array([0, inf])
-        isclose(x, y)
-        assert_array_equal(x, array([inf, 1]))
-        assert_array_equal(y, array([0, inf]))
-
-class TestStdVar(TestCase):
-    def setUp(self):
-        self.A = array([1, -1, 1, -1])
-        self.real_var = 1
-
-    def test_basic(self):
-        assert_almost_equal(var(self.A), self.real_var)
-        assert_almost_equal(std(self.A)**2, self.real_var)
-
-    def test_scalars(self):
-        assert_equal(var(1), 0)
-        assert_equal(std(1), 0)
-
-    def test_ddof1(self):
-        assert_almost_equal(var(self.A, ddof=1),
-                            self.real_var*len(self.A)/float(len(self.A)-1))
-        assert_almost_equal(std(self.A, ddof=1)**2,
-                            self.real_var*len(self.A)/float(len(self.A)-1))
-
-    def test_ddof2(self):
-        assert_almost_equal(var(self.A, ddof=2),
-                            self.real_var*len(self.A)/float(len(self.A)-2))
-        assert_almost_equal(std(self.A, ddof=2)**2,
-                            self.real_var*len(self.A)/float(len(self.A)-2))
-
-class TestStdVarComplex(TestCase):
-    def test_basic(self):
-        A = array([1, 1.j, -1, -1.j])
-        real_var = 1
-        assert_almost_equal(var(A), real_var)
-        assert_almost_equal(std(A)**2, real_var)
-
-    def test_scalars(self):
-        assert_equal(var(1j), 0)
-        assert_equal(std(1j), 0)
-
-
-class TestCreationFuncs(TestCase):
-    #Test ones, zeros, empty and filled
-
-    def setUp(self):
-        self.dtypes = ('b', 'i', 'u', 'f', 'c', 'S', 'a', 'U', 'V')
-        self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
-        self.ndims = 10
-
-    def check_function(self, func, fill_value=None):
-        par = (
-            (0, 1, 2),
-            range(self.ndims),
-            self.orders,
-            self.dtypes,
-            2**np.arange(9)
-        )
-        fill_kwarg = {}
-        if fill_value is not None:
-            fill_kwarg = {'fill_value': fill_value}
-        with warnings.catch_warnings():
-            warnings.simplefilter('ignore', DeprecationWarning)
-            for size, ndims, order, type, bytes in itertools.product(*par):
-                shape = ndims * [size]
-                try:
-                    dtype = np.dtype('{0}{1}'.format(type, bytes))
-                except TypeError: # dtype combination does not exist
-                    continue
-                else:
-                    # do not fill void type
-                    if fill_value is not None and type in 'V':
-                        continue
-
-                    arr = func(shape, order=order, dtype=dtype,
-                               **fill_kwarg)
-
-                    assert_(arr.dtype == dtype)
-                    assert_(getattr(arr.flags, self.orders[order]))
-
-                    if fill_value is not None:
-                        if dtype.str.startswith('|S'):
-                            val = str(fill_value)
-                        else:
-                            val = fill_value
-                        assert_equal(arr, dtype.type(val))
-
-    def test_zeros(self):
-        self.check_function(np.zeros)
-
-    def test_ones(self):
-        self.check_function(np.ones)
-
-    def test_empty(self):
-        self.check_function(np.empty)
-
-    def test_filled(self):
-        self.check_function(np.full, 0)
-        self.check_function(np.full, 1)
-
-    def test_for_reference_leak(self):
-        # Make sure we have an object for reference
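The equal_nan branch exercised above only changes how nan compares to nan; a sketch of the two behaviors, assuming current NumPy:

import numpy as np

a = np.array([1.0, np.nan])
print(np.isclose(a, a))                   # [ True False]  (nan != nan by default)
print(np.isclose(a, a, equal_nan=True))   # [ True  True]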
- dim = 1 - beg = sys.getrefcount(dim) - np.zeros([dim]*10) - assert_(sys.getrefcount(dim) == beg) - np.ones([dim]*10) - assert_(sys.getrefcount(dim) == beg) - np.empty([dim]*10) - assert_(sys.getrefcount(dim) == beg) - np.full([dim]*10, 0) - assert_(sys.getrefcount(dim) == beg) - - - -class TestLikeFuncs(TestCase): - '''Test ones_like, zeros_like, empty_like and full_like''' - - def setUp(self): - self.data = [ - # Array scalars - (array(3.), None), - (array(3), 'f8'), - # 1D arrays - (arange(6, dtype='f4'), None), - (arange(6), 'c16'), - # 2D C-layout arrays - (arange(6).reshape(2, 3), None), - (arange(6).reshape(3, 2), 'i1'), - # 2D F-layout arrays - (arange(6).reshape((2, 3), order='F'), None), - (arange(6).reshape((3, 2), order='F'), 'i1'), - # 3D C-layout arrays - (arange(24).reshape(2, 3, 4), None), - (arange(24).reshape(4, 3, 2), 'f4'), - # 3D F-layout arrays - (arange(24).reshape((2, 3, 4), order='F'), None), - (arange(24).reshape((4, 3, 2), order='F'), 'f4'), - # 3D non-C/F-layout arrays - (arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), - (arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), - ] - - def compare_array_value(self, dz, value, fill_value): - if value is not None: - if fill_value: - try: - z = dz.dtype.type(value) - except OverflowError: - pass - else: - assert_(all(dz == z)) - else: - assert_(all(dz == value)) - - def check_like_function(self, like_function, value, fill_value=False): - if fill_value: - fill_kwarg = {'fill_value': value} - else: - fill_kwarg = {} - for d, dtype in self.data: - # default (K) order, dtype - dz = like_function(d, dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - assert_equal(array(dz.strides)*d.dtype.itemsize, - array(d.strides)*dz.dtype.itemsize) - assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) - assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # C order, default dtype - dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - assert_(dz.flags.c_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # F order, default dtype - dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - assert_(dz.flags.f_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # A order - dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) - assert_equal(dz.shape, d.shape) - if d.flags.f_contiguous: - assert_(dz.flags.f_contiguous) - else: - assert_(dz.flags.c_contiguous) - if dtype is None: - assert_equal(dz.dtype, d.dtype) - else: - assert_equal(dz.dtype, np.dtype(dtype)) - self.compare_array_value(dz, value, fill_value) - - # Test the 'subok' parameter - a = np.matrix([[1, 2], [3, 4]]) - - b = like_function(a, **fill_kwarg) - assert_(type(b) is np.matrix) - - b = like_function(a, subok=False, **fill_kwarg) - assert_(type(b) is not np.matrix) - - def test_ones_like(self): - self.check_like_function(np.ones_like, 1) - - def test_zeros_like(self): - self.check_like_function(np.zeros_like, 0) - - def test_empty_like(self): - self.check_like_function(np.empty_like, None) - - def test_filled_like(self): - self.check_like_function(np.full_like, 0, True) - 
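check_like_function above verifies that the *_like constructors reproduce the prototype's memory layout by default (order='K') and honor an explicit order. A minimal sketch of that default, assuming current NumPy:

import numpy as np

a = np.asfortranarray(np.arange(6.0).reshape(2, 3))
z = np.zeros_like(a)                      # order='K' mirrors the source layout
assert z.flags.f_contiguous
assert np.zeros_like(a, order='C').flags.c_contiguous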
self.check_like_function(np.full_like, 1, True) - self.check_like_function(np.full_like, 1000, True) - self.check_like_function(np.full_like, 123.456, True) - self.check_like_function(np.full_like, np.inf, True) - -class _TestCorrelate(TestCase): - def _setup(self, dt): - self.x = np.array([1, 2, 3, 4, 5], dtype=dt) - self.y = np.array([-1, -2, -3], dtype=dt) - self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt) - self.z2 = np.array([ -5., -14., -26., -20., -14., -8., -3.], dtype=dt) - - def test_float(self): - self._setup(np.float) - z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, self.z2) - - def test_object(self): - self._setup(Decimal) - z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, self.z1) - z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, self.z2) - -class TestCorrelate(_TestCorrelate): - old_behavior = True - def _setup(self, dt): - # correlate uses an unconventional definition so that correlate(a, b) - # == correlate(b, a), so force the corresponding outputs to be the same - # as well - _TestCorrelate._setup(self, dt) - self.z2 = self.z1 - - @dec.deprecated() - def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=np.complex) - y = np.array([-1, -2j, 3+1j], dtype=np.complex) - r_z = np.array([3+1j, 6, 8-1j, 9+1j, -1-8j, -4-1j], dtype=np.complex) - z = np.correlate(x, y, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, r_z) - - @dec.deprecated() - def test_float(self): - _TestCorrelate.test_float(self) - - @dec.deprecated() - def test_object(self): - _TestCorrelate.test_object(self) - -class TestCorrelateNew(_TestCorrelate): - old_behavior = False - def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=np.complex) - y = np.array([-1, -2j, 3+1j], dtype=np.complex) - r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex) - #z = np.acorrelate(x, y, 'full') - #assert_array_almost_equal(z, r_z) - - r_z = r_z[::-1].conjugate() - z = np.correlate(y, x, 'full', old_behavior=self.old_behavior) - assert_array_almost_equal(z, r_z) - -class TestArgwhere(object): - def test_2D(self): - x = np.arange(6).reshape((2, 3)) - assert_array_equal(np.argwhere(x > 1), - [[0, 2], - [1, 0], - [1, 1], - [1, 2]]) - - def test_list(self): - assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) - -class TestStringFunction(object): - def test_set_string_function(self): - a = np.array([1]) - np.set_string_function(lambda x: "FOO", repr=True) - assert_equal(repr(a), "FOO") - np.set_string_function(None, repr=True) - assert_equal(repr(a), "array([1])") - - np.set_string_function(lambda x: "FOO", repr=False) - assert_equal(str(a), "FOO") - np.set_string_function(None, repr=False) - assert_equal(str(a), "[1]") - -class TestRoll(TestCase): - def test_roll1d(self): - x = np.arange(10) - xr = np.roll(x, 2) - assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) - - def test_roll2d(self): - x2 = np.reshape(np.arange(10), (2, 5)) - x2r = np.roll(x2, 1) - assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) - - x2r = np.roll(x2, 1, axis=0) - assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) - - x2r = np.roll(x2, 1, axis=1) - assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) - - def test_roll_empty(self): - x = 
np.array([]) - assert_equal(np.roll(x, 1), np.array([])) - -class TestCross(TestCase): - def test_2x2(self): - u = [1, 2] - v = [3, 4] - z = -2 - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) - - def test_2x3(self): - u = [1, 2] - v = [3, 4, 5] - z = np.array([10, -5, -2]) - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) - - def test_3x3(self): - u = [1, 2, 3] - v = [4, 5, 6] - z = np.array([-3, 6, -3]) - cp = cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) - - def test_broadcasting(self): - # Ticket #2624 (Trac #2032) - u = np.tile([1, 2], (11, 1)) - v = np.tile([3, 4], (11, 1)) - z = -2 - assert_equal(np.cross(u, v), z) - assert_equal(np.cross(v, u), -z) - assert_equal(np.cross(u, u), 0) - - u = np.tile([1, 2], (11, 1)).T - v = np.tile([3, 4, 5], (11, 1)) - z = np.tile([10, -5, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0), z) - assert_equal(np.cross(v, u.T), -z) - assert_equal(np.cross(v, v), 0) - - u = np.tile([1, 2, 3], (11, 1)).T - v = np.tile([3, 4], (11, 1)).T - z = np.tile([-12, 9, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0, axisb=0), z) - assert_equal(np.cross(v.T, u.T), -z) - assert_equal(np.cross(u.T, u.T), 0) - - u = np.tile([1, 2, 3], (5, 1)) - v = np.tile([4, 5, 6], (5, 1)).T - z = np.tile([-3, 6, -3], (5, 1)) - assert_equal(np.cross(u, v, axisb=0), z) - assert_equal(np.cross(v.T, u), -z) - assert_equal(np.cross(u, u), 0) - - def test_broadcasting_shapes(self): - u = np.ones((2, 1, 3)) - v = np.ones((5, 3)) - assert_equal(np.cross(u, v).shape, (2, 5, 3)) - u = np.ones((10, 3, 5)) - v = np.ones((2, 5)) - assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) - assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=2) - assert_raises(ValueError, np.cross, u, v, axisa=3, axisb=0) - u = np.ones((10, 3, 5, 7)) - v = np.ones((5, 7, 2)) - assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) - assert_raises(ValueError, np.cross, u, v, axisa=-5, axisb=2) - assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=-4) - -def test_outer_out_param(): - arr1 = np.ones((5,)) - arr2 = np.ones((2,)) - arr3 = np.linspace(-2, 2, 5) - out1 = np.ndarray(shape=(5,5)) - out2 = np.ndarray(shape=(2, 5)) - res1 = np.outer(arr1, arr3, out1) - assert_equal(res1, out1) - assert_equal(np.outer(arr2, arr3, out2), out2) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numerictypes.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numerictypes.py deleted file mode 100644 index ef8db0f334781..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_numerictypes.py +++ /dev/null @@ -1,377 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import warnings -from numpy.testing import * -from numpy.compat import asbytes, asunicode -import numpy as np - -# This is the structure of the table used for plain objects: -# -# +-+-+-+ -# |x|y|z| -# +-+-+-+ - -# Structure of a plain array description: -Pdescr = [ - ('x', 'i4', (2,)), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -# A plain list of tuples with values for testing: -PbufferT = [ - # x y z - ([3, 2], [[6., 4.], [6., 4.]], 8), - ([4, 3], [[7., 5.], [7., 5.]], 9), - ] - - -# This is the structure of the table used for nested objects (DON'T PANIC!): -# -# +-+---------------------------------+-----+----------+-+-+ -# |x|Info |color|info |y|z| -# | 
+-----+--+----------------+----+--+ +----+-----+ | | -# | |value|y2|Info2 |name|z2| |Name|Value| | | -# | | | +----+-----+--+--+ | | | | | | | -# | | | |name|value|y3|z3| | | | | | | | -# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ -# - -# The corresponding nested array description: -Ndescr = [ - ('x', 'i4', (2,)), - ('Info', [ - ('value', 'c16'), - ('y2', 'f8'), - ('Info2', [ - ('name', 'S2'), - ('value', 'c16', (2,)), - ('y3', 'f8', (2,)), - ('z3', 'u4', (2,))]), - ('name', 'S2'), - ('z2', 'b1')]), - ('color', 'S2'), - ('info', [ - ('Name', 'U8'), - ('Value', 'c16')]), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 - ([3, 2], (6j, 6., (asbytes('nn'), [6j, 4j], [6., 4.], [1, 2]), asbytes('NN'), True), asbytes('cc'), (asunicode('NN'), 6j), [[6., 4.], [6., 4.]], 8), - ([4, 3], (7j, 7., (asbytes('oo'), [7j, 5j], [7., 5.], [2, 1]), asbytes('OO'), False), asbytes('dd'), (asunicode('OO'), 7j), [[7., 5.], [7., 5.]], 9), - ] - - -byteorder = {'little':'<', 'big':'>'}[sys.byteorder] - -def normalize_descr(descr): - "Normalize a description adding the platform byteorder." - - out = [] - for item in descr: - dtype = item[1] - if isinstance(dtype, str): - if dtype[0] not in ['|', '<', '>']: - onebyte = dtype[1:] == "1" - if onebyte or dtype[0] in ['S', 'V', 'b']: - dtype = "|" + dtype - else: - dtype = byteorder + dtype - if len(item) > 2 and np.prod(item[2]) > 1: - nitem = (item[0], dtype, item[2]) - else: - nitem = (item[0], dtype) - out.append(nitem) - elif isinstance(item[1], list): - l = [] - for j in normalize_descr(item[1]): - l.append(j) - out.append((item[0], l)) - else: - raise ValueError("Expected a str or list and got %s" % \ - (type(item))) - return out - - -############################################################ -# Creation tests -############################################################ - -class create_zeros(object): - """Check the creation of heterogeneous arrays zero-valued""" - - def test_zeros0D(self): - """Check creation of 0-dimensional objects""" - h = np.zeros((), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype.fields['x'][0].name[:4] == 'void') - self.assertTrue(h.dtype.fields['x'][0].char == 'V') - self.assertTrue(h.dtype.fields['x'][0].type == np.void) - # A small check that data is ok - assert_equal(h['z'], np.zeros((), dtype='u1')) - - def test_zerosSD(self): - """Check creation of single-dimensional objects""" - h = np.zeros((2,), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype['y'].name[:4] == 'void') - self.assertTrue(h.dtype['y'].char == 'V') - self.assertTrue(h.dtype['y'].type == np.void) - # A small check that data is ok - assert_equal(h['z'], np.zeros((2,), dtype='u1')) - - def test_zerosMD(self): - """Check creation of multi-dimensional objects""" - h = np.zeros((2, 3), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype['z'].name == 'uint8') - self.assertTrue(h.dtype['z'].char == 'B') - self.assertTrue(h.dtype['z'].type == np.uint8) - # A small check that data is ok - assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) - - -class test_create_zeros_plain(create_zeros, TestCase): - """Check the creation of heterogeneous arrays zero-valued (plain)""" - _descr = Pdescr - -class test_create_zeros_nested(create_zeros, TestCase): - """Check the creation of heterogeneous arrays 
zero-valued (nested)""" - _descr = Ndescr - - -class create_values(object): - """Check the creation of heterogeneous arrays with values""" - - def test_tuple(self): - """Check creation from tuples""" - h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assertTrue(h.shape == (2,)) - else: - self.assertTrue(h.shape == ()) - - def test_list_of_tuple(self): - """Check creation from list of tuples""" - h = np.array([self._buffer], dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assertTrue(h.shape == (1, 2)) - else: - self.assertTrue(h.shape == (1,)) - - def test_list_of_list_of_tuple(self): - """Check creation from list of list of tuples""" - h = np.array([[self._buffer]], dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - if self.multiple_rows: - self.assertTrue(h.shape == (1, 1, 2)) - else: - self.assertTrue(h.shape == (1, 1)) - - -class test_create_values_plain_single(create_values, TestCase): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class test_create_values_plain_multiple(create_values, TestCase): - """Check the creation of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class test_create_values_nested_single(create_values, TestCase): - """Check the creation of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = 0 - _buffer = NbufferT[0] - -class test_create_values_nested_multiple(create_values, TestCase): - """Check the creation of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = 1 - _buffer = NbufferT - - -############################################################ -# Reading tests -############################################################ - -class read_values_plain(object): - """Check the reading of values in heterogeneous arrays (plain)""" - - def test_access_fields(self): - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - self.assertTrue(h.shape == ()) - assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) - assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) - else: - self.assertTrue(len(h) == 2) - assert_equal(h['x'], np.array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], np.array([self._buffer[0][1], - self._buffer[1][1]], dtype='f8')) - assert_equal(h['z'], np.array([self._buffer[0][2], - self._buffer[1][2]], dtype='u1')) - - -class test_read_values_plain_single(read_values_plain, TestCase): - """Check the creation of heterogeneous arrays (plain, single row)""" - _descr = Pdescr - multiple_rows = 0 - _buffer = PbufferT[0] - -class test_read_values_plain_multiple(read_values_plain, TestCase): - """Check the values of heterogeneous arrays (plain, multiple rows)""" - _descr = Pdescr - multiple_rows = 1 - _buffer = PbufferT - -class read_values_nested(object): - """Check the reading of values in heterogeneous arrays (nested)""" - - - def test_access_top_fields(self): - """Check reading the top fields of a nested array""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - self.assertTrue(h.shape == ()) - assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) - assert_equal(h['y'], np.array(self._buffer[4], 
dtype='f8')) - assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) - else: - self.assertTrue(len(h) == 2) - assert_equal(h['x'], np.array([self._buffer[0][0], - self._buffer[1][0]], dtype='i4')) - assert_equal(h['y'], np.array([self._buffer[0][4], - self._buffer[1][4]], dtype='f8')) - assert_equal(h['z'], np.array([self._buffer[0][5], - self._buffer[1][5]], dtype='u1')) - - - def test_nested1_acessors(self): - """Check reading the nested fields of a nested array (1st level)""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['value'], - np.array(self._buffer[1][0], dtype='c16')) - assert_equal(h['Info']['y2'], - np.array(self._buffer[1][1], dtype='f8')) - assert_equal(h['info']['Name'], - np.array(self._buffer[3][0], dtype='U2')) - assert_equal(h['info']['Value'], - np.array(self._buffer[3][1], dtype='c16')) - else: - assert_equal(h['Info']['value'], - np.array([self._buffer[0][1][0], - self._buffer[1][1][0]], - dtype='c16')) - assert_equal(h['Info']['y2'], - np.array([self._buffer[0][1][1], - self._buffer[1][1][1]], - dtype='f8')) - assert_equal(h['info']['Name'], - np.array([self._buffer[0][3][0], - self._buffer[1][3][0]], - dtype='U2')) - assert_equal(h['info']['Value'], - np.array([self._buffer[0][3][1], - self._buffer[1][3][1]], - dtype='c16')) - - def test_nested2_acessors(self): - """Check reading the nested fields of a nested array (2nd level)""" - h = np.array(self._buffer, dtype=self._descr) - if not self.multiple_rows: - assert_equal(h['Info']['Info2']['value'], - np.array(self._buffer[1][2][1], dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - np.array(self._buffer[1][2][3], dtype='u4')) - else: - assert_equal(h['Info']['Info2']['value'], - np.array([self._buffer[0][1][2][1], - self._buffer[1][1][2][1]], - dtype='c16')) - assert_equal(h['Info']['Info2']['z3'], - np.array([self._buffer[0][1][2][3], - self._buffer[1][1][2][3]], - dtype='u4')) - - def test_nested1_descriptor(self): - """Check access nested descriptors of a nested array (1st level)""" - h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(h.dtype['Info']['value'].name == 'complex128') - self.assertTrue(h.dtype['Info']['y2'].name == 'float64') - if sys.version_info[0] >= 3: - self.assertTrue(h.dtype['info']['Name'].name == 'str256') - else: - self.assertTrue(h.dtype['info']['Name'].name == 'unicode256') - self.assertTrue(h.dtype['info']['Value'].name == 'complex128') - - def test_nested2_descriptor(self): - """Check access nested descriptors of a nested array (2nd level)""" - h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(h.dtype['Info']['Info2']['value'].name == 'void256') - self.assertTrue(h.dtype['Info']['Info2']['z3'].name == 'void64') - - -class test_read_values_nested_single(read_values_nested, TestCase): - """Check the values of heterogeneous arrays (nested, single row)""" - _descr = Ndescr - multiple_rows = False - _buffer = NbufferT[0] - -class test_read_values_nested_multiple(read_values_nested, TestCase): - """Check the values of heterogeneous arrays (nested, multiple rows)""" - _descr = Ndescr - multiple_rows = True - _buffer = NbufferT - -class TestEmptyField(TestCase): - def test_assign(self): - a = np.arange(10, dtype=np.float32) - a.dtype = [("int", "<0i4"), ("float", "<2f4")] - assert_(a['int'].shape == (5, 0)) - assert_(a['float'].shape == (5, 2)) - -class TestCommonType(TestCase): - def test_scalar_loses1(self): - res = np.find_common_type(['f4', 'f4', 'i2'], ['f8']) - assert_(res == 'f4') - def 
test_scalar_loses2(self): - res = np.find_common_type(['f4', 'f4'], ['i8']) - assert_(res == 'f4') - def test_scalar_wins(self): - res = np.find_common_type(['f4', 'f4', 'i2'], ['c8']) - assert_(res == 'c8') - def test_scalar_wins2(self): - res = np.find_common_type(['u4', 'i4', 'i4'], ['f4']) - assert_(res == 'f8') - def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose - res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) - assert_(res == 'f8') - -class TestMultipleFields(TestCase): - def setUp(self): - self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') - def _bad_call(self): - with warnings.catch_warnings(record=True): - warnings.filterwarnings('always', '', DeprecationWarning) - return self.ary['f0', 'f1'] - def test_no_tuple(self): - self.assertRaises(IndexError, self._bad_call) - def test_return(self): - res = self.ary[['f0', 'f2']].tolist() - assert_(res == [(1, 3), (5, 7)]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_print.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_print.py deleted file mode 100644 index 487b5de7d15bd..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_print.py +++ /dev/null @@ -1,245 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import * -import nose - -import locale -import sys - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} - - -def check_float_type(tp): - for x in [0, 1, -1, 1e20] : - assert_equal(str(tp(x)), str(float(x)), - err_msg='Failed str formatting for type %s' % tp) - - if tp(1e10).itemsize > 4: - assert_equal(str(tp(1e10)), str(float('1e10')), - err_msg='Failed str formatting for type %s' % tp) - else: - ref = '1e+10' - assert_equal(str(tp(1e10)), ref, - err_msg='Failed str formatting for type %s' % tp) - -def test_float_types(): - """ Check formatting. - - This is only for the str function, and only for simple types. - The precision of np.float and np.longdouble aren't the same as the - python float precision. - - """ - for t in [np.float32, np.double, np.longdouble] : - yield check_float_type, t - -def check_nan_inf_float(tp): - for x in [np.inf, -np.inf, np.nan]: - assert_equal(str(tp(x)), _REF[x], - err_msg='Failed str formatting for type %s' % tp) - -def test_nan_inf_float(): - """ Check formatting of nan & inf. - - This is only for the str function, and only for simple types. - The precision of np.float and np.longdouble aren't the same as the - python float precision. - - """ - for t in [np.float32, np.double, np.longdouble] : - yield check_nan_inf_float, t - -def check_complex_type(tp): - for x in [0, 1, -1, 1e20] : - assert_equal(str(tp(x)), str(complex(x)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x*1j)), str(complex(x*1j)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), - err_msg='Failed str formatting for type %s' % tp) - - if tp(1e10).itemsize > 8: - assert_equal(str(tp(1e10)), str(complex(1e10)), - err_msg='Failed str formatting for type %s' % tp) - else: - ref = '(1e+10+0j)' - assert_equal(str(tp(1e10)), ref, - err_msg='Failed str formatting for type %s' % tp) - -def test_complex_types(): - """Check formatting of complex types. - - This is only for the str function, and only for simple types. 
- The precision of np.float and np.longdouble aren't the same as the - python float precision. - - """ - for t in [np.complex64, np.cdouble, np.clongdouble] : - yield check_complex_type, t - -def test_complex_inf_nan(): - """Check inf/nan formatting of complex types.""" - TESTS = { - complex(np.inf, 0): "(inf+0j)", - complex(0, np.inf): "inf*j", - complex(-np.inf, 0): "(-inf+0j)", - complex(0, -np.inf): "-inf*j", - complex(np.inf, 1): "(inf+1j)", - complex(1, np.inf): "(1+inf*j)", - complex(-np.inf, 1): "(-inf+1j)", - complex(1, -np.inf): "(1-inf*j)", - complex(np.nan, 0): "(nan+0j)", - complex(0, np.nan): "nan*j", - complex(-np.nan, 0): "(nan+0j)", - complex(0, -np.nan): "nan*j", - complex(np.nan, 1): "(nan+1j)", - complex(1, np.nan): "(1+nan*j)", - complex(-np.nan, 1): "(nan+1j)", - complex(1, -np.nan): "(1+nan*j)", - } - for tp in [np.complex64, np.cdouble, np.clongdouble]: - for c, s in TESTS.items(): - yield _check_complex_inf_nan, c, s, tp - -def _check_complex_inf_nan(c, s, dtype): - assert_equal(str(dtype(c)), s) - -# print tests -def _test_redirected_print(x, tp, ref=None): - file = StringIO() - file_tp = StringIO() - stdout = sys.stdout - try: - sys.stdout = file_tp - print(tp(x)) - sys.stdout = file - if ref: - print(ref) - else: - print(x) - finally: - sys.stdout = stdout - - assert_equal(file.getvalue(), file_tp.getvalue(), - err_msg='print failed for type%s' % tp) - -def check_float_type_print(tp): - for x in [0, 1, -1, 1e20]: - _test_redirected_print(float(x), tp) - - for x in [np.inf, -np.inf, np.nan]: - _test_redirected_print(float(x), tp, _REF[x]) - - if tp(1e10).itemsize > 4: - _test_redirected_print(float(1e10), tp) - else: - ref = '1e+10' - _test_redirected_print(float(1e10), tp, ref) - -def check_complex_type_print(tp): - # We do not create complex with inf/nan directly because the feature is - # missing in python < 2.6 - for x in [0, 1, -1, 1e20]: - _test_redirected_print(complex(x), tp) - - if tp(1e10).itemsize > 8: - _test_redirected_print(complex(1e10), tp) - else: - ref = '(1e+10+0j)' - _test_redirected_print(complex(1e10), tp, ref) - - _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') - _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') - _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') - -def test_float_type_print(): - """Check formatting when using print """ - for t in [np.float32, np.double, np.longdouble] : - yield check_float_type_print, t - -def test_complex_type_print(): - """Check formatting when using print """ - for t in [np.complex64, np.cdouble, np.clongdouble] : - yield check_complex_type_print, t - -def test_scalar_format(): - """Test the str.format method with NumPy scalar types""" - tests = [('{0}', True, np.bool_), - ('{0}', False, np.bool_), - ('{0:d}', 130, np.uint8), - ('{0:d}', 50000, np.uint16), - ('{0:d}', 3000000000, np.uint32), - ('{0:d}', 15000000000000000000, np.uint64), - ('{0:d}', -120, np.int8), - ('{0:d}', -30000, np.int16), - ('{0:d}', -2000000000, np.int32), - ('{0:d}', -7000000000000000000, np.int64), - ('{0:g}', 1.5, np.float16), - ('{0:g}', 1.5, np.float32), - ('{0:g}', 1.5, np.float64), - ('{0:g}', 1.5, np.longdouble)] - # Python 2.6 doesn't implement complex.__format__ - if sys.version_info[:2] > (2, 6): - tests += [('{0:g}', 1.5+0.5j, np.complex64), - ('{0:g}', 1.5+0.5j, np.complex128), - ('{0:g}', 1.5+0.5j, np.clongdouble)] - - for (fmat, val, valtype) in tests: - try: - assert_equal(fmat.format(val), fmat.format(valtype(val)), - "failed with val %s, type %s" % (val, valtype)) - except 
ValueError as e: - assert_(False, - "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % - (fmat, repr(val), repr(valtype), str(e))) - - -# Locale tests: scalar types formatting should be independent of the locale -def in_foreign_locale(func): - """ - Swap LC_NUMERIC locale to one in which the decimal point is ',' and not '.' - If not possible, raise nose.SkipTest - - """ - if sys.platform == 'win32': - locales = ['FRENCH'] - else: - locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] - - def wrapper(*args, **kwargs): - curloc = locale.getlocale(locale.LC_NUMERIC) - try: - for loc in locales: - try: - locale.setlocale(locale.LC_NUMERIC, loc) - break - except locale.Error: - pass - else: - raise nose.SkipTest("Skipping locale test, because " - "French locale not found") - return func(*args, **kwargs) - finally: - locale.setlocale(locale.LC_NUMERIC, locale=curloc) - return nose.tools.make_decorator(func)(wrapper) - -@in_foreign_locale -def test_locale_single(): - assert_equal(str(np.float32(1.2)), str(float(1.2))) - -@in_foreign_locale -def test_locale_double(): - assert_equal(str(np.double(1.2)), str(float(1.2))) - -@in_foreign_locale -def test_locale_longdouble(): - assert_equal(str(np.longdouble(1.2)), str(float(1.2))) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_records.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_records.py deleted file mode 100644 index 8c9ce5c708a47..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_records.py +++ /dev/null @@ -1,176 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from os import path -import numpy as np -from numpy.testing import * -from numpy.compat import asbytes, asunicode - -import warnings -import collections -import pickle - - -class TestFromrecords(TestCase): - def test_fromrecords(self): - r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], - names='col1,col2,col3') - assert_equal(r[0].item(), (456, 'dbe', 1.2)) - - def test_method_array(self): - r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big') - assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924)) - - def test_method_array2(self): - r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), - (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') - assert_equal(r[1].item(), (2, 22.0, asbytes('b'))) - - def test_recarray_slices(self): - r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), - (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') - assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d'))) - - def test_recarray_fromarrays(self): - x1 = np.array([1, 2, 3, 4]) - x2 = np.array(['a', 'dd', 'xyz', '12']) - x3 = np.array([1.1, 2, 3, 4]) - r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') - assert_equal(r[1].item(), (2, 'dd', 2.0)) - x1[1] = 34 - assert_equal(r.a, np.array([1, 2, 3, 4])) - - def test_recarray_fromfile(self): - data_dir = path.join(path.dirname(__file__), 'data') - filename = path.join(data_dir, 'recarray_from_file.fits') - fd = open(filename, 'rb') - fd.seek(2880 * 2) - r = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big') - fd.seek(2880 * 2) - r = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big') - fd.close() - - def test_recarray_from_obj(self): - count = 10 - a = np.zeros(count, dtype='O') - b = np.zeros(count, dtype='f8') - c = np.zeros(count, dtype='f8') - for 
i in range(len(a)): - a[i] = list(range(1, 10)) - - mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') - for i in range(len(a)): - assert_((mine.date[i] == list(range(1, 10)))) - assert_((mine.data1[i] == 0.0)) - assert_((mine.data2[i] == 0.0)) - - def test_recarray_from_repr(self): - x = np.rec.array([ (1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) - y = eval("np." + repr(x)) - assert_(isinstance(y, np.recarray)) - assert_equal(y, x) - - def test_recarray_from_names(self): - ra = np.rec.array([ - (1, 'abc', 3.7000002861022949, 0), - (2, 'xy', 6.6999998092651367, 1), - (0, ' ', 0.40000000596046448, 0)], - names='c1, c2, c3, c4') - pa = np.rec.fromrecords([ - (1, 'abc', 3.7000002861022949, 0), - (2, 'xy', 6.6999998092651367, 1), - (0, ' ', 0.40000000596046448, 0)], - names='c1, c2, c3, c4') - assert_(ra.dtype == pa.dtype) - assert_(ra.shape == pa.shape) - for k in range(len(ra)): - assert_(ra[k].item() == pa[k].item()) - - def test_recarray_conflict_fields(self): - ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2), - (3, 'wrs', 1.3)], - names='field, shape, mean') - ra.mean = [1.1, 2.2, 3.3] - assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3]) - assert_(type(ra.mean) is type(ra.var)) - ra.shape = (1, 3) - assert_(ra.shape == (1, 3)) - ra.shape = ['A', 'B', 'C'] - assert_array_equal(ra['shape'], [['A', 'B', 'C']]) - ra.field = 5 - assert_array_equal(ra['field'], [[5, 5, 5]]) - assert_(isinstance(ra.field, collections.Callable)) - - def test_fromrecords_with_explicit_dtype(self): - a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], - dtype=[('a', int), ('b', np.object)]) - assert_equal(a.a, [1, 2]) - assert_equal(a[0].a, 1) - assert_equal(a.b, ['a', 'bbb']) - assert_equal(a[-1].b, 'bbb') - # - ndtype = np.dtype([('a', int), ('b', np.object)]) - a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype) - assert_equal(a.a, [1, 2]) - assert_equal(a[0].a, 1) - assert_equal(a.b, ['a', 'bbb']) - assert_equal(a[-1].b, 'bbb') - - -class TestRecord(TestCase): - def setUp(self): - self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], - dtype=[("col1", "= 3) or - (sys.platform == "win32" and - platform.architecture()[0] == "64bit"), - "numpy.intp('0xff', 16) not supported on Py3, " - "as it does not inherit from Python int") - def test_intp(self,level=rlevel): - """Ticket #99""" - i_width = np.int_(0).nbytes*2 - 1 - np.intp('0x' + 'f'*i_width, 16) - self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16) - self.assertRaises(ValueError, np.intp, '0x1', 32) - assert_equal(255, np.intp('0xFF', 16)) - assert_equal(1024, np.intp(1024)) - - def test_endian_bool_indexing(self,level=rlevel): - """Ticket #105""" - a = np.arange(10., dtype='>f8') - b = np.arange(10., dtype='2) & (a<6)) - xb = np.where((b>2) & (b<6)) - ya = ((a>2) & (a<6)) - yb = ((b>2) & (b<6)) - assert_array_almost_equal(xa, ya.nonzero()) - assert_array_almost_equal(xb, yb.nonzero()) - assert_(np.all(a[ya] > 0.5)) - assert_(np.all(b[yb] > 0.5)) - - def test_endian_where(self,level=rlevel): - """GitHub issue #369""" - net = np.zeros(3, dtype='>f4') - net[1] = 0.00458849 - net[2] = 0.605202 - max_net = net.max() - test = np.where(net <= 0., max_net, net) - correct = np.array([ 0.60520202, 0.00458849, 0.60520202]) - assert_array_almost_equal(test, correct) - - def test_endian_recarray(self,level=rlevel): - """Ticket #2185""" - dt = np.dtype([ - ('head', '>u4'), - ('data', '>u4', 2), - ]) - buf = np.recarray(1, dtype=dt) - buf[0]['head'] = 1 - buf[0]['data'][:] = [1, 1] - - h = buf[0]['head'] - d = buf[0]['data'][0] 
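Several of the regression tests around here manipulate explicitly non-native byte orders. A small sketch of the basic invariant they depend on, with the final check assuming a little-endian host:

import numpy as np

big = np.arange(4, dtype='>f8')     # explicitly big-endian storage
nat = big.astype(np.float64)        # native byte order
assert (big == nat).all()           # comparisons see values, not raw bytes
if np.little_endian:
    assert big.tobytes() != nat.tobytes()   # same values, different layout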
- buf[0]['head'] = h - buf[0]['data'][0] = d - assert_(buf[0]['head'] == 1) - - def test_mem_dot(self,level=rlevel): - """Ticket #106""" - x = np.random.randn(0, 1) - y = np.random.randn(10, 1) - # Dummy array to detect bad memory access: - _z = np.ones(10) - _dummy = np.empty((0, 10)) - z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) - np.dot(x, np.transpose(y), out=z) - assert_equal(_z, np.ones(10)) - # Do the same for the built-in dot: - np.core.multiarray.dot(x, np.transpose(y), out=z) - assert_equal(_z, np.ones(10)) - - def test_arange_endian(self,level=rlevel): - """Ticket #111""" - ref = np.arange(10) - x = np.arange(10, dtype=' 8: -# a = np.exp(np.array([1000],dtype=np.longfloat)) -# assert_(str(a)[1:9] == str(a[0])[:8]) - - def test_argmax(self,level=rlevel): - """Ticket #119""" - a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) - for i in range(a.ndim): - aargmax = a.argmax(i) - - def test_mem_divmod(self,level=rlevel): - """Ticket #126""" - for i in range(10): - divmod(np.array([i])[0], 10) - - - def test_hstack_invalid_dims(self,level=rlevel): - """Ticket #128""" - x = np.arange(9).reshape((3, 3)) - y = np.array([0, 0, 0]) - self.assertRaises(ValueError, np.hstack, (x, y)) - - def test_squeeze_type(self,level=rlevel): - """Ticket #133""" - a = np.array([3]) - b = np.array(3) - assert_(type(a.squeeze()) is np.ndarray) - assert_(type(b.squeeze()) is np.ndarray) - - def test_add_identity(self,level=rlevel): - """Ticket #143""" - assert_equal(0, np.add.identity) - - def test_numpy_float_python_long_addition(self): - # Check that numpy float and python longs can be added correctly. - a = np.float_(23.) + 2**135 - assert_equal(a, 23. + 2**135) - - def test_binary_repr_0(self,level=rlevel): - """Ticket #151""" - assert_equal('0', np.binary_repr(0)) - - def test_rec_iterate(self,level=rlevel): - """Ticket #160""" - descr = np.dtype([('i', int), ('f', float), ('s', '|S3')]) - x = np.rec.array([(1, 1.1, '1.0'), - (2, 2.2, '2.0')], dtype=descr) - x[0].tolist() - [i for i in x[0]] - - def test_unicode_string_comparison(self,level=rlevel): - """Ticket #190""" - a = np.array('hello', np.unicode_) - b = np.array('world') - a == b - - def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel): - """Fix in r2836""" - # Create non-contiguous Fortran ordered array - x = np.array(np.random.rand(3, 3), order='F')[:, :2] - assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes())) - - def test_flat_assignment(self,level=rlevel): - """Correct behaviour of ticket #194""" - x = np.empty((3, 1)) - x.flat = np.arange(3) - assert_array_almost_equal(x, [[0], [1], [2]]) - x.flat = np.arange(3, dtype=float) - assert_array_almost_equal(x, [[0], [1], [2]]) - - def test_broadcast_flat_assignment(self,level=rlevel): - """Ticket #194""" - x = np.empty((3, 1)) - def bfa(): x[:] = np.arange(3) - def bfb(): x[:] = np.arange(3, dtype=float) - self.assertRaises(ValueError, bfa) - self.assertRaises(ValueError, bfb) - - def test_nonarray_assignment(self): - # See also Issue gh-2870, test for non-array assignment - # and equivalent unsafe casted array assignment - a = np.arange(10) - b = np.ones(10, dtype=bool) - r = np.arange(10) - def assign(a, b, c): - a[b] = c - assert_raises(ValueError, assign, a, b, np.nan) - a[b] = np.array(np.nan) # but not this. 
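test_flat_assignment above depends on the flat iterator accepting broadcast assignment in C order; a compact sketch:

import numpy as np

x = np.empty((3, 1))
x.flat = np.arange(3)               # element-wise assignment via the flat iterator
np.testing.assert_array_equal(x, [[0], [1], [2]])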
- assert_raises(ValueError, assign, a, r, np.nan) - a[r] = np.array(np.nan) - - def test_unpickle_dtype_with_object(self,level=rlevel): - """Implemented in r2840""" - dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')]) - f = BytesIO() - pickle.dump(dt, f) - f.seek(0) - dt_ = pickle.load(f) - f.close() - assert_equal(dt, dt_) - - def test_mem_array_creation_invalid_specification(self,level=rlevel): - """Ticket #196""" - dt = np.dtype([('x', int), ('y', np.object_)]) - # Wrong way - self.assertRaises(ValueError, np.array, [1, 'object'], dt) - # Correct way - np.array([(1, 'object')], dt) - - def test_recarray_single_element(self,level=rlevel): - """Ticket #202""" - a = np.array([1, 2, 3], dtype=np.int32) - b = a.copy() - r = np.rec.array(a, shape=1, formats=['3i4'], names=['d']) - assert_array_equal(a, b) - assert_equal(a, r[0][0]) - - def test_zero_sized_array_indexing(self,level=rlevel): - """Ticket #205""" - tmp = np.array([]) - def index_tmp(): tmp[np.array(10)] - self.assertRaises(IndexError, index_tmp) - - def test_chararray_rstrip(self,level=rlevel): - """Ticket #222""" - x = np.chararray((1,), 5) - x[0] = asbytes('a ') - x = x.rstrip() - assert_equal(x[0], asbytes('a')) - - def test_object_array_shape(self,level=rlevel): - """Ticket #239""" - assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,)) - assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2)) - assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2)) - assert_equal(np.array([], dtype=object).shape, (0,)) - assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0)) - assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,)) - - def test_mem_around(self,level=rlevel): - """Ticket #243""" - x = np.zeros((1,)) - y = [0] - decimal = 6 - np.around(abs(x-y), decimal) <= 10.0**(-decimal) - - def test_character_array_strip(self,level=rlevel): - """Ticket #246""" - x = np.char.array(("x", "x ", "x ")) - for c in x: assert_equal(c, "x") - - def test_lexsort(self,level=rlevel): - """Lexsort memory error""" - v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - assert_equal(np.lexsort(v), 0) - - def test_lexsort_invalid_sequence(self): - # Issue gh-4123 - class BuggySequence(object): - def __len__(self): - return 4 - def __getitem__(self, key): - raise KeyError - - assert_raises(KeyError, np.lexsort, BuggySequence()) - - def test_pickle_py2_bytes_encoding(self): - # Check that arrays and scalars pickled on Py2 are - # unpickleable on Py3 using encoding='bytes' - - test_data = [ - # (original, py2_pickle) - (np.unicode_('\u6f2c'), - asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" - "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n" - "I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")), - - (np.array([9e123], dtype=np.float64), - asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n" - "p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n" - "p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n" - "I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")), - - (np.array([(9e123,)], dtype=[('name', float)]), - asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n" - "(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n" - "(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n" - "(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n" - "I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n" - "bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")), - ] - - if 
sys.version_info[:2] >= (3, 4): - # encoding='bytes' was added in Py3.4 - for original, data in test_data: - result = pickle.loads(data, encoding='bytes') - assert_equal(result, original) - - if isinstance(result, np.ndarray) and result.dtype.names: - for name in result.dtype.names: - assert_(isinstance(name, str)) - - def test_pickle_dtype(self,level=rlevel): - """Ticket #251""" - pickle.dumps(np.float) - - def test_swap_real(self, level=rlevel): - """Ticket #265""" - assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0) - assert_equal(np.arange(4, dtype=' 1 and x['two'] > 2) - - def test_method_args(self, level=rlevel): - # Make sure methods and functions have same default axis - # keyword and arguments - funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'), - ('sometrue', 'any'), - ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'), - 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', - 'round', 'min', 'max', 'argsort', 'sort'] - funcs2 = ['compress', 'take', 'repeat'] - - for func in funcs1: - arr = np.random.rand(8, 7) - arr2 = arr.copy() - if isinstance(func, tuple): - func_meth = func[1] - func = func[0] - else: - func_meth = func - res1 = getattr(arr, func_meth)() - res2 = getattr(np, func)(arr2) - if res1 is None: - res1 = arr - - if res1.dtype.kind in 'uib': - assert_((res1 == res2).all(), func) - else: - assert_(abs(res1-res2).max() < 1e-8, func) - - for func in funcs2: - arr1 = np.random.rand(8, 7) - arr2 = np.random.rand(8, 7) - res1 = None - if func == 'compress': - arr1 = arr1.ravel() - res1 = getattr(arr2, func)(arr1) - else: - arr2 = (15*arr2).astype(int).ravel() - if res1 is None: - res1 = getattr(arr1, func)(arr2) - res2 = getattr(np, func)(arr1, arr2) - assert_(abs(res1-res2).max() < 1e-8, func) - - def test_mem_lexsort_strings(self, level=rlevel): - """Ticket #298""" - lst = ['abc', 'cde', 'fgh'] - np.lexsort((lst,)) - - def test_fancy_index(self, level=rlevel): - """Ticket #302""" - x = np.array([1, 2])[np.array([0])] - assert_equal(x.shape, (1,)) - - def test_recarray_copy(self, level=rlevel): - """Ticket #312""" - dt = [('x', np.int16), ('y', np.float64)] - ra = np.array([(1, 2.3)], dtype=dt) - rb = np.rec.array(ra, dtype=dt) - rb['x'] = 2. 
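test_method_args above asserts that array methods and their module-level counterparts agree on default axis handling; a spot check of two of the pairs it sweeps (illustrative, not exhaustive):

import numpy as np

arr = np.random.rand(8, 7)
assert np.allclose(arr.cumsum(axis=0), np.cumsum(arr, axis=0))
assert arr.argmax() == np.argmax(arr)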
- assert_(ra['x'] != rb['x']) - - def test_rec_fromarray(self, level=rlevel): - """Ticket #322""" - x1 = np.array([[1, 2], [3, 4], [5, 6]]) - x2 = np.array(['a', 'dd', 'xyz']) - x3 = np.array([1.1, 2, 3]) - np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") - - def test_object_array_assign(self, level=rlevel): - x = np.empty((2, 2), object) - x.flat[2] = (1, 2, 3) - assert_equal(x.flat[2], (1, 2, 3)) - - def test_ndmin_float64(self, level=rlevel): - """Ticket #324""" - x = np.array([1, 2, 3], dtype=np.float64) - assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) - assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) - - def test_ndmin_order(self, level=rlevel): - """Issue #465 and related checks""" - assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) - assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) - assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) - assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) - - def test_mem_axis_minimization(self, level=rlevel): - """Ticket #327""" - data = np.arange(5) - data = np.add.outer(data, data) - - def test_mem_float_imag(self, level=rlevel): - """Ticket #330""" - np.float64(1.0).imag - - def test_dtype_tuple(self, level=rlevel): - """Ticket #334""" - assert_(np.dtype('i4') == np.dtype(('i4', ()))) - - def test_dtype_posttuple(self, level=rlevel): - """Ticket #335""" - np.dtype([('col1', '()i4')]) - - def test_numeric_carray_compare(self, level=rlevel): - """Ticket #341""" - assert_equal(np.array(['X'], 'c'), asbytes('X')) - - def test_string_array_size(self, level=rlevel): - """Ticket #342""" - self.assertRaises(ValueError, - np.array, [['X'], ['X', 'X', 'X']], '|S1') - - def test_dtype_repr(self, level=rlevel): - """Ticket #344""" - dt1=np.dtype(('uint32', 2)) - dt2=np.dtype(('uint32', (2,))) - assert_equal(dt1.__repr__(), dt2.__repr__()) - - def test_reshape_order(self, level=rlevel): - """Make sure reshape order works.""" - a = np.arange(6).reshape(2, 3, order='F') - assert_equal(a, [[0, 2, 4], [1, 3, 5]]) - a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) - b = a[:, 1] - assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) - - def test_reshape_zero_strides(self, level=rlevel): - """Issue #380, test reshaping of zero strided arrays""" - a = np.ones(1) - a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) - assert_(a.reshape(5, 1).strides[0] == 0) - - def test_reshape_zero_size(self, level=rlevel): - """GitHub Issue #2700, setting shape failed for 0-sized arrays""" - a = np.ones((0, 2)) - a.shape = (-1, 2) - - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. - # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. 
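test_reshape_order above pins down Fortran-order reshape semantics, and the ndmin tests nearby check that new axes are prepended; a sketch of both:

import numpy as np

a = np.arange(6)
print(a.reshape(2, 3))                    # [[0 1 2], [3 4 5]]  row-major fill
print(a.reshape(2, 3, order='F'))         # [[0 2 4], [1 3 5]]  column-major fill
assert np.array([1, 2], ndmin=3).shape == (1, 1, 2)   # new axes on the left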
- @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max) - def test_reshape_trailing_ones_strides(self): - # GitHub issue gh-2949, bad strides for trailing ones of new shape - a = np.zeros(12, dtype=np.int32)[::2] # not contiguous - strides_c = (16, 8, 8, 8) - strides_f = (8, 24, 48, 48) - assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) - assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) - assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) - - def test_repeat_discont(self, level=rlevel): - """Ticket #352""" - a = np.arange(12).reshape(4, 3)[:, 2] - assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) - - def test_array_index(self, level=rlevel): - """Make sure optimization is not called in this case.""" - a = np.array([1, 2, 3]) - a2 = np.array([[1, 2, 3]]) - assert_equal(a[np.where(a==3)], a2[np.where(a2==3)]) - - def test_object_argmax(self, level=rlevel): - a = np.array([1, 2, 3], dtype=object) - assert_(a.argmax() == 2) - - def test_recarray_fields(self, level=rlevel): - """Ticket #372""" - dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) - dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) - for a in [np.array([(1, 2), (3, 4)], "i4,i4"), - np.rec.array([(1, 2), (3, 4)], "i4,i4"), - np.rec.array([(1, 2), (3, 4)]), - np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), - np.rec.fromarrays([(1, 2), (3, 4)])]: - assert_(a.dtype in [dt0, dt1]) - - def test_random_shuffle(self, level=rlevel): - """Ticket #374""" - a = np.arange(5).reshape((5, 1)) - b = a.copy() - np.random.shuffle(b) - assert_equal(np.sort(b, axis=0), a) - - def test_refcount_vdot(self, level=rlevel): - """Changeset #3443""" - _assert_valid_refcount(np.vdot) - - def test_startswith(self, level=rlevel): - ca = np.char.array(['Hi', 'There']) - assert_equal(ca.startswith('H'), [True, False]) - - def test_noncommutative_reduce_accumulate(self, level=rlevel): - """Ticket #413""" - tosubtract = np.arange(5) - todivide = np.array([2.0, 0.5, 0.25]) - assert_equal(np.subtract.reduce(tosubtract), -10) - assert_equal(np.divide.reduce(todivide), 16.0) - assert_array_equal(np.subtract.accumulate(tosubtract), - np.array([0, -1, -3, -6, -10])) - assert_array_equal(np.divide.accumulate(todivide), - np.array([2., 4., 16.])) - - def test_convolve_empty(self, level=rlevel): - """Convolve should raise an error for empty input array.""" - self.assertRaises(ValueError, np.convolve, [], [1]) - self.assertRaises(ValueError, np.convolve, [1], []) - - def test_multidim_byteswap(self, level=rlevel): - """Ticket #449""" - r=np.array([(1, (0, 1, 2))], dtype="i2,3i2") - assert_array_equal(r.byteswap(), - np.array([(256, (0, 256, 512))], r.dtype)) - - def test_string_NULL(self, level=rlevel): - """Changeset 3557""" - assert_equal(np.array("a\x00\x0b\x0c\x00").item(), - 'a\x00\x0b\x0c') - - def test_junk_in_string_fields_of_recarray(self, level=rlevel): - """Ticket #483""" - r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')]) - assert_(asbytes(r['var1'][0][0]) == asbytes('abc')) - - def test_take_output(self, level=rlevel): - """Ensure that 'take' honours output parameter.""" - x = np.arange(12).reshape((3, 4)) - a = np.take(x, [0, 2], axis=1) - b = np.zeros_like(a) - np.take(x, [0, 2], axis=1, out=b) - assert_array_equal(a, b) - - def test_take_object_fail(self): - # Issue gh-3001 - d = 123. 
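test_noncommutative_reduce_accumulate above is the canonical reminder that ufunc.reduce folds left-to-right; a sketch reproducing its expected values:

import numpy as np

assert np.subtract.reduce(np.arange(5)) == -10          # (((0-1)-2)-3)-4
np.testing.assert_array_equal(np.subtract.accumulate(np.arange(5)),
                              [0, -1, -3, -6, -10])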
- a = np.array([d, 1], dtype=object) - ref_d = sys.getrefcount(d) - try: - a.take([0, 100]) - except IndexError: - pass - assert_(ref_d == sys.getrefcount(d)) - - def test_array_str_64bit(self, level=rlevel): - """Ticket #501""" - s = np.array([1, np.nan], dtype=np.float64) - with np.errstate(all='raise'): - sstr = np.array_str(s) - - def test_frompyfunc_endian(self, level=rlevel): - """Ticket #503""" - from math import radians - uradians = np.frompyfunc(radians, 1, 1) - big_endian = np.array([83.4, 83.5], dtype='>f8') - little_endian = np.array([83.4, 83.5], dtype=' object - # casting succeeds - def rs(): - x = np.ones([484, 286]) - y = np.zeros([484, 286]) - x |= y - self.assertRaises(TypeError, rs) - - def test_unicode_scalar(self, level=rlevel): - """Ticket #600""" - x = np.array(["DROND", "DROND1"], dtype="U6") - el = x[1] - new = pickle.loads(pickle.dumps(el)) - assert_equal(new, el) - - def test_arange_non_native_dtype(self, level=rlevel): - """Ticket #616""" - for T in ('>f4', '0)]=v - # After removing deprecation, the following are ValueErrors. - # This might seem odd as compared to the value error below. This - # is due to the fact that the new code always uses "nonzero" logic - # and the boolean special case is not taken. - self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float)) - self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float)) - # Old special case (different code path): - self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) - - def test_mem_scalar_indexing(self, level=rlevel): - """Ticket #603""" - x = np.array([0], dtype=float) - index = np.array(0, dtype=np.int32) - x[index] - - def test_binary_repr_0_width(self, level=rlevel): - assert_equal(np.binary_repr(0, width=3), '000') - - def test_fromstring(self, level=rlevel): - assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), - [12, 9, 9]) - - def test_searchsorted_variable_length(self, level=rlevel): - x = np.array(['a', 'aa', 'b']) - y = np.array(['d', 'e']) - assert_equal(x.searchsorted(y), [3, 3]) - - def test_string_argsort_with_zeros(self, level=rlevel): - """Check argsort for strings containing zeros.""" - x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") - assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) - assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) - - def test_string_sort_with_zeros(self, level=rlevel): - """Check sort for strings containing zeros.""" - x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") - y = np.fromstring("\x00\x01\x00\x02", dtype="|S2") - assert_array_equal(np.sort(x, kind="q"), y) - - def test_copy_detection_zero_dim(self, level=rlevel): - """Ticket #658""" - np.indices((0, 3, 4)).T.reshape(-1, 3) - - def test_flat_byteorder(self, level=rlevel): - """Ticket #657""" - x = np.arange(10) - assert_array_equal(x.astype('>i4'), x.astype('i4').flat[:], x.astype('i4')): - x = np.array([-1, 0, 1], dtype=dt) - assert_equal(x.flat[0].dtype, x[0].dtype) - - def test_copy_detection_corner_case(self, level=rlevel): - """Ticket #658""" - np.indices((0, 3, 4)).T.reshape(-1, 3) - - # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. - # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous, - # 0-sized reshape itself is tested elsewhere. 
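test_frompyfunc_endian above wraps math.radians with np.frompyfunc, which always produces an object-dtype ufunc regardless of the input byte order; a sketch of the wrapping itself:

import numpy as np
from math import radians

uradians = np.frompyfunc(radians, 1, 1)     # 1 input, 1 output, object dtype
deg = np.array([0.0, 90.0, 180.0])
print(uradians(deg).astype(float))          # [0.  1.57079633  3.14159265]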
- @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max) - def test_copy_detection_corner_case2(self, level=rlevel): - """Ticket #771: strides are not set correctly when reshaping 0-sized - arrays""" - b = np.indices((0, 3, 4)).T.reshape(-1, 3) - assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) - - def test_object_array_refcounting(self, level=rlevel): - """Ticket #633""" - if not hasattr(sys, 'getrefcount'): - return - - # NB. this is probably CPython-specific - - cnt = sys.getrefcount - - a = object() - b = object() - c = object() - - cnt0_a = cnt(a) - cnt0_b = cnt(b) - cnt0_c = cnt(c) - - # -- 0d -> 1-d broadcast slice assignment - - arr = np.zeros(5, dtype=np.object_) - - arr[:] = a - assert_equal(cnt(a), cnt0_a + 5) - - arr[:] = b - assert_equal(cnt(a), cnt0_a) - assert_equal(cnt(b), cnt0_b + 5) - - arr[:2] = c - assert_equal(cnt(b), cnt0_b + 3) - assert_equal(cnt(c), cnt0_c + 2) - - del arr - - # -- 1-d -> 2-d broadcast slice assignment - - arr = np.zeros((5, 2), dtype=np.object_) - arr0 = np.zeros(2, dtype=np.object_) - - arr0[0] = a - assert_(cnt(a) == cnt0_a + 1) - arr0[1] = b - assert_(cnt(b) == cnt0_b + 1) - - arr[:,:] = arr0 - assert_(cnt(a) == cnt0_a + 6) - assert_(cnt(b) == cnt0_b + 6) - - arr[:, 0] = None - assert_(cnt(a) == cnt0_a + 1) - - del arr, arr0 - - # -- 2-d copying + flattening - - arr = np.zeros((5, 2), dtype=np.object_) - - arr[:, 0] = a - arr[:, 1] = b - assert_(cnt(a) == cnt0_a + 5) - assert_(cnt(b) == cnt0_b + 5) - - arr2 = arr.copy() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 10) - - arr2 = arr[:, 0].copy() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 5) - - arr2 = arr.flatten() - assert_(cnt(a) == cnt0_a + 10) - assert_(cnt(b) == cnt0_b + 10) - - del arr, arr2 - - # -- concatenate, repeat, take, choose - - arr1 = np.zeros((5, 1), dtype=np.object_) - arr2 = np.zeros((5, 1), dtype=np.object_) - - arr1[...] = a - arr2[...] = b - assert_(cnt(a) == cnt0_a + 5) - assert_(cnt(b) == cnt0_b + 5) - - arr3 = np.concatenate((arr1, arr2)) - assert_(cnt(a) == cnt0_a + 5 + 5) - assert_(cnt(b) == cnt0_b + 5 + 5) - - arr3 = arr1.repeat(3, axis=0) - assert_(cnt(a) == cnt0_a + 5 + 3*5) - - arr3 = arr1.take([1, 2, 3], axis=0) - assert_(cnt(a) == cnt0_a + 5 + 3) - - x = np.array([[0], [1], [0], [1], [1]], int) - arr3 = x.choose(arr1, arr2) - assert_(cnt(a) == cnt0_a + 5 + 2) - assert_(cnt(b) == cnt0_b + 5 + 3) - - def test_mem_custom_float_to_array(self, level=rlevel): - """Ticket 702""" - class MyFloat(object): - def __float__(self): - return 1.0 - - tmp = np.atleast_1d([MyFloat()]) - tmp2 = tmp.astype(float) - - def test_object_array_refcount_self_assign(self, level=rlevel): - """Ticket #711""" - class VictimObject(object): - deleted = False - def __del__(self): - self.deleted = True - d = VictimObject() - arr = np.zeros(5, dtype=np.object_) - arr[:] = d - del d - arr[:] = arr # refcount of 'd' might hit zero here - assert_(not arr[0].deleted) - arr[:] = arr # trying to induce a segfault by doing it again... 
- assert_(not arr[0].deleted) - - def test_mem_fromiter_invalid_dtype_string(self, level=rlevel): - x = [1, 2, 3] - self.assertRaises(ValueError, - np.fromiter, [xi for xi in x], dtype='S') - - def test_reduce_big_object_array(self, level=rlevel): - """Ticket #713""" - oldsize = np.setbufsize(10*16) - a = np.array([None]*161, object) - assert_(not np.any(a)) - np.setbufsize(oldsize) - - def test_mem_0d_array_index(self, level=rlevel): - """Ticket #714""" - np.zeros(10)[np.array(0)] - - def test_floats_from_string(self, level=rlevel): - """Ticket #640, floats from string""" - fsingle = np.single('1.234') - fdouble = np.double('1.234') - flongdouble = np.longdouble('1.234') - assert_almost_equal(fsingle, 1.234) - assert_almost_equal(fdouble, 1.234) - assert_almost_equal(flongdouble, 1.234) - - def test_nonnative_endian_fill(self, level=rlevel): - """ Non-native endian arrays were incorrectly filled with scalars before - r5034. - """ - if sys.byteorder == 'little': - dtype = np.dtype('>i4') - else: - dtype = np.dtype('= 3: - f = open(filename, 'rb') - xp = pickle.load(f, encoding='latin1') - f.close() - else: - f = open(filename) - xp = pickle.load(f) - f.close() - xpd = xp.astype(np.float64) - assert_((xp.__array_interface__['data'][0] != - xpd.__array_interface__['data'][0])) - - def test_compress_small_type(self, level=rlevel): - """Ticket #789, changeset 5217. - """ - # compress with out argument segfaulted if cannot cast safely - import numpy as np - a = np.array([[1, 2], [3, 4]]) - b = np.zeros((2, 1), dtype = np.single) - try: - a.compress([True, False], axis = 1, out = b) - raise AssertionError("compress with an out which cannot be " - "safely casted should not return " - "successfully") - except TypeError: - pass - - def test_attributes(self, level=rlevel): - """Ticket #791 - """ - class TestArray(np.ndarray): - def __new__(cls, data, info): - result = np.array(data) - result = result.view(cls) - result.info = info - return result - def __array_finalize__(self, obj): - self.info = getattr(obj, 'info', '') - dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') - assert_(dat.info == 'jubba') - dat.resize((4, 2)) - assert_(dat.info == 'jubba') - dat.sort() - assert_(dat.info == 'jubba') - dat.fill(2) - assert_(dat.info == 'jubba') - dat.put([2, 3, 4], [6, 3, 4]) - assert_(dat.info == 'jubba') - dat.setfield(4, np.int32, 0) - assert_(dat.info == 'jubba') - dat.setflags() - assert_(dat.info == 'jubba') - assert_(dat.all(1).info == 'jubba') - assert_(dat.any(1).info == 'jubba') - assert_(dat.argmax(1).info == 'jubba') - assert_(dat.argmin(1).info == 'jubba') - assert_(dat.argsort(1).info == 'jubba') - assert_(dat.astype(TestArray).info == 'jubba') - assert_(dat.byteswap().info == 'jubba') - assert_(dat.clip(2, 7).info == 'jubba') - assert_(dat.compress([0, 1, 1]).info == 'jubba') - assert_(dat.conj().info == 'jubba') - assert_(dat.conjugate().info == 'jubba') - assert_(dat.copy().info == 'jubba') - dat2 = TestArray([2, 3, 1, 0], 'jubba') - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - assert_(dat2.choose(choices).info == 'jubba') - assert_(dat.cumprod(1).info == 'jubba') - assert_(dat.cumsum(1).info == 'jubba') - assert_(dat.diagonal().info == 'jubba') - assert_(dat.flatten().info == 'jubba') - assert_(dat.getfield(np.int32, 0).info == 'jubba') - assert_(dat.imag.info == 'jubba') - assert_(dat.max(1).info == 'jubba') - assert_(dat.mean(1).info == 'jubba') - assert_(dat.min(1).info == 'jubba') - assert_(dat.newbyteorder().info == 'jubba') - 
assert_(dat.nonzero()[0].info == 'jubba') - assert_(dat.nonzero()[1].info == 'jubba') - assert_(dat.prod(1).info == 'jubba') - assert_(dat.ptp(1).info == 'jubba') - assert_(dat.ravel().info == 'jubba') - assert_(dat.real.info == 'jubba') - assert_(dat.repeat(2).info == 'jubba') - assert_(dat.reshape((2, 4)).info == 'jubba') - assert_(dat.round().info == 'jubba') - assert_(dat.squeeze().info == 'jubba') - assert_(dat.std(1).info == 'jubba') - assert_(dat.sum(1).info == 'jubba') - assert_(dat.swapaxes(0, 1).info == 'jubba') - assert_(dat.take([2, 3, 5]).info == 'jubba') - assert_(dat.transpose().info == 'jubba') - assert_(dat.T.info == 'jubba') - assert_(dat.var(1).info == 'jubba') - assert_(dat.view(TestArray).info == 'jubba') - - def test_recarray_tolist(self, level=rlevel): - """Ticket #793, changeset r5215 - """ - # Comparisons fail for NaN, so we can't use random memory - # for the test. - buf = np.zeros(40, dtype=np.int8) - a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf) - b = a.tolist() - assert_( a[0].tolist() == b[0]) - assert_( a[1].tolist() == b[1]) - - def test_nonscalar_item_method(self): - # Make sure that .item() fails graciously when it should - a = np.arange(5) - assert_raises(ValueError, a.item) - - def test_char_array_creation(self, level=rlevel): - a = np.array('123', dtype='c') - b = np.array(asbytes_nested(['1', '2', '3'])) - assert_equal(a, b) - - def test_unaligned_unicode_access(self, level=rlevel) : - """Ticket #825""" - for i in range(1, 9) : - msg = 'unicode offset: %d chars'%i - t = np.dtype([('a', 'S%d'%i), ('b', 'U2')]) - x = np.array([(asbytes('a'), sixu('b'))], dtype=t) - if sys.version_info[0] >= 3: - assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) - else: - assert_equal(str(x), "[('a', u'b')]", err_msg=msg) - - def test_sign_for_complex_nan(self, level=rlevel): - """Ticket 794.""" - with np.errstate(invalid='ignore'): - C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) - have = np.sign(C) - want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) - assert_equal(have, want) - - def test_for_equal_names(self, level=rlevel): - """Ticket #674""" - dt = np.dtype([('foo', float), ('bar', float)]) - a = np.zeros(10, dt) - b = list(a.dtype.names) - b[0] = "notfoo" - a.dtype.names = b - assert_(a.dtype.names[0] == "notfoo") - assert_(a.dtype.names[1] == "bar") - - def test_for_object_scalar_creation(self, level=rlevel): - """Ticket #816""" - a = np.object_() - b = np.object_(3) - b2 = np.object_(3.0) - c = np.object_([4, 5]) - d = np.object_([None, {}, []]) - assert_(a is None) - assert_(type(b) is int) - assert_(type(b2) is float) - assert_(type(c) is np.ndarray) - assert_(c.dtype == object) - assert_(d.dtype == object) - - def test_array_resize_method_system_error(self): - """Ticket #840 - order should be an invalid keyword.""" - x = np.array([[0, 1], [2, 3]]) - self.assertRaises(TypeError, x.resize, (2, 2), order='C') - - def test_for_zero_length_in_choose(self, level=rlevel): - "Ticket #882" - a = np.array(1) - self.assertRaises(ValueError, lambda x: x.choose([]), a) - - def test_array_ndmin_overflow(self): - "Ticket #947." 
- self.assertRaises(ValueError, lambda: np.array([1], ndmin=33)) - - def test_errobj_reference_leak(self, level=rlevel): - """Ticket #955""" - with np.errstate(all="ignore"): - z = int(0) - p = np.int32(-1) - - gc.collect() - n_before = len(gc.get_objects()) - z**p # this shouldn't leak a reference to errobj - gc.collect() - n_after = len(gc.get_objects()) - assert_(n_before >= n_after, (n_before, n_after)) - - def test_void_scalar_with_titles(self, level=rlevel): - """No ticket""" - data = [('john', 4), ('mary', 5)] - dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] - arr = np.array(data, dtype=dtype1) - assert_(arr[0][0] == 'john') - assert_(arr[0][1] == 4) - - def test_void_scalar_constructor(self): - #Issue #1550 - - #Create test string data, construct void scalar from data and assert - #that void scalar contains original data. - test_string = np.array("test") - test_string_void_scalar = np.core.multiarray.scalar( - np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes()) - - assert_(test_string_void_scalar.view(test_string.dtype) == test_string) - - #Create record scalar, construct from data and assert that - #reconstructed scalar is correct. - test_record = np.ones((), "i,i") - test_record_void_scalar = np.core.multiarray.scalar( - test_record.dtype, test_record.tobytes()) - - assert_(test_record_void_scalar == test_record) - - #Test pickle and unpickle of void and record scalars - assert_(pickle.loads(pickle.dumps(test_string)) == test_string) - assert_(pickle.loads(pickle.dumps(test_record)) == test_record) - - def test_blasdot_uninitialized_memory(self): - """Ticket #950""" - for m in [0, 1, 2]: - for n in [0, 1, 2]: - for k in range(3): - # Try to ensure that x->data contains non-zero floats - x = np.array([123456789e199], dtype=np.float64) - x.resize((m, 0)) - y = np.array([123456789e199], dtype=np.float64) - y.resize((0, n)) - - # `dot` should just return zero (m,n) matrix - z = np.dot(x, y) - assert_(np.all(z == 0)) - assert_(z.shape == (m, n)) - - def test_zeros(self): - """Regression test for #1061.""" - # Set a size which cannot fit into a 64 bits signed integer - sz = 2 ** 64 - good = 'Maximum allowed dimension exceeded' - try: - np.empty(sz) - except ValueError as e: - if not str(e) == good: - self.fail("Got msg '%s', expected '%s'" % (e, good)) - except Exception as e: - self.fail("Got exception of type %s instead of ValueError" % type(e)) - - def test_huge_arange(self): - """Regression test for #1062.""" - # Set a size which cannot fit into a 64 bits signed integer - sz = 2 ** 64 - good = 'Maximum allowed size exceeded' - try: - a = np.arange(sz) - self.assertTrue(np.size == sz) - except ValueError as e: - if not str(e) == good: - self.fail("Got msg '%s', expected '%s'" % (e, good)) - except Exception as e: - self.fail("Got exception of type %s instead of ValueError" % type(e)) - - def test_fromiter_bytes(self): - """Ticket #1058""" - a = np.fromiter(list(range(10)), dtype='b') - b = np.fromiter(list(range(10)), dtype='B') - assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - - def test_array_from_sequence_scalar_array(self): - """Ticket #1078: segfaults when creating an array with a sequence of 0d - arrays.""" - a = np.array((np.ones(2), np.array(2))) - assert_equal(a.shape, (2,)) - assert_equal(a.dtype, np.dtype(object)) - assert_equal(a[0], np.ones(2)) - assert_equal(a[1], np.array(2)) - - a = np.array(((1,), np.array(1))) - assert_equal(a.shape, (2,)) 
- assert_equal(a.dtype, np.dtype(object)) - assert_equal(a[0], (1,)) - assert_equal(a[1], np.array(1)) - - def test_array_from_sequence_scalar_array2(self): - """Ticket #1081: weird array with strange input...""" - t = np.array([np.array([]), np.array(0, object)]) - assert_equal(t.shape, (2,)) - assert_equal(t.dtype, np.dtype(object)) - - def test_array_too_big(self): - """Ticket #1080.""" - assert_raises(ValueError, np.zeros, [975]*7, np.int8) - assert_raises(ValueError, np.zeros, [26244]*5, np.int8) - - def test_dtype_keyerrors_(self): - """Ticket #1106.""" - dt = np.dtype([('f1', np.uint)]) - assert_raises(KeyError, dt.__getitem__, "f2") - assert_raises(IndexError, dt.__getitem__, 1) - assert_raises(ValueError, dt.__getitem__, 0.0) - - def test_lexsort_buffer_length(self): - """Ticket #1217, don't segfault.""" - a = np.ones(100, dtype=np.int8) - b = np.ones(100, dtype=np.int32) - i = np.lexsort((a[::-1], b)) - assert_equal(i, np.arange(100, dtype=np.int)) - - def test_object_array_to_fixed_string(self): - """Ticket #1235.""" - a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) - b = np.array(a, dtype=(np.str_, 8)) - assert_equal(a, b) - c = np.array(a, dtype=(np.str_, 5)) - assert_equal(c, np.array(['abcde', 'ijklm'])) - d = np.array(a, dtype=(np.str_, 12)) - assert_equal(a, d) - e = np.empty((2, ), dtype=(np.str_, 8)) - e[:] = a[:] - assert_equal(a, e) - - def test_unicode_to_string_cast(self): - """Ticket #1240.""" - a = np.array( - [ [sixu('abc'), sixu('\u03a3')], - [sixu('asdf'), sixu('erw')] - ], dtype='U') - def fail(): - b = np.array(a, 'S4') - self.assertRaises(UnicodeEncodeError, fail) - - def test_mixed_string_unicode_array_creation(self): - a = np.array(['1234', sixu('123')]) - assert_(a.itemsize == 16) - a = np.array([sixu('123'), '1234']) - assert_(a.itemsize == 16) - a = np.array(['1234', sixu('123'), '12345']) - assert_(a.itemsize == 20) - a = np.array([sixu('123'), '1234', sixu('12345')]) - assert_(a.itemsize == 20) - a = np.array([sixu('123'), '1234', sixu('1234')]) - assert_(a.itemsize == 16) - - def test_misaligned_objects_segfault(self): - """Ticket #1198 and #1267""" - a1 = np.zeros((10,), dtype='O,c') - a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') - a1['f0'] = a2 - r = repr(a1) - np.argmax(a1['f0']) - a1['f0'][1] = "FOO" - a1['f0'] = "FOO" - a3 = np.array(a1['f0'], dtype='S') - np.nonzero(a1['f0']) - a1.sort() - a4 = copy.deepcopy(a1) - - def test_misaligned_scalars_segfault(self): - """Ticket #1267""" - s1 = np.array(('a', 'Foo'), dtype='c,O') - s2 = np.array(('b', 'Bar'), dtype='c,O') - s1['f1'] = s2['f1'] - s1['f1'] = 'Baz' - - def test_misaligned_dot_product_objects(self): - """Ticket #1267""" - # This didn't require a fix, but it's worth testing anyway, because - # it may fail if .dot stops enforcing the arrays to be BEHAVED - a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') - b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') - np.dot(a['f0'], b['f0']) - - def test_byteswap_complex_scalar(self): - """Ticket #1259 and gh-441""" - for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]: - z = np.array([2.2-1.1j], dtype) - x = z[0] # always native-endian - y = x.byteswap() - if x.dtype.byteorder == z.dtype.byteorder: - # little-endian machine - assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder())) - else: - # big-endian machine - assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype)) - # double check real and imaginary parts: - assert_equal(x.real, 
y.real.byteswap()) - assert_equal(x.imag, y.imag.byteswap()) - - def test_structured_arrays_with_objects1(self): - """Ticket #1299""" - stra = 'aaaa' - strb = 'bbbb' - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(x[0, 1] == x[0, 0]) - - def test_structured_arrays_with_objects2(self): - """Ticket #1299 second test""" - stra = 'aaaa' - strb = 'bbbb' - numb = sys.getrefcount(strb) - numa = sys.getrefcount(stra) - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(sys.getrefcount(strb) == numb) - assert_(sys.getrefcount(stra) == numa + 2) - - def test_duplicate_title_and_name(self): - """Ticket #1254""" - def func(): - x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')]) - self.assertRaises(ValueError, func) - - def test_signed_integer_division_overflow(self): - """Ticket #1317.""" - def test_type(t): - min = np.array([np.iinfo(t).min]) - min //= -1 - - with np.errstate(divide="ignore"): - for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long): - test_type(t) - - def test_buffer_hashlib(self): - try: - from hashlib import md5 - except ImportError: - from md5 import new as md5 - - x = np.array([1, 2, 3], dtype=np.dtype('c') - - def test_log1p_compiler_shenanigans(self): - # Check if log1p is behaving on 32 bit intel systems. - assert_(np.isfinite(np.log1p(np.exp2(-53)))) - - def test_fromiter_comparison(self, level=rlevel): - a = np.fromiter(list(range(10)), dtype='b') - b = np.fromiter(list(range(10)), dtype='B') - assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) - - def test_fromstring_crash(self): - # Ticket #1345: the following should not cause a crash - np.fromstring(asbytes('aa, aa, 1.0'), sep=',') - - def test_ticket_1539(self): - dtypes = [x for x in np.typeDict.values() - if (issubclass(x, np.number) - and not issubclass(x, np.timedelta64))] - a = np.array([], dtypes[0]) - failures = [] - # ignore complex warnings - with warnings.catch_warnings(): - warnings.simplefilter('ignore', np.ComplexWarning) - for x in dtypes: - b = a.astype(x) - for y in dtypes: - c = a.astype(y) - try: - np.dot(b, c) - except TypeError as e: - failures.append((x, y)) - if failures: - raise AssertionError("Failures: %r" % failures) - - def test_ticket_1538(self): - x = np.finfo(np.float32) - for name in 'eps epsneg max min resolution tiny'.split(): - assert_equal(type(getattr(x, name)), np.float32, - err_msg=name) - - def test_ticket_1434(self): - # Check that the out= argument in var and std has an effect - data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) - out = np.zeros((3,)) - - ret = data.var(axis=1, out=out) - assert_(ret is out) - assert_array_equal(ret, data.var(axis=1)) - - ret = data.std(axis=1, out=out) - assert_(ret is out) - assert_array_equal(ret, data.std(axis=1)) - - def test_complex_nan_maximum(self): - cnan = complex(0, np.nan) - assert_equal(np.maximum(1, cnan), cnan) - - def test_subclass_int_tuple_assignment(self): - # ticket #1563 - class Subclass(np.ndarray): - def __new__(cls, i): - return np.ones((i,)).view(cls) - x = Subclass(5) - x[(0,)] = 2 # shouldn't raise an exception - assert_equal(x[0], 2) - - def test_ufunc_no_unnecessary_views(self): - # ticket #1548 - class Subclass(np.ndarray): - pass - x = np.array([1, 2, 3]).view(Subclass) - y = np.add(x, x, x) - assert_equal(id(x), id(y)) - - def test_take_refcount(self): - # ticket #939 - a = np.arange(16, dtype=np.float) - a.shape = (4, 4) - lut = np.ones((5 + 
3, 4), np.float) - rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) - c1 = sys.getrefcount(rgba) - try: - lut.take(a, axis=0, mode='clip', out=rgba) - except TypeError: - pass - c2 = sys.getrefcount(rgba) - assert_equal(c1, c2) - - def test_fromfile_tofile_seeks(self): - # On Python 3, tofile/fromfile used to get (#1610) the Python - # file handle out of sync - f0 = tempfile.NamedTemporaryFile() - f = f0.file - f.write(np.arange(255, dtype='u1').tobytes()) - - f.seek(20) - ret = np.fromfile(f, count=4, dtype='u1') - assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) - assert_equal(f.tell(), 24) - - f.seek(40) - np.array([1, 2, 3], dtype='u1').tofile(f) - assert_equal(f.tell(), 43) - - f.seek(40) - data = f.read(3) - assert_equal(data, asbytes("\x01\x02\x03")) - - f.seek(80) - f.read(4) - data = np.fromfile(f, dtype='u1', count=4) - assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) - - f.close() - - def test_complex_scalar_warning(self): - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_warns(np.ComplexWarning, float, x) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - assert_equal(float(x), float(x.real)) - - def test_complex_scalar_complex_cast(self): - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_equal(complex(x), 1+2j) - - def test_complex_boolean_cast(self): - """Ticket #2218""" - for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) - assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) - assert_(np.any(x)) - assert_(np.all(x[1:])) - - def test_uint_int_conversion(self): - x = 2**64 - 1 - assert_equal(int(np.uint64(x)), x) - - def test_duplicate_field_names_assign(self): - ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') - ra.dtype.names = ('f1', 'f2') - rep = repr(ra) # should not cause a segmentation fault - assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) - - def test_eq_string_and_object_array(self): - # From e-mail thread "__eq__ with str and object" (Keith Goodman) - a1 = np.array(['a', 'b'], dtype=object) - a2 = np.array(['a', 'c']) - assert_array_equal(a1 == a2, [True, False]) - assert_array_equal(a2 == a1, [True, False]) - - def test_nonzero_byteswap(self): - a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) - a.dtype = np.float32 - assert_equal(a.nonzero()[0], [1]) - a = a.byteswap().newbyteorder() - assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap - - def test_find_common_type_boolean(self): - # Ticket #1695 - assert_(np.find_common_type([], ['?', '?']) == '?') - - def test_empty_mul(self): - a = np.array([1.]) - a[1:1] *= 2 - assert_equal(a, [1.]) - - def test_array_side_effect(self): - assert_equal(np.dtype('S10').itemsize, 10) - - A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_) - - # This was throwing an exception because in ctors.c, - # discover_itemsize was calling PyObject_Length without checking - # the return code. This failed to get the length of the number 2, - # and the exception hung around until something checked - # PyErr_Occurred() and returned an error. 
- assert_equal(np.dtype('S10').itemsize, 10) - - def test_any_float(self): - # all and any for floats - a = np.array([0.1, 0.9]) - assert_(np.any(a)) - assert_(np.all(a)) - - def test_large_float_sum(self): - a = np.arange(10000, dtype='f') - assert_equal(a.sum(dtype='d'), a.astype('d').sum()) - - def test_ufunc_casting_out(self): - a = np.array(1.0, dtype=np.float32) - b = np.array(1.0, dtype=np.float64) - c = np.array(1.0, dtype=np.float32) - np.add(a, b, out=c) - assert_equal(c, 2.0) - - def test_array_scalar_contiguous(self): - # Array scalars are both C and Fortran contiguous - assert_(np.array(1.0).flags.c_contiguous) - assert_(np.array(1.0).flags.f_contiguous) - assert_(np.array(np.float32(1.0)).flags.c_contiguous) - assert_(np.array(np.float32(1.0)).flags.f_contiguous) - - def test_squeeze_contiguous(self): - """Similar to GitHub issue #387""" - a = np.zeros((1, 2)).squeeze() - b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze() - assert_(a.flags.c_contiguous) - assert_(a.flags.f_contiguous) - assert_(b.flags.f_contiguous) - - def test_reduce_contiguous(self): - """GitHub issue #387""" - a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) - b = np.add.reduce(np.zeros((2, 1, 2)), 1) - assert_(a.flags.c_contiguous) - assert_(a.flags.f_contiguous) - assert_(b.flags.c_contiguous) - - def test_object_array_self_reference(self): - # Object arrays with references to themselves can cause problems - a = np.array(0, dtype=object) - a[()] = a - assert_raises(TypeError, int, a) - assert_raises(TypeError, long, a) - assert_raises(TypeError, float, a) - assert_raises(TypeError, oct, a) - assert_raises(TypeError, hex, a) - - # Test the same for a circular reference. - b = np.array(a, dtype=object) - a[()] = b - assert_raises(TypeError, int, a) - # Numpy has no tp_traverse currently, so circular references - # cannot be detected. So resolve it: - a[()] = 0 - - # This was causing a to become like the above - a = np.array(0, dtype=object) - a[...] += 1 - assert_equal(a, 1) - - def test_object_array_self_copy(self): - # An object array being copied into itself DECREF'ed before INCREF'ing - # causing segmentation faults (gh-3787) - a = np.array(object(), dtype=object) - np.copyto(a, a) - assert_equal(sys.getrefcount(a[()]), 2) - a[()].__class__ # will segfault if object was deleted - - def test_zerosize_accumulate(self): - "Ticket #1733" - x = np.array([[42, 0]], dtype=np.uint32) - assert_equal(np.add.accumulate(x[:-1, 0]), []) - - def test_objectarray_setfield(self): - # Setfield directly manipulates the raw array data, - # so is invalid for object arrays. 
- x = np.array([1, 2, 3], dtype=object) - assert_raises(RuntimeError, x.setfield, 4, np.int32, 0) - - def test_setting_rank0_string(self): - "Ticket #1736" - s1 = asbytes("hello1") - s2 = asbytes("hello2") - a = np.zeros((), dtype="S10") - a[()] = s1 - assert_equal(a, np.array(s1)) - a[()] = np.array(s2) - assert_equal(a, np.array(s2)) - - a = np.zeros((), dtype='f4') - a[()] = 3 - assert_equal(a, np.array(3)) - a[()] = np.array(4) - assert_equal(a, np.array(4)) - - def test_string_astype(self): - "Ticket #1748" - s1 = asbytes('black') - s2 = asbytes('white') - s3 = asbytes('other') - a = np.array([[s1], [s2], [s3]]) - assert_equal(a.dtype, np.dtype('S5')) - b = a.astype(np.dtype('S0')) - assert_equal(b.dtype, np.dtype('S5')) - - def test_ticket_1756(self): - """Ticket #1756 """ - s = asbytes('0123456789abcdef') - a = np.array([s]*5) - for i in range(1, 17): - a1 = np.array(a, "|S%d"%i) - a2 = np.array([s[:i]]*5) - assert_equal(a1, a2) - - def test_fields_strides(self): - "Ticket #1760" - r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') - assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) - assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) - assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) - assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) - - def test_alignment_update(self): - """Check that alignment flag is updated on stride setting""" - a = np.arange(10) - assert_(a.flags.aligned) - a.strides = 3 - assert_(not a.flags.aligned) - - def test_ticket_1770(self): - "Should not segfault on python 3k" - import numpy as np - try: - a = np.zeros((1,), dtype=[('f1', 'f')]) - a['f1'] = 1 - a['f2'] = 1 - except ValueError: - pass - except: - raise AssertionError - - def test_ticket_1608(self): - "x.flat shouldn't modify data" - x = np.array([[1, 2], [3, 4]]).T - y = np.array(x.flat) - assert_equal(x, [[1, 3], [2, 4]]) - - def test_pickle_string_overwrite(self): - import re - - data = np.array([1], dtype='b') - blob = pickle.dumps(data, protocol=1) - data = pickle.loads(blob) - - # Check that loads does not clobber interned strings - s = re.sub("a(.)", "\x01\\1", "a_") - assert_equal(s[0], "\x01") - data[0] = 0xbb - s = re.sub("a(.)", "\x01\\1", "a_") - assert_equal(s[0], "\x01") - - def test_pickle_bytes_overwrite(self): - if sys.version_info[0] >= 3: - data = np.array([1], dtype='b') - data = pickle.loads(pickle.dumps(data)) - data[0] = 0xdd - bytestring = "\x01 ".encode('ascii') - assert_equal(bytestring[0:1], '\x01'.encode('ascii')) - - def test_pickle_py2_array_latin1_hack(self): - # Check that unpickling hacks in Py3 that support - # encoding='latin1' work correctly. - - # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) - data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" - "tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" - "I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" - "p13\ntp14\nb.") - if sys.version_info[0] >= 3: - # This should work: - result = pickle.loads(data, encoding='latin1') - assert_array_equal(result, np.array([129], dtype='b')) - # Should not segfault: - assert_raises(Exception, pickle.loads, data, encoding='koi8-r') - - def test_pickle_py2_scalar_latin1_hack(self): - # Check that scalar unpickling hack in Py3 that supports - # encoding='latin1' work correctly. - - # Python2 output for pickle.dumps(...) 
- datas = [ - # (original, python2_pickle, koi8r_validity) - (np.unicode_('\u6bd2'), - asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" - "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" - "tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), - 'invalid'), - - (np.float64(9e123), - asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" - "p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" - "bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), - 'invalid'), - - (np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1 - asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" - "I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" - "tp8\nRp9\n."), - 'different'), - ] - if sys.version_info[0] >= 3: - for original, data, koi8r_validity in datas: - result = pickle.loads(data, encoding='latin1') - assert_equal(result, original) - - # Decoding under non-latin1 encoding (e.g.) KOI8-R can - # produce bad results, but should not segfault. - if koi8r_validity == 'different': - # Unicode code points happen to lie within latin1, - # but are different in koi8-r, resulting to silent - # bogus results - result = pickle.loads(data, encoding='koi8-r') - assert_(result != original) - elif koi8r_validity == 'invalid': - # Unicode code points outside latin1, so results - # to an encoding exception - assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') - else: - raise ValueError(koi8r_validity) - - def test_structured_type_to_object(self): - a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') - a_obj = np.empty((2,), dtype=object) - a_obj[0] = (0, 1) - a_obj[1] = (3, 2) - # astype records -> object - assert_equal(a_rec.astype(object), a_obj) - # '=' records -> object - b = np.empty_like(a_obj) - b[...] = a_rec - assert_equal(b, a_obj) - # '=' object -> records - b = np.empty_like(a_rec) - b[...] = a_obj - assert_equal(b, a_rec) - - def test_assign_obj_listoflists(self): - # Ticket # 1870 - # The inner list should get assigned to the object elements - a = np.zeros(4, dtype=object) - b = a.copy() - a[0] = [1] - a[1] = [2] - a[2] = [3] - a[3] = [4] - b[...] = [[1], [2], [3], [4]] - assert_equal(a, b) - # The first dimension should get broadcast - a = np.zeros((2, 2), dtype=object) - a[...] = [[1, 2]] - assert_equal(a, [[1, 2], [1, 2]]) - - def test_memoryleak(self): - # Ticket #1917 - ensure that array data doesn't leak - for i in range(1000): - # 100MB times 1000 would give 100GB of memory usage if it leaks - a = np.empty((100000000,), dtype='i1') - del a - - def test_ufunc_reduce_memoryleak(self): - a = np.arange(6) - acnt = sys.getrefcount(a) - res = np.add.reduce(a) - assert_equal(sys.getrefcount(a), acnt) - - def test_search_sorted_invalid_arguments(self): - # Ticket #2021, should not segfault. 
- x = np.arange(0, 4, dtype='datetime64[D]') - assert_raises(TypeError, x.searchsorted, 1) - - def test_string_truncation(self): - # Ticket #1990 - Data can be truncated in creation of an array from a - # mixed sequence of numeric values and strings - for val in [True, 1234, 123.4, complex(1, 234)]: - for tostr in [asunicode, asbytes]: - b = np.array([val, tostr('xx')]) - assert_equal(tostr(b[0]), tostr(val)) - b = np.array([tostr('xx'), val]) - assert_equal(tostr(b[1]), tostr(val)) - - # test also with longer strings - b = np.array([val, tostr('xxxxxxxxxx')]) - assert_equal(tostr(b[0]), tostr(val)) - b = np.array([tostr('xxxxxxxxxx'), val]) - assert_equal(tostr(b[1]), tostr(val)) - - def test_string_truncation_ucs2(self): - # Ticket #2081. Python compiled with two byte unicode - # can lead to truncation if itemsize is not properly - # adjusted for Numpy's four byte unicode. - if sys.version_info[0] >= 3: - a = np.array(['abcd']) - else: - a = np.array([sixu('abcd')]) - assert_equal(a.dtype.itemsize, 16) - - def test_unique_stable(self): - # Ticket #2063 must always choose stable sort for argsort to - # get consistent results - v = np.array(([0]*5 + [1]*6 + [2]*6)*4) - res = np.unique(v, return_index=True) - tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) - assert_equal(res, tgt) - - def test_unicode_alloc_dealloc_match(self): - # Ticket #1578, the mismatch only showed up when running - # python-debug for python versions >= 2.7, and then as - # a core dump and error message. - a = np.array(['abc'], dtype=np.unicode)[0] - del a - - def test_refcount_error_in_clip(self): - # Ticket #1588 - a = np.zeros((2,), dtype='>i2').clip(min=0) - x = a + a - # This used to segfault: - y = str(x) - # Check the final string: - assert_(y == "[0 0]") - - def test_searchsorted_wrong_dtype(self): - # Ticket #2189, it used to segfault, so we check that it raises the - # proper exception. - a = np.array([('a', 1)], dtype='S1, int') - assert_raises(TypeError, np.searchsorted, a, 1.2) - # Ticket #2066, similar problem: - dtype = np.format_parser(['i4', 'i4'], [], []) - a = np.recarray((2, ), dtype) - assert_raises(TypeError, np.searchsorted, a, 1) - - def test_complex64_alignment(self): - # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment - dtt = np.complex64 - arr = np.arange(10, dtype=dtt) - # 2D array - arr2 = np.reshape(arr, (2, 5)) - # Fortran write followed by (C or F) read caused bus error - data_str = arr2.tobytes('F') - data_back = np.ndarray(arr2.shape, - arr2.dtype, - buffer=data_str, - order='F') - assert_array_equal(arr2, data_back) - - def test_structured_count_nonzero(self): - arr = np.array([0, 1]).astype('i4, (2)i4')[:1] - count = np.count_nonzero(arr) - assert_equal(count, 0) - - def test_copymodule_preserves_f_contiguity(self): - a = np.empty((2, 2), order='F') - b = copy.copy(a) - c = copy.deepcopy(a) - assert_(b.flags.fortran) - assert_(b.flags.f_contiguous) - assert_(c.flags.fortran) - assert_(c.flags.f_contiguous) - - def test_fortran_order_buffer(self): - import numpy as np - a = np.array([['Hello', 'Foob']], dtype='U5', order='F') - arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) - arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')], - [sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]]) - assert_array_equal(arr, arr2) - - def test_assign_from_sequence_error(self): - # Ticket #4024. 
- arr = np.array([1, 2, 3]) - assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) - arr.__setitem__(slice(None), [9]) - assert_equal(arr, [9, 9, 9]) - - def test_format_on_flex_array_element(self): - # Ticket #4369. - dt = np.dtype([('date', '= 3: - assert_raises(TypeError, f, lhs, rhs) - else: - f(lhs, rhs) - assert_(not op.eq(lhs, rhs)) - assert_(op.ne(lhs, rhs)) - - def test_richcompare_scalar_and_subclass(self): - # gh-4709 - class Foo(np.ndarray): - def __eq__(self, other): - return "OK" - x = np.array([1,2,3]).view(Foo) - assert_equal(10 == x, "OK") - assert_equal(np.int32(10) == x, "OK") - assert_equal(np.array([10]) == x, "OK") - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarinherit.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarinherit.py deleted file mode 100644 index 6f394196c2060..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarinherit.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -""" Test printing of scalar types. - -""" - -import numpy as np -from numpy.testing import TestCase, run_module_suite - - -class A(object): pass -class B(A, np.float64): pass - -class C(B): pass -class D(C, B): pass - -class B0(np.float64, A): pass -class C0(B0): pass - -class TestInherit(TestCase): - def test_init(self): - x = B(1.0) - assert str(x) == '1.0' - y = C(2.0) - assert str(y) == '2.0' - z = D(3.0) - assert str(z) == '3.0' - def test_init2(self): - x = B0(1.0) - assert str(x) == '1.0' - y = C0(2.0) - assert str(y) == '2.0' - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarmath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarmath.py deleted file mode 100644 index afdc06c03d8e1..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_scalarmath.py +++ /dev/null @@ -1,275 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import platform -from numpy.testing import * -from numpy.testing.utils import _gen_alignment_data -import numpy as np - -types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, - np.int_, np.uint, np.longlong, np.ulonglong, - np.single, np.double, np.longdouble, np.csingle, - np.cdouble, np.clongdouble] - -# This compares scalarmath against ufuncs. - -class TestTypes(TestCase): - def test_types(self, level=1): - for atype in types: - a = atype(1) - assert_(a == 1, "error with %r: got %r" % (atype, a)) - - def test_type_add(self, level=1): - # list of types - for k, atype in enumerate(types): - a_scalar = atype(3) - a_array = np.array([3], dtype=atype) - for l, btype in enumerate(types): - b_scalar = btype(1) - b_array = np.array([1], dtype=btype) - c_scalar = a_scalar + b_scalar - c_array = a_array + b_array - # It was comparing the type numbers, but the new ufunc - # function-finding mechanism finds the lowest function - # to which both inputs can be cast - which produces 'l' - # when you do 'q' + 'b'. The old function finding mechanism - # skipped ahead based on the first argument, but that - # does not produce properly symmetric results... 
- assert_equal(c_scalar.dtype, c_array.dtype, - "error with types (%d/'%c' + %d/'%c')" % - (k, np.dtype(atype).char, l, np.dtype(btype).char)) - - def test_type_create(self, level=1): - for k, atype in enumerate(types): - a = np.array([1, 2, 3], atype) - b = atype([1, 2, 3]) - assert_equal(a, b) - - def test_leak(self): - # test leak of scalar objects - # a leak would show up in valgrind as still-reachable of ~2.6MB - for i in range(200000): - np.add(1, 1) - - -class TestBaseMath(TestCase): - def test_blocked(self): - # test alignments offsets for simd instructions - # alignments for vz + 2 * (vs - 1) + 1 - for dt, sz in [(np.float32, 11), (np.float64, 7)]: - for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt, - type='binary', - max_size=sz): - exp1 = np.ones_like(inp1) - inp1[...] = np.ones_like(inp1) - inp2[...] = np.zeros_like(inp2) - assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg) - assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg) - assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg) - - np.add(inp1, inp2, out=out) - assert_almost_equal(out, exp1, err_msg=msg) - - inp2[...] += np.arange(inp2.size, dtype=dt) + 1 - assert_almost_equal(np.square(inp2), - np.multiply(inp2, inp2), err_msg=msg) - assert_almost_equal(np.reciprocal(inp2), - np.divide(1, inp2), err_msg=msg) - - inp1[...] = np.ones_like(inp1) - inp2[...] = np.zeros_like(inp2) - np.add(inp1, 1, out=out) - assert_almost_equal(out, exp1 + 1, err_msg=msg) - np.add(1, inp2, out=out) - assert_almost_equal(out, exp1, err_msg=msg) - - def test_lower_align(self): - # check data that is not aligned to element size - # i.e doubles are aligned to 4 bytes on i386 - d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - assert_almost_equal(d + d, d * 2) - np.add(d, d, out=o) - np.add(np.ones_like(d), d, out=o) - np.add(d, np.ones_like(d), out=o) - np.add(np.ones_like(d), d) - np.add(d, np.ones_like(d)) - - -class TestPower(TestCase): - def test_small_types(self): - for t in [np.int8, np.int16, np.float16]: - a = t(3) - b = a ** 4 - assert_(b == 81, "error with %r: got %r" % (t, b)) - - def test_large_types(self): - for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: - a = t(51) - b = a ** 4 - msg = "error with %r: got %r" % (t, b) - if np.issubdtype(t, np.integer): - assert_(b == 6765201, msg) - else: - assert_almost_equal(b, 6765201, err_msg=msg) - def test_mixed_types(self): - typelist = [np.int8, np.int16, np.float16, - np.float32, np.float64, np.int8, - np.int16, np.int32, np.int64] - for t1 in typelist: - for t2 in typelist: - a = t1(3) - b = t2(2) - result = a**b - msg = ("error with %r and %r:" - "got %r, expected %r") % (t1, t2, result, 9) - if np.issubdtype(np.dtype(result), np.integer): - assert_(result == 9, msg) - else: - assert_almost_equal(result, 9, err_msg=msg) - -class TestComplexDivision(TestCase): - def test_zero_division(self): - with np.errstate(all="ignore"): - for t in [np.complex64, np.complex128]: - a = t(0.0) - b = t(1.0) - assert_(np.isinf(b/a)) - b = t(complex(np.inf, np.inf)) - assert_(np.isinf(b/a)) - b = t(complex(np.inf, np.nan)) - assert_(np.isinf(b/a)) - b = t(complex(np.nan, np.inf)) - assert_(np.isinf(b/a)) - b = t(complex(np.nan, np.nan)) - assert_(np.isnan(b/a)) - b = t(0.) 
- assert_(np.isnan(b/a)) - - -class TestConversion(TestCase): - def test_int_from_long(self): - l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] - li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] - for T in [None, np.float64, np.int64]: - a = np.array(l, dtype=T) - assert_equal([int(_m) for _m in a], li) - - a = np.array(l[:3], dtype=np.uint64) - assert_equal([int(_m) for _m in a], li[:3]) - - def test_iinfo_long_values(self): - for code in 'bBhH': - res = np.array(np.iinfo(code).max + 1, dtype=code) - tgt = np.iinfo(code).min - assert_(res == tgt) - - for code in np.typecodes['AllInteger']: - res = np.array(np.iinfo(code).max, dtype=code) - tgt = np.iinfo(code).max - assert_(res == tgt) - - for code in np.typecodes['AllInteger']: - res = np.typeDict[code](np.iinfo(code).max) - tgt = np.iinfo(code).max - assert_(res == tgt) - - def test_int_raise_behaviour(self): - def Overflow_error_func(dtype): - res = np.typeDict[dtype](np.iinfo(dtype).max + 1) - - for code in 'lLqQ': - assert_raises(OverflowError, Overflow_error_func, code) - - def test_longdouble_int(self): - # gh-627 - x = np.longdouble(np.inf) - assert_raises(OverflowError, x.__int__) - x = np.clongdouble(np.inf) - assert_raises(OverflowError, x.__int__) - - def test_numpy_scalar_relational_operators(self): - #All integer - for dt1 in np.typecodes['AllInteger']: - assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) - - for dt2 in np.typecodes['AllInteger']: - assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - #Unsigned integers - for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - - #unsigned vs signed - for dt2 in 'bhilqp': - assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - #Signed integers and floats - for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - - for dt2 in 'bhlqp' + np.typecodes['Float']: - assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) - - -#class TestRepr(TestCase): -# def test_repr(self): -# for t in types: -# val = t(1197346475.0137341) -# val_repr = repr(val) -# val2 = eval(val_repr) -# assert_equal( val, val2 ) - - -class TestRepr(object): - def _test_type_repr(self, t): - finfo=np.finfo(t) - last_fraction_bit_idx = finfo.nexp + finfo.nmant - last_exponent_bit_idx = finfo.nexp - storage_bytes = np.dtype(t).itemsize*8 - # could add some more types to the list below - for which in ['small denorm', 'small norm']: - # Values 
from http://en.wikipedia.org/wiki/IEEE_754 - constr = np.array([0x00]*storage_bytes, dtype=np.uint8) - if which == 'small denorm': - byte = last_fraction_bit_idx // 8 - bytebit = 7-(last_fraction_bit_idx % 8) - constr[byte] = 1< real - n 1 negative nums + O - n 1 sign nums + O -> int - n 1 invert bool + ints + O flts raise an error - n 1 degrees real + M cmplx raise an error - n 1 radians real + M cmplx raise an error - n 1 arccos flts + M - n 1 arccosh flts + M - n 1 arcsin flts + M - n 1 arcsinh flts + M - n 1 arctan flts + M - n 1 arctanh flts + M - n 1 cos flts + M - n 1 sin flts + M - n 1 tan flts + M - n 1 cosh flts + M - n 1 sinh flts + M - n 1 tanh flts + M - n 1 exp flts + M - n 1 expm1 flts + M - n 1 log flts + M - n 1 log10 flts + M - n 1 log1p flts + M - n 1 sqrt flts + M real x < 0 raises error - n 1 ceil real + M - n 1 trunc real + M - n 1 floor real + M - n 1 fabs real + M - n 1 rint flts + M - n 1 isnan flts -> bool - n 1 isinf flts -> bool - n 1 isfinite flts -> bool - n 1 signbit real -> bool - n 1 modf real -> (frac, int) - n 1 logical_not bool + nums + M -> bool - n 2 left_shift ints + O flts raise an error - n 2 right_shift ints + O flts raise an error - n 2 add bool + nums + O boolean + is || - n 2 subtract bool + nums + O boolean - is ^ - n 2 multiply bool + nums + O boolean * is & - n 2 divide nums + O - n 2 floor_divide nums + O - n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d - n 2 fmod nums + M - n 2 power nums + O - n 2 greater bool + nums + O -> bool - n 2 greater_equal bool + nums + O -> bool - n 2 less bool + nums + O -> bool - n 2 less_equal bool + nums + O -> bool - n 2 equal bool + nums + O -> bool - n 2 not_equal bool + nums + O -> bool - n 2 logical_and bool + nums + M -> bool - n 2 logical_or bool + nums + M -> bool - n 2 logical_xor bool + nums + M -> bool - n 2 maximum bool + nums + O - n 2 minimum bool + nums + O - n 2 bitwise_and bool + ints + O flts raise an error - n 2 bitwise_or bool + ints + O flts raise an error - n 2 bitwise_xor bool + ints + O flts raise an error - n 2 arctan2 real + M - n 2 remainder ints + real + O - n 2 hypot real + M - ===== ==== ============= =============== ======================== - - Types other than those listed will be accepted, but they are cast to - the smallest compatible type for which the function is defined. The - casting rules are: - - bool -> int8 -> float32 - ints -> double - - """ - pass - - - def test_signature(self): - # the arguments to test_signature are: nin, nout, core_signature - # pass - assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1) - - # pass. empty core signature; treat as plain ufunc (with trivial core) - assert_equal(umt.test_signature(2, 1, "(),()->()"), 0) - - # in the following calls, a ValueError should be raised because - # of error in core signature - # error: extra parenthesis - msg = "core_sig: extra parenthesis" - try: - ret = umt.test_signature(2, 1, "((i)),(i)->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: parenthesis matching - msg = "core_sig: parenthesis matching" - try: - ret = umt.test_signature(2, 1, "(i),)i(->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: incomplete signature. letters outside of parenthesis are ignored - msg = "core_sig: incomplete signature" - try: - ret = umt.test_signature(2, 1, "(i),->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: incomplete signature. 
2 output arguments are specified - msg = "core_sig: incomplete signature" - try: - ret = umt.test_signature(2, 2, "(i),(i)->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - - # more complicated names for variables - assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1) - - def test_get_signature(self): - assert_equal(umt.inner1d.signature, "(i),(i)->()") - - def test_forced_sig(self): - a = 0.5*np.arange(3, dtype='f8') - assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) - assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), - casting='unsafe'), [0, 0, 1]) - - b = np.zeros((3,), dtype='f8') - np.add(a, 0.5, out=b) - assert_equal(b, [0.5, 1, 1.5]) - b[:] = 0 - np.add(a, 0.5, sig='i', out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 - np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - - def test_sum_stability(self): - a = np.ones(500, dtype=np.float32) - assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) - - a = np.ones(500, dtype=np.float64) - assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) - - def test_sum(self): - for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble): - for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, - 128, 1024, 1235): - tgt = dt(v * (v + 1) / 2) - d = np.arange(1, v + 1, dtype=dt) - assert_almost_equal(np.sum(d), tgt) - assert_almost_equal(np.sum(d[::-1]), tgt) - - d = np.ones(500, dtype=dt) - assert_almost_equal(np.sum(d[::2]), 250.) - assert_almost_equal(np.sum(d[1::2]), 250.) - assert_almost_equal(np.sum(d[::3]), 167.) - assert_almost_equal(np.sum(d[1::3]), 167.) - assert_almost_equal(np.sum(d[::-2]), 250.) - assert_almost_equal(np.sum(d[-1::-2]), 250.) - assert_almost_equal(np.sum(d[::-3]), 167.) - assert_almost_equal(np.sum(d[-1::-3]), 167.) - # sum with first reduction entry != 0 - d = np.ones((1,), dtype=dt) - d += d - assert_almost_equal(d, 2.) - - def test_sum_complex(self): - for dt in (np.complex64, np.complex128, np.clongdouble): - for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, - 128, 1024, 1235): - tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) *1j) - d = np.empty(v, dtype=dt) - d.real = np.arange(1, v + 1) - d.imag = -np.arange(1, v + 1) - assert_almost_equal(np.sum(d), tgt) - assert_almost_equal(np.sum(d[::-1]), tgt) - - d = np.ones(500, dtype=dt) + 1j - assert_almost_equal(np.sum(d[::2]), 250. + 250j) - assert_almost_equal(np.sum(d[1::2]), 250. + 250j) - assert_almost_equal(np.sum(d[::3]), 167. + 167j) - assert_almost_equal(np.sum(d[1::3]), 167. + 167j) - assert_almost_equal(np.sum(d[::-2]), 250. + 250j) - assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) - assert_almost_equal(np.sum(d[::-3]), 167. + 167j) - assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) - # sum with first reduction entry != 0 - d = np.ones((1,), dtype=dt) + 1j - d += d - assert_almost_equal(d, 2. 
+ 2j) - - def test_inner1d(self): - a = np.arange(6).reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1)) - a = np.arange(6) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a)) - - def test_broadcast(self): - msg = "broadcast" - a = np.arange(4).reshape((2, 1, 2)) - b = np.arange(4).reshape((1, 2, 2)) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "extend & broadcast loop dimensions" - b = np.arange(4).reshape((2, 2)) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "broadcast in core dimensions" - a = np.arange(8).reshape((4, 2)) - b = np.arange(4).reshape((4, 1)) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "extend & broadcast core and loop dimensions" - a = np.arange(8).reshape((4, 2)) - b = np.array(7) - assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) - msg = "broadcast should fail" - a = np.arange(2).reshape((2, 1, 1)) - b = np.arange(3).reshape((3, 1, 1)) - try: - ret = umt.inner1d(a, b) - assert_equal(ret, None, err_msg=msg) - except ValueError: None - - def test_type_cast(self): - msg = "type cast" - a = np.arange(6, dtype='short').reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) - msg = "type cast on one argument" - a = np.arange(6).reshape((2, 3)) - b = a+0.1 - assert_array_almost_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), - err_msg=msg) - - def test_endian(self): - msg = "big endian" - a = np.arange(6, dtype='>i4').reshape((2, 3)) - assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) - msg = "little endian" - a = np.arange(6, dtype=' 0, m > 0: fine - # n = 0, m > 0: fine, doing 0 reductions of m-element arrays - # n > 0, m = 0: can't reduce a 0-element array, ValueError - # n = 0, m = 0: can't reduce a 0-element array, ValueError (for - # consistency with the above case) - # This test doesn't actually look at return values, it just checks to - # make sure that error we get an error in exactly those cases where we - # expect one, and assumes the calculations themselves are done - # correctly. - def ok(f, *args, **kwargs): - f(*args, **kwargs) - def err(f, *args, **kwargs): - assert_raises(ValueError, f, *args, **kwargs) - def t(expect, func, n, m): - expect(func, np.zeros((n, m)), axis=1) - expect(func, np.zeros((m, n)), axis=0) - expect(func, np.zeros((n // 2, n // 2, m)), axis=2) - expect(func, np.zeros((n // 2, m, n // 2)), axis=1) - expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) - expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) - expect(func, np.zeros((m // 3, m // 3, m // 3, - n // 2, n //2)), - axis=(0, 1, 2)) - # Check what happens if the inner (resp. 
outer) dimensions are a - # mix of zero and non-zero: - expect(func, np.zeros((10, m, n)), axis=(0, 1)) - expect(func, np.zeros((10, n, m)), axis=(0, 2)) - expect(func, np.zeros((m, 10, n)), axis=0) - expect(func, np.zeros((10, m, n)), axis=1) - expect(func, np.zeros((10, n, m)), axis=2) - # np.maximum is just an arbitrary ufunc with no reduction identity - assert_equal(np.maximum.identity, None) - t(ok, np.maximum.reduce, 30, 30) - t(ok, np.maximum.reduce, 0, 30) - t(err, np.maximum.reduce, 30, 0) - t(err, np.maximum.reduce, 0, 0) - err(np.maximum.reduce, []) - np.maximum.reduce(np.zeros((0, 0)), axis=()) - - # all of the combinations are fine for a reduction that has an - # identity - t(ok, np.add.reduce, 30, 30) - t(ok, np.add.reduce, 0, 30) - t(ok, np.add.reduce, 30, 0) - t(ok, np.add.reduce, 0, 0) - np.add.reduce([]) - np.add.reduce(np.zeros((0, 0)), axis=()) - - # OTOH, accumulate always makes sense for any combination of n and m, - # because it maps an m-element array to an m-element array. These - # tests are simpler because accumulate doesn't accept multiple axes. - for uf in (np.maximum, np.add): - uf.accumulate(np.zeros((30, 0)), axis=0) - uf.accumulate(np.zeros((0, 30)), axis=0) - uf.accumulate(np.zeros((30, 30)), axis=0) - uf.accumulate(np.zeros((0, 0)), axis=0) - - def test_safe_casting(self): - # In old versions of numpy, in-place operations used the 'unsafe' - # casting rules. In some future version, 'same_kind' will become the - # default. - a = np.array([1, 2, 3], dtype=int) - # Non-in-place addition is fine - assert_array_equal(assert_no_warnings(np.add, a, 1.1), - [2.1, 3.1, 4.1]) - assert_warns(DeprecationWarning, np.add, a, 1.1, out=a) - assert_array_equal(a, [2, 3, 4]) - def add_inplace(a, b): - a += b - assert_warns(DeprecationWarning, add_inplace, a, 1.1) - assert_array_equal(a, [3, 4, 5]) - # Make sure that explicitly overriding the warning is allowed: - assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") - assert_array_equal(a, [4, 5, 6]) - - # There's no way to propagate exceptions from the place where we issue - # this deprecation warning, so we must throw the exception away - # entirely rather than cause it to be raised at some other point, or - # trigger some other unsuspecting if (PyErr_Occurred()) { ...} at some - # other location entirely. 
- import warnings - import sys - if sys.version_info[0] >= 3: - from io import StringIO - else: - from StringIO import StringIO - with warnings.catch_warnings(): - warnings.simplefilter("error") - old_stderr = sys.stderr - try: - sys.stderr = StringIO() - # No error, but dumps to stderr - a += 1.1 - # No error on the next bit of code executed either - 1 + 1 - assert_("Implicitly casting" in sys.stderr.getvalue()) - finally: - sys.stderr = old_stderr - - def test_ufunc_custom_out(self): - # Test ufunc with built in input types and custom output type - - a = np.array([0, 1, 2], dtype='i8') - b = np.array([0, 1, 2], dtype='i8') - c = np.empty(3, dtype=rational) - - # Output must be specified so numpy knows what - # ufunc signature to look for - result = test_add(a, b, c) - assert_equal(result, np.array([0, 2, 4], dtype=rational)) - - # no output type should raise TypeError - assert_raises(TypeError, test_add, a, b) - - def test_operand_flags(self): - a = np.arange(16, dtype='l').reshape(4, 4) - b = np.arange(9, dtype='l').reshape(3, 3) - opflag_tests.inplace_add(a[:-1, :-1], b) - assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], - [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) - - a = np.array(0) - opflag_tests.inplace_add(a, 3) - assert_equal(a, 3) - opflag_tests.inplace_add(a, [3, 4]) - assert_equal(a, 10) - - def test_struct_ufunc(self): - import numpy.core.struct_ufunc_test as struct_ufunc - - a = np.array([(1, 2, 3)], dtype='u8,u8,u8') - b = np.array([(1, 2, 3)], dtype='u8,u8,u8') - - result = struct_ufunc.add_triplet(a, b) - assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) - - def test_custom_ufunc(self): - a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)], - dtype=rational) - b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)], - dtype=rational) - - result = test_add_rationals(a, b) - expected = np.array([rational(1), rational(2, 3), rational(1, 2)], - dtype=rational) - assert_equal(result, expected) - - def test_custom_array_like(self): - class MyThing(object): - __array_priority__ = 1000 - - rmul_count = 0 - getitem_count = 0 - - def __init__(self, shape): - self.shape = shape - - def __len__(self): - return self.shape[0] - - def __getitem__(self, i): - MyThing.getitem_count += 1 - if not isinstance(i, tuple): - i = (i,) - if len(i) > len(self.shape): - raise IndexError("boo") - - return MyThing(self.shape[len(i):]) - - def __rmul__(self, other): - MyThing.rmul_count += 1 - return self - - np.float64(5)*MyThing((3, 3)) - assert_(MyThing.rmul_count == 1, MyThing.rmul_count) - assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) - - def test_inplace_fancy_indexing(self): - - a = np.arange(10) - np.add.at(a, [2, 5, 2], 1) - assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) - - a = np.arange(10) - b = np.array([100, 100, 100]) - np.add.at(a, [2, 5, 2], b) - assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) - - a = np.arange(9).reshape(3, 3) - b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) - np.add.at(a, (slice(None), [1, 2, 1]), b) - assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) - assert_equal(a, - [[[0, 401, 202], - [3, 404, 205], - [6, 407, 208]], - - [[9, 410, 211], - [12, 413, 214], - [15, 416, 217]], - - [[18, 419, 220], - [21, 422, 223], - [24, 425, 226]]]) - - a = np.arange(9).reshape(3, 3) - b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) -
np.add.at(a, ([1, 2, 1], slice(None)), b) - assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) - assert_equal(a, - [[[0, 1, 2 ], - [203, 404, 605], - [106, 207, 308]], - - [[9, 10, 11 ], - [212, 413, 614], - [115, 216, 317]], - - [[18, 19, 20 ], - [221, 422, 623], - [124, 225, 326]]]) - - a = np.arange(9).reshape(3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (0, [1, 2, 1]), b) - assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, ([1, 2, 1], 0, slice(None)), b) - assert_equal(a, - [[[0, 1, 2], - [3, 4, 5], - [6, 7, 8]], - - [[209, 410, 611], - [12, 13, 14], - [15, 16, 17]], - - [[118, 219, 320], - [21, 22, 23], - [24, 25, 26]]]) - - a = np.arange(27).reshape(3, 3, 3) - b = np.array([100, 200, 300]) - np.add.at(a, (slice(None), slice(None), slice(None)), b) - assert_equal(a, - [[[100, 201, 302], - [103, 204, 305], - [106, 207, 308]], - - [[109, 210, 311], - [112, 213, 314], - [115, 216, 317]], - - [[118, 219, 320], - [121, 222, 323], - [124, 225, 326]]]) - - a = np.arange(10) - np.negative.at(a, [2, 5, 2]) - assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9]) - - # Test 0-dim array - a = np.array(0) - np.add.at(a, (), 1) - assert_equal(a, 1) - - assert_raises(IndexError, np.add.at, a, 0, 1) - assert_raises(IndexError, np.add.at, a, [], 1) - - # Test mixed dtypes - a = np.arange(10) - np.power.at(a, [1, 2, 3, 2], 3.5) - assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) - - # Test boolean indexing and boolean ufuncs - a = np.arange(10) - index = a % 2 == 0 - np.equal.at(a, index, [0, 2, 4, 6, 8]) - assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) - - # Test unary operator - a = np.arange(10, dtype='u4') - np.invert.at(a, [2, 5, 2]) - assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) - - # Test empty subspace - orig = np.arange(4) - a = orig[:, None][:, 0:0] - np.add.at(a, [0, 1], 3) - assert_array_equal(orig, np.arange(4)) - - # Test with swapped byte order - index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) - values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) - np.add.at(values, index, 3) - assert_array_equal(values, [1, 8, 6, 4]) - - # Test exception thrown - values = np.array(['a', 1], dtype=np.object) - self.assertRaises(TypeError, np.add.at, values, [0, 1], 1) - assert_array_equal(values, np.array(['a', 1], dtype=np.object)) - - def test_reduce_arguments(self): - f = np.add.reduce - d = np.ones((5,2), dtype=int) - o = np.ones((2,), dtype=d.dtype) - r = o * 5 - assert_equal(f(d), r) - # a, axis=0, dtype=None, out=None, keepdims=False - assert_equal(f(d, axis=0), r) - assert_equal(f(d, 0), r) - assert_equal(f(d, 0, dtype=None), r) - assert_equal(f(d, 0, dtype='i'), r) - assert_equal(f(d, 0, 'i'), r) - assert_equal(f(d, 0, None), r) - assert_equal(f(d, 0, None, out=None), r) - assert_equal(f(d, 0, None, out=o), r) - assert_equal(f(d, 0, None, o), r) - assert_equal(f(d, 0, None, None), r) - assert_equal(f(d, 0, None, None, keepdims=False), r) - assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) - # multiple keywords - assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) - assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) - assert_equal(f(d, 0, None, out=None, keepdims=False), r) - - # too little - assert_raises(TypeError, f) - # too much - assert_raises(TypeError, f, d, 0, None, 
None, False, 1) - # invalid axis - assert_raises(TypeError, f, d, "invalid") - assert_raises(TypeError, f, d, axis="invalid") - assert_raises(TypeError, f, d, axis="invalid", dtype=None, - keepdims=True) - # invalid dtype - assert_raises(TypeError, f, d, 0, "invalid") - assert_raises(TypeError, f, d, dtype="invalid") - assert_raises(TypeError, f, d, dtype="invalid", out=None) - # invalid out - assert_raises(TypeError, f, d, 0, None, "invalid") - assert_raises(TypeError, f, d, out="invalid") - assert_raises(TypeError, f, d, out="invalid", dtype=None) - # keepdims boolean, no invalid value - # assert_raises(TypeError, f, d, 0, None, None, "invalid") - # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) - # invalid mix - assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", - out=None) - - # invalid keyword - assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", - out=None) - assert_raises(TypeError, f, d, invalid=0) - assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, - out=None, invalid=0) - assert_raises(TypeError, f, d, axis=0, dtype=None, - out=None, invalid=0) - assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath.py deleted file mode 100644 index 7451af8f08e59..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath.py +++ /dev/null @@ -1,1665 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import platform -import warnings - -from numpy.testing import * -from numpy.testing.utils import _gen_alignment_data -import numpy.core.umath as ncu -import numpy as np - - -def on_powerpc(): - """ True if we are running on a PowerPC platform.""" - return platform.processor() == 'powerpc' or \ - platform.machine().startswith('ppc') - - -class _FilterInvalids(object): - def setUp(self): - self.olderr = np.seterr(invalid='ignore') - - def tearDown(self): - np.seterr(**self.olderr) - - -class TestConstants(TestCase): - def test_pi(self): - assert_allclose(ncu.pi, 3.141592653589793, 1e-15) - - - def test_e(self): - assert_allclose(ncu.e, 2.718281828459045, 1e-15) - - - def test_euler_gamma(self): - assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) - - -class TestDivision(TestCase): - def test_division_int(self): - # int division should follow Python - x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) - if 5 / 10 == 0.5: - assert_equal(x / 100, [0.05, 0.1, 0.9, 1, - -0.05, -0.1, -0.9, -1, -1.2]) - else: - assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) - assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) - assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) - - def test_division_complex(self): - # check that implementation is correct - msg = "Complex division implementation check" - x = np.array([1. + 1.*1j, 1. + .5*1j, 1.
+ 2.*1j], dtype=np.complex128) - assert_almost_equal(x**2/x, x, err_msg=msg) - # check overflow, underflow - msg = "Complex division overflow/underflow check" - x = np.array([1.e+110, 1.e-110], dtype=np.complex128) - y = x**2/x - assert_almost_equal(y/x, [1, 1], err_msg=msg) - - def test_zero_division_complex(self): - with np.errstate(invalid="ignore", divide="ignore"): - x = np.array([0.0], dtype=np.complex128) - y = 1.0/x - assert_(np.isinf(y)[0]) - y = complex(np.inf, np.nan)/x - assert_(np.isinf(y)[0]) - y = complex(np.nan, np.inf)/x - assert_(np.isinf(y)[0]) - y = complex(np.inf, np.inf)/x - assert_(np.isinf(y)[0]) - y = 0.0/x - assert_(np.isnan(y)[0]) - - def test_floor_division_complex(self): - # check that implementation is correct - msg = "Complex floor division implementation check" - x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) - y = np.array([0., -1., 0., 0.], dtype=np.complex128) - assert_equal(np.floor_divide(x**2, x), y, err_msg=msg) - # check overflow, underflow - msg = "Complex floor division overflow/underflow check" - x = np.array([1.e+110, 1.e-110], dtype=np.complex128) - y = np.floor_divide(x**2, x) - assert_equal(y, [1.e+110, 0], err_msg=msg) - - -class TestPower(TestCase): - def test_power_float(self): - x = np.array([1., 2., 3.]) - assert_equal(x**0, [1., 1., 1.]) - assert_equal(x**1, x) - assert_equal(x**2, [1., 4., 9.]) - y = x.copy() - y **= 2 - assert_equal(y, [1., 4., 9.]) - assert_almost_equal(x**(-1), [1., 0.5, 1./3]) - assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) - - for out, inp, msg in _gen_alignment_data(dtype=np.float32, - type='unary', - max_size=11): - exp = [ncu.sqrt(i) for i in inp] - assert_almost_equal(inp**(0.5), exp, err_msg=msg) - np.sqrt(inp, out=out) - assert_equal(out, exp, err_msg=msg) - - for out, inp, msg in _gen_alignment_data(dtype=np.float64, - type='unary', - max_size=7): - exp = [ncu.sqrt(i) for i in inp] - assert_almost_equal(inp**(0.5), exp, err_msg=msg) - np.sqrt(inp, out=out) - assert_equal(out, exp, err_msg=msg) - - - def test_power_complex(self): - x = np.array([1+2j, 2+3j, 3+4j]) - assert_equal(x**0, [1., 1., 1.]) - assert_equal(x**1, x) - assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) - assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) - assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) - assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) - assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) - assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, - (-117-44j)/15625]) - assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), - ncu.sqrt(3+4j)]) - norm = 1./((x**14)[0]) - assert_almost_equal(x**14 * norm, - [i * norm for i in [-76443+16124j, 23161315+58317492j, - 5583548873 + 2465133864j]]) - - # Ticket #836 - def assert_complex_equal(x, y): - assert_array_equal(x.real, y.real) - assert_array_equal(x.imag, y.imag) - - for z in [complex(0, np.inf), complex(1, np.inf)]: - z = np.array([z], dtype=np.complex_) - with np.errstate(invalid="ignore"): - assert_complex_equal(z**1, z) - assert_complex_equal(z**2, z*z) - assert_complex_equal(z**3, z*z*z) - - def test_power_zero(self): - # ticket #1271 - zero = np.array([0j]) - one = np.array([1+0j]) - cinf = np.array([complex(np.inf, 0)]) - cnan = np.array([complex(np.nan, np.nan)]) - - def assert_complex_equal(x, y): - x, y = np.asarray(x), np.asarray(y) - assert_array_equal(x.real, y.real) - assert_array_equal(x.imag, y.imag) - - # positive powers - for p in [0.33, 0.5, 1, 
1.5, 2, 3, 4, 5, 6.6]: - assert_complex_equal(np.power(zero, p), zero) - - # zero power - assert_complex_equal(np.power(zero, 0), one) - with np.errstate(invalid="ignore"): - assert_complex_equal(np.power(zero, 0+1j), cnan) - - # negative power - for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: - assert_complex_equal(np.power(zero, -p), cnan) - assert_complex_equal(np.power(zero, -1+0.2j), cnan) - - def test_fast_power(self): - x = np.array([1, 2, 3], np.int16) - assert_((x**2.00001).dtype is (x**2.0).dtype) - - # Check that the fast path ignores 1-element not 0-d arrays - res = x ** np.array([[[2]]]) - assert_equal(res.shape, (1, 1, 3)) - - -class TestLog2(TestCase): - def test_log2_values(self) : - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g'] : - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_almost_equal(np.log2(xf), yf) - - def test_log2_ints(self): - # a good log2 implementation should provide this, - # might fail on OS with bad libm - for i in range(1, 65): - v = np.log2(2.**i) - assert_equal(v, float(i), err_msg='at exponent %d' % i) - - def test_log2_special(self): - assert_equal(np.log2(1.), 0.) - assert_equal(np.log2(np.inf), np.inf) - assert_(np.isnan(np.log2(np.nan))) - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_(np.isnan(np.log2(-1.))) - assert_(np.isnan(np.log2(-np.inf))) - assert_equal(np.log2(0.), -np.inf) - assert_(w[0].category is RuntimeWarning) - assert_(w[1].category is RuntimeWarning) - assert_(w[2].category is RuntimeWarning) - - -class TestExp2(TestCase): - def test_exp2_values(self) : - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g'] : - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt) - assert_almost_equal(np.exp2(yf), xf) - - -class TestLogAddExp2(_FilterInvalids): - # Need test for intermediate precisions - def test_logaddexp2_values(self) : - x = [1, 2, 3, 4, 5] - y = [5, 4, 3, 2, 1] - z = [6, 6, 6, 6, 6] - for dt, dec in zip(['f', 'd', 'g'], [6, 15, 15]) : - xf = np.log2(np.array(x, dtype=dt)) - yf = np.log2(np.array(y, dtype=dt)) - zf = np.log2(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) - - def test_logaddexp2_range(self) : - x = [1000000, -1000000, 1000200, -1000200] - y = [1000200, -1000200, 1000000, -1000000] - z = [1000200, -1000000, 1000200, -1000000] - for dt in ['f', 'd', 'g'] : - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) - - def test_inf(self) : - inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] - z = [inf, inf, inf, -inf, inf, inf, 1, 1] - with np.errstate(invalid='ignore'): - for dt in ['f', 'd', 'g'] : - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_equal(np.logaddexp2(logxf, logyf), logzf) - - def test_nan(self): - assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) - assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) - assert_(np.isnan(np.logaddexp2(np.nan, 0))) - assert_(np.isnan(np.logaddexp2(0, np.nan))) - assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) - - -class TestLog(TestCase): - def test_log_values(self) : - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g'] : - log2_ = 
0.69314718055994530943 - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ - assert_almost_equal(np.log(xf), yf) - - -class TestExp(TestCase): - def test_exp_values(self) : - x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] - y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f', 'd', 'g'] : - log2_ = 0.69314718055994530943 - xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ - assert_almost_equal(np.exp(yf), xf) - - -class TestLogAddExp(_FilterInvalids): - def test_logaddexp_values(self) : - x = [1, 2, 3, 4, 5] - y = [5, 4, 3, 2, 1] - z = [6, 6, 6, 6, 6] - for dt, dec in zip(['f', 'd', 'g'], [6, 15, 15]) : - xf = np.log(np.array(x, dtype=dt)) - yf = np.log(np.array(y, dtype=dt)) - zf = np.log(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) - - def test_logaddexp_range(self) : - x = [1000000, -1000000, 1000200, -1000200] - y = [1000200, -1000200, 1000000, -1000000] - z = [1000200, -1000000, 1000200, -1000000] - for dt in ['f', 'd', 'g'] : - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp(logxf, logyf), logzf) - - def test_inf(self) : - inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] - z = [inf, inf, inf, -inf, inf, inf, 1, 1] - with np.errstate(invalid='ignore'): - for dt in ['f', 'd', 'g'] : - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_equal(np.logaddexp(logxf, logyf), logzf) - - def test_nan(self): - assert_(np.isnan(np.logaddexp(np.nan, np.inf))) - assert_(np.isnan(np.logaddexp(np.inf, np.nan))) - assert_(np.isnan(np.logaddexp(np.nan, 0))) - assert_(np.isnan(np.logaddexp(0, np.nan))) - assert_(np.isnan(np.logaddexp(np.nan, np.nan))) - - -class TestLog1p(TestCase): - def test_log1p(self): - assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) - assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) - - def test_special(self): - with np.errstate(invalid="ignore", divide="ignore"): - assert_equal(ncu.log1p(np.nan), np.nan) - assert_equal(ncu.log1p(np.inf), np.inf) - assert_equal(ncu.log1p(-1.), -np.inf) - assert_equal(ncu.log1p(-2.), np.nan) - assert_equal(ncu.log1p(-np.inf), np.nan) - - -class TestExpm1(TestCase): - def test_expm1(self): - assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) - assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) - - def test_special(self): - assert_equal(ncu.expm1(np.inf), np.inf) - assert_equal(ncu.expm1(0.), 0.) - assert_equal(ncu.expm1(-0.), -0.) - assert_equal(ncu.expm1(np.inf), np.inf) - assert_equal(ncu.expm1(-np.inf), -1.) 
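# [Editor's aside, not part of the patch] Why TestLog1p/TestExpm1 above test
# dedicated ufuncs instead of composing exp/log: near zero, exp(x) - 1 and
# log(1 + x) lose most of their significant digits to cancellation, while
# expm1 and log1p stay accurate to full double precision. A small
# demonstration, assuming only numpy:
import numpy as np

x = 1e-12
naive = np.exp(x) - 1.0  # only the first few digits are correct
good = np.expm1(x)       # accurate to roughly 15 digits
assert abs(good - x) < abs(naive - x)

# The same cancellation shows up in log(1 + x) versus log1p(x):
assert abs(np.log1p(x) - x) < abs(np.log(1.0 + x) - x)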
- - -class TestHypot(TestCase, object): - def test_simple(self): - assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) - assert_almost_equal(ncu.hypot(0, 0), 0) - - -def assert_hypot_isnan(x, y): - with np.errstate(invalid='ignore'): - assert_(np.isnan(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) - - -def assert_hypot_isinf(x, y): - with np.errstate(invalid='ignore'): - assert_(np.isinf(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) - - -class TestHypotSpecialValues(TestCase): - def test_nan_outputs(self): - assert_hypot_isnan(np.nan, np.nan) - assert_hypot_isnan(np.nan, 1) - - def test_nan_outputs2(self): - assert_hypot_isinf(np.nan, np.inf) - assert_hypot_isinf(np.inf, np.nan) - assert_hypot_isinf(np.inf, 0) - assert_hypot_isinf(0, np.inf) - assert_hypot_isinf(np.inf, np.inf) - assert_hypot_isinf(np.inf, 23.0) - - def test_no_fpe(self): - assert_no_warnings(ncu.hypot, np.inf, 0) - - -def assert_arctan2_isnan(x, y): - assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_ispinf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_isninf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_ispzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) - - -def assert_arctan2_isnzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) - - -class TestArctan2SpecialValues(TestCase): - def test_one_one(self): - # atan2(1, 1) returns pi/4. - assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) - assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) - assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) - - def test_zero_nzero(self): - # atan2(+-0, -0) returns +-pi. - assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) - assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) - - def test_zero_pzero(self): - # atan2(+-0, +0) returns +-0. - assert_arctan2_ispzero(np.PZERO, np.PZERO) - assert_arctan2_isnzero(np.NZERO, np.PZERO) - - def test_zero_negative(self): - # atan2(+-0, x) returns +-pi for x < 0. - assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) - assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) - - def test_zero_positive(self): - # atan2(+-0, x) returns +-0 for x > 0. - assert_arctan2_ispzero(np.PZERO, 1) - assert_arctan2_isnzero(np.NZERO, 1) - - def test_positive_zero(self): - # atan2(y, +-0) returns +pi/2 for y > 0. - assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) - assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) - - def test_negative_zero(self): - # atan2(y, +-0) returns -pi/2 for y < 0. - assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) - assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) - - def test_any_ninf(self): - # atan2(+-y, -infinity) returns +-pi for finite y > 0. - assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) - assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) - - def test_any_pinf(self): - # atan2(+-y, +infinity) returns +-0 for finite y > 0. 
- assert_arctan2_ispzero(1, np.inf) - assert_arctan2_isnzero(-1, np.inf) - - def test_inf_any(self): - # atan2(+-infinity, x) returns +-pi/2 for finite x. - assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) - - def test_inf_ninf(self): - # atan2(+-infinity, -infinity) returns +-3*pi/4. - assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) - - def test_inf_pinf(self): - # atan2(+-infinity, +infinity) returns +-pi/4. - assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) - assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) - - def test_nan_any(self): - # atan2(nan, x) returns nan for any x, including inf - assert_arctan2_isnan(np.nan, np.inf) - assert_arctan2_isnan(np.inf, np.nan) - assert_arctan2_isnan(np.nan, np.nan) - - -class TestLdexp(TestCase): - def _check_ldexp(self, tp): - assert_almost_equal(ncu.ldexp(np.array(2., np.float32), - np.array(3, tp)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.float64), - np.array(3, tp)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), - np.array(3, tp)), 16.) - - def test_ldexp(self): - # The default Python int type should work - assert_almost_equal(ncu.ldexp(2., 3), 16.) - # The following int types should all be accepted - self._check_ldexp(np.int8) - self._check_ldexp(np.int16) - self._check_ldexp(np.int32) - self._check_ldexp('i') - self._check_ldexp('l') - - def test_ldexp_overflow(self): - # silence warning emitted on overflow - with np.errstate(over="ignore"): - imax = np.iinfo(np.dtype('l')).max - imin = np.iinfo(np.dtype('l')).min - assert_equal(ncu.ldexp(2., imax), np.inf) - assert_equal(ncu.ldexp(2., imin), 0) - - -class TestMaximum(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.maximum.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), np.nan) - assert_equal(func(tmp2), np.nan) - - def test_reduce_complex(self): - assert_equal(np.maximum.reduce([1, 2j]), 1) - assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([nan, nan, nan]) - assert_equal(np.maximum(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([nan, nan, nan], dtype=np.complex) - assert_equal(np.maximum(arg1, arg2), out) - - def test_object_array(self): - arg1 = np.arange(5, dtype=np.object) - arg2 = arg1 + 1 - assert_equal(np.maximum(arg1, arg2), arg2) - - -class TestMinimum(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.minimum.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - 
assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), np.nan) - assert_equal(func(tmp2), np.nan) - - def test_reduce_complex(self): - assert_equal(np.minimum.reduce([1, 2j]), 2j) - assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([nan, nan, nan]) - assert_equal(np.minimum(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([nan, nan, nan], dtype=np.complex) - assert_equal(np.minimum(arg1, arg2), out) - - def test_object_array(self): - arg1 = np.arange(5, dtype=np.object) - arg2 = arg1 + 1 - assert_equal(np.minimum(arg1, arg2), arg1) - - -class TestFmax(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.fmax.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 10) - assert_equal(func(tmp2), 10) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), 9) - assert_equal(func(tmp2), 9) - - def test_reduce_complex(self): - assert_equal(np.fmax.reduce([1, 2j]), 1) - assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([0, 0, nan]) - assert_equal(np.fmax(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([0, 0, nan], dtype=np.complex) - assert_equal(np.fmax(arg1, arg2), out) - - -class TestFmin(_FilterInvalids): - def test_reduce(self): - dflt = np.typecodes['AllFloat'] - dint = np.typecodes['AllInteger'] - seq1 = np.arange(11) - seq2 = seq1[::-1] - func = np.fmin.reduce - for dt in dint: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - for dt in dflt: - tmp1 = seq1.astype(dt) - tmp2 = seq2.astype(dt) - assert_equal(func(tmp1), 0) - assert_equal(func(tmp2), 0) - tmp1[::2] = np.nan - tmp2[::2] = np.nan - assert_equal(func(tmp1), 1) - assert_equal(func(tmp2), 1) - - def test_reduce_complex(self): - assert_equal(np.fmin.reduce([1, 2j]), 2j) - assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) - - def test_float_nans(self): - nan = np.nan - arg1 = np.array([0, nan, nan]) - arg2 = np.array([nan, 0, nan]) - out = np.array([0, 0, nan]) - assert_equal(np.fmin(arg1, arg2), out) - - def test_complex_nans(self): - nan = np.nan - for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] : - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([0, 0, nan], dtype=np.complex) - assert_equal(np.fmin(arg1, arg2), out) - - -class TestBool(TestCase): - def test_truth_table(self): - arg1 = [False, False, True, True] - arg2 = [False, True, False, True] - # OR - out = [False, True, True, True] - for func in (np.logical_or, np.bitwise_or, np.maximum): - 
assert_equal(func(arg1, arg2), out) - # AND - out = [False, False, False, True] - for func in (np.logical_and, np.bitwise_and, np.minimum): - assert_equal(func(arg1, arg2), out) - # XOR - out = [False, True, True, False] - for func in (np.logical_xor, np.bitwise_xor, np.not_equal): - assert_equal(func(arg1, arg2), out) - - -class TestFloatingPoint(TestCase): - def test_floating_point(self): - assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) - - -class TestDegrees(TestCase): - def test_degrees(self): - assert_almost_equal(ncu.degrees(np.pi), 180.0) - assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) - - -class TestRadians(TestCase): - def test_radians(self): - assert_almost_equal(ncu.radians(180.0), np.pi) - assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) - - -class TestSign(TestCase): - def test_sign(self): - a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) - out = np.zeros(a.shape) - tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) - - with np.errstate(invalid='ignore'): - res = ncu.sign(a) - assert_equal(res, tgt) - res = ncu.sign(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - - -class TestMinMax(TestCase): - def test_minmax_blocked(self): - # simd tests on max/min, test all alignments, slow but important - # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once) - for dt, sz in [(np.float32, 15), (np.float64, 7)]: - for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', - max_size=sz): - for i in range(inp.size): - inp[:] = np.arange(inp.size, dtype=dt) - inp[i] = np.nan - emsg = lambda: '%r\n%s' % (inp, msg) - assert_(np.isnan(inp.max()), msg=emsg) - assert_(np.isnan(inp.min()), msg=emsg) - - inp[i] = 1e10 - assert_equal(inp.max(), 1e10, err_msg=msg) - inp[i] = -1e10 - assert_equal(inp.min(), -1e10, err_msg=msg) - - def test_lower_align(self): - # check data that is not aligned to element size - # i.e doubles are aligned to 4 bytes on i386 - d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - assert_equal(d.max(), d[0]) - assert_equal(d.min(), d[0]) - - -class TestAbsoluteNegative(TestCase): - def test_abs_neg_blocked(self): - # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 - for dt, sz in [(np.float32, 11), (np.float64, 5)]: - for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', - max_size=sz): - tgt = [ncu.absolute(i) for i in inp] - np.absolute(inp, out=out) - assert_equal(out, tgt, err_msg=msg) - self.assertTrue((out >= 0).all()) - - tgt = [-1*(i) for i in inp] - np.negative(inp, out=out) - assert_equal(out, tgt, err_msg=msg) - - # will throw invalid flag depending on compiler optimizations - with np.errstate(invalid='ignore'): - for v in [np.nan, -np.inf, np.inf]: - for i in range(inp.size): - d = np.arange(inp.size, dtype=dt) - inp[:] = -d - inp[i] = v - d[i] = -v if v == -np.inf else v - assert_array_equal(np.abs(inp), d, err_msg=msg) - np.abs(inp, out=out) - assert_array_equal(out, d, err_msg=msg) - - assert_array_equal(-inp, -1*inp, err_msg=msg) - np.negative(inp, out=out) - assert_array_equal(out, -1*inp, err_msg=msg) - - def test_lower_align(self): - # check data that is not aligned to element size - # i.e doubles are aligned to 4 bytes on i386 - d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) - assert_equal(np.abs(d), d) - assert_equal(np.negative(d), -d) - np.negative(d, out=d) - np.negative(np.ones_like(d), out=d) - np.abs(d, out=d) - np.abs(np.ones_like(d), out=d) - - -class TestSpecialMethods(TestCase): - def test_wrap(self): - class with_wrap(object): - def __array__(self): - return np.zeros(1) - 
def __array_wrap__(self, arr, context): - r = with_wrap() - r.arr = arr - r.context = context - return r - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x.arr, np.zeros(1)) - func, args, i = x.context - self.assertTrue(func is ncu.minimum) - self.assertEqual(len(args), 2) - assert_equal(args[0], a) - assert_equal(args[1], a) - self.assertEqual(i, 0) - - def test_wrap_with_iterable(self): - # test fix for bug #1026: - class with_wrap(np.ndarray): - __array_priority__ = 10 - def __new__(cls): - return np.asarray(1).view(cls).copy() - def __array_wrap__(self, arr, context): - return arr.view(type(self)) - a = with_wrap() - x = ncu.multiply(a, (1, 2, 3)) - self.assertTrue(isinstance(x, with_wrap)) - assert_array_equal(x, np.array((1, 2, 3))) - - def test_priority_with_scalar(self): - # test fix for bug #826: - class A(np.ndarray): - __array_priority__ = 10 - def __new__(cls): - return np.asarray(1.0, 'float64').view(cls).copy() - a = A() - x = np.float64(1)*a - self.assertTrue(isinstance(x, A)) - assert_array_equal(x, np.array(1)) - - def test_old_wrap(self): - class with_wrap(object): - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr): - r = with_wrap() - r.arr = arr - return r - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x.arr, np.zeros(1)) - - def test_priority(self): - class A(object): - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr, context): - r = type(self)() - r.arr = arr - r.context = context - return r - class B(A): - __array_priority__ = 20. - class C(A): - __array_priority__ = 40. - x = np.zeros(1) - a = A() - b = B() - c = C() - f = ncu.minimum - self.assertTrue(type(f(x, x)) is np.ndarray) - self.assertTrue(type(f(x, a)) is A) - self.assertTrue(type(f(x, b)) is B) - self.assertTrue(type(f(x, c)) is C) - self.assertTrue(type(f(a, x)) is A) - self.assertTrue(type(f(b, x)) is B) - self.assertTrue(type(f(c, x)) is C) - - self.assertTrue(type(f(a, a)) is A) - self.assertTrue(type(f(a, b)) is B) - self.assertTrue(type(f(b, a)) is B) - self.assertTrue(type(f(b, b)) is B) - self.assertTrue(type(f(b, c)) is C) - self.assertTrue(type(f(c, b)) is C) - self.assertTrue(type(f(c, c)) is C) - - self.assertTrue(type(ncu.exp(a)) is A) - self.assertTrue(type(ncu.exp(b)) is B) - self.assertTrue(type(ncu.exp(c)) is C) - - def test_failing_wrap(self): - class A(object): - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr, context): - raise RuntimeError - a = A() - self.assertRaises(RuntimeError, ncu.maximum, a, a) - - def test_default_prepare(self): - class with_wrap(object): - __array_priority__ = 10 - def __array__(self): - return np.zeros(1) - def __array_wrap__(self, arr, context): - return arr - a = with_wrap() - x = ncu.minimum(a, a) - assert_equal(x, np.zeros(1)) - assert_equal(type(x), np.ndarray) - - def test_prepare(self): - class with_prepare(np.ndarray): - __array_priority__ = 10 - def __array_prepare__(self, arr, context): - # make sure we can return a new - return np.array(arr).view(type=with_prepare) - a = np.array(1).view(type=with_prepare) - x = np.add(a, a) - assert_equal(x, np.array(2)) - assert_equal(type(x), with_prepare) - - def test_failing_prepare(self): - class A(object): - def __array__(self): - return np.zeros(1) - def __array_prepare__(self, arr, context=None): - raise RuntimeError - a = A() - self.assertRaises(RuntimeError, ncu.maximum, a, a) - - def test_array_with_context(self): - class A(object): - def __array__(self, dtype=None, context=None): - func, args, i = context -
self.func = func - self.args = args - self.i = i - return np.zeros(1) - class B(object): - def __array__(self, dtype=None): - return np.zeros(1, dtype) - class C(object): - def __array__(self): - return np.zeros(1) - a = A() - ncu.maximum(np.zeros(1), a) - self.assertTrue(a.func is ncu.maximum) - assert_equal(a.args[0], 0) - self.assertTrue(a.args[1] is a) - self.assertTrue(a.i == 1) - assert_equal(ncu.maximum(a, B()), 0) - assert_equal(ncu.maximum(a, C()), 0) - - @dec.skipif(True) # ufunc override disabled for 1.9 - def test_ufunc_override(self): - class A(object): - def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): - return self, func, method, pos, inputs, kwargs - - a = A() - - b = np.matrix([1]) - c = np.array([1]) - res0 = np.multiply(a, b) - res1 = np.dot(a, b) - - # self - assert_equal(res0[0], a) - assert_equal(res1[0], a) - assert_equal(res0[1], np.multiply) - assert_equal(res1[1], np.dot) - assert_equal(res0[2], '__call__') - assert_equal(res1[2], '__call__') - assert_equal(res0[3], 0) - assert_equal(res1[3], 0) - assert_equal(res0[4], (a, b)) - assert_equal(res1[4], (a, b)) - assert_equal(res0[5], {}) - assert_equal(res1[5], {}) - - @dec.skipif(True) # ufunc override disabled for 1.9 - def test_ufunc_override_mro(self): - - # Some multi arg functions for testing. - def tres_mul(a, b, c): - return a * b * c - - def quatro_mul(a, b, c, d): - return a * b * c * d - - # Make these into ufuncs. - three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) - four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) - - class A(object): - def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): - return "A" - - class ASub(A): - def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): - return "ASub" - - class B(object): - def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): - return "B" - - class C(object): - def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): - return NotImplemented - - class CSub(object): - def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): - return NotImplemented - - - - a = A() - a_sub = ASub() - b = B() - c = C() - c_sub = CSub() - - # Standard - res = np.multiply(a, a_sub) - assert_equal(res, "ASub") - res = np.multiply(a_sub, b) - assert_equal(res, "ASub") - - # With 1 NotImplemented - res = np.multiply(c, a) - assert_equal(res, "A") - - # Both NotImplemented. - assert_raises(TypeError, np.multiply, c, c_sub) - assert_raises(TypeError, np.multiply, c_sub, c) - assert_raises(TypeError, np.multiply, 2, c) - - # Ternary testing. - assert_equal(three_mul_ufunc(a, 1, 2), "A") - assert_equal(three_mul_ufunc(1, a, 2), "A") - assert_equal(three_mul_ufunc(1, 2, a), "A") - - assert_equal(three_mul_ufunc(a, a, 6), "A") - assert_equal(three_mul_ufunc(a, 2, a), "A") - assert_equal(three_mul_ufunc(a, 2, b), "A") - assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") - assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") - assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") - assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") - - assert_equal(three_mul_ufunc(a, b, c), "A") - assert_equal(three_mul_ufunc(a, b, c_sub), "A") - assert_equal(three_mul_ufunc(1, 2, b), "B") - - assert_raises(TypeError, three_mul_ufunc, 1, 2, c) - assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) - assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) - - # Quaternary testing. 
- assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") - assert_equal(four_mul_ufunc(1, a, 2, 3), "A") - assert_equal(four_mul_ufunc(1, 1, a, 3), "A") - assert_equal(four_mul_ufunc(1, 1, 2, a), "A") - - assert_equal(four_mul_ufunc(a, b, 2, 3), "A") - assert_equal(four_mul_ufunc(1, a, 2, b), "A") - assert_equal(four_mul_ufunc(b, 1, a, 3), "B") - assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") - assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") - - assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) - assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) - assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c) - - @dec.skipif(True) # ufunc override disabled for 1.9 - def test_ufunc_override_methods(self): - class A(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return self, ufunc, method, pos, inputs, kwargs - - # __call__ - a = A() - res = np.multiply.__call__(1, a, foo='bar', answer=42) - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], '__call__') - assert_equal(res[3], 1) - assert_equal(res[4], (1, a)) - assert_equal(res[5], {'foo': 'bar', 'answer': 42}) - - # reduce, positional args - res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduce') - assert_equal(res[3], 0) - assert_equal(res[4], (a,)) - assert_equal(res[5], {'dtype':'dtype0', - 'out': 'out0', - 'keepdims': 'keep0', - 'axis': 'axis0'}) - - # reduce, kwargs - res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', - keepdims='keep0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduce') - assert_equal(res[3], 0) - assert_equal(res[4], (a,)) - assert_equal(res[5], {'dtype':'dtype0', - 'out': 'out0', - 'keepdims': 'keep0', - 'axis': 'axis0'}) - - # accumulate, pos args - res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'accumulate') - assert_equal(res[3], 0) - assert_equal(res[4], (a,)) - assert_equal(res[5], {'dtype':'dtype0', - 'out': 'out0', - 'axis': 'axis0'}) - - # accumulate, kwargs - res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', - out='out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'accumulate') - assert_equal(res[3], 0) - assert_equal(res[4], (a,)) - assert_equal(res[5], {'dtype':'dtype0', - 'out': 'out0', - 'axis': 'axis0'}) - - # reduceat, pos args - res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduceat') - assert_equal(res[3], 0) - assert_equal(res[4], (a, [4, 2])) - assert_equal(res[5], {'dtype':'dtype0', - 'out': 'out0', - 'axis': 'axis0'}) - - # reduceat, kwargs - res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', - out='out0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'reduceat') - assert_equal(res[3], 0) - assert_equal(res[4], (a, [4, 2])) - assert_equal(res[5], {'dtype':'dtype0', - 'out': 'out0', - 'axis': 'axis0'}) - - # outer - res = np.multiply.outer(a, 42) - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'outer') - assert_equal(res[3], 0) - assert_equal(res[4], (a, 42)) - assert_equal(res[5], {}) - - # at - res = np.multiply.at(a, [4, 2], 'b0') - assert_equal(res[0], a) - assert_equal(res[1], np.multiply) - assert_equal(res[2], 'at') 
- assert_equal(res[3], 0) - assert_equal(res[4], (a, [4, 2], 'b0')) - - @dec.skipif(True) # ufunc override disabled for 1.9 - def test_ufunc_override_out(self): - class A(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return kwargs - - - class B(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return kwargs - - a = A() - b = B() - res0 = np.multiply(a, b, 'out_arg') - res1 = np.multiply(a, b, out='out_arg') - res2 = np.multiply(2, b, 'out_arg') - res3 = np.multiply(3, b, out='out_arg') - res4 = np.multiply(a, 4, 'out_arg') - res5 = np.multiply(a, 5, out='out_arg') - - assert_equal(res0['out'], 'out_arg') - assert_equal(res1['out'], 'out_arg') - assert_equal(res2['out'], 'out_arg') - assert_equal(res3['out'], 'out_arg') - assert_equal(res4['out'], 'out_arg') - assert_equal(res5['out'], 'out_arg') - - # ufuncs with multiple output modf and frexp. - res6 = np.modf(a, 'out0', 'out1') - res7 = np.frexp(a, 'out0', 'out1') - assert_equal(res6['out'][0], 'out0') - assert_equal(res6['out'][1], 'out1') - assert_equal(res7['out'][0], 'out0') - assert_equal(res7['out'][1], 'out1') - - @dec.skipif(True) # ufunc override disabled for 1.9 - def test_ufunc_override_exception(self): - class A(object): - def __numpy_ufunc__(self, *a, **kwargs): - raise ValueError("oops") - a = A() - for func in [np.divide, np.dot]: - assert_raises(ValueError, func, a, a) - -class TestChoose(TestCase): - def test_mixed(self): - c = np.array([True, True]) - a = np.array([True, True]) - assert_equal(np.choose(c, (a, 1)), np.array([1, 1])) - - -def is_longdouble_finfo_bogus(): - info = np.finfo(np.longcomplex) - return not np.isfinite(np.log10(info.tiny/info.eps)) - - -class TestComplexFunctions(object): - funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, - np.arctanh, np.sin, np.cos, np.tan, np.exp, - np.exp2, np.log, np.sqrt, np.log10, np.log2, - np.log1p] - - def test_it(self): - for f in self.funcs: - if f is np.arccosh : - x = 1.5 - else : - x = .5 - fr = f(x) - fz = f(np.complex(x)) - assert_almost_equal(fz.real, fr, err_msg='real part %s'%f) - assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f) - - def test_precisions_consistent(self) : - z = 1 + 1j - for f in self.funcs : - fcf = f(np.csingle(z)) - fcd = f(np.cdouble(z)) - fcl = f(np.clongdouble(z)) - assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f) - assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f) - - def test_branch_cuts(self): - # check branch cuts and continuity on them - yield _check_branch_cut, np.log, -0.5, 1j, 1, -1 - yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1 - yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1 - yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1 - yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1 - - yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1 - yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1 - yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1 - - yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1 - yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1 - yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1 - - # check against bogus branch cuts: assert continuity between quadrants - yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1 - yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1 - yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1 - - yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1 - 
yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1 - yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1 - - @dec.knownfailureif(True, "These branch cuts are known to fail") - def test_branch_cuts_failing(self): - # XXX: signed zero not OK with ICC on 64-bit platform for log, see - # http://permalink.gmane.org/gmane.comp.python.numeric.general/25335 - yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True - # XXX: signed zeros are not OK for sqrt or for the arc* functions - yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True - yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True - yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True - yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True - yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True - yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True - yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True - - def test_against_cmath(self): - import cmath, sys - - points = [-1-1j, -1+1j, +1-1j, +1+1j] - name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', - 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} - atol = 4*np.finfo(np.complex).eps - for func in self.funcs: - fname = func.__name__.split('.')[-1] - cname = name_map.get(fname, fname) - try: - cfunc = getattr(cmath, cname) - except AttributeError: - continue - for p in points: - a = complex(func(np.complex_(p))) - b = cfunc(p) - assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname, p, a, b)) - - def check_loss_of_precision(self, dtype): - """Check loss of precision in complex arc* functions""" - - # Check against known-good functions - - info = np.finfo(dtype) - real_dtype = dtype(0.).real.dtype - eps = info.eps - - def check(x, rtol): - x = x.astype(real_dtype) - - z = x.astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arcsinh')) - - z = (1j*x).astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arcsin')) - - z = x.astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arctanh')) - - z = (1j*x).astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) - assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), - 'arctan')) - - # The switchover was chosen as 1e-3; hence there can be up to - # ~eps/1e-3 of relative cancellation error before it - - x_series = np.logspace(-20, -3.001, 200) - x_basic = np.logspace(-2.999, 0, 10, endpoint=False) - - if dtype is np.longcomplex: - # It's not guaranteed that the system-provided arc functions - # are accurate down to a few epsilons. (Eg. 
on Linux 64-bit) - # So, give more leeway for long complex tests here: - check(x_series, 50*eps) - else: - check(x_series, 2*eps) - check(x_basic, 2*eps/1e-3) - - # Check a few points - - z = np.array([1e-5*(1+1j)], dtype=dtype) - p = 9.999999999333333333e-6 + 1.000000000066666666e-5j - d = np.absolute(1-np.arctanh(z)/p) - assert_(np.all(d < 1e-15)) - - p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j - d = np.absolute(1-np.arcsinh(z)/p) - assert_(np.all(d < 1e-15)) - - p = 9.999999999333333333e-6j + 1.000000000066666666e-5 - d = np.absolute(1-np.arctan(z)/p) - assert_(np.all(d < 1e-15)) - - p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 - d = np.absolute(1-np.arcsin(z)/p) - assert_(np.all(d < 1e-15)) - - # Check continuity across switchover points - - def check(func, z0, d=1): - z0 = np.asarray(z0, dtype=dtype) - zp = z0 + abs(z0) * d * eps * 2 - zm = z0 - abs(z0) * d * eps * 2 - assert_(np.all(zp != zm), (zp, zm)) - - # NB: the cancellation error at the switchover is at least eps - good = (abs(func(zp) - func(zm)) < 2*eps) - assert_(np.all(good), (func, z0[~good])) - - for func in (np.arcsinh, np.arcsin, np.arctanh, np.arctan): - pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3) - if rp != 0 or ip != 0] - check(func, pts, 1) - check(func, pts, 1j) - check(func, pts, 1+1j) - - def test_loss_of_precision(self): - for dtype in [np.complex64, np.complex_]: - yield self.check_loss_of_precision, dtype - - @dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo") - def test_loss_of_precision_longcomplex(self): - self.check_loss_of_precision(np.longcomplex) - - -class TestAttributes(TestCase): - def test_attributes(self): - add = ncu.add - assert_equal(add.__name__, 'add') - assert_(add.__doc__.startswith('add(x1, x2[, out])\n\n')) - self.assertTrue(add.ntypes >= 18) # don't fail if types added - self.assertTrue('ii->i' in add.types) - assert_equal(add.nin, 2) - assert_equal(add.nout, 1) - assert_equal(add.identity, 0) - - -class TestSubclass(TestCase): - def test_subclass_op(self): - class simple(np.ndarray): - def __new__(subtype, shape): - self = np.ndarray.__new__(subtype, shape, dtype=object) - self.fill(0) - return self - a = simple((3, 4)) - assert_equal(a+a, a) - -def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, - dtype=np.complex): - """ - Check for a branch cut in a function. - - Assert that `x0` lies on a branch cut of function `f` and `f` is - continuous from the direction `dx`.
- - Parameters - ---------- - f : func - Function to check - x0 : array-like - Point on branch cut - dx : array-like - Direction to check continuity in - re_sign, im_sign : {1, -1} - Change of sign of the real or imaginary part expected - sig_zero_ok : bool - Whether to check if the branch cut respects signed zero (if applicable) - dtype : dtype - Dtype to check (should be complex) - - """ - x0 = np.atleast_1d(x0).astype(dtype) - dx = np.atleast_1d(dx).astype(dtype) - - scale = np.finfo(dtype).eps * 1e3 - atol = 1e-4 - - y0 = f(x0) - yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) - ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) - - assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) - assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) - assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) - assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) - - if sig_zero_ok: - # check that signed zeros also work as a displacement - jr = (x0.real == 0) & (dx.real != 0) - ji = (x0.imag == 0) & (dx.imag != 0) - - x = -x0 - x.real[jr] = 0.*dx.real - x.imag[ji] = 0.*dx.imag - x = -x - ym = f(x) - ym = ym[jr | ji] - y0 = y0[jr | ji] - assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) - assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) - -def test_copysign(): - assert_(np.copysign(1, -1) == -1) - with np.errstate(divide="ignore"): - assert_(1 / np.copysign(0, -1) < 0) - assert_(1 / np.copysign(0, 1) > 0) - assert_(np.signbit(np.copysign(np.nan, -1))) - assert_(not np.signbit(np.copysign(np.nan, 1))) - -def _test_nextafter(t): - one = t(1) - two = t(2) - zero = t(0) - eps = np.finfo(t).eps - assert_(np.nextafter(one, two) - one == eps) - assert_(np.nextafter(one, zero) - one < 0) - assert_(np.isnan(np.nextafter(np.nan, one))) - assert_(np.isnan(np.nextafter(one, np.nan))) - assert_(np.nextafter(one, one) == one) - -def test_nextafter(): - return _test_nextafter(np.float64) - -def test_nextafterf(): - return _test_nextafter(np.float32) - -@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(), - "Long double support buggy on win32 and PPC, ticket 1664.") -def test_nextafterl(): - return _test_nextafter(np.longdouble) - -def _test_spacing(t): - one = t(1) - eps = np.finfo(t).eps - nan = t(np.nan) - inf = t(np.inf) - with np.errstate(invalid='ignore'): - assert_(np.spacing(one) == eps) - assert_(np.isnan(np.spacing(nan))) - assert_(np.isnan(np.spacing(inf))) - assert_(np.isnan(np.spacing(-inf))) - assert_(np.spacing(t(1e30)) != 0) - -def test_spacing(): - return _test_spacing(np.float64) - -def test_spacingf(): - return _test_spacing(np.float32) - -@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(), - "Long double support buggy on win32 and PPC, ticket 1664.") -def test_spacingl(): - return _test_spacing(np.longdouble) - -def test_spacing_gfortran(): - # Reference from this fortran file, built with gfortran 4.3.3 on linux - # 32bits: - # PROGRAM test_spacing - # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) - # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) - # - # WRITE(*,*) spacing(0.00001_DBL) - # WRITE(*,*) spacing(1.0_DBL) - # WRITE(*,*) spacing(1000._DBL) - # WRITE(*,*) spacing(10500._DBL) - # - # WRITE(*,*) spacing(0.00001_SGL) - # WRITE(*,*) spacing(1.0_SGL) - # WRITE(*,*) spacing(1000._SGL) - # WRITE(*,*) spacing(10500._SGL) - # END PROGRAM - ref = {} - ref[np.float64] = [1.69406589450860068E-021, - 2.22044604925031308E-016, - 
1.13686837721616030E-013, - 1.81898940354585648E-012] - ref[np.float32] = [ - 9.09494702E-13, - 1.19209290E-07, - 6.10351563E-05, - 9.76562500E-04] - - for dt, dec in zip([np.float32, np.float64], (10, 20)): - x = np.array([1e-5, 1, 1000, 10500], dtype=dt) - assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec) - -def test_nextafter_vs_spacing(): - # XXX: spacing does not handle long double yet - for t in [np.float32, np.float64]: - for _f in [1, 1e-5, 1000]: - f = t(_f) - f1 = t(_f + 1) - assert_(np.nextafter(f, f1) - f == np.spacing(f)) - -def test_pos_nan(): - """Check np.nan is a positive nan.""" - assert_(np.signbit(np.nan) == 0) - -def test_reduceat(): - """Test bug in reduceat when structured arrays are not copied.""" - db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) - a = np.empty([100], dtype=db) - a['name'] = 'Simple' - a['time'] = 10 - a['value'] = 100 - indx = [0, 7, 15, 25] - - h2 = [] - val1 = indx[0] - for val2 in indx[1:]: - h2.append(np.add.reduce(a['value'][val1:val2])) - val1 = val2 - h2.append(np.add.reduce(a['value'][val1:])) - h2 = np.array(h2) - - # test buffered -- this should work - h1 = np.add.reduceat(a['value'], indx) - assert_array_almost_equal(h1, h2) - - # This is when the error occurs. - # test no buffer - res = np.setbufsize(32) - h1 = np.add.reduceat(a['value'], indx) - np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) - assert_array_almost_equal(h1, h2) - -def test_reduceat_empty(): - """Reduceat should work with empty arrays""" - indices = np.array([], 'i4') - x = np.array([], 'f8') - result = np.add.reduceat(x, indices) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (0,)) - # Another case with a slightly different zero-sized shape - x = np.ones((5, 2)) - result = np.add.reduceat(x, [], axis=0) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (0, 2)) - result = np.add.reduceat(x, [], axis=1) - assert_equal(result.dtype, x.dtype) - assert_equal(result.shape, (5, 0)) - -def test_complex_nan_comparisons(): - nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] - fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), - complex(1, 1), complex(-1, -1), complex(0, 0)] - - with np.errstate(invalid='ignore'): - for x in nans + fins: - x = np.array([x]) - for y in nans + fins: - y = np.array([y]) - - if np.isfinite(x) and np.isfinite(y): - continue - - assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) - assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) - assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) - assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) - assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath_complex.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath_complex.py deleted file mode 100644 index 4f3da4397acfd..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_umath_complex.py +++ /dev/null @@ -1,537 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import platform - -from numpy.testing import * -import numpy.core.umath as ncu -import numpy as np - -# TODO: branch cuts (use Pauli code) -# TODO: conj 'symmetry' -# TODO: FPU exceptions - -# At least on Windows the results of many complex functions are not conforming -# to the C99 standard. See ticket 1574. -# Ditto for Solaris (ticket 1642) and OS X on PowerPC. 
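[Editor's note] The platform-flakiness probe that opens the next hunk can be run on its own; the sketch below is an illustration with names of my choosing, not part of the patch, and restates the two C99 Annex G identities the skipped tests rely on:

    import numpy as np

    # C99 Annex G requires cexp(+inf + 0j) == inf + 0j and
    # clog(-0.0 + 0j) == -inf + pi*1j; a platform whose libm-backed
    # complex functions violate either is treated as non-conforming here.
    with np.errstate(all='ignore'):
        cexp_ok = np.exp(complex(np.inf, 0)).imag == 0
        clog_ok = np.log(complex(-0.0, 0)).imag == np.pi
    print("complex functions look C99-conforming:", cexp_ok and clog_ok)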
-with np.errstate(all='ignore'): - functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) - or (np.log(complex(np.NZERO, 0)).imag != np.pi)) -# TODO: replace with a check on whether platform-provided C99 funcs are used -skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) - -def platform_skip(func): - return dec.skipif(skip_complex_tests, - "Numpy is using complex functions (e.g. sqrt) provided by your " - "platform's C library. However, they do not seem to behave according " - "to C99 -- so C99 tests are skipped.")(func) - - -class TestCexp(object): - def test_simple(self): - check = check_complex_value - f = np.exp - - yield check, f, 1, 0, np.exp(1), 0, False - yield check, f, 0, 1, np.cos(1), np.sin(1), False - - ref = np.exp(1) * np.complex(np.cos(1), np.sin(1)) - yield check, f, 1, 1, ref.real, ref.imag, False - - @platform_skip - def test_special_values(self): - # C99: Section G 6.3.1 - - check = check_complex_value - f = np.exp - - # cexp(+-0 + 0i) is 1 + 0i - yield check, f, np.PZERO, 0, 1, 0, False - yield check, f, np.NZERO, 0, 1, 0, False - - # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU - # exception - yield check, f, 1, np.inf, np.nan, np.nan - yield check, f, -1, np.inf, np.nan, np.nan - yield check, f, 0, np.inf, np.nan, np.nan - - # cexp(inf + 0i) is inf + 0i - yield check, f, np.inf, 0, np.inf, 0 - - # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y - ref = np.complex(np.cos(1.), np.sin(1.)) - yield check, f, -np.inf, 1, np.PZERO, np.PZERO - - ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75)) - yield check, f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO - - # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y - ref = np.complex(np.cos(1.), np.sin(1.)) - yield check, f, np.inf, 1, np.inf, np.inf - - ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75)) - yield check, f, np.inf, 0.75 * np.pi, -np.inf, np.inf - - # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) - def _check_ninf_inf(dummy): - msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" - with np.errstate(invalid='ignore'): - z = f(np.array(np.complex(-np.inf, np.inf))) - if z.real != 0 or z.imag != 0: - raise AssertionError(msgform % (z.real, z.imag)) - - yield _check_ninf_inf, None - - # cexp(inf + inf i) is +-inf + NaNi and raises invalid FPU ex.
- def _check_inf_inf(dummy): - msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" - with np.errstate(invalid='ignore'): - z = f(np.array(np.complex(np.inf, np.inf))) - if not np.isinf(z.real) or not np.isnan(z.imag): - raise AssertionError(msgform % (z.real, z.imag)) - - yield _check_inf_inf, None - - # cexp(-inf + nan i) is +-0 +- 0i - def _check_ninf_nan(dummy): - msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" - with np.errstate(invalid='ignore'): - z = f(np.array(np.complex(-np.inf, np.nan))) - if z.real != 0 or z.imag != 0: - raise AssertionError(msgform % (z.real, z.imag)) - - yield _check_ninf_nan, None - - # cexp(inf + nan i) is +-inf + nan - def _check_inf_nan(dummy): - msgform = "cexp(inf, nan) is (%f, %f), expected (+-inf, nan)" - with np.errstate(invalid='ignore'): - z = f(np.array(np.complex(np.inf, np.nan))) - if not np.isinf(z.real) or not np.isnan(z.imag): - raise AssertionError(msgform % (z.real, z.imag)) - - yield _check_inf_nan, None - - # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU - # ex) - yield check, f, np.nan, 1, np.nan, np.nan - yield check, f, np.nan, -1, np.nan, np.nan - - yield check, f, np.nan, np.inf, np.nan, np.nan - yield check, f, np.nan, -np.inf, np.nan, np.nan - - # cexp(nan + nani) is nan + nani - yield check, f, np.nan, np.nan, np.nan, np.nan - - @dec.knownfailureif(True, "cexp(nan + 0I) is wrong on most implementations") - def test_special_values2(self): - # XXX: most implementations get it wrong here (including glibc <= 2.10) - # cexp(nan + 0i) is nan + 0i - check = check_complex_value - f = np.exp - yield check, f, np.nan, 0, np.nan, 0 - -class TestClog(TestCase): - def test_simple(self): - x = np.array([1+0j, 1+2j]) - y_r = np.log(np.abs(x)) + 1j * np.angle(x) - y = np.log(x) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - @platform_skip - @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") - def test_special_values(self): - xl = [] - yl = [] - - # From C99 std (Sec 6.3.2) - # XXX: check exceptions raised - # --- raise for invalid fails. - - # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' - # floating-point exception. - with np.errstate(divide='raise'): - x = np.array([np.NZERO], dtype=np.complex) - y = np.complex(-np.inf, np.pi) - self.assertRaises(FloatingPointError, np.log, x) - with np.errstate(divide='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' - # floating-point exception. - with np.errstate(divide='raise'): - x = np.array([0], dtype=np.complex) - y = np.complex(-np.inf, 0) - self.assertRaises(FloatingPointError, np.log, x) - with np.errstate(divide='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(x + i inf) returns +inf + i pi/2, for finite x. - x = np.array([complex(1, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.5 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - x = np.array([complex(-1, np.inf)], dtype=np.complex) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(x + iNaN) returns NaN + iNaN and optionally raises the - # 'invalid' floating-point exception, for finite x.
- with np.errstate(invalid='raise'): - x = np.array([complex(1., np.nan)], dtype=np.complex) - y = np.complex(np.nan, np.nan) - #self.assertRaises(FloatingPointError, np.log, x) - with np.errstate(invalid='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - with np.errstate(invalid='raise'): - x = np.array([np.inf + 1j * np.nan], dtype=np.complex) - #self.assertRaises(FloatingPointError, np.log, x) - with np.errstate(invalid='ignore'): - assert_almost_equal(np.log(x), y) - - xl.append(x) - yl.append(y) - - # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. - x = np.array([-np.inf + 1j], dtype=np.complex) - y = np.complex(np.inf, np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. - x = np.array([np.inf + 1j], dtype=np.complex) - y = np.complex(np.inf, 0) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(- inf + i inf) returns +inf + i3pi /4. - x = np.array([complex(-np.inf, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.75 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+ inf + i inf) returns +inf + ipi /4. - x = np.array([complex(np.inf, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.25 * np.pi) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(+/- inf + iNaN) returns +inf + iNaN. - x = np.array([complex(np.inf, np.nan)], dtype=np.complex) - y = np.complex(np.inf, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - x = np.array([complex(-np.inf, np.nan)], dtype=np.complex) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + iy) returns NaN + iNaN and optionally raises the - # 'invalid' floating-point exception, for finite y. - x = np.array([complex(np.nan, 1)], dtype=np.complex) - y = np.complex(np.nan, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + i inf) returns +inf + iNaN. - x = np.array([complex(np.nan, np.inf)], dtype=np.complex) - y = np.complex(np.inf, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(NaN + iNaN) returns NaN + iNaN. - x = np.array([complex(np.nan, np.nan)], dtype=np.complex) - y = np.complex(np.nan, np.nan) - assert_almost_equal(np.log(x), y) - xl.append(x) - yl.append(y) - - # clog(conj(z)) = conj(clog(z)). 
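[Editor's note] The sweep in the lines that follow reduces to the conjugate-symmetry identity clog(conj(z)) == conj(clog(z)); a minimal standalone check (the sample values are mine, chosen away from the branch cut):

    import numpy as np

    # C99: the complex log commutes with conjugation,
    # clog(conj(z)) == conj(clog(z)).
    z = np.array([1 + 2j, -3 + 0.5j, 0.25 - 4j, -1 - 1j])
    with np.errstate(divide='ignore', invalid='ignore'):
        assert np.allclose(np.log(np.conj(z)), np.conj(np.log(z)))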
- xa = np.array(xl, dtype=np.complex) - ya = np.array(yl, dtype=np.complex) - with np.errstate(divide='ignore'): - for i in range(len(xa)): - assert_almost_equal(np.log(np.conj(xa[i])), np.conj(np.log(xa[i]))) - -class TestCsqrt(object): - - def test_simple(self): - # sqrt(1) - yield check_complex_value, np.sqrt, 1, 0, 1, 0 - - # sqrt(1i) - yield check_complex_value, np.sqrt, 0, 1, 0.5*np.sqrt(2), 0.5*np.sqrt(2), False - - # sqrt(-1) - yield check_complex_value, np.sqrt, -1, 0, 0, 1 - - def test_simple_conjugate(self): - ref = np.conj(np.sqrt(np.complex(1, 1))) - def f(z): - return np.sqrt(np.conj(z)) - yield check_complex_value, f, 1, 1, ref.real, ref.imag, False - - #def test_branch_cut(self): - # _check_branch_cut(f, -1, 0, 1, -1) - - @platform_skip - def test_special_values(self): - check = check_complex_value - f = np.sqrt - - # C99: Sec G 6.4.2 - x, y = [], [] - - # csqrt(+-0 + 0i) is 0 + 0i - yield check, f, np.PZERO, 0, 0, 0 - yield check, f, np.NZERO, 0, 0, 0 - - # csqrt(x + infi) is inf + infi for any x (including NaN) - yield check, f, 1, np.inf, np.inf, np.inf - yield check, f, -1, np.inf, np.inf, np.inf - - yield check, f, np.PZERO, np.inf, np.inf, np.inf - yield check, f, np.NZERO, np.inf, np.inf, np.inf - yield check, f, np.inf, np.inf, np.inf, np.inf - yield check, f, -np.inf, np.inf, np.inf, np.inf - yield check, f, -np.nan, np.inf, np.inf, np.inf - - # csqrt(x + nani) is nan + nani for any finite x - yield check, f, 1, np.nan, np.nan, np.nan - yield check, f, -1, np.nan, np.nan, np.nan - yield check, f, 0, np.nan, np.nan, np.nan - - # csqrt(-inf + yi) is +0 + infi for any finite y > 0 - yield check, f, -np.inf, 1, np.PZERO, np.inf - - # csqrt(inf + yi) is +inf + 0i for any finite y > 0 - yield check, f, np.inf, 1, np.inf, np.PZERO - - # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) - def _check_ninf_nan(dummy): - msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" - z = np.sqrt(np.array(np.complex(-np.inf, np.nan))) - #Fixme: ugly workaround for isinf bug. 
- with np.errstate(invalid='ignore'): - if not (np.isnan(z.real) and np.isinf(z.imag)): - raise AssertionError(msgform % (z.real, z.imag)) - - yield _check_ninf_nan, None - - # csqrt(+inf + nani) is inf + nani - yield check, f, np.inf, np.nan, np.inf, np.nan - - # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x - # + nani) - yield check, f, np.nan, 0, np.nan, np.nan - yield check, f, np.nan, 1, np.nan, np.nan - yield check, f, np.nan, np.nan, np.nan, np.nan - - # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch - # cuts first) - -class TestCpow(TestCase): - def setUp(self): - self.olderr = np.seterr(invalid='ignore') - - def tearDown(self): - np.seterr(**self.olderr) - - def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) - y_r = x ** 2 - y = np.power(x, 2) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - def test_scalar(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) - lx = list(range(len(x))) - # Compute the values for complex type in python - p_r = [complex(x[i]) ** complex(y[i]) for i in lx] - # Substitute a result allowed by C99 standard - p_r[4] = complex(np.inf, np.nan) - # Do the same with numpy complex scalars - n_r = [x[i] ** y[i] for i in lx] - for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) - - def test_array(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) - lx = list(range(len(x))) - # Compute the values for complex type in python - p_r = [complex(x[i]) ** complex(y[i]) for i in lx] - # Substitute a result allowed by C99 standard - p_r[4] = complex(np.inf, np.nan) - # Do the same with numpy arrays - n_r = x ** y - for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) - -class TestCabs(object): - def setUp(self): - self.olderr = np.seterr(invalid='ignore') - - def tearDown(self): - np.seterr(**self.olderr) - - def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) - y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) - y = np.abs(x) - for i in range(len(x)): - assert_almost_equal(y[i], y_r[i]) - - def test_fabs(self): - # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) - x = np.array([1+0j], dtype=np.complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(1, np.NZERO)], dtype=np.complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex) - assert_array_equal(np.abs(x), np.real(x)) - - x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex) - assert_array_equal(np.abs(x), np.real(x)) - - def test_cabs_inf_nan(self): - x, y = [], [] - - # cabs(+-nan + nani) returns nan - x.append(np.nan) - y.append(np.nan) - yield check_real_value, np.abs, np.nan, np.nan, np.nan - - x.append(np.nan) - y.append(-np.nan) - yield check_real_value, np.abs, -np.nan, np.nan, np.nan - - # According to C99 standard, if exactly one of the real/part is inf and - # the other nan, then cabs should return inf - x.append(np.inf) - y.append(np.nan) - yield check_real_value, np.abs, np.inf, np.nan, np.inf - - x.append(-np.inf) - y.append(np.nan) - yield check_real_value, np.abs, -np.inf, np.nan, np.inf - - # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) - def f(a): - return np.abs(np.conj(a)) - def g(a, b): - return np.abs(np.complex(a, b)) - - xa = np.array(x, dtype=np.complex) - for i in range(len(xa)): - ref = g(x[i], y[i]) - 
yield check_real_value, f, x[i], y[i], ref - -class TestCarg(object): - def test_simple(self): - check_real_value(ncu._arg, 1, 0, 0, False) - check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) - - check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) - check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) - - @dec.knownfailureif(True, - "Complex arithmetic with signed zero is buggy on most implementations") - def test_zero(self): - # carg(-0 +- 0i) returns +- pi - yield check_real_value, ncu._arg, np.NZERO, np.PZERO, np.pi, False - yield check_real_value, ncu._arg, np.NZERO, np.NZERO, -np.pi, False - - # carg(+0 +- 0i) returns +- 0 - yield check_real_value, ncu._arg, np.PZERO, np.PZERO, np.PZERO - yield check_real_value, ncu._arg, np.PZERO, np.NZERO, np.NZERO - - # carg(x +- 0i) returns +- 0 for x > 0 - yield check_real_value, ncu._arg, 1, np.PZERO, np.PZERO, False - yield check_real_value, ncu._arg, 1, np.NZERO, np.NZERO, False - - # carg(x +- 0i) returns +- pi for x < 0 - yield check_real_value, ncu._arg, -1, np.PZERO, np.pi, False - yield check_real_value, ncu._arg, -1, np.NZERO, -np.pi, False - - # carg(+- 0 + yi) returns pi/2 for y > 0 - yield check_real_value, ncu._arg, np.PZERO, 1, 0.5 * np.pi, False - yield check_real_value, ncu._arg, np.NZERO, 1, 0.5 * np.pi, False - - # carg(+- 0 + yi) returns -pi/2 for y < 0 - yield check_real_value, ncu._arg, np.PZERO, -1, -0.5 * np.pi, False - yield check_real_value, ncu._arg, np.NZERO, -1, -0.5 * np.pi, False - - #def test_branch_cuts(self): - # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) - - def test_special_values(self): - # carg(-np.inf +- yi) returns +-pi for finite y > 0 - yield check_real_value, ncu._arg, -np.inf, 1, np.pi, False - yield check_real_value, ncu._arg, -np.inf, -1, -np.pi, False - - # carg(np.inf +- yi) returns +-0 for finite y > 0 - yield check_real_value, ncu._arg, np.inf, 1, np.PZERO, False - yield check_real_value, ncu._arg, np.inf, -1, np.NZERO, False - - # carg(x +- np.infi) returns +-pi/2 for finite x - yield check_real_value, ncu._arg, 1, np.inf, 0.5 * np.pi, False - yield check_real_value, ncu._arg, 1, -np.inf, -0.5 * np.pi, False - - # carg(-np.inf +- np.infi) returns +-3pi/4 - yield check_real_value, ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False - yield check_real_value, ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False - - # carg(np.inf +- np.infi) returns +-pi/4 - yield check_real_value, ncu._arg, np.inf, np.inf, 0.25 * np.pi, False - yield check_real_value, ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False - - # carg(x + yi) returns np.nan if x or y is nan - yield check_real_value, ncu._arg, np.nan, 0, np.nan, False - yield check_real_value, ncu._arg, 0, np.nan, np.nan, False - - yield check_real_value, ncu._arg, np.nan, np.inf, np.nan, False - yield check_real_value, ncu._arg, np.inf, np.nan, np.nan, False - -def check_real_value(f, x1, y1, x, exact=True): - z1 = np.array([complex(x1, y1)]) - if exact: - assert_equal(f(z1), x) - else: - assert_almost_equal(f(z1), x) - -def check_complex_value(f, x1, y1, x2, y2, exact=True): - z1 = np.array([complex(x1, y1)]) - z2 = np.complex(x2, y2) - with np.errstate(invalid='ignore'): - if exact: - assert_equal(f(z1), z2) - else: - assert_almost_equal(f(z1), z2) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_unicode.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_unicode.py deleted file mode 100644 index d184b3a9fe99b..0000000000000 ---
a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/tests/test_unicode.py +++ /dev/null @@ -1,357 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.testing import * -from numpy.core import * -from numpy.compat import asbytes, sixu, unicode - -# Guess the UCS length for this python interpreter -if sys.version_info[:2] >= (3, 3): - # Python 3.3 uses a flexible string representation - ucs4 = False - def buffer_length(arr): - if isinstance(arr, unicode): - arr = str(arr) - return (sys.getsizeof(arr+"a") - sys.getsizeof(arr)) * len(arr) - v = memoryview(arr) - if v.shape is None: - return len(v) * v.itemsize - else: - return prod(v.shape) * v.itemsize -elif sys.version_info[0] >= 3: - import array as _array - ucs4 = (_array.array('u').itemsize == 4) - def buffer_length(arr): - if isinstance(arr, unicode): - return _array.array('u').itemsize * len(arr) - v = memoryview(arr) - if v.shape is None: - return len(v) * v.itemsize - else: - return prod(v.shape) * v.itemsize -else: - if len(buffer(sixu('u'))) == 4: - ucs4 = True - else: - ucs4 = False - def buffer_length(arr): - if isinstance(arr, ndarray): - return len(arr.data) - return len(buffer(arr)) - -# In both cases below we need to make sure that the byte swapped value (as -# UCS4) is still a valid unicode: -# Value that can be represented in UCS2 interpreters -ucs2_value = sixu('\u0900') -# Value that cannot be represented in UCS2 interpreters (but can in UCS4) -ucs4_value = sixu('\U00100900') - - -############################################################ -# Creation tests -############################################################ -class create_zeros(object): - """Check the creation of zero-valued arrays""" - - def content_check(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) - # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) - # Small check that data in array element is ok - self.assertTrue(ua_scalar == sixu('')) - # Encode to ascii and double check - self.assertTrue(ua_scalar.encode('ascii') == asbytes('')) - # Check buffer lengths for scalars - if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 0) - else: - self.assertTrue(buffer_length(ua_scalar) == 0) - - def test_zeros0D(self): - """Check creation of 0-dimensional objects""" - ua = zeros((), dtype='U%s' % self.ulen) - self.content_check(ua, ua[()], 4*self.ulen) - - def test_zerosSD(self): - """Check creation of single-dimensional objects""" - ua = zeros((2,), dtype='U%s' % self.ulen) - self.content_check(ua, ua[0], 4*self.ulen*2) - self.content_check(ua, ua[1], 4*self.ulen*2) - - def test_zerosMD(self): - """Check creation of multi-dimensional objects""" - ua = zeros((2, 3, 4), dtype='U%s' % self.ulen) - self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) - self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) - - -class test_create_zeros_1(create_zeros, TestCase): - """Check the creation of zero-valued arrays (size 1)""" - ulen = 1 - - -class test_create_zeros_2(create_zeros, TestCase): - """Check the creation of zero-valued arrays (size 2)""" - ulen = 2 - - -class test_create_zeros_1009(create_zeros, TestCase): - """Check the creation of zero-valued arrays (size 1009)""" - ulen = 1009 - - -class create_values(object): - """Check the creation of unicode arrays with values""" - - def content_check(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:])
== self.ulen) - # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) - # Small check that data in array element is ok - self.assertTrue(ua_scalar == self.ucs_value*self.ulen) - # Encode to UTF-8 and double check - self.assertTrue(ua_scalar.encode('utf-8') == \ - (self.ucs_value*self.ulen).encode('utf-8')) - # Check buffer lengths for scalars - if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) - else: - if self.ucs_value == ucs4_value: - # In UCS2, the \U0010FFFF will be represented using a - # surrogate *pair* - self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) - else: - # In UCS2, the \uFFFF will be represented using a - # regular 2-byte word - self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) - - def test_values0D(self): - """Check creation of 0-dimensional objects with values""" - ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) - self.content_check(ua, ua[()], 4*self.ulen) - - def test_valuesSD(self): - """Check creation of single-dimensional objects with values""" - ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) - self.content_check(ua, ua[0], 4*self.ulen*2) - self.content_check(ua, ua[1], 4*self.ulen*2) - - def test_valuesMD(self): - """Check creation of multi-dimensional objects with values""" - ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) - self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) - self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) - - -class test_create_values_1_ucs2(create_values, TestCase): - """Check the creation of valued arrays (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - - -class test_create_values_1_ucs4(create_values, TestCase): - """Check the creation of valued arrays (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - - -class test_create_values_2_ucs2(create_values, TestCase): - """Check the creation of valued arrays (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - - -class test_create_values_2_ucs4(create_values, TestCase): - """Check the creation of valued arrays (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value - - -class test_create_values_1009_ucs2(create_values, TestCase): - """Check the creation of valued arrays (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - - -class test_create_values_1009_ucs4(create_values, TestCase): - """Check the creation of valued arrays (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - -############################################################ -# Assignment tests -############################################################ - -class assign_values(object): - """Check the assignment of unicode arrays with values""" - - def content_check(self, ua, ua_scalar, nbytes): - - # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) - # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) - # Small check that data in array element is ok - self.assertTrue(ua_scalar == self.ucs_value*self.ulen) - # Encode to UTF-8 and double check - self.assertTrue(ua_scalar.encode('utf-8') == \ - (self.ucs_value*self.ulen).encode('utf-8')) - # Check buffer lengths for scalars - if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) - else: - if self.ucs_value == ucs4_value: - # In UCS2, the \U0010FFFF will be represented using a - # surrogate *pair* - self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) - else: - # In UCS2, the 
\uFFFF will be represented using a - # regular 2-byte word - self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) - - def test_values0D(self): - """Check assignment of 0-dimensional objects with values""" - ua = zeros((), dtype='U%s' % self.ulen) - ua[()] = self.ucs_value*self.ulen - self.content_check(ua, ua[()], 4*self.ulen) - - def test_valuesSD(self): - """Check assignment of single-dimensional objects with values""" - ua = zeros((2,), dtype='U%s' % self.ulen) - ua[0] = self.ucs_value*self.ulen - self.content_check(ua, ua[0], 4*self.ulen*2) - ua[1] = self.ucs_value*self.ulen - self.content_check(ua, ua[1], 4*self.ulen*2) - - def test_valuesMD(self): - """Check assignment of multi-dimensional objects with values""" - ua = zeros((2, 3, 4), dtype='U%s' % self.ulen) - ua[0, 0, 0] = self.ucs_value*self.ulen - self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) - ua[-1, -1, -1] = self.ucs_value*self.ulen - self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) - - -class test_assign_values_1_ucs2(assign_values, TestCase): - """Check the assignment of valued arrays (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - - -class test_assign_values_1_ucs4(assign_values, TestCase): - """Check the assignment of valued arrays (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - - -class test_assign_values_2_ucs2(assign_values, TestCase): - """Check the assignment of valued arrays (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - - -class test_assign_values_2_ucs4(assign_values, TestCase): - """Check the assignment of valued arrays (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value - - -class test_assign_values_1009_ucs2(assign_values, TestCase): - """Check the assignment of valued arrays (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - - -class test_assign_values_1009_ucs4(assign_values, TestCase): - """Check the assignment of valued arrays (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - - -############################################################ -# Byteorder tests -############################################################ - -class byteorder_values: - """Check the byteorder of unicode arrays in round-trip conversions""" - - def test_values0D(self): - """Check byteorder of 0-dimensional objects""" - ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - # This changes the interpretation of the data region (but not the - # actual data), therefore the returned scalars are not - # the same (they are byte-swapped versions of each other). 
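[Editor's note] A standalone version of the round-trip the following assertions perform (the array contents are mine; ndarray.newbyteorder is the NumPy 1.x API this file uses throughout):

    import numpy as np

    # newbyteorder() reinterprets the same buffer with swapped byte order:
    # one swap changes the scalar read back, two swaps round-trip.
    ua = np.array([u'\u0900'], dtype='U1')
    ua2 = ua.newbyteorder()
    assert ua[0] != ua2[0]                    # same bytes, new interpretation
    assert (ua2.newbyteorder() == ua).all()   # round-trip restores equality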
- self.assertTrue(ua[()] != ua2[()]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - - def test_valuesSD(self): - """Check byteorder of single-dimensional objects""" - ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - self.assertTrue(ua[0] != ua2[0]) - self.assertTrue(ua[-1] != ua2[-1]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - - def test_valuesMD(self): - """Check byteorder of multi-dimensional objects""" - ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, - dtype='U%s' % self.ulen) - ua2 = ua.newbyteorder() - self.assertTrue(ua[0, 0, 0] != ua2[0, 0, 0]) - self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1]) - ua3 = ua2.newbyteorder() - # Arrays must be equal after the round-trip - assert_equal(ua, ua3) - - -class test_byteorder_1_ucs2(byteorder_values, TestCase): - """Check the byteorder in unicode (size 1, UCS2 values)""" - ulen = 1 - ucs_value = ucs2_value - - -class test_byteorder_1_ucs4(byteorder_values, TestCase): - """Check the byteorder in unicode (size 1, UCS4 values)""" - ulen = 1 - ucs_value = ucs4_value - - -class test_byteorder_2_ucs2(byteorder_values, TestCase): - """Check the byteorder in unicode (size 2, UCS2 values)""" - ulen = 2 - ucs_value = ucs2_value - - -class test_byteorder_2_ucs4(byteorder_values, TestCase): - """Check the byteorder in unicode (size 2, UCS4 values)""" - ulen = 2 - ucs_value = ucs4_value - - -class test_byteorder_1009_ucs2(byteorder_values, TestCase): - """Check the byteorder in unicode (size 1009, UCS2 values)""" - ulen = 1009 - ucs_value = ucs2_value - - -class test_byteorder_1009_ucs4(byteorder_values, TestCase): - """Check the byteorder in unicode (size 1009, UCS4 values)""" - ulen = 1009 - ucs_value = ucs4_value - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath.py deleted file mode 100644 index 30f3b0b135d5c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'umath.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath_tests.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath_tests.py deleted file mode 100644 index 9ae91de7e2b63..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/core/umath_tests.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'umath_tests.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ctypeslib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ctypeslib.py deleted file mode 100644 index 961fa601261f8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ctypeslib.py +++ /dev/null @@ -1,426 +0,0 @@ -""" -============================ -``ctypes`` Utility Functions -============================ - -See Also ---------- -load_library : Load a C library. -ndpointer : Array restype/argtype with verification. 
-as_ctypes : Create a ctypes array from an ndarray. -as_array : Create an ndarray from a ctypes array. - -References ----------- -.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes - -Examples --------- -Load the C library: - ->>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP - -Our result type, an ndarray that must be of type double, be 1-dimensional -and is C-contiguous in memory: - ->>> array_1d_double = np.ctypeslib.ndpointer( -... dtype=np.double, -... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP - -Our C-function typically takes an array and updates its values -in-place. For example:: - - void foo_func(double* x, int length) - { - int i; - for (i = 0; i < length; i++) { - x[i] = i*i; - } - } - -We wrap it using: - ->>> _lib.foo_func.restype = None #doctest: +SKIP ->>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP - -Then, we're ready to call ``foo_func``: - ->>> out = np.empty(15, dtype=np.double) ->>> _lib.foo_func(out, len(out)) #doctest: +SKIP - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library', - 'c_intp', 'as_ctypes', 'as_array'] - -import sys, os -from numpy import integer, ndarray, dtype as _dtype, deprecate, array -from numpy.core.multiarray import _flagdict, flagsobj - -try: - import ctypes -except ImportError: - ctypes = None - -if ctypes is None: - def _dummy(*args, **kwds): - """ - Dummy object that raises an ImportError if ctypes is not available. - - Raises - ------ - ImportError - If ctypes is not available. - - """ - raise ImportError("ctypes is not available.") - ctypes_load_library = _dummy - load_library = _dummy - as_ctypes = _dummy - as_array = _dummy - from numpy import intp as c_intp - _ndptr_base = object -else: - import numpy.core._internal as nic - c_intp = nic._getintp_ctype() - del nic - _ndptr_base = ctypes.c_void_p - - # Adapted from Albert Strasheim - def load_library(libname, loader_path): - if ctypes.__version__ < '1.0.1': - import warnings - warnings.warn("All features of ctypes interface may not work " \ - "with ctypes < 1.0.1") - - ext = os.path.splitext(libname)[1] - if not ext: - # Try to load library with platform-specific name, otherwise - # default to libname.[so|pyd]. Sometimes, these files are built - # erroneously on non-linux platforms. 
- from numpy.distutils.misc_util import get_shared_lib_extension - so_ext = get_shared_lib_extension() - libname_ext = [libname + so_ext] - # mac, windows and linux >= py3.2 shared library and loadable - # module have different extensions so try both - so_ext2 = get_shared_lib_extension(is_python_ext=True) - if not so_ext2 == so_ext: - libname_ext.insert(0, libname + so_ext2) - else: - libname_ext = [libname] - - loader_path = os.path.abspath(loader_path) - if not os.path.isdir(loader_path): - libdir = os.path.dirname(loader_path) - else: - libdir = loader_path - - for ln in libname_ext: - libpath = os.path.join(libdir, ln) - if os.path.exists(libpath): - try: - return ctypes.cdll[libpath] - except OSError: - ## defective lib file - raise - ## if no successful return in the libname_ext loop: - raise OSError("no file with expected extension") - - ctypes_load_library = deprecate(load_library, 'ctypes_load_library', - 'load_library') - -def _num_fromflags(flaglist): - num = 0 - for val in flaglist: - num += _flagdict[val] - return num - -_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', - 'OWNDATA', 'UPDATEIFCOPY'] -def _flags_fromnum(num): - res = [] - for key in _flagnames: - value = _flagdict[key] - if (num & value): - res.append(key) - return res - - -class _ndptr(_ndptr_base): - - def _check_retval_(self): - """This method is called when this class is used as the .restype - attribute for a shared-library function. It constructs a numpy - array from a void pointer.""" - return array(self) - - @property - def __array_interface__(self): - return {'descr': self._dtype_.descr, - '__ref': self, - 'strides': None, - 'shape': self._shape_, - 'version': 3, - 'typestr': self._dtype_.descr[0][1], - 'data': (self.value, False), - } - - @classmethod - def from_param(cls, obj): - if not isinstance(obj, ndarray): - raise TypeError("argument must be an ndarray") - if cls._dtype_ is not None \ - and obj.dtype != cls._dtype_: - raise TypeError("array must have data type %s" % cls._dtype_) - if cls._ndim_ is not None \ - and obj.ndim != cls._ndim_: - raise TypeError("array must have %d dimension(s)" % cls._ndim_) - if cls._shape_ is not None \ - and obj.shape != cls._shape_: - raise TypeError("array must have shape %s" % str(cls._shape_)) - if cls._flags_ is not None \ - and ((obj.flags.num & cls._flags_) != cls._flags_): - raise TypeError("array must have flags %s" % - _flags_fromnum(cls._flags_)) - return obj.ctypes - - -# Factory for an array-checking class with from_param defined for -# use with ctypes argtypes mechanism -_pointer_type_cache = {} -def ndpointer(dtype=None, ndim=None, shape=None, flags=None): - """ - Array-checking restype/argtypes. - - An ndpointer instance is used to describe an ndarray in restypes - and argtypes specifications. This approach is more flexible than - using, for example, ``POINTER(c_double)``, since several restrictions - can be specified, which are verified upon calling the ctypes function. - These include data type, number of dimensions, shape and flags. If a - given array does not satisfy the specified restrictions, - a ``TypeError`` is raised. - - Parameters - ---------- - dtype : data-type, optional - Array data-type. - ndim : int, optional - Number of array dimensions. - shape : tuple of ints, optional - Array shape.
- flags : str or tuple of str - Array flags; may be one or more of: - - - C_CONTIGUOUS / C / CONTIGUOUS - - F_CONTIGUOUS / F / FORTRAN - - OWNDATA / O - - WRITEABLE / W - - ALIGNED / A - - UPDATEIFCOPY / U - - Returns - ------- - klass : ndpointer type object - A type object, which is an ``_ndptr`` instance containing - dtype, ndim, shape and flags information. - - Raises - ------ - TypeError - If a given array does not satisfy the specified restrictions. - - Examples - -------- - >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64, - ... ndim=1, - ... flags='C_CONTIGUOUS')] - ... #doctest: +SKIP - >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64)) - ... #doctest: +SKIP - - """ - - if dtype is not None: - dtype = _dtype(dtype) - num = None - if flags is not None: - if isinstance(flags, str): - flags = flags.split(',') - elif isinstance(flags, (int, integer)): - num = flags - flags = _flags_fromnum(num) - elif isinstance(flags, flagsobj): - num = flags.num - flags = _flags_fromnum(num) - if num is None: - try: - flags = [x.strip().upper() for x in flags] - except: - raise TypeError("invalid flags specification") - num = _num_fromflags(flags) - try: - return _pointer_type_cache[(dtype, ndim, shape, num)] - except KeyError: - pass - if dtype is None: - name = 'any' - elif dtype.names: - name = str(id(dtype)) - else: - name = dtype.str - if ndim is not None: - name += "_%dd" % ndim - if shape is not None: - try: - strshape = [str(x) for x in shape] - except TypeError: - strshape = [str(shape)] - shape = (shape,) - shape = tuple(shape) - name += "_"+"x".join(strshape) - if flags is not None: - name += "_"+"_".join(flags) - else: - flags = [] - klass = type("ndpointer_%s"%name, (_ndptr,), - {"_dtype_": dtype, - "_shape_" : shape, - "_ndim_" : ndim, - "_flags_" : num}) - _pointer_type_cache[(dtype, ndim, shape, num)] = klass - return klass - -if ctypes is not None: - ct = ctypes - ################################################################ - # simple types - - # maps the numpy typecodes like ' 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - \ No newline at end of file diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__init__.py deleted file mode 100644 index b43e08b052a1a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -if sys.version_info[0] < 3: - from .__version__ import version as __version__ - # Must import local ccompiler ASAP in order to get - # customized CCompiler.spawn effective. - from . import ccompiler - from . import unixccompiler - - from .info import __doc__ - from .npy_pkg_config import * - - try: - import __config__ - _INSTALLED = True - except ImportError: - _INSTALLED = False -else: - from numpy.distutils.__version__ import version as __version__ - # Must import local ccompiler ASAP in order to get - # customized CCompiler.spawn effective.
- import numpy.distutils.ccompiler - import numpy.distutils.unixccompiler - - from numpy.distutils.info import __doc__ - from numpy.distutils.npy_pkg_config import * - - try: - import numpy.distutils.__config__ - _INSTALLED = True - except ImportError: - _INSTALLED = False - -if _INSTALLED: - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__version__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__version__.py deleted file mode 100644 index 969decbba20e7..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/__version__.py +++ /dev/null @@ -1,6 +0,0 @@ -from __future__ import division, absolute_import, print_function - -major = 0 -minor = 4 -micro = 0 -version = '%(major)d.%(minor)d.%(micro)d' % (locals()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/ccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/ccompiler.py deleted file mode 100644 index 8484685c0f975..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/ccompiler.py +++ /dev/null @@ -1,656 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import re -import os -import sys -import types -from copy import copy - -from distutils.ccompiler import * -from distutils import ccompiler -from distutils.errors import DistutilsExecError, DistutilsModuleError, \ - DistutilsPlatformError -from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion - -from numpy.distutils import log -from numpy.distutils.exec_command import exec_command -from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ - quote_args -from numpy.distutils.compat import get_exception - - -def replace_method(klass, method_name, func): - if sys.version_info[0] < 3: - m = types.MethodType(func, None, klass) - else: - # Py3k does not have unbound method anymore, MethodType does not work - m = lambda self, *args, **kw: func(self, *args, **kw) - setattr(klass, method_name, m) - -# Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None): - """ - Execute a command in a sub-process. - - Parameters - ---------- - cmd : str - The command to execute. - display : str or sequence of str, optional - The text to add to the log file kept by `numpy.distutils`. - If not given, `display` is equal to `cmd`. - - Returns - ------- - None - - Raises - ------ - DistutilsExecError - If the command failed, i.e. the exit status was not 0. - - """ - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - s, o = exec_command(cmd) - if s: - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - try: - print(o) - except UnicodeError: - # When installing through pip, `o` can contain non-ascii chars - pass - if re.search('Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg)) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """ - Return the name of the object files for the given source files. - - Parameters - ---------- - source_filenames : list of str - The list of paths to source files. Paths can be either relative or - absolute, this is handled transparently. 
- strip_dir : bool, optional - Whether to strip the directory from the returned paths. If True, - the file name prepended by `output_dir` is returned. Default is False. - output_dir : str, optional - If given, this path is prepended to the returned paths to the - object files. - - Returns - ------- - obj_names : list of str - The list of paths to the object files corresponding to the source - files in `source_filenames`. - - """ - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. - i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir, base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """ - Compile one or more source files. - - Please refer to the Python distutils API reference for more details. - - Parameters - ---------- - sources : list of str - A list of filenames - output_dir : str, optional - Path to the output directory. - macros : list of tuples - A list of macro definitions. - include_dirs : list of str, optional - The directories to add to the default include file search path for - this compilation only. - debug : bool, optional - Whether or not to output debug symbols in or alongside the object - file(s). - extra_preargs, extra_postargs : ? - Extra pre- and post-arguments. - depends : list of str, optional - A list of file names that all targets depend on. - - Returns - ------- - objects : list of str - A list of object file names, one per source file `sources`. - - Raises - ------ - CompileError - If compilation fails. - - """ - # This method is effective only with Python >=2.3 distutils. - # Any changes here should be applied also to fcompiler.compile - # method to support pre Python 2.3 distutils. 
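[Editor's note] The replace_method calls in this hunk all follow one pattern: a free function is rebound as a method of an existing class so every instance picks it up. A toy standalone sketch (class and names invented for illustration; on Python 3 a plain setattr suffices, which is what the module's lambda shim emulates):

    class Greeter(object):
        def greet(self):
            return "hello"

    def loud_greet(self):
        return "HELLO"

    def replace_method(klass, method_name, func):
        # Python 3 functions are descriptors, so attaching one to a class
        # makes it behave as a regular method on every instance.
        setattr(klass, method_name, func)

    replace_method(Greeter, 'greet', loud_greet)
    assert Greeter().greet() == "HELLO"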
- if not sources: - return [] - # FIXME:RELATIVE_IMPORT - if sys.version_info[0] < 3: - from .fcompiler import FCompiler - else: - from numpy.distutils.fcompiler import FCompiler - if isinstance(self, FCompiler): - display = [] - for fc in ['f77', 'f90', 'fix']: - fcomp = getattr(self, 'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - # build any sources in same order as they were originally specified - # especially important for fortran .f90 files using modules - if isinstance(self, FCompiler): - objects_to_build = list(build.keys()) - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - else: - for obj, (src, ext) in build.items(): - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from `distutils.cmd.Command`. - ignore : sequence of str, optional - List of `CCompiler` commands (without ``'set_'``) that should not be - altered. Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name, value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = list(compiler.executables.keys()) - for key in ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch', - 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler, key): - v = getattr(compiler, key) - mx = max(mx, len(key)) - props.append((key, repr(v))) - lines = [] - format = '%-' + repr(mx+1) + 's = %s' - for prop in props: - lines.append(format % prop) - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. 
- - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. - - """ - if 0: - for attrname in ['include_dirs', 'define', 'undef', - 'libraries', 'library_dirs', - 'rpath', 'link_objects']: - attr = getattr(self, attrname, None) - if not attr: - continue - log.info("compiler '%s' is set to %s" % (attrname, attr)) - try: - self.get_version() - except: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls `distutils.sysconfig.customize_compiler` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. - log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. - try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a, b)]\ - + self.compiler[1:] - else: - if hasattr(self, 'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - log.warn('Missing compiler_cxx fix for '+self.__class__.__name__) - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. - Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. - - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a `CCompiler` class. `matcher` takes a single parameter, - a version string. 
- - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n', ' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while True: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. - - Returns - ------- - version : str or None - Version string, in the format of `distutils.version.LooseVersion`. - - """ - if not force and hasattr(self, 'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - status, output = exec_command(version_cmd, use_tee=0) - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - """ - Return the C++ compiler. - - Parameters - ---------- - None - - Returns - ------- - cxx : class instance - The C++ compiler, as a `CCompiler` instance. 
- - """ - if self.compiler_type=='msvc': return self - cxx = copy(self) - cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] - if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ - + cxx.linker_so[2:] - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") -ccompiler._default_compilers += (('linux.*', 'intel'), - ('linux.*', 'intele'), - ('linux.*', 'intelem'), - ('linux.*', 'pathcc')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError: - msg = str(get_exception()) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError: - msg = str(get_exception()) - raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ - module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + - "in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - library_dirs = quote_args(library_dirs) - runtime_library_dirs = quote_args(runtime_library_dirs) - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.'+_cc+'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - -_distutils_gen_preprocess_options = gen_preprocess_options -def gen_preprocess_options (macros, include_dirs): - include_dirs = quote_args(include_dirs) - return _distutils_gen_preprocess_options(macros, include_dirs) -ccompiler.gen_preprocess_options = gen_preprocess_options - -##Fix distutils.util.split_quoted: -# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears -# that removing this fix causes f2py problems on Windows XP (see ticket #723). -# Specifically, on WinXP when gfortran is installed in a directory path, which -# contains spaces, then f2py is unable to find it. -import re -import string -_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) -_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") -_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"') -_has_white_re = re.compile(r'\s') -def split_quoted(s): - s = s.strip() - words = [] - pos = 0 - - while s: - m = _wordchars_re.match(s, pos) - end = m.end() - if end == len(s): - words.append(s[:end]) - break - - if s[end] in string.whitespace: # unescaped, unquoted whitespace: now - words.append(s[:end]) # we definitely have a word delimiter - s = s[end:].lstrip() - pos = 0 - - elif s[end] == '\\': # preserve whatever is being escaped; - # will become part of the current word - s = s[:end] + s[end+1:] - pos = end+1 - - else: - if s[end] == "'": # slurp singly-quoted string - m = _squote_re.match(s, end) - elif s[end] == '"': # slurp doubly-quoted string - m = _dquote_re.match(s, end) - else: - raise RuntimeError("this can't happen (bad char '%c')" % s[end]) - - if m is None: - raise ValueError("bad string (mismatched %s quotes?)" % s[end]) - - (beg, end) = m.span() - if _has_white_re.search(s[beg+1:end-1]): - s = s[:beg] + s[beg+1:end-1] + s[end:] - pos = m.end() - 2 - else: - # Keeping quotes when a quoted word does not contain - # white-space. 
XXX: send a patch to distutils - pos = m.end() - - if pos >= len(s): - words.append(s) - break - - return words -ccompiler.split_quoted = split_quoted -##Fix distutils.util.split_quoted: diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/__init__.py deleted file mode 100644 index 76a2600723def..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands. - -""" -from __future__ import division, absolute_import, print_function - -__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" - -distutils_all = [ #'build_py', - 'clean', - 'install_clib', - 'install_scripts', - 'bdist', - 'bdist_dumb', - 'bdist_wininst', - ] - -__import__('distutils.command', globals(), locals(), distutils_all) - -__all__ = ['build', - 'config_compiler', - 'config', - 'build_src', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'install', - 'install_data', - 'install_headers', - 'install_lib', - 'bdist_rpm', - 'sdist', - ] + distutils_all diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/autodist.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/autodist.py deleted file mode 100644 index 1b9b1dd57c58d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/autodist.py +++ /dev/null @@ -1,43 +0,0 @@ -"""This module implements additional tests ala autoconf which can be useful. - -""" -from __future__ import division, absolute_import, print_function - - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = """ -#ifndef __cplusplus -static %(inline)s int static_func (void) -{ - return 0; -} -%(inline)s int nostatic_func (void) -{ - return 0; -} -#endif""" - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, None) - if st: - return kw - - return '' - -def check_compiler_gcc4(cmd): - """Return True if the C compiler is GCC 4.x.""" - cmd._check_compiler() - body = """ -int -main() -{ -#if (! defined __GNUC__) || (__GNUC__ < 4) -#error gcc >= 4 required -#endif -} -""" - return cmd.try_compile(body, None, None) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/bdist_rpm.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 3e52a503b1721..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name.
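# (A hypothetical illustration of the rewrite performed below: if the
# packager runs `python my_setup.py bdist_rpm`, a generated spec line such
# as 'env python setup.py build' becomes 'env python my_setup.py build';
# the spec contents shown here are made up.)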
- setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py', setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build.py deleted file mode 100644 index b6912be15e41a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build.py +++ /dev/null @@ -1,39 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_clib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_clib.py deleted file mode 100644 index 84ca87250170e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,284 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. 
-""" -from __future__ import division, absolute_import, print_function - -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import filter_sources, has_f_sources,\ - has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \ - get_numpy_include_dirs - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0]+'=',)+_l[_i][1:] -# - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ] - - boolean_options = old_build_clib.boolean_options + ['inplace'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - return - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources', [])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources', [])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. - self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language', None) - if l and l not in languages: languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - 
fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c')=='f90' - - # save source type information so that build_ext can use it. - source_languages = [] - if c_sources: source_languages.append('c') - if cxx_sources: source_languages.append('c++') - if requiref90: source_languages.append('f90') - elif f_sources: source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends', []) - if not (self.force or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc', {}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script '\ - 'for fortran compiler: %s' \ - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources"\ - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or [] - - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - extra_postargs = build_info.get('extra_compiler_args') or [] - - include_dirs.extend(get_numpy_include_dirs()) - # where compiled F90 module files are: - module_dirs = build_info.get('module_dirs') or [] - module_build_dir = os.path.dirname(lib_file) - if requiref90: self.mkpath(module_build_dir) - - if compiler.compiler_type=='msvc': - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - objects = [] - if c_sources: - log.info("compiling C sources") - objects = compiler.compile(c_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if cxx_sources: - log.info("compiling C++ sources") - cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile(cxx_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - objects.extend(cxx_objects) - - if f_sources or fmodule_sources: - extra_postargs = [] - f_objects = [] - - if requiref90: - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options(\ - 
module_dirs, module_build_dir) - - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if requiref90 and self._f_compiler.module_dir_switch is None: - # move new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f)==os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' \ - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - objects.extend(f_objects) - - # assume that default linker is suitable for - # linking Fortran object files - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries', []) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo[1].get('libraries', [])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_ext.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_ext.py deleted file mode 100644 index b48e4227a03bf..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,503 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import exec_command -from numpy.distutils.system_info import combine_paths -from numpy.distutils.misc_util import filter_sources, has_f_sources, \ - has_cxx_sources, get_ext_source_files, \ - get_numpy_include_dirs, is_sequence, get_build_architecture, \ - msvc_version -from numpy.distutils.command.config_compiler import show_fortran_compilers - -try: - set -except NameError: - from sets import Set as set - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - - def finalize_options(self): - incl_dirs = self.include_dirs - old_build_ext.finalize_options(self) - if incl_dirs is not None: - self.include_dirs.extend(self.distribution.include_dirs or []) - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. 
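# (Running build_src first materializes any generated sources -- SWIG
# wrappers, f2py interface modules, expanded .src templates -- so that the
# per-extension source lists examined below are complete before any
# compiler is chosen.)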
- self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' \ - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj('build_clib') - else: - build_clib = self.distribution.get_command_obj('build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. - - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - self.compiler.show_customization() - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname, build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,'\ - ' overwriting build_info\n%s... \nwith\n%s...' \ - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname, build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. - all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries', []) - c_lib_dirs += binfo.get('library_dirs', []) - for m in binfo.get('macros', []): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname, {}).get('source_languages', []): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - # reset language attribute for choosing proper linker - if 'c++' in ext_languages: - ext_language = 'c++' - elif 'f90' in ext_languages: - ext_language = 'f90' - elif 'f77' in ext_languages: - ext_language = 'f77' - else: - ext_language = 'c' # default - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' 
% - (ext.name, l, ext_language)) - ext.language = ext_language - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution, need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' % - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler = self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' % - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - - def swig_sources(self, sources): - # Do nothing. Swig sources have beed handled in build_src command. - return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - if not (self.force or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - - - if self.compiler.compiler_type=='msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. 
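# (For context, sketched from MSVC documentation: /Zm scales the
# compiler's internal heap limit as a percentage of the default, so
# /Zm1000 allows roughly ten times the usual memory; without it, large
# template-heavy C++ sources can abort with error C1076.)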
- extra_args.append('/Zm1000') - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. - if ext.language=='f90': - fcompiler = self._f90_compiler - elif ext.language=='f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext, 'extra_f90_compile_args') else [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" \ - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " \ - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " \ - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language=='c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " \ - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends':ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - c_objects = [] - if c_sources: - log.info("compiling C sources") - c_objects = self.compiler.compile(c_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile(cxx_sources, - output_dir=output_dir, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args, - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp, os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f)==os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - 
library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. - if self.compiler.compiler_type=='msvc': - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs) - - elif ext.language in ['f77', 'f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language=='c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if sys.version[:3]>='2.3': - kws = {'target_lang':ext.language} - else: - kws = {} - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp,**kws) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib(objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: return - - for libname in c_libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. 
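# (The loop below makes the Fortran compiler's library directories usable
# from the MSVC linker: Cygwin-style paths such as '/usr/lib/gcc' are
# rewritten to native Windows form via `cygpath -w` before being appended;
# the sample path is hypothetical.)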
- f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - s, o = exec_command(['cygpath', '-w', dir], use_tee=False) - if not s: - dir = o - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files (self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs (self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_py.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_py.py deleted file mode 100644 index 54dcde4350839..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = list(build_src.py_modules_dict.keys ()) - old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package, []) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = [_m for _m in self.py_modules if is_string(_m)] - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_scripts.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_scripts.py deleted file mode 100644 index c8b25fc719b59..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,51 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. 
- -""" -from __future__ import division, absolute_import, print_function - -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_src.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_src.py deleted file mode 100644 index 7463a0e1745f9..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,806 +0,0 @@ -""" Build swig, f2py, pyrex sources. -""" -from __future__ import division, absolute_import, print_function - -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - -def have_pyrex(): - try: - import Pyrex.Compiler.Main - return True - except ImportError: - return False - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import fortran_ext_match, \ - appendpath, is_string, is_sequence, get_cmd -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - fs = open(source, 'r') - try: - ft = open(target, 'w') - try: - for l in fs: - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - finally: - ft.close() - finally: - fs.close() - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, 
"additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " + - "directory alongside your pure Python modules"), - ] - - boolean_options = ['force', 'inplace'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = build_ext.inplace - if self.swig_cpp is None: - self.swig_cpp = build_ext.swig_cpp - for c in ['swig', 'swig_opt']: - o = '--'+c.replace('_', '-') - v = getattr(build_ext, c, None) - if v: - if getattr(self, c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o, v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data, str): - new_data_files.append(data) - elif isinstance(data, tuple): - d, files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - 
else: - build_dir = os.path.join(self.build_src, d) - funcs = [f for f in files if hasattr(f, '__call__')] - files = [f for f in files if not hasattr(f, '__call__')] - for f in funcs: - if f.__code__.co_argcount==1: - s = f(build_dir) - else: - s = f() - if s is not None: - if isinstance(s, list): - files.extend(s) - elif isinstance(s, str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d, files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - import shutil - template, install_dir, subst_dict = info - template_dir = os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - gd = {} - if self.inplace == 1: - top_prefix = '.' 
- build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources', [])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - - sources = self.template_sources(sources, ext) - - sources = self.swig_sources(sources, ext) - - sources = self.f2py_sources(sources, ext) - - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = 
extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src]\ - +name.split('.')[:-1])) - self.mkpath(build_dir) - for func in func_sources: - source = func(extension, build_dir) - if not source: - continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." % (source,)) - new_sources.append(source) - - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources, ['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources, ['.h', '.hpp', '.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir, os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - fid = open(target_file, 'w') - fid.write(outstr) - fid.close() - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." 
% (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - if self.inplace or not have_pyrex(): - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - target_file = os.path.join(target_dir, ext_name + '.c') - depends = [source] + extension.depends - if self.force or newer_group(depends, target_file, 'newer'): - if have_pyrex(): - import Pyrex.Compiler.Main - log.info("pyrexc:> %s" % (target_file)) - self.mkpath(target_dir) - options = Pyrex.Compiler.Main.CompilationOptions( - defaults=Pyrex.Compiler.Main.default_options, - include_path=extension.include_dirs, - output_file=target_file) - pyrex_result = Pyrex.Compiler.Main.compile(source, - options=options) - if pyrex_result.num_errors != 0: - raise DistutilsError("%d errors while compiling %r with Pyrex" \ - % (pyrex_result.num_errors, source)) - elif os.path.isfile(target_file): - log.warn("Pyrex required for compiling %r but not available,"\ - " using old target %r"\ - % (source, target_file)) - else: - raise DistutilsError("Pyrex required for compiling %r"\ - " but notavailable" % (source,)) - return target_file - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir, name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' 
\ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name, build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options', [])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' - depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - import numpy.f2py - numpy.f2py.run_main(f2py_options - + ['--build-dir', target_dir, source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src]\ - +name.split('.')[:-1])) - target_file = os.path.join(target_dir, ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - import numpy.f2py - numpy.f2py.run_main(f2py_options + ['--lower', - '--build-dir', target_dir]+\ - ['-m', ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - target_c = os.path.join(self.build_src, 'fortranobject.c') - target_h = os.path.join(self.build_src, 'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if self.build_src not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." \ - % (self.build_src)) - extension.include_dirs.append(self.build_src) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d, 'src', 'fortranobject.c') - source_h = os.path.join(d, 'src', 'fortranobject.h') - if newer(source_c, target_c) or newer(source_h, target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c, target_c) - self.copy_file(source_h, target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: - filename = os.path.join(target_dir, ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. 
See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - else: - log.warn('assuming that %r has c++ swig target' % (source)) - if is_cpp: - target_ext = '.cpp' - target_file = os.path.join(target_dir, '%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' \ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' 
\ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search -_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search - -def get_swig_target(source): - f = open(source, 'r') - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - f.close() - return result - -def get_swig_modulename(source): - f = open(source, 'r') - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - f.close() - return name - -def _find_swig_target(target_dir, name): - for ext in ['.cpp', '.c']: - target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?'\ - '__user__[\w_]*)', re.I).match - -def get_f2py_modulename(source): - name = None - f = open(source) - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - f.close() - return name - -########################################## diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config.py deleted file mode 100644 index 1b688bdd67adb..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config.py +++ /dev/null @@ -1,476 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson -from __future__ import division, absolute_import, print_function - -import os, signal -import warnings -import sys - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import exec_command -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import check_inline, check_compiler_gcc4 -from numpy.distutils.compat import get_exception - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def try_run(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, lang="c"): - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ - "Usage of try_run is deprecated: please do not \n" \ - "use it anymore, and avoid configuration checks \n" \ - "involving running executable on the target machine.\n" \ - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning) - return old_config.try_run(self, body, headers, include_dirs, libraries, - library_dirs, lang) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc': - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an IOError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print an helpful - # message instead of Error: None. - if not self.compiler.initialized: - try: - self.compiler.initialize() - except IOError: - e = get_exception() - msg = """\ -Could not initialize compiler instance: do you have Visual Studio -installed? If you are trying to build with MinGW, please use "python setup.py -build -c mingw32" instead. If you have Visual Studio installed, check it is -correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2, -VS 2010 for >= 3.3). - -Original exception was: %s, and the Compiler class was %s -============================================================================""" \ - % (e, self.compiler.__class__.__name__) - print ("""\ -============================================================================""") - raise distutils.errors.DistutilsPlatformError(msg) - - # After MSVC is initialized, add an explicit /MANIFEST to linker - # flags. See issues gh-4245 and gh-4101 for details. Also - # relevant are issues 4431 and 16296 on the Python bug tracker. 
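-        # (From VS 2010 onwards the linker no longer writes the
-        # side-by-side .manifest file by default, while the
-        # msvc9compiler machinery still expects to find and embed one,
-        # hence the explicit flag; see the issues cited above. The
-        # append below is guarded so that repeated _check_compiler
-        # calls add the flag only once.)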
- from distutils import msvc9compiler - if msvc9compiler.get_build_version() >= 10: - for ldflags in [self.compiler.ldflags_shared, - self.compiler.ldflags_shared_debug]: - if '/MANIFEST' not in ldflags: - ldflags.append('/MANIFEST') - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self, mth, lang, args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77', 'f90']: - self.compiler = self.fcompiler - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError, CompileError): - msg = str(get_exception()) - self.compiler = save_compiler - raise CompileError - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - return self._wrap_method(old_config._compile, lang, - (body, headers, include_dirs, lang)) - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77', 'f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - s, o = exec_command(['cygpath', '-w', d], - use_tee=False) - if not s: d = o - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir, '%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link, lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", - [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = """ -int main() -{ -#ifndef %s - (void) %s; -#endif - ; - return 0; -}""" % (symbol, symbol) - - return self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = """ -int main() -{ -#if %s -#else -#error false or undefined macro -#endif - ; - return 0; -}""" % (symbol,) - - return 
self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = r""" -int main() { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; -} -""" % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = r""" -typedef %(type)s npy_check_sizeof_type; -int main () -{ - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; -} -""" - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = r""" -typedef %(type)s npy_check_sizeof_type; -int main () -{ - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; -} -""" - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = r""" -typedef %(type)s npy_check_sizeof_type; -int main () -{ - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; -} -""" - - # The principle is simple: we first find low and high bounds of size - # for the type, where low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - if type(decl) == str: - body.append(decl) - else: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. - # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. 
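-        # As a concrete sketch, check_func('sin', decl=True, call=True,
-        # call_args='0.0') assembles roughly this test program:
-        #
-        #     int sin (void);
-        #     #ifdef _MSC_VER
-        #     #pragma function(sin)
-        #     #endif
-        #     int main (void) {
-        #       sin(0.0);
-        #       return 0;
-        #     }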
- body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. - - Arguments - --------- - funcs : seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - libraru_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionay, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. - body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_compiler_gcc4(self): - """Return True if the C compiler is gcc >= 4.""" - return check_compiler_gcc4(self) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c", use_tee=None): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
- """ - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ - "Usage of get_output is deprecated: please do not \n" \ - "use it anymore, and avoid configuration checks \n" \ - "involving running executable on the target machine.\n" \ - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning) - from distutils.ccompiler import CompileError, LinkError - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - exitstatus, output = exec_command(exe, execute_in='.', - use_tee=use_tee) - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout(object): - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = self.sys_stdout diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config_compiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config_compiler.py deleted file mode 100644 index 5e638feccce04..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,125 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=[]): - # Using cache to prevent infinite recursion - if _cache: return - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. 
- """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=', None, "specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=', None, "specify F77 compiler flags"), - ('f90flags=', None, "specify F90 compiler flags"), - ('opt=', None, "specify optimization flags"), - ('arch=', None, "specify architecture specific optimization flags"), - ('debug', 'g', "compile with debugging information"), - ('noopt', None, "compile without optimization"), - ('noarch', None, "compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug', 'noopt', 'noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=', None, "specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. 
- return diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/develop.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/develop.py deleted file mode 100644 index 1410ab2a00fd4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/develop.py +++ /dev/null @@ -1,17 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. - -""" -from __future__ import division, absolute_import, print_function - -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/egg_info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/egg_info.py deleted file mode 100644 index b7104de5be409..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. - self.run_command("build_src") - _egg_info.run(self) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install.py deleted file mode 100644 index a1dd47755c64a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install.py +++ /dev/null @@ -1,82 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -from distutils.file_util import write_file - -old_install = old_install_mod.install - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - from distutils.command.install import install as distutils_install - - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return distutils_install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. 
If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. - # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__', '') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. - distutils_install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - f = open(self.record, 'r') - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - f.close() - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_clib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_clib.py deleted file mode 100644 index 662aa00bda9b4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,39 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - build_dir = build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_data.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_data.py deleted file mode 100644 index 996cf7e4017a6..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of 
install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_headers.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_headers.py deleted file mode 100644 index f3f58aa2876fd..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... - if header[0] == 'numpy.core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/sdist.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/sdist.py deleted file mode 100644 index bfaab1c8ffa18..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h, str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/compat.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/compat.py deleted file mode 100644 index 9a81cd392fc4a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/compat.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Small modules to cope with python 2 vs 3 incompatibilities inside -numpy.distutils - -""" -from __future__ import division, absolute_import, print_function - -import sys - -def get_exception(): - return sys.exc_info()[1] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/conv_template.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/conv_template.py deleted file mode 100644 index a67fe4e511446..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/conv_template.py +++ /dev/null @@ -1,337 +0,0 @@ -#!/usr/bin/python -""" -takes templated file .xxx.src and produces .xxx file where .xxx is -.i or .c or .h, using the following template rules - -/**begin repeat -- on a line by itself marks the start of a repeated code - segment -/**end repeat**/ -- on a line by itself marks it's end - -After the /**begin repeat and before the */, all the named templates are placed 
-these should all have the same number of replacements - -Repeat blocks can be nested, with each nested block labeled with its depth, -i.e. -/**begin repeat1 - *.... - */ -/**end repeat1**/ - -When using nested loops, you can optionally exlude particular -combinations of the variables using (inside the comment portion of the inner loop): - - :exclude: var1=value1, var2=value2, ... - -This will exlude the pattern where var1 is value1 and var2 is value2 when -the result is being generated. - - -In the main body each replace will use one entry from the list of named replacements - - Note that all #..# forms in a block must have the same number of - comma-separated entries. - -Example: - - An input file containing - - /**begin repeat - * #a = 1,2,3# - * #b = 1,2,3# - */ - - /**begin repeat1 - * #c = ted, jim# - */ - @a@, @b@, @c@ - /**end repeat1**/ - - /**end repeat**/ - - produces - - line 1 "template.c.src" - - /* - ********************************************************************* - ** This file was autogenerated from a template DO NOT EDIT!!** - ** Changes should be made to the original source (.src) file ** - ********************************************************************* - */ - - #line 9 - 1, 1, ted - - #line 9 - 1, 1, jim - - #line 9 - 2, 2, ted - - #line 9 - 2, 2, jim - - #line 9 - 3, 3, ted - - #line 9 - 3, 3, jim - -""" -from __future__ import division, absolute_import, print_function - - -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -from numpy.distutils.compat import get_exception - -# names for replacement that are already global. -global_names = {} - -# header placed at the front of head processed file -header =\ -""" -/* - ***************************************************************************** - ** This file was autogenerated from a template DO NOT EDIT!!!! ** - ** Changes should be made to the original source (.src) file ** - ***************************************************************************** - */ - -""" -# Parse string for repeat loops -def parse_structure(astr, level): - """ - The returned line number is from the beginning of the string, starting - at zero. Returns an empty list if no loops found. - - """ - if level == 0 : - loopbeg = "/**begin repeat" - loopend = "/**end repeat**/" - else : - loopbeg = "/**begin repeat%d" % level - loopend = "/**end repeat%d**/" % level - - ind = 0 - line = 0 - spanlist = [] - while True: - start = astr.find(loopbeg, ind) - if start == -1: - break - start2 = astr.find("*/", start) - start2 = astr.find("\n", start2) - fini1 = astr.find(loopend, start2) - fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) - ind = fini2 - spanlist.sort() - return spanlist - - -def paren_repl(obj): - torep = obj.group(1) - numrep = obj.group(2) - return ','.join([torep]*int(numrep)) - -parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)") -plainrep = re.compile(r"([^*]+)\*(\d+)") -def parse_values(astr): - # replaces all occurrences of '(a,b,c)*4' in astr - # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate - # empty values, i.e., ()*4 yields ',,,'. The result is - # split at ',' and a list of values returned. 
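-    # A sketch of the expansion performed here:
-    #
-    #     parse_values('(1,2)*2, x*3')  ->  ['1', '2', '1', '2', 'x', 'x', 'x']
-    #     parse_values('()*4')          ->  ['', '', '', '']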
- astr = parenrep.sub(paren_repl, astr) - # replaces occurences of xxx*3 with xxx, xxx, xxx - astr = ','.join([plainrep.sub(paren_repl, x.strip()) - for x in astr.split(',')]) - return astr.split(',') - - -stripast = re.compile(r"\n\s*\*?") -named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") -exclude_vars_re = re.compile(r"(\w*)=(\w*)") -exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : - """Find all named replacements in the header - - Returns a list of dictionaries, one for each loop iteration, - where each key is a name to be substituted and the corresponding - value is the replacement string. - - Also return a list of exclusions. The exclusions are dictionaries - of key value pairs. There can be more than one exclusion. - [{'var1':'value1', 'var2', 'value2'[,...]}, ...] - - """ - # Strip out '\n' and leading '*', if any, in continuation lines. - # This should not effect code previous to this change as - # continuation lines were not allowed. - loophead = stripast.sub("", loophead) - # parse out the names and lists of values - names = [] - reps = named_re.findall(loophead) - nsub = None - for rep in reps: - name = rep[0] - vals = parse_values(rep[1]) - size = len(vals) - if nsub is None : - nsub = size - elif nsub != size : - msg = "Mismatch in number of values:\n%s = %s" % (name, vals) - raise ValueError(msg) - names.append((name, vals)) - - - # Find any exclude variables - excludes = [] - - for obj in exclude_re.finditer(loophead): - span = obj.span() - # find next newline - endline = loophead.find('\n', span[1]) - substr = loophead[span[1]:endline] - ex_names = exclude_vars_re.findall(substr) - excludes.append(dict(ex_names)) - - # generate list of dictionaries, one for each template iteration - dlist = [] - if nsub is None : - raise ValueError("No substitution variables found") - for i in range(nsub) : - tmp = {} - for name, vals in names : - tmp[name] = vals[i] - dlist.append(tmp) - return dlist - -replace_re = re.compile(r"@([\w]+)@") -def parse_string(astr, env, level, line) : - lineno = "#line %d\n" % line - - # local function for string replacement, uses env - def replace(match): - name = match.group(1) - try : - val = env[name] - except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) - raise ValueError(msg) - return val - - code = [lineno] - struct = parse_structure(astr, level) - if struct : - # recurse over inner loops - oldend = 0 - newlevel = level + 1 - for sub in struct: - pref = astr[oldend:sub[0]] - head = astr[sub[0]:sub[1]] - text = astr[sub[1]:sub[2]] - oldend = sub[3] - newline = line + sub[4] - code.append(replace_re.sub(replace, pref)) - try : - envlist = parse_loop_header(head) - except ValueError: - e = get_exception() - msg = "line %d: %s" % (newline, e) - raise ValueError(msg) - for newenv in envlist : - newenv.update(env) - newcode = parse_string(text, newenv, newlevel, newline) - code.extend(newcode) - suff = astr[oldend:] - code.append(replace_re.sub(replace, suff)) - else : - # replace keys - code.append(replace_re.sub(replace, astr)) - code.append('\n') - return ''.join(code) - -def process_str(astr): - code = [header] - code.extend(parse_string(astr, global_names, 0, 1)) - return ''.join(code) - - -include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" - r"(?P[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - fid = open(source) - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = 
os.path.join(d, fn) - if os.path.isfile(fn): - print('Including file', fn) - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - fid.close() - return lines - -def process_file(source): - lines = resolve_includes(source) - sourcefile = os.path.normcase(source).replace("\\", "\\\\") - try: - code = process_str(''.join(lines)) - except ValueError: - e = get_exception() - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) - return '#line 1 "%s"\n%s' % (sourcefile, code) - - -def unique_key(adict): - # this obtains a unique key given a dictionary - # currently it works by appending together n of the letters of the - # current keys and increasing n until a unique key is found - # -- not particularly quick - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = "".join([x[:n] for x in allkeys]) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -if __name__ == "__main__": - - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - try: - writestr = process_str(allstr) - except ValueError: - e = get_exception() - raise ValueError("In %s loop at %s" % (file, e)) - outfile.write(writestr) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/core.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/core.py deleted file mode 100644 index 3f0fd464a0d39..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/core.py +++ /dev/null @@ -1,210 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from distutils.core import * - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension -from numpy.distutils.numpy_distribution import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, \ - install_clib -from numpy.distutils.misc_util import get_data_files, is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. 
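-    # (build_src may register callables that generate source files
-    # lazily; the stock setuptools develop/egg_info commands expect
-    # real filenames, so these subclasses force build_src to run first
-    # and materialize them.)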
- from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k, v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=[]): - """ Return True if command line does not contain any - help or display requests. - """ - if _cache: - return _cache[0] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. - # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def setup(**attr): - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. 
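-        # 'configuration' is typically a function of this shape (an
-        # illustrative sketch; 'mypkg' and 'spammodule.c' are
-        # placeholders):
-        #
-        #     def configuration(parent_package='', top_path=None):
-        #         from numpy.distutils.misc_util import Configuration
-        #         config = Configuration('mypkg', parent_package, top_path)
-        #         config.add_extension('spam', sources=['spammodule.c'])
-        #         return config
-        #
-        # The dance below parses the command line in a recursive
-        # setup() call with _setup_stop_after set, so that this
-        # possibly expensive callable only runs when no help/display
-        # option was requested.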
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config, 'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules', []): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],)) - break - else: - if item==libitem[0]: - warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],)) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],)) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,)) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,)) - break - libraries.append((lib_name, build_info)) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/cpuinfo.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/cpuinfo.py deleted file mode 100644 index 020f2c02fee63..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,693 +0,0 @@ -#!/usr/bin/env python -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['cpu'] - -import sys, re, types -import os - -if sys.version_info[0] >= 3: - from subprocess import getstatusoutput -else: - from commands import getstatusoutput - -import warnings -import platform - -from numpy.distutils.compat import get_exception - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except EnvironmentError: - e = get_exception() - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, output - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase(object): - """Holds CPU information and provides methods for requiring - the availability of various CPU features. - """ - - def _try_call(self, func): - try: - return func() - except: - pass - - def __getattr__(self, name): - if not name.startswith('_'): - if hasattr(self, '_'+name): - attr = getattr(self, '_'+name) - if isinstance(attr, types.MethodType): - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile('(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except EnvironmentError: - e = get_exception() - warnings.warn(str(e), UserWarning) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and 
self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return self.is_Intel() \ - and (self.info[0]['cpu family'] == '6' \ - or self.info[0]['cpu family'] == '15' ) \ - and (self.has_sse3() and not self.has_ssse3())\ - and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None - - def _is_Core2(self): - return self.is_64bit() and self.is_Intel() and \ - re.match(r'.*?Core\(TM\)2\b', \ - self.info[0]['model name']) is not None - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'], re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is 
not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None - -class IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0, 1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - return self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self, n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except: pass - def __machine(self, n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self, n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def 
_is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def _is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW', self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1', self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250', self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2', self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30', self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4', self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5', self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60', self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80', self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra', self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def _is_cpusparcv8(self): - return self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it!
- if sys.version_info[0] >= 3: - import winreg - else: - import _winreg as winreg - - prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\ - "\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE) - chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while True: - try: - proc=winreg.EnumKey(chnd, pnum) - except winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=winreg.OpenKey(chnd, proc) - pidx=0 - while True: - try: - name, value, vtpe=winreg.EnumValue(phnd, pidx) - except winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except: - print(sys.exc_info()[1], '(ignoring)') - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0, 1, 2, 3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6, 7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.)
would - # require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3, 5, 6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7, 8, 9, 10, 11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6, 15]) - elif self.is_AMD(): - return self.info[0]['Family'] in [5, 6, 15] - else: - return False - - def _has_sse(self): - if self.is_Intel(): - return (self.info[0]['Family']==6 and \ - self.info[0]['Model'] in [7, 8, 9, 10, 11]) \ - or self.info[0]['Family']==15 - elif self.is_AMD(): - return (self.info[0]['Family']==6 and \ - self.info[0]['Model'] in [6, 7, 8, 10]) \ - or self.info[0]['Family']==15 - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6, 15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
-else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print 'CPU information:', -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print '%s=%s' %(name[1:],r), -# else: -# print name[1:], -# print diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/environment.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/environment.py deleted file mode 100644 index 3798e16f5da7b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/environment.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig(object): - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError(name) - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert = conf_desc - var = self._hook_handler(name, hook) - if envvar is not None: - var = os.environ.get(envvar, var) - if confvar is not None and self._conf: - var = self._conf.get(confvar, (None, var))[1] - if convert is not None: - var = convert(var) - return var - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/exec_command.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/exec_command.py deleted file mode 100644 index baf81f337aa2e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/exec_command.py +++ /dev/null @@ -1,618 +0,0 @@ -#!/usr/bin/env python -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. 
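The EnvironmentConfig class removed above (environment.py) resolves each configured name with a fixed precedence: the hook value first, then the environment variable, then the distutils config entry, with the optional convert function applied last. A small sketch of that precedence; the ('exe.compiler_f77', 'F77', 'f77exec', None) descriptor mirrors the one FCompiler registers:

    import os
    from numpy.distutils.environment import EnvironmentConfig

    ec = EnvironmentConfig(
        distutils_section='config_fc',
        compiler_f77=('exe.compiler_f77', 'F77', 'f77exec', None),
    )
    # clone() installs the hook handler; returning None here means the
    # hook contributes nothing, so the environment variable wins.
    ec = ec.clone(lambda name, hook: None)
    os.environ['F77'] = 'gfortran'
    print(ec.get('compiler_f77'))   # -> 'gfortran'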
- -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Succesfully tested on: - os.name | sys.platform | comments - --------+--------------+---------- - posix | linux2 | Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 - posix | linux2 | Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 - posix | sunos5 | SunOS 5.9, Python 2.2, 2.3.2 - posix | darwin | Darwin 7.2.0, Python 2.3 - nt | win32 | Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 - nt | win32 | Windows 98, Python 2.1.1. Idle 0.8 - nt | win32 | Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. redefining environment variables may - not work. FIXED: don't use cygwin echo! - Comment: also `cmd /c echo` will not work - but redefining environment variables do work. - posix | cygwin | Cygwin 98-4.10, Python 2.3.3(cygming special) - nt | win32 | Windows XP, Python 2.3.3 - -Known bugs: -- Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['exec_command', 'find_executable'] - -import os -import sys -import shlex - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log -from numpy.distutils.compat import get_exception - -from numpy.compat import open_latin1 - -def temp_file_name(): - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt', 'dos']: - fdir, fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW', 'PYTHON') - pythonexe = os.path.join(fdir, fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def splitcmdline(line): - import warnings - warnings.warn('splitcmdline is deprecated; use shlex.split', - DeprecationWarning) - return shlex.split(line) - -def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. - - Symbolic links are not followed. 
- """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH', os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt', 'dos', 'os2']: - fn, ext = os.path.splitext(exe) - extra_suffixes = ['.exe', '.com', '.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.info('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {} - for name in names: - env[name] = os.environ.get(name) - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name, value in env.items(): - os.environ[name] = value or '' - -def _supports_fileno(stream): - """ - Returns True if 'stream' supports the file descriptor and allows fileno(). - """ - if hasattr(stream, 'fileno'): - try: - r = stream.fileno() - return True - except IOError: - return False - else: - return False - -def exec_command( command, - execute_in='', use_shell=None, use_tee = None, - _with_python = 1, - **env ): - """ Return (status,output) of executed command. - - command is a concatenated string of executable and arguments. - The output contains both stdout and stderr messages. - The following special keyword arguments can be used: - use_shell - execute `sh -c command` - use_tee - pipe the output of command through tee - execute_in - before run command `cd execute_in` and after `cd -`. - - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. - """ - log.debug('exec_command(%r,%s)' % (command,\ - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( list(env.keys()) ) - _update_environment( **env ) - - try: - # _exec_command is robust but slow, it relies on - # usable sys.std*.fileno() descriptors. If they - # are bad (like in win32 Idle, PyCrust environments) - # then _exec_command_python (even slower) - # will be used as a last resort. - # - # _exec_command_posix uses os.system and is faster - # but not on all platforms os.system will return - # a correct status. 
- if (_with_python and _supports_fileno(sys.stdout) and - sys.stdout.fileno() == -1): - st = _exec_command_python(command, - exec_command_dir = exec_dir, - **env) - elif os.name=='posix': - st = _exec_command_posix(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - else: - st = _exec_command(command, use_shell=use_shell, - use_tee=use_tee,**env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - -def _exec_command_posix( command, - use_shell = None, - use_tee = None, - **env ): - log.debug('_exec_command_posix(...)') - - if is_sequence(command): - command_str = ' '.join(list(command)) - else: - command_str = command - - tmpfile = temp_file_name() - stsfile = None - if use_tee: - stsfile = temp_file_name() - filter = '' - if use_tee == 2: - filter = r'| tr -cd "\n" | tr "\n" "."; echo' - command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\ - % (command_str, stsfile, tmpfile, filter) - else: - stsfile = temp_file_name() - command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\ - % (command_str, stsfile, tmpfile) - #command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile) - - log.debug('Running os.system(%r)' % (command_posix)) - status = os.system(command_posix) - - if use_tee: - if status: - # if command_tee fails then fall back to robust exec_command - log.warn('_exec_command_posix failed (status=%s)' % status) - return _exec_command(command, use_shell=use_shell, **env) - - if stsfile is not None: - f = open_latin1(stsfile, 'r') - status_text = f.read() - status = int(status_text) - f.close() - os.remove(stsfile) - - f = open_latin1(tmpfile, 'r') - text = f.read() - f.close() - os.remove(tmpfile) - - if text[-1:]=='\n': - text = text[:-1] - - return status, text - - -def _exec_command_python(command, - exec_command_dir='', **env): - log.debug('_exec_command_python(...)') - - python_exe = get_pythonexe() - cmdfile = temp_file_name() - stsfile = temp_file_name() - outfile = temp_file_name() - - f = open(cmdfile, 'w') - f.write('import os\n') - f.write('import sys\n') - f.write('sys.path.insert(0,%r)\n' % (exec_command_dir)) - f.write('from exec_command import exec_command\n') - f.write('del sys.path[0]\n') - f.write('cmd = %r\n' % command) - f.write('os.environ = %r\n' % (os.environ)) - f.write('s,o = exec_command(cmd, _with_python=0, **%r)\n' % (env)) - f.write('f=open(%r,"w")\nf.write(str(s))\nf.close()\n' % (stsfile)) - f.write('f=open(%r,"w")\nf.write(o)\nf.close()\n' % (outfile)) - f.close() - - cmd = '%s %s' % (python_exe, cmdfile) - status = os.system(cmd) - if status: - raise RuntimeError("%r failed" % (cmd,)) - os.remove(cmdfile) - - f = open_latin1(stsfile, 'r') - status = int(f.read()) - f.close() - os.remove(stsfile) - - f = open_latin1(outfile, 'r') - text = f.read() - f.close() - os.remove(outfile) - - return status, text - -def quote_arg(arg): - if arg[0]!='"' and ' ' in arg: - return '"%s"' % arg - return arg - -def _exec_command( command, use_shell=None, use_tee = None, **env ): - log.debug('_exec_command(...)') - - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - using_command = 0 - if use_shell: - # We use shell (unless use_shell==0) so that wildcards can be - # used. 
- sh = os.environ.get('SHELL', '/bin/sh') - if is_sequence(command): - argv = [sh, '-c', ' '.join(list(command))] - else: - argv = [sh, '-c', command] - else: - # On NT, DOS we avoid using command.com as it's exit status is - # not related to the exit status of a command. - if is_sequence(command): - argv = command[:] - else: - argv = shlex.split(command) - - if hasattr(os, 'spawnvpe'): - spawn_command = os.spawnvpe - else: - spawn_command = os.spawnve - argv[0] = find_executable(argv[0]) or argv[0] - if not os.path.isfile(argv[0]): - log.warn('Executable %s does not exist' % (argv[0])) - if os.name in ['nt', 'dos']: - # argv[0] might be internal command - argv = [os.environ['COMSPEC'], '/C'] + argv - using_command = 1 - - _so_has_fileno = _supports_fileno(sys.stdout) - _se_has_fileno = _supports_fileno(sys.stderr) - so_flush = sys.stdout.flush - se_flush = sys.stderr.flush - if _so_has_fileno: - so_fileno = sys.stdout.fileno() - so_dup = os.dup(so_fileno) - if _se_has_fileno: - se_fileno = sys.stderr.fileno() - se_dup = os.dup(se_fileno) - - outfile = temp_file_name() - fout = open(outfile, 'w') - if using_command: - errfile = temp_file_name() - ferr = open(errfile, 'w') - - log.debug('Running %s(%s,%r,%r,os.environ)' \ - % (spawn_command.__name__, os.P_WAIT, argv[0], argv)) - - argv0 = argv[0] - if not using_command: - argv[0] = quote_arg(argv0) - - so_flush() - se_flush() - if _so_has_fileno: - os.dup2(fout.fileno(), so_fileno) - - if _se_has_fileno: - if using_command: - #XXX: disabled for now as it does not work from cmd under win32. - # Tests fail on msys - os.dup2(ferr.fileno(), se_fileno) - else: - os.dup2(fout.fileno(), se_fileno) - try: - status = spawn_command(os.P_WAIT, argv0, argv, os.environ) - except OSError: - errmess = str(get_exception()) - status = 999 - sys.stderr.write('%s: %s'%(errmess, argv[0])) - - so_flush() - se_flush() - if _so_has_fileno: - os.dup2(so_dup, so_fileno) - if _se_has_fileno: - os.dup2(se_dup, se_fileno) - - fout.close() - fout = open_latin1(outfile, 'r') - text = fout.read() - fout.close() - os.remove(outfile) - - if using_command: - ferr.close() - ferr = open_latin1(errfile, 'r') - errmess = ferr.read() - ferr.close() - os.remove(errfile) - if errmess and not status: - # Not sure how to handle the case where errmess - # contains only warning messages and that should - # not be treated as errors. 
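The capture trick used by _exec_command, shown in isolation as a sketch (not the module's exact code path): duplicate the stdout descriptor, point fd 1 at a temporary file while the child runs, then restore it.

    import os
    import sys
    import tempfile

    fd = sys.stdout.fileno()
    saved = os.dup(fd)                  # keep the original descriptor alive
    with tempfile.TemporaryFile(mode='w+') as tmp:
        sys.stdout.flush()
        os.dup2(tmp.fileno(), fd)       # child processes now inherit the file
        os.system('echo captured')
        sys.stdout.flush()
        os.dup2(saved, fd)              # restore the real stdout
        os.close(saved)
        tmp.seek(0)
        print('child wrote: %r' % tmp.read())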
- #status = 998 - if text: - text = text + '\n' - #text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess) - text = text + errmess - print (errmess) - if text[-1:]=='\n': - text = text[:-1] - if status is None: - status = 0 - - if use_tee: - print (text) - - return status, text - - -def test_nt(**kws): - pythonexe = get_pythonexe() - echo = find_executable('echo') - using_cygwin_echo = echo != 'echo' - if using_cygwin_echo: - log.warn('Using cygwin echo in win32 environment is not supported') - - s, o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'AAA\',\'\')"') - assert s==0 and o=='', (s, o) - - s, o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'AAA\')"', - AAA='Tere') - assert s==0 and o=='Tere', (s, o) - - os.environ['BBB'] = 'Hi' - s, o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"') - assert s==0 and o=='Hi', (s, o) - - s, o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"', - BBB='Hey') - assert s==0 and o=='Hey', (s, o) - - s, o=exec_command(pythonexe\ - +' -c "import os;print os.environ.get(\'BBB\',\'\')"') - assert s==0 and o=='Hi', (s, o) - elif 0: - s, o=exec_command('echo Hello') - assert s==0 and o=='Hello', (s, o) - - s, o=exec_command('echo a%AAA%') - assert s==0 and o=='a', (s, o) - - s, o=exec_command('echo a%AAA%', AAA='Tere') - assert s==0 and o=='aTere', (s, o) - - os.environ['BBB'] = 'Hi' - s, o=exec_command('echo a%BBB%') - assert s==0 and o=='aHi', (s, o) - - s, o=exec_command('echo a%BBB%', BBB='Hey') - assert s==0 and o=='aHey', (s, o) - s, o=exec_command('echo a%BBB%') - assert s==0 and o=='aHi', (s, o) - - s, o=exec_command('this_is_not_a_command') - assert s and o!='', (s, o) - - s, o=exec_command('type not_existing_file') - assert s and o!='', (s, o) - - s, o=exec_command('echo path=%path%') - assert s==0 and o!='', (s, o) - - s, o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \ - % pythonexe) - assert s==0 and o=='win32', (s, o) - - s, o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe) - assert s==1 and o, (s, o) - - s, o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\ - % pythonexe) - assert s==0 and o=='012', (s, o) - - s, o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe) - assert s==15 and o=='', (s, o) - - s, o=exec_command('%s -c "print \'Heipa\'"' % pythonexe) - assert s==0 and o=='Heipa', (s, o) - - print ('ok') - -def test_posix(**kws): - s, o=exec_command("echo Hello",**kws) - assert s==0 and o=='Hello', (s, o) - - s, o=exec_command('echo $AAA',**kws) - assert s==0 and o=='', (s, o) - - s, o=exec_command('echo "$AAA"',AAA='Tere',**kws) - assert s==0 and o=='Tere', (s, o) - - - s, o=exec_command('echo "$AAA"',**kws) - assert s==0 and o=='', (s, o) - - os.environ['BBB'] = 'Hi' - s, o=exec_command('echo "$BBB"',**kws) - assert s==0 and o=='Hi', (s, o) - - s, o=exec_command('echo "$BBB"',BBB='Hey',**kws) - assert s==0 and o=='Hey', (s, o) - - s, o=exec_command('echo "$BBB"',**kws) - assert s==0 and o=='Hi', (s, o) - - - s, o=exec_command('this_is_not_a_command',**kws) - assert s!=0 and o!='', (s, o) - - s, o=exec_command('echo path=$PATH',**kws) - assert s==0 and o!='', (s, o) - - s, o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws) - assert s==0 and o=='posix', (s, o) - - s, o=exec_command('python -c "raise \'Ignore me.\'"',**kws) - assert s==1 and o, (s, o) - - s, o=exec_command('python -c "import 
sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws) - assert s==0 and o=='012', (s, o) - - s, o=exec_command('python -c "import sys;sys.exit(15)"',**kws) - assert s==15 and o=='', (s, o) - - s, o=exec_command('python -c "print \'Heipa\'"',**kws) - assert s==0 and o=='Heipa', (s, o) - - print ('ok') - -def test_execute_in(**kws): - pythonexe = get_pythonexe() - tmpfile = temp_file_name() - fn = os.path.basename(tmpfile) - tmpdir = os.path.dirname(tmpfile) - f = open(tmpfile, 'w') - f.write('Hello') - f.close() - - s, o = exec_command('%s -c "print \'Ignore the following IOError:\','\ - 'open(%r,\'r\')"' % (pythonexe, fn),**kws) - assert s and o!='', (s, o) - s, o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe, fn), - execute_in = tmpdir,**kws) - assert s==0 and o=='Hello', (s, o) - os.remove(tmpfile) - print ('ok') - -def test_svn(**kws): - s, o = exec_command(['svn', 'status'],**kws) - assert s, (s, o) - print ('svn ok') - -def test_cl(**kws): - if os.name=='nt': - s, o = exec_command(['cl', '/V'],**kws) - assert s, (s, o) - print ('cl ok') - -if os.name=='posix': - test = test_posix -elif os.name in ['nt', 'dos']: - test = test_nt -else: - raise NotImplementedError('exec_command tests for ', os.name) - -############################################################ - -if __name__ == "__main__": - - test(use_tee=0) - test(use_tee=1) - test_execute_in(use_tee=0) - test_execute_in(use_tee=1) - test_svn(use_tee=1) - test_cl(use_tee=1) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/extension.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/extension.py deleted file mode 100644 index 344c66da02875..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/extension.py +++ /dev/null @@ -1,90 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import re -from distutils.extension import Extension as old_Extension - -if sys.version_info[0] >= 3: - basestring = str - - -cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match - -class Extension(old_Extension): - def __init__ (self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None, - ): - old_Extension.__init__(self, name, [], - include_dirs, - define_macros, - undef_macros, - library_dirs, - libraries, - runtime_library_dirs, - extra_objects, - extra_compile_args, - extra_link_args, - export_symbols) - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
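The two classifier regexes defined near the top of extension.py drive has_cxx_sources()/has_f2py_sources() below; their behaviour in isolation:

    import re

    cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
    fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match

    print(bool(cxx_ext_re('wrap.cpp')))           # True  -> C++ source
    print(bool(fortran_pyf_ext_re('iface.pyf')))  # True  -> f2py source
    print(bool(fortran_pyf_ext_re('impl.c')))     # False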
- if isinstance(self.swig_opts, basestring): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - for source in self.sources: - if cxx_ext_re(str(source)): - return True - return False - - def has_f2py_sources(self): - for source in self.sources: - if fortran_pyf_ext_re(source): - return True - return False - -# class Extension diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 0b1b1ee6d9a85..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,989 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -import types -try: - set -except NameError: - from sets import Set as set - -from numpy.compat import open_latin1 - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.environment import EnvironmentConfig -from numpy.distutils.exec_command import find_executable -from numpy.distutils.compat import get_exception - -__metaclass__ = type - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. 
- - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. - All methods, except update_executables() and find_executables(), - may call the get_version() method. - - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration descripition is - # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropiate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool), - noarch = (None, None, 'noarch', str2bool), - debug = (None, None, 'debug', str2bool), - verbose = (None, None, 'verbose', str2bool), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None), - version_cmd = ('exe.version_cmd', None, None, None), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None), - archiver = (None, 'AR', 'ar', None), - ranlib = (None, 'RANLIB', 'ranlib', None), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist), - fix = ('flags.fix', None, None, flaglist), - opt = ('flags.opt', 'FOPT', 'opt', flaglist), - opt_f77 = ('flags.opt_f77', None, None, flaglist), - opt_f90 = ('flags.opt_f90', None, None, flaglist), - arch = ('flags.arch', 'FARCH', 'arch', flaglist), - arch_f77 = ('flags.arch_f77', None, None, flaglist), - arch_f90 = ('flags.arch_f90', None, None, flaglist), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist), - debug_f77 = ('flags.debug_f77', None, None, flaglist), - debug_f90 = ('flags.debug_f90', None, None, flaglist), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist), - ) - - language_map = {'.f': 'f77', - '.for': 'f77', - '.F': 'f77', # XXX: needs preprocessor - '.ftn': 'f77', - '.f77': 'f77', - '.f90': 'f90', - '.F90': 'f90', # XXX: needs preprocessor - '.f95': 'f90', - } - language_order = ['f90', 'f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] -
executables = { - 'version_cmd': ["f77", "-v"], - 'compiler_f77': ["f77"], - 'compiler_f90': ["f90"], - 'compiler_fix': ["f90", "-fixed"], - 'linker_so': ["f90", "-shared"], - 'linker_exe': ["f90"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. - self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. 
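set_executable()/set_command() just below accept either a command list or a single string; strings are normalized with distutils' split_quoted, so quoted arguments survive as single items:

    from distutils.util import split_quoted

    print(split_quoted('gfortran -O3 "-I/opt/my includes"'))
    # -> ['gfortran', '-O3', '-I/opt/my includes']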
- def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. - - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropiate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overriden. - """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '<F77>': - e = f77 - elif e == '<F90>': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(elf): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. - - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain <F77> or <F90> as - the command, which will be replaced by the found F77 or F90 - compiler.
- """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. """ - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). - """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77flags = self.flag_vars.f77 - if f90: - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. 
- fix = self.command_vars.compiler_fix - if fix: - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags_<tag>_<lang> for extra flags - # only add them if the method is different from get_flags_<tag> - def get_flags(tag, flags): - # note that self.flag_vars.<tag> calls self.get_flags_<tag>() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=[f77]+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=[fix]+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - for key in list(self.executables.keys()) + \ - ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch']: - if hasattr(self, key): - v = getattr(self, key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print(l) - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if is_f_file(src) and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - extra_compile_args = self.extra_f77_compile_args or [] - elif is_free_format(src): - flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(), obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' 
'.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type, []) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - self.spawn(command, display=display) - except DistutilsExecError: - msg = str(get_exception()) - raise CompileError(msg) - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(), module_build_dir]) - else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ', self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ', self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(), output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = linker + ld_args - try: - self.spawn(command) - except DistutilsExecError: - msg = str(get_exception()) - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', 
('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem')), - ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', - 'intele', 'intelem', 'gnu', 'g95', 'pathf95')), - ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), - ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), - ('irix.*', ('mips', 'gnu', 'gnu95',)), - ('aix.*', ('ibm', 'gnu', 'gnu95',)), - # os.name mappings - ('posix', ('gnu', 'gnu95',)), - ('nt', ('gnu', 'gnu95',)), - ('mac', ('gnu95', 'gnu', 'pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' + module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' 
% (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -# Flag to avoid rechecking for Fortran compiler every time -failed_fcompilers = set() - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. - """ - global failed_fcompilers - fcompiler_key = (plat, compiler) - if fcompiler_key in failed_fcompilers: - return None - - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - failed_fcompilers.add(fcompiler_key) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). 
- """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound): - e = get_exception() - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - f = open_latin1(file, 'r') - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - f.close() - return result - -def has_f90_header(src): - f = open_latin1(src, 'r') - line = f.readline() - f.close() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)', re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS()=` - Return a dictionary {:}. 
- """ - flags = {} - f = open_latin1(src, 'r') - i = 0 - for line in f: - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - f.close() - return flags - -# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/absoft.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index bde0529bea082..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,160 +0,0 @@ - -# http://www.absoft.com/literature/osxuserguide.pdf -# http://www.absoft.com/documentation.html - -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ - r' (?P[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail. 
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K", "shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link', '/PATH:"%s"' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math', 'fio', 'f77math', 'U77']) - else: - opt.extend(['fio', 'f90math', 'fmath', 'U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22', '-N90', '-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f', '-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - opt.extend(["-f", "fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='absoft') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/compaq.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 5162b168c1609..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,128 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ -from __future__ import division, absolute_import, print_function - -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.compat import get_exception -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran 
Compiler' - version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['<F90>', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : ['<F90>'], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore', '-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g', '-check bounds'] - def get_flags_opt(self): - return ['-O4', '-align dcommons', '-assume bigarrays', - '-assume nozsize', '-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared', '-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'\ - ' Version (?P<version>[^\s]*).*' - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! - - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError: - msg = get_exception() - if '_MSVCCompiler__root' in str(msg): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg)) - else: - raise - except IOError: - e = get_exception() - if not "vcvarsall.bat" in str(e): - print("Unexpected IOError in", __file__) - raise e - except ValueError: - e = get_exception() - if not "path']" in str(e): - print("Unexpected ValueError in", __file__) - raise e - - executables = { - 'version_cmd' : ['<F90>', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : ['<F90>'], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase', '/assume:underscore'] - def get_flags_opt(self): - return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='compaq') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/g95.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index 26f73b530e84c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,45 +0,0 @@ -# http://g95.sourceforge.net/ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?)
\(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - compiler = G95FCompiler() - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/gnu.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 368506470ad43..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,390 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import re -import os -import sys -import warnings -import platform -import tempfile -from subprocess import Popen, PIPE, STDOUT - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import exec_command -from numpy.distutils.misc_util import msvc_runtime_library -from numpy.distutils.compat import get_exception - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - -if is_win64(): - #_EXTRAFLAGS = ["-fno-leading-underscore"] - _EXTRAFLAGS = [] -else: - _EXTRAFLAGS = [] - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77',) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - m = re.search(r'GNU Fortran', version_string) - if not m: - return None - m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith('0') or v.startswith('2') or v.startswith('3'): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - # 'g77 --version' results - # SunOS: GNU Fortran (GCC 3.2) 3.2 20020814 (release) - # Debian: GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian) - # GNU Fortran (GCC) 3.3.3 (Debian 20040401) - # GNU Fortran 0.5.25 20010319 (prerelease) - # Redhat: GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2 20030222 (Red Hat Linux 3.2.2-5) - # GNU Fortran (GCC) 3.4.2 (mingw-special) - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "--version"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 
'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - - suggested_f90_compiler = 'gnu95' - - #def get_linker_so(self): - # # win32 linking should be handled by standard linker - # # Darwin g77 cannot be used as a linker. - # #if re.match(r'(darwin)', sys.platform): - # # return - # return FCompiler.get_linker_so(self) - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform=='darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let disutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from the Python Makefile and then we - # fall back to setting it to 10.3 to maximize the set of - # versions we can work with. This is a reasonable default - # even when using the official Python dist and those derived - # from it. - import distutils.sysconfig as sc - g = {} - filename = sc.get_makefile_filename() - sc.parse_makefile(filename, g) - target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') - os.environ['MACOSX_DEPLOYMENT_TARGET'] = target - if target == '10.3': - s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' - warnings.warn(s) - - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." 
- opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - status, output = exec_command(self.compiler_f77 + - ['-print-libgcc-file-name'], - use_tee=0) - if not status: - return os.path.dirname(output) - return None - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - if not os.path.exists(os.path.join(d, "lib%s.a" % self.g2c)): - d2 = os.path.abspath(os.path.join(d, - '../../../../lib')) - if os.path.exists(os.path.join(d2, "lib%s.a" % self.g2c)): - opt.append(d2) - opt.append(d) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d, f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type=='msvc': - # the following code is not needed (read: breaks) when using MinGW - # in case want to link F77 compiled code with MSVC - opt.append('gcc') - runtime_lib = msvc_runtime_library() - if runtime_lib: - opt.append(runtime_lib) - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v<='3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. - opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - from distutils import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - return '-Wl,-rpath="%s"' % dir - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran',) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if v>='4.': - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe']: - self.executables[key].append('-mno-cygwin') - return v - - # 'gfortran --version' results: - # XXX is the below right? 
- # Debian: GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3)) - # GNU Fortran 95 (GCC) 4.1.2 20061115 (prerelease) (Debian 4.1.1-21) - # OS X: GNU Fortran 95 (GCC) 4.1.0 - # GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental) - # GNU Fortran (GCC) 4.3.0 20070316 (experimental) - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["<F90>", "--version"], - 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_f90' : [None, "-Wall", "-g", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", - "-fno-second-underscore"] + _EXTRAFLAGS, - 'linker_so' : ["<F90>", "-Wall", "-g"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets. - c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir) - mingwdir = os.path.normpath(os.path.join(root, target, "lib")) - full = os.path.join(mingwdir, "libmingwex.a") - if os.path.exists(full): - opt.append(mingwdir) - return opt - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i+1, "mingwex") - opt.insert(i+1, "mingw32") - # XXX: fix this mess, does not work for mingw - if is_win64(): - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - raise NotImplementedError("Only MS compiler supported with gfortran on win64") - return opt - - def get_target(self): - status, output = exec_command(self.compiler_f77 + - ['-v'], - use_tee=0) - if not status: - m = TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def get_flags_opt(self): - if is_win64(): - return ['-O0'] - else: - return GnuFCompiler.get_flags_opt(self) - -def _can_target(cmd, arch): - """Return true if the command supports the -arch flag for the given - architecture.""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: -
newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - return False - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - - compiler = GnuFCompiler() - compiler.customize() - print(compiler.get_version()) - - try: - compiler = Gnu95FCompiler() - compiler.customize() - print(compiler.get_version()) - except Exception: - msg = get_exception() - print(msg) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/hpux.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 9004961e1de73..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - version_pattern = r'HP F90 (?P<version>[^\s*,]*)' - - executables = { - 'version_cmd' : ["f90", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["ld", "-b"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+Z'] - def get_flags(self): - return self.pic_flags + ['+ppu', '+DD64'] - def get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_library_dirs(self): - opt = ['/usr/lib/hpux64'] - return opt - def get_version(self, force=0, ok_status=[256, 0, 1]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work.
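# Editorial gloss, not in the deleted file (an inference): the 256 in
# ok_status above looks like a raw POSIX wait status, i.e. exit code 1
# shifted left eight bits, so the list in effect accepts exit codes 0 and 1
# in either encoding:
#
#     >>> import os
#     >>> os.WEXITSTATUS(256)
#     1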
- return FCompiler.get_version(self, force, ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='hpux') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/ibm.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/ibm.py deleted file mode 100644 index cc65df9721f9e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/ibm.py +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import re -import sys - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import exec_command, find_executable -from numpy.distutils.misc_util import make_temp_file -from distutils import log - -compilers = ['IBMFCompiler'] - -class IBMFCompiler(FCompiler): - compiler_type = 'ibm' - description = 'IBM XL Fortran Compiler' - version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)' - #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 - - executables = { - 'version_cmd' : ["<F90>", "-qversion"], - 'compiler_f77' : ["xlf"], - 'compiler_fix' : ["xlf90", "-qfixed"], - 'compiler_f90' : ["xlf90"], - 'linker_so' : ["xlf95"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_version(self,*args,**kwds): - version = FCompiler.get_version(self,*args,**kwds) - - if version is None and sys.platform.startswith('aix'): - # use lslpp to find out xlf version - lslpp = find_executable('lslpp') - xlf = find_executable('xlf') - if os.path.exists(xlf) and os.path.exists(lslpp): - s, o = exec_command(lslpp + ' -Lc xlfcmp') - m = re.search('xlfcmp:(?P<version>\d+([.]\d+)+)', o) - if m: version = m.group('version') - - xlf_dir = '/etc/opt/ibmcmp/xlf' - if version is None and os.path.isdir(xlf_dir): - # linux: - # If the output of xlf does not contain version info - # (that's the case with xlf 8.1, for instance) then - # let's try another method: - l = sorted(os.listdir(xlf_dir)) - l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] - if l: - from distutils.version import LooseVersion - self.version = version = LooseVersion(l[0]) - return version - - def get_flags(self): - return ['-qextname'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0, 40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: - xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - fi = open(xlf_cfg, 'r') - crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match - for line in fi: - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fi.close() - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O3'] - -if __name__ == '__main__': - log.set_verbosity(2) - compiler = IBMFCompiler() - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/intel.py
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index a80e525e3c7aa..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,205 +0,0 @@ -# http://developer.intel.com/software/products/compilers/flin/ -from __future__ import division, absolute_import, print_function - -import sys - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['<F90>', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["<F90>", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! - module_include_switch = '-I' - - def get_flags_free(self): - return ["-FR"] - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): - #return ['-i8 -xhost -openmp -fp-model strict'] - return ['-xhost -openmp -fp-model strict'] - - def get_flags_arch(self): - return [] - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] - return opt - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium|IA-64') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['<F90>', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for 64-bit apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['<F90>', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): - #return ['-i8 -xhost -openmp -fp-model strict'] - return ['-xhost -openmp -fp-model strict'] - -
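A note for orientation (a hedged sketch of framework behaviour, not text from the deleted file): in the executables tables throughout these classes, a leading None means the command is filled in from possible_executables, while literal strings such as '<F77>' and '<F90>' are placeholders that numpy.distutils resolves to the detected Fortran 77/90 command before anything is run. A minimal standalone illustration of that substitution:

    def resolve(cmd, f77='g77', f90='gfortran'):
        # cmd is an executables entry such as ['<F90>', '-shared']; slot 0
        # may name another command variable (hypothetical resolver, simplified).
        head = {'<F77>': f77, '<F90>': f90}.get(cmd[0], cmd[0])
        return [head] + cmd[1:]

    print(resolve(['<F90>', '-shared']))  # -> ['gfortran', '-shared']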
def get_flags_arch(self): - return [] - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['<F90>', '/FI', '/c', - f + '.f', '/o', f + '.o'] - - ar_exe = 'lib.exe' - possible_executables = ['ifort', 'ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], - 'compiler_f90' : [None], - 'linker_so' : ['<F90>', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' #No space after /Fo! - library_switch = '/OUT:' #No space after /OUT:! - module_dir_switch = '/module:' #No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo', '/MD', '/nbs', '/Qlowercase', '/us'] - return opt - - def get_flags_free(self): - return ["-FR"] - - def get_flags_debug(self): - return ['/4Yb', '/d2'] - - def get_flags_opt(self): - return ['/O1'] # Scipy test failures with /O2 - - def get_flags_arch(self): - return ["/arch:IA-32", "/QaxSSE3"] - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], - 'compiler_f90' : [None], - 'linker_so' : ['<F90>', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - -class IntelEM64VisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelvem' - description = 'Intel Visual Fortran Compiler for 64-bit apps' - - version_match = simple_version_match(start='Intel\(R\).*?64,') - - def get_flags_arch(self): - return ["/arch:SSE2"] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='intel') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/lahey.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index 7a33b4b63ce5d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)' - - executables = { - 'version_cmd' : ["<F90>", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): -
return ['-g', '--chk', '--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d, 'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='lahey') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/mips.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index 6a8d230992266..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,58 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)' - - executables = { - 'version_cmd' : ["<F90>", "-version"], - 'compiler_f77' : ["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu, 'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='mips') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/nag.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index ae1b96faf3e8b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler'] - -class NAGFCompiler(FCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - version_pattern = r'NAGWare Fortran 95 compiler Release (?P<version>[^\s]*)' - - executables = { - 'version_cmd' : ["<F90>", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : ["<F90>"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform=='darwin': - return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - version = self.get_version()
- if version and version < '5.1': - return ['-target=native'] - else: - return [''] - def get_flags_debug(self): - return ['-g', '-gline', '-g90', '-nan', '-C'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='nag') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/none.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/none.py deleted file mode 100644 index 6f602d734d56a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77': None, - 'compiler_f90': None, - 'compiler_fix': None, - 'linker_so': None, - 'linker_exe': None, - 'archiver': None, - 'ranlib': None, - 'version_cmd': None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - compiler = NoneFCompiler() - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pathf95.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pathf95.py deleted file mode 100644 index 1902bbc242ca8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pathf95.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PathScaleFCompiler'] - -class PathScaleFCompiler(FCompiler): - - compiler_type = 'pathf95' - description = 'PathScale Fortran Compiler' - version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)' - - executables = { - 'version_cmd' : ["pathf95", "-version"], - 'compiler_f77' : ["pathf95", "-fixedform"], - 'compiler_fix' : ["pathf95", "-fixedform"], - 'compiler_f90' : ["pathf95"], - 'linker_so' : ["pathf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space!
- module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - #compiler = PathScaleFCompiler() - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='pathf95') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pg.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index f3f5ea22ba755..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,60 +0,0 @@ -# http://www.pgroup.com -from __future__ import division, absolute_import, print_function - -from numpy.distutils.fcompiler import FCompiler -from sys import platform - -compilers = ['PGroupFCompiler'] - -class PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*' - - if platform == 'darwin': - executables = { - 'version_cmd' : ["<F90>", "-V"], - 'compiler_f77' : ["pgfortran", "-dynamiclib"], - 'compiler_fix' : ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90' : ["pgfortran", "-dynamiclib"], - 'linker_so' : ["libtool"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = [''] - else: - executables = { - 'version_cmd' : ["<F90>", "-V"], - 'compiler_f77' : ["pgfortran"], - 'compiler_fix' : ["pgfortran", "-Mfixed"], - 'compiler_f90' : ["pgfortran"], - 'linker_so' : ["pgfortran", "-shared", "-fpic"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - def get_flags_opt(self): - return ['-fast'] - def get_flags_debug(self): - return ['-g'] - - if platform == 'darwin': - def get_flags_linker_so(self): - return ["-dynamic", '-undefined', 'dynamic_lookup'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='pg') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/sun.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index 0955f14a1c42b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,52 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = ['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["<F90>", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["<F90>", "-Bdynamic", "-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if
(self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast', '-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu', 'sunmath', 'mvec']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='sun') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/vast.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index 05bbc10badb13..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] - -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = r'\s*Pacific-Sierra Research vf90 '\ - '(Personal|Professional)\s+(?P<version>[^\s]*)' - - # VAST f90 does not support -o with -c. So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["<F90>"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='vast') - compiler.customize() - print(compiler.get_version()) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/from_template.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/from_template.py deleted file mode 100644 index d10b50218d2aa..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/from_template.py +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/python -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separeted words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - <p=d,s,z,c> where anywhere inside a block '
<p>' will be replaced with - 'd', 's', 'z', and 'c' for each replicate of the block. - - <_c> is already defined: <_c=s,d,c,z> - <_t> is already defined: <_t=real,double precision,complex,double complex> - - short: - <d,s,z,c>, a short form of the named, useful when no <p> appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\<index>' then it will be replaced - by <index>-th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - <prefix=s,d,c,z> - <ftype=real,double precision,complex,double complex> - <ftypereal=real,double precision,\\0,\\1> - <ctype=float,double,complex_float,complex_double> - <ctypereal=float,double,\\0,\\1> - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. - """ - - spanlist = [] - ind = 0 - while True: - m = routine_start_re.search(astr, ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr, start, m.end()): - while True: - i = astr.rfind('\n', ind, start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start, end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace('\,', '@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -item_re = re.compile(r"\A\\(?P<index>\d+)\Z") -def conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr, names): - substr = substr.replace('\>', '@rightarrow@') - substr = substr.replace('\<', '@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>", substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace('\,', '@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r, names.get(r, None)) - if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) - if r not in names and not thelist.startswith('_'): - names[r] = thelist - rule = [i.replace('@comma@', ',') for i in thelist.split(',')] -
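# Illustrative aside, not in the deleted file: at this point, for a template
# such as <prefix=s,d,c,z>, `thelist` is the string "s,d,c,z" and `rule`
# becomes ['s', 'd', 'c', 'z']; the arity bookkeeping just below then insists
# that every template appearing in the same block expands to the same number
# of replicates.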
num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif num == numsubs: - rules[r] = rule - else: - print("Mismatch in number of replacements (base <%s=%s>)" - " for <%s=%s>. Ignoring." % - (base_rule, ','.join(rules[base_rule]), r, thelist)) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@', '>') - newstr = newstr.replace('@leftarrow@', '<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' #_head # using _head will break free-format files - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - writestr += newstr[oldend:sub[0]] - names.update(find_repl_patterns(newstr[oldend:sub[0]])) - writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - fid = open(source) - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - print('Including file', fn) - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - fid.close() - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> -<prefix=s,d,c,z> -<ftype=real,double precision,complex,double complex> -<ftypereal=real,double precision,\\0,\\1> -<ctype=float,double,complex_float,complex_double> -<ctypereal=float,double,\\0,\\1> -''') - -if __name__ == "__main__": - - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/info.py deleted file mode 100644 index 2f5310665cef3..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/info.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -Enhanced distutils with Fortran compilers support and more.
-""" -from __future__ import division, absolute_import, print_function - -postpone_import = True diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/intelccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/intelccompiler.py deleted file mode 100644 index 1d8dcd9fd88dc..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable - -class IntelCCompiler(UnixCCompiler): - """ A modified Intel compiler compatible with an gcc built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - self.cc_exe = 'icc -fPIC' - compiler = self.cc_exe - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - linker_exe=compiler, - linker_so=compiler + ' -shared') - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - - # On Itanium, the Intel Compiler used to be called ecc, let's search for - # it (now it's also icc, so ecc is last in the search). - for cc_exe in map(find_executable, ['icc', 'ecc']): - if cc_exe: - break - -class IntelEM64TCCompiler(UnixCCompiler): - """ A modified Intel x86_64 compiler compatible with a 64bit gcc built Python. - """ - compiler_type = 'intelem' - cc_exe = 'icc -m64 -fPIC' - cc_args = "-fPIC" - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - self.cc_exe = 'icc -m64 -fPIC' - compiler = self.cc_exe - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - linker_exe=compiler, - linker_so=compiler + ' -shared') diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/lib2def.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/lib2def.py deleted file mode 100644 index 7316547a37b5c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/lib2def.py +++ /dev/null @@ -1,116 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import re -import sys -import os -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). - -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = 'nm -Cs' - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. 
- -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print("I'm assuming that your first argument is the library") - print("and the second is the DEF file.") - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnam(nm_cmd = 'nm -Cs py_lib')""" - f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE) - nm_output = f.stdout.read() - f.stdout.close() - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. - -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. - -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = [str(DEFAULT_NM), str(libfile)] - nm_output = getnm(nm_cmd) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/line_endings.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/line_endings.py deleted file mode 100644 index 5ecb104ffdf51..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/line_endings.py +++ /dev/null @@ -1,76 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -from __future__ import division, absolute_import, print_function - -import sys, re, os - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - data = open(file, "rb").read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - f = open(file, "wb") - f.write(newdata) - f.close() - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. 
Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - data = open(file, "rb").read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - f = open(file, "wb") - f.write(newdata) - f.close() - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/log.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/log.py deleted file mode 100644 index 37f9fe5dd0ef6..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/log.py +++ /dev/null @@ -1,93 +0,0 @@ -# Colored log, requires Python 2.3 or up. -from __future__ import division, absolute_import, print_function - -import sys -from distutils.log import * -from distutils.log import Log as old_Log -from distutils.log import _global_log - -if sys.version_info[0] < 3: - from .misc_util import (red_text, default_text, cyan_text, green_text, - is_sequence, is_string) -else: - from numpy.distutils.misc_util import (red_text, default_text, cyan_text, - green_text, is_sequence, is_string) - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%', '%%') - if flag and is_sequence(args): - return tuple([_fix_args(a, flag=0) for a in args]) - return args - - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """ - If we log WARN messages, log this message as a 'nice' anti-warn - message. - - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() - - -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. - _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting threshold to DEBUG level,' - ' it can be changed only with force argument') - else: - info('set_threshold: not changing threshold from DEBUG level' - ' %s to %s' % (prev_level, level)) - return prev_level - - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) - - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
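The verbosity/threshold split above is easy to misread: set_verbosity() takes a small -2..2 verbosity number, while set_threshold() takes a distutils log level. A minimal sketch of how they interact (hypothetical usage, not part of the patched file):

    from numpy.distutils import log

    prev = log.set_verbosity(2)        # 2 or more selects the DEBUG threshold
    log.info('compiling %s', 'foo.c')  # printed: INFO passes a DEBUG threshold
    log.set_verbosity(prev)            # restore the previous level (-2..2)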
-set_verbosity(0, force=True) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/mingw32ccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index c720d142a0f90..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,582 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. Force windows to use g77 - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import subprocess -import re - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler - -if sys.version_info[0] < 3: - from . import log -else: - from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.version import StrictVersion -from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options -from distutils.errors import DistutilsExecError, CompileError, UnknownFileError - -from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version -from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. - - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, - verbose, dry_run, force) - - # we need to support 3.2 which doesn't match the standard - # get_versions methods regex - if self.gcc_version is None: - import re - p = subprocess.Popen(['gcc', '-dumpversion'], shell=True, - stdout=subprocess.PIPE) - out_string = p.stdout.read() - p.stdout.close() - result = re.search('(\d+\.\d+)', out_string) - if result: - self.gcc_version = StrictVersion(result.group(1)) - - # A real mingw32 doesn't need to specify a different entry point, - # but cygwin 2.91.57 in no-cygwin-mode needs it. - if self.gcc_version <= "2.91.57": - entry_point = '--entry _DllMain@12' - else: - entry_point = '' - - if self.linker_dll == 'dllwrap': - # Commented out '--driver-name g++' part that fixes weird - # g++.exe: g++: No such file or directory - # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). - # If the --driver-name part is required for some environment - # then make the inclusion of this part specific to that environment. - self.linker = 'dllwrap' # --driver-name g++' - elif self.linker_dll == 'gcc': - self.linker = 'g++' - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
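The '__MSVCRT_VERSION__' hint defined a few lines below is built from the runtime library name, which is worth seeing with concrete numbers (values assumed purely for illustration: a Python built with MSVC 10 reports 'MSC v.1600' in sys.version, so msvc_runtime_library() returns 'msvcr100'):

    # '%03i' zero-pads to three digits and a literal '0' is appended,
    # so msvcr90 -> '0x0900' and msvcr100 -> '0x1000'.
    assert '0x%03i0' % int('msvcr90'.lstrip('msvcr')) == '0x0900'
    assert '0x%03i0' % int('msvcr100'.lstrip('msvcr')) == '0x1000'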
- msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') - - # Define the MSVC version as hint for MinGW - msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr')) - self.define_macro('__MSVCRT_VERSION__', msvcr_version) - - # **changes: eric jones 4/11/01 - # 2. increased optimization and turned off all warnings - # 3. also added --driver-name g++ - #self.set_executables(compiler='gcc -mno-cygwin -O2 -w', - # compiler_so='gcc -mno-cygwin -mdll -O2 -w', - # linker_exe='gcc -mno-cygwin', - # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s' - # % (self.linker, entry_point)) - - # MS_WIN64 should be defined when building for amd64 on windows, but - # python headers define it only for MS compilers, which has all kind of - # bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... So we add it here - if get_build_architecture() == 'AMD64': - if self.gcc_version < "4.0": - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes', - linker_exe='gcc -g -mno-cygwin', - linker_so='gcc -g -mno-cygwin -shared') - else: - # gcc-4 series releases do not support -mno-cygwin option - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - if self.gcc_version <= "3.0.0": - self.set_executables(compiler='gcc -mno-cygwin -O2 -w', - compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='%s -mno-cygwin -mdll -static %s' - % (self.linker, entry_point)) - elif self.gcc_version < "4.0": - self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', - compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='g++ -mno-cygwin -shared') - else: - # gcc-4 series releases do not support -mno-cygwin option - self.set_executables(compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished - # dlls need another dll (mingwm10.dll see Mingw32 docs) - # (-mthreads: Support thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropiate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - if 
self.gcc_version < "3.0.0": - func = distutils.cygwinccompiler.CygwinCCompiler.link - else: - func = UnixCCompiler.link - func(*args[:func.__code__.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv, base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - maj, min, micro = [int(i) for i in sys.version_info[:3]] - dllname = 'python%d%d.dll' % (maj, min) - print("Looking for %s" % dllname) - - # We can't do much here: - # - find it in python main dir - # - in system32, - # - ortherwise (Sxs), I don't know how to get it. - lib_dirs = [] - lib_dirs.append(sys.prefix) - lib_dirs.append(os.path.join(sys.prefix, 'lib')) - try: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32')) - except KeyError: - pass - - for d in lib_dirs: - dll = os.path.join(d, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def dump_table(dll): - st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE) - return st.stdout.readlines() - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. - - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i].decode()): - break - else: - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j].decode()) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - d = open(dfile, 'w') - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - d.close() - -def find_dll(dll_name): - - arch = {'AMD64' : 'amd64', - 'Intel' : 'x86'}[get_build_architecture()] - - def _find_dll_in_winsxs(dll_name): - # Walk through the WinSxS directory to find the dll. - winsxs_path = os.path.join(os.environ['WINDIR'], 'winsxs') - if not os.path.exists(winsxs_path): - return None - for root, dirs, files in os.walk(winsxs_path): - if dll_name in files and arch in root: - return os.path.join(root, dll_name) - return None - - def _find_dll_in_path(dll_name): - # First, look in the Python directory, then scan PATH for - # the given dll name. 
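    # (Sketch of the overall resolution order, read off the code rather than
    # documented anywhere: find_dll tries the WinSxS walk above first, and
    # only falls back to this sys.prefix + PATH scan when no matching
    # assembly is found there.)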
- for path in [sys.prefix] + os.environ['PATH'].split(';'): - filepath = os.path.join(path, dll_name) - if os.path.exists(filepath): - return os.path.abspath(filepath) - - return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) - -def build_msvcr_library(debug=False): - if os.name != 'nt': - return False - - msvcr_name = msvc_runtime_library() - - # Skip using a custom library for versions < MSVC 8.0 - if int(msvcr_name.lstrip('msvcr')) < 80: - log.debug('Skip building msvcr library: custom functionality not present') - return False - - if debug: - msvcr_name += 'd' - - # Skip if custom library already exists - out_name = "lib%s.a" % msvcr_name - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building msvcr library: "%s" exists' % (out_file)) - return True - - # Find the msvcr dll - msvcr_dll_name = msvcr_name + '.dll' - dll_file = find_dll(msvcr_dll_name) - if not dll_file: - log.warn('Cannot build msvcr library: "%s" not found' % msvcr_dll_name) - return False - - def_name = "lib%s.def" % msvcr_name - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building msvcr library: "%s" (from %s)' \ - % (out_file, dll_file)) - - # Generate a symbol definition file from the msvcr dll - generate_def(dll_file, def_file) - - # Create a custom mingw library for the given symbol definitions - cmd = ['dlltool', '-d', def_file, '-l', out_file] - retcode = subprocess.call(cmd) - - # Clean up symbol definitions - os.remove(def_file) - - return (not retcode) - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _build_import_library_amd64(): - dll_file = find_python_dll() - - out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building import library: "%s" exists' % (out_file)) - return - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building import library (arch=AMD64): "%s" (from %s)' \ - % (out_file, dll_file)) - - generate_def(dll_file, def_file) - - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.Popen(cmd) - -def _build_import_library_x86(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix, 'libs', lib_name) - out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) - out_file = os.path.join(sys.prefix, 'libs', out_name) - if not os.path.isfile(lib_file): - log.warn('Cannot build import library: "%s" not found' % (lib_file)) - return - if os.path.isfile(out_file): - log.debug('Skip building import library: "%s" exists' % (out_file)) - return - log.info('Building import library (ARCH=x86): "%s"' % (out_file)) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file) - nm_output = lib2def.getnm(nm_cmd) - dlist, flist = lib2def.parse_nm(nm_output) - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w')) - - dll_name = "python%d%d.dll" % tuple(sys.version_info[:2]) - args = (dll_name, def_file, out_file) - 
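    # With Python 3.4 as an assumed example, the command assembled below
    # expands to:
    #   dlltool --dllname python34.dll --def python34.def --output-lib libpython34.a
    # i.e. dlltool reads the exported symbols listed in the .def file and
    # writes a GNU import library that the MinGW linker accepts in place of
    # the MSVC .lib.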
cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args - status = os.system(cmd) - # for now, fail silently - if status: - log.warn('Failed to build import library for gcc. Linking will fail.') - #if not success: - # msg = "Couldn't find import library, and failed to build it." - # raise DistutilsPlatformError(msg) - return - -#===================================== -# Dealing with Visual Studio MANIFESTS -#===================================== - -# Functions to deal with visual studio manifests. Manifest are a mechanism to -# enforce strong DLL versioning on windows, and has nothing to do with -# distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL not in the -# system path; in particular, official python 2.6 binary is built against the -# MS runtime 9 (the one from VS 2008), which is not available on most windows -# systems; python 2.6 installer does install it in the Win SxS (Side by side) -# directory, but this requires the manifest for this to work. This is a big -# mess, thanks MS for a wonderful system. - -# XXX: ideally, we should use exactly the same version as used by python. I -# submitted a patch to get this version, but it was only included for python -# 2.6.1 and above. So for versions below, we use a "best guess". -_MSVCRVER_TO_FULLVER = {} -if sys.platform == 'win32': - try: - import msvcrt - # I took one version in my SxS directory: no idea if it is the good - # one, and we can't retrieve it from python - _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" - _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" - # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 on Windows XP: - _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" - if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): - major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2) - _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION - del major, minor, rest - except ImportError: - # If we are here, means python was not built with MSVC. Not sure what to do - # in that case: manifest building will fail, but it should not be used in - # that case anyway - log.warn('Cannot import msvcrt: using manifest will not be possible') - -def msvc_manifest_xml(maj, min): - """Given a major and minor version of the MSVCR, returns the - corresponding XML file.""" - try: - fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] - except KeyError: - raise ValueError("Version %d,%d of MSVCRT not supported yet" \ - % (maj, min)) - # Don't be fooled, it looks like an XML, but it is not. In particular, it - # should not have any space before starting, and its size should be - # divisible by 4, most likely for alignement constraints when the xml is - # embedded in the binary... - # This template was copied directly from the python 2.6 binary (using - # strings.exe from mingw on python.exe). - template = """\ -<?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> - <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> - <security> - <requestedPrivileges> - <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel> - </requestedPrivileges> - </security> - </trustInfo> - <dependency> - <dependentAssembly> - <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity> - </dependentAssembly> - </dependency> -</assembly>""" - - return template % {'fullver': fullver, 'maj': maj, 'min': min} - -def manifest_rc(name, type='dll'): - """Return the rc file used to generate the res file which will be embedded - as manifest for given manifest file name, of given type ('dll' or - 'exe'). 
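    For an executable with manifest file 'yo.exe.manifest' (hypothetical
    name), the returned text would be::

        #include "winuser.h"
        1 RT_MANIFEST yo.exe.manifest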
- - Parameters - ---------- - name : str - name of the manifest file to embed - type : str {'dll', 'exe'} - type of the binary which will embed the manifest - - """ - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - msvcv = msvc_runtime_library() - if msvcv: - assert msvcv.startswith("msvcr"), msvcv - # Dealing with something like "mscvr90" or "mscvr100", the last - # last digit is the minor release, want int("9") or int("10"): - maj = int(msvcv[5:-1]) - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma = int(msver) - mi = int((msver - ma) * 10) - # Write the manifest file - manxml = msvc_manifest_xml(ma, mi) - man = open(manifest_name(config), "w") - config.temp_files.append(manifest_name(config)) - man.write(manxml) - man.close() - # # Write the rc file - # manrc = manifest_rc(manifest_name(self), "exe") - # rc = open(rc_name(self), "w") - # self.temp_files.append(manrc) - # rc.write(manrc) - # rc.close() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/misc_util.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/misc_util.py deleted file mode 100644 index c146178f06479..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/misc_util.py +++ /dev/null @@ -1,2271 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import re -import sys -import imp -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil - -import distutils -from distutils.errors import DistutilsError - -try: - set -except NameError: - from sets import Set as set - -from numpy.distutils.compat import get_exception - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath', 'njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info'] - -class InstallableLib(object): - """ - Container to hold information on an installable library. - - Parameters - ---------- - name : str - Name of the installed library. 
build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. - - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. - - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - -def quote_args(args): - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. - args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." - splitted = name.split('/') - return os.path.join(*splitted) - -def rel_path(path, parent_path): - """Return path relative to parent_path. - """ - pd = os.path.abspath(parent_path) - apath = os.path.abspath(path) - if len(apath)= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(fg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path): - if sys.platform=='cygwin' and path.startswith('/cygdrive'): - path = path[10] + ':' + os.path.normcase(path[11:]) - return path - -def mingw32(): - """Return true when using mingw32 environment. - """ - if sys.platform=='win32': - if os.environ.get('OSTYPE', '')=='msys': - return True - if os.environ.get('MSYSTEM', '')=='MINGW32': - return True - return False - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = sys.version[msc_pos+6:msc_pos+10] - lib = {'1300': 'msvcr70', # MSVC 7.0 - '1310': 'msvcr71', # MSVC 7.1 - '1400': 'msvcr80', # MSVC 8 - '1500': 'msvcr90', # MSVC 9 (VS 2008) - '1600': 'msvcr100', # MSVC 10 (aka 2010) - }.get(msc_ver, None) - else: - lib = None - return lib - - -######################### - -#XXX need support for .C that is also C++ -cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match -fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match -f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file defines. 
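    For example (illustrative sketch; 'm.f90' is a hypothetical file
    declaring ``module foo`` followed by ``module bar``)::

      >>> _get_f90_modules('m.f90')  #doctest: +SKIP
      ['foo', 'bar']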
- """ - if not f90_ext_match(source): - return [] - modules = [] - f = open(source, 'r') - for line in f: - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - f.close() - return modules - -def is_string(s): - return isinstance(s, str) - -def all_strings(lst): - """Return True if all items in lst are string objects. """ - for item in lst: - if not is_string(item): - return False - return True - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' is s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - for source in sources: - if fortran_ext_match(source): - return True - return False - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - for source in sources: - if cxx_ext_match(source): - return True - return False - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. - """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. - direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. 
- """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(), abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. - """ - pruned_directories = ['CVS', '.svn', 'build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath, f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. - filenames = [] - sources = [_m for _m in ext.sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = [_m for _m in scripts if is_string(_m)] - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources', []) - sources = [_m for _m in sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends', []) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. - - Returns - ------- - so_ext : str - The shared library extension. - - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. For Python 3.2 this is implemented on - Linux, but not on OS X. 
- - """ - confvars = distutils.sysconfig.get_config_vars() - # SO is deprecated in 3.3.1, use EXT_SUFFIX instead - so_ext = confvars.get('EXT_SUFFIX', None) - if so_ext is None: - so_ext = confvars.get('SO', '') - - if not is_python_ext: - # hardcode known values, config vars (including SHLIB_SUFFIX) are - # unreliable (see #3182) - # darwin, windows and debug linux are wrong in 3.3.1 and older - if (sys.platform.startswith('linux') or - sys.platform.startswith('gnukfreebsd')): - so_ext = '.so' - elif sys.platform.startswith('darwin'): - so_ext = '.dylib' - elif sys.platform.startswith('win'): - so_ext = '.dll' - else: - # fall back to config vars for unknown platforms - # fix long extension for Python >=3.2, see PEP 3149. - if 'SOABI' in confvars: - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:', s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. - """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - - -###################### - -class Configuration(object): - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', - 'installed_libraries', 'define_macros'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. - - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. - """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. 
- if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path, package_path)): - package_path = njoin(self.local_path, package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self, n, a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path, '__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1, 3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self', f.f_globals, f.f_locals) - break - except NameError: - pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - self.setup_name = setup_name - - def todict(self): - """ - Return a dictionary compatible with the keyword arguments of distutils - setup function. - - Examples - -------- - >>> setup(**config.todict()) #doctest: +SKIP - """ - - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self, n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print(message) - - def warn(self, message): - sys.stderr.write('Warning: %s' % (message,)) - - def set_options(self, **options): - """ - Configure Configuration instance. 
- - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)] - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d, '__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case setup_py imports local modules: - sys.path.insert(0, os.path.dirname(setup_py)) - try: - fo_setup_py = open(setup_py, 'U') - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name, subpackage_name, setup_name) - setup_module = imp.load_module('_'.join(n.split('.')), - fo_setup_py, - setup_py, - ('.py', 'U', 1)) - fo_setup_py.close() - if not hasattr(setup_module, 'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - def fix_args_py2(args): - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - return args - def fix_args_py3(args): - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - return args - if sys.version_info[0] < 3: - args = fix_args_py2(args) - else: - args = fix_args_py3(args) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name, subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name, subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name : str or None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path : str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - parent_name : str - Parent name. 
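        For example (sketch; assumes a ``linalg/`` subdirectory that ships
        its own setup.py defining a configuration() function)::

          >>> config.get_subpackage('linalg')  #doctest: +SKIP
          [<numpy.distutils.misc_util.Configuration object>]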
- """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, self.setup_name) - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add a sub-package to the current Configuration instance. - - This is useful in a setup.py script for adding sub-packages to a - package. - - Parameters - ---------- - subpackage_name : str - name of the subpackage - subpackage_path : str - if given, the subpackage path such as the subpackage is in - subpackage_path / subpackage_name. If None,the subpackage is - assumed to be located in the local path / subpackage_name. - standalone : bool - """ - - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name, subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d, dict), repr(type(d)) - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self, data_path): - """Recursively add files under data_path to data_files list. - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. - - Parameters - ---------- - data_path : seq or str - Argument can be either - - * 2-sequence (, ) - * path to data directory where python datadir suffix defaults - to package dir. 
- - Notes - ----- - Rules for installation paths: - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - - Examples - -------- - For example suppose the source directory contains fun/foo.dat and - fun/bar/car.dat:: - - >>> self.add_data_dir('fun') #doctest: +SKIP - >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP - >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP - - Will install data-files to the locations:: - - <package install directory>/ - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d, p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = list(range(len(pattern_list)-1)); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print('Not a directory, skipping', path) - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError('cannot fill pattern %r with %r' \ - % (d, path)) - target_list.append(path_list[i]) - else: - assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list, path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list), path)) - else: - for path in paths: - self.add_data_dir((d, path)) - return - assert not is_glob_pattern(d), repr(d) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1, f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package, d, d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p, files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - for f in files: - data_dict[p].add(f) - self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - - Parameters - ---------- - files : sequence - Argument(s) can be either - - * 2-sequence (<datadir prefix>,<path to data file(s)>) - * paths to data files where python datadir prefix defaults - to package dir. - - Notes - ----- - The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. 
The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. - Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. - - Rules for installation paths: - - #. file.txt -> (., file.txt)-> parent/file.txt - #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - #. *.txt -> parent/a.txt, parent/b.txt - #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt - #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt - #. (sun, file.txt) -> parent/sun/file.txt - #. (sun, bar/file.txt) -> parent/sun/file.txt - #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt - #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. - - Examples - -------- - Add files to the list of data_files to be included with the package. - - >>> self.add_data_files('foo.dat', - ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - ... 'bar/cat.dat', - ... '/full/path/to/can.dat') #doctest: +SKIP - - will install these data files to:: - - <package install directory>/ - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where <package install directory> is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage') or - '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). 
- """ - - if len(files)>1: - for f in files: - self.add_data_files(f) - return - assert len(files)==1 - if is_sequence(files[0]): - d, files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d, f)) - return - else: - raise TypeError(repr(type(files))) - - if d is None: - if hasattr(filepat, '__call__'): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d, files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d, paths)) - return - assert not is_glob_pattern(d), repr((d, filepat)) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package, d), paths)) - - ### XXX Implement add_py_modules - - def add_define_macros(self, macros): - """Add define macros to configuration - - Add the given sequence of macro name and value duples to the beginning - of the define_macros list This list will be visible to all extension - modules of the current package. - """ - dist = self.get_distribution() - if dist is not None: - if not hasattr(dist, 'define_macros'): - dist.define_macros = [] - dist.define_macros.extend(macros) - else: - self.define_macros.extend(macros) - - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - if dist.include_dirs is None: - dist.include_dirs = [] - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_headers(self,*files): - """Add installable headers to configuration. - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under // directory. If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the path. - - Parameters - ---------- - files : str or seq - Argument(s) can be either: - - * 2-sequence (,) - * path(s) to header file(s) where python includedir suffix will - default to package name. - """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name, p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0], p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - if dist.headers is None: - dist.headers = [] - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. - - Applies glob.glob(...) 
to each path in the sequence (if needed) and - pre-pends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing', True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self, kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources', 'depends', 'include_dirs', 'library_dirs', - 'module_dirs', 'extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name : str - name of the extension - sources : seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs : - define_macros : - undef_macros : - library_dirs : - libraries : - runtime_library_dirs : - extra_objects : - extra_compile_args : - extra_link_args : - extra_f77_compile_args : - extra_f90_compile_args : - export_symbols : - swig_opts : - depends : - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language : - f2py_options : - module_dirs : - extra_info : dict or list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) method is applied to all lists that may contain - paths. 
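- - As an illustrative sketch only (the module and file names are hypothetical), a typical call passes the extension name, its sources, and any of the keyword arguments listed above:: - - >>> config.add_extension('spam', - ... sources=['spammodule.c'], - ... include_dirs=['include'], - ... libraries=['m']) #doctest: +SKIP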
- """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name, name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries', []) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname, tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname, lpath = libname.split('@', 1) - lpath = os.path.abspath(njoin(self.local_path, lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None, lpath, - caller_level = 2) - if isinstance(c, Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries', [])]: - llname = l.split('__OF__', 1)[0] - if llname == lname: - c.pop('name', None) - dict_append(ext_args,**c) - break - continue - libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - ext_args['define_macros'] = \ - self.define_macros + ext_args.get('define_macros', []) - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compiler_args - * extra_f90_compiler_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. Do - not use directly""" - build_info = copy.copy(build_info) - name = name #+ '__OF__' + self.name - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. 
- - Most C libraries used with `distutils` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compiler_args - * extra_f90_compiler_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. - - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. - - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - """ - if subst_dict is None: - subst_dict = {} - basename = os.path.splitext(template)[0] - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. 
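- - For example (the script path is hypothetical), relative names are expanded through self.paths, so wildcard patterns and paths relative to the source directory both work:: - - >>> config.add_scripts('scripts/foo_tool') #doctest: +SKIP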
- - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self, key) - a.extend(dict.get(key, [])) - for key in self.dict_keys: - a = getattr(self, key) - a.update(dict.get(key, {})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key, dict[key], dict.get('name', '?'))) - setattr(self, key, dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self, key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self, k, None) - if a: - s += '%s = %s\n' % (k, pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.', old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 90 compiler is available (because a simple Fortran - 90 code was able to be compiled successfully) - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') - return flag - - def append_to(self, extlib): - """Append libraries, include_dirs to extension or library item. - """ - if is_sequence(extlib): - lib_name, build_info = extlib - dict_append(build_info, - libraries=self.libraries, - include_dirs=self.include_dirs) - else: - from numpy.distutils.core import Extension - assert isinstance(extlib, Extension), repr(extlib) - extlib.libraries.extend(self.libraries) - extlib.include_dirs.extend(self.include_dirs) - - def _get_svn_revision(self, path): - """Return path's SVN revision number. 
- """ - revision = None - m = None - cwd = os.getcwd() - try: - os.chdir(path or '.') - p = subprocess.Popen(['svnversion'], shell=True, - stdout=subprocess.PIPE, stderr=None, - close_fds=True) - sout = p.stdout - m = re.match(r'(?P\d+)', sout.read()) - except: - pass - os.chdir(cwd) - if m: - revision = int(m.group('revision')) - return revision - if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): - entries = njoin(path, '_svn', 'entries') - else: - entries = njoin(path, '.svn', 'entries') - if os.path.isfile(entries): - f = open(entries) - fstr = f.read() - f.close() - if fstr[:5] == '\d+)"', fstr) - if m: - revision = int(m.group('revision')) - else: # non-xml entries file --- check to be sure that - m = re.search(r'dir[\n\r]+(?P\d+)', fstr) - if m: - revision = int(m.group('revision')) - return revision - - def _get_hg_revision(self, path): - """Return path's Mercurial revision number. - """ - revision = None - m = None - cwd = os.getcwd() - try: - os.chdir(path or '.') - p = subprocess.Popen(['hg identify --num'], shell=True, - stdout=subprocess.PIPE, stderr=None, - close_fds=True) - sout = p.stdout - m = re.match(r'(?P\d+)', sout.read()) - except: - pass - os.chdir(cwd) - if m: - revision = int(m.group('revision')) - return revision - branch_fn = njoin(path, '.hg', 'branch') - branch_cache_fn = njoin(path, '.hg', 'branch.cache') - - if os.path.isfile(branch_fn): - branch0 = None - f = open(branch_fn) - revision0 = f.read().strip() - f.close() - - branch_map = {} - for line in file(branch_cache_fn, 'r'): - branch1, revision1 = line.split()[:2] - if revision1==revision0: - branch0 = branch1 - try: - revision1 = int(revision1) - except ValueError: - continue - branch_map[branch1] = revision1 - - revision = branch_map.get(branch0) - return revision - - - def get_version(self, version_file=None, version_variable=None): - """Try to get version string of a package. - - Return a version string of the current package or None if the version - information could not be detected. - - Notes - ----- - This method scans files named - __version__.py, _version.py, version.py, and - __svn_version__.py for string variables version, __version\__, and - _version, until a version number is found. - """ - version = getattr(self, 'version', None) - if version is not None: - return version - - # Get version from version file. 
- if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path, f) - if os.path.isfile(fn): - info = (open(fn), fn, ('.py', 'U', 1)) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name, name) - try: - version_module = imp.load_module('_'.join(n.split('.')),*info) - except ImportError: - msg = get_exception() - self.warn(str(msg)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module, a, None) - if version is not None: - break - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. - - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. - """ - target = njoin(self.local_path, '__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - f = open(target, 'w') - f.write('version = %r\n' % (version)) - f.close() - - import atexit - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in an Mercurial repository. 
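- - A minimal usage sketch: calling this from a configuration function registers the generated file, and a later get_version() call can then pick up __hg_version__.py:: - - >>> config.make_hg_version_py() #doctest: +SKIP - >>> config.get_version() #doctest: +SKIP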
- """ - target = njoin(self.local_path, '__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - f = open(target, 'w') - f.write('version = %r\n' % (version)) - f.close() - - import atexit - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name, name, generate_config_py)) - - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. - """ - from .system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory.""" - # XXX: import here for bootstrapping reasons - import numpy - d = os.path.join(os.path.dirname(numpy.__file__), - 'core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_info - - """ - from numpy.distutils.npy_pkg_config import read_config - - if dirs: - dirs.append(get_npy_pkg_dir()) - else: - dirs = [get_npy_pkg_dir()] - return read_config(pkgname, dirs) - -def get_info(pkgname, dirs=None): - """ - Return an info dict for a given C library. - - The info dict contains the necessary options to use the C library. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). 
- dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - info : dict - The dictionary with build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_pkg_info - - Examples - -------- - To get the necessary information for the npymath library from NumPy: - - >>> npymath_info = np.distutils.misc_util.get_info('npymath') - >>> npymath_info #doctest: +SKIP - {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': - ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']} - - This info dict can then be used as input to a `Configuration` instance:: - - config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) - - """ - from numpy.distutils.npy_pkg_config import parse_flags - pkg_info = get_pkg_info(pkgname, dirs) - - # Translate LibraryInfo instance into a build_info dict - info = parse_flags(pkg_info.cflags()) - for k, v in parse_flags(pkg_info.libs()).items(): - info[k].extend(v) - - # add_extension extra_info argument is ANAL - info['define_macros'] = info['macros'] - del info['macros'] - del info['ignored'] - - return info - -def is_bootstrapping(): - if sys.version_info[0] >= 3: - import builtins - else: - import __builtin__ as builtins - - try: - builtins.__NUMPY_SETUP__ - return True - except AttributeError: - return False - __NUMPY_SETUP__ = False - - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. - """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - )) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov, str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. 
- - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - f = open(target, 'w') - f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(r''' -def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - -def show(): - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - ''') - - f.close() - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -if sys.version[:3] >= '2.5': - def get_build_architecture(): - from distutils.msvccompiler import get_build_architecture - return get_build_architecture() -else: - #copied from python 2.5.1 distutils/msvccompiler.py - def get_build_architecture(): - """Return the processor architecture. - - Possible results are "Intel", "Itanium", or "AMD64". - """ - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return "Intel" - j = sys.version.find(")", i) - return sys.version[i+len(prefix):j] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/npy_pkg_config.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index ceab906a4edff..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,464 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re -import os -import shlex - -if sys.version_info[0] < 3: - from ConfigParser import SafeConfigParser, NoOptionError -else: - from configparser import ConfigParser, SafeConfigParser, NoOptionError - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile('\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(IOError): - """ - Exception thrown when there is a problem parsing a configuration file. - - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(IOError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. 
- These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - lexer = shlex.shlex(line) - lexer.whitespace_split = True - - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - def next_token(t): - if t.startswith('-I'): - if len(t) > 2: - d['include_dirs'].append(t[2:]) - else: - t = lexer.get_token() - d['include_dirs'].append(t) - elif t.startswith('-L'): - if len(t) > 2: - d['library_dirs'].append(t[2:]) - else: - t = lexer.get_token() - d['library_dirs'].append(t) - elif t.startswith('-l'): - d['libraries'].append(t[2:]) - elif t.startswith('-D'): - d['macros'].append(t[2:]) - else: - d['ignored'].append(t) - return lexer.get_token() - - t = lexer.get_token() - while t: - t = next_token(t) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo(object): - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. - version : str - Version string. - sections : dict - The sections of the configuration file for the library. The keys are - the section headers, the values the text under each header. - vars : class instance - A `VariableSet` instance, which contains ``(name, value)`` pairs for - variables defined in the configuration file for the library. - requires : sequence, optional - The required libraries for the library to be installed. - - Notes - ----- - All input parameters (except "sections" which is a method) are available as - attributes of the same name. - - """ - def __init__(self, name, description, version, sections, vars, requires=None): - self.name = name - self.description = description - if requires: - self.requires = requires - else: - self.requires = [] - self.version = version - self._sections = sections - self.vars = vars - - def sections(self): - """ - Return the section headers of the config file. - - Parameters - ---------- - None - - Returns - ------- - keys : list of str - The list of section headers. - - """ - return list(self._sections.keys()) - - def cflags(self, section="default"): - val = self.vars.interpolate(self._sections[section]['cflags']) - return _escape_backslash(val) - - def libs(self, section="default"): - val = self.vars.interpolate(self._sections[section]['libs']) - return _escape_backslash(val) - - def __str__(self): - m = ['Name: %s' % self.name] - m.append('Description: %s' % self.description) - if self.requires: - m.append('Requires:') - else: - m.append('Requires: %s' % ",".join(self.requires)) - m.append('Version: %s' % self.version) - - return "\n".join(m) - -class VariableSet(object): - """ - Container object for the variables defined in a config file. - - `VariableSet` can be used as a plain dictionary, with the variable names - as keys. - - Parameters - ---------- - d : dict - Dict of items in the "variables" section of the configuration file. 
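- - Examples - -------- - A small interpolation sketch (the variable name and value are illustrative):: - - >>> v = VariableSet({'prefix': '/usr/local'}) - >>> v.interpolate('-I${prefix}/include') - '-I/usr/local/include'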
- - """ - def __init__(self, d): - self._raw_data = dict([(k, v) for k, v in d.items()]) - - self._re = {} - self._re_sub = {} - - self._init_parse() - - def _init_parse(self): - for k, v in self._raw_data.items(): - self._init_parse_var(k, v) - - def _init_parse_var(self, name, value): - self._re[name] = re.compile(r'\$\{%s\}' % name) - self._re_sub[name] = value - - def interpolate(self, value): - # Brute force: we keep interpolating until there is no '${var}' anymore - # or until interpolated string is equal to input string - def _interpolate(value): - for k in self._re.keys(): - value = self._re[k].sub(self._re_sub[k], value) - return value - while _VAR.search(value): - nvalue = _interpolate(value) - if nvalue == value: - break - value = nvalue - - return value - - def variables(self): - """ - Return the list of variable names. - - Parameters - ---------- - None - - Returns - ------- - names : list of str - The names of all variables in the `VariableSet` instance. - - """ - return list(self._raw_data.keys()) - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = {} - for name, value in config.items('meta'): - d[name] = value - - for k in ['name', 'description', 'version']: - if not k in d: - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not 'requires' in d: - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def parse_config(filename, dirs=None): - if dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - if sys.version[:3] > '3.1': - # SafeConfigParser is deprecated in py-3.2 and renamed to ConfigParser - config = ConfigParser() - else: - config = SafeConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not k in vars: - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, 
vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. This requires the package to be imported to work - if not 'pkgdir' in vars and "pkgname" in vars: - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of directories - usually including - the NumPy base directory - where to look for npy-pkg-config files. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - misc_util.get_info, misc_util.get_pkg_info - - Examples - -------- - >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') - >>> type(npymath_info) - - >>> print npymath_info - Name: npymath - Description: Portable, core math library implementing C99 standard - Requires: - Version: 0.1 #random - - """ - try: - return _CACHE[pkgname] - except KeyError: - v = _read_config_imp(pkg_to_filename(pkgname), dirs) - _CACHE[pkgname] = v - return v - -# TODO: -# - implements version comparison (modversion + atleast) - -# pkg-config simple emulator - useful for debugging, and maybe later to query -# the system -if __name__ == '__main__': - import sys - from optparse import OptionParser - import glob - - parser = OptionParser() - parser.add_option("--cflags", dest="cflags", action="store_true", - help="output all preprocessor and compiler flags") - parser.add_option("--libs", dest="libs", action="store_true", - help="output all linker flags") - parser.add_option("--use-section", dest="section", - help="use this section instead of default for options") - parser.add_option("--version", dest="version", action="store_true", - help="output version") - parser.add_option("--atleast-version", dest="min_version", - help="Minimal version") - parser.add_option("--list-all", dest="list_all", action="store_true", - help="Minimal version") - parser.add_option("--define-variable", dest="define_variable", - help="Replace variable with the given value") - - (options, args) = parser.parse_args(sys.argv) - - if len(args) < 2: - raise ValueError("Expect package name on the command line:") - - if options.list_all: - files = glob.glob("*.ini") - for f in files: - info = read_config(f) - print("%s\t%s - %s" % (info.name, info.name, info.description)) - - pkg_name = args[1] - import os - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d: - info = read_config(pkg_name, 
['numpy/core/lib/npy-pkg-config', '.', d]) - else: - info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.']) - - if options.section: - section = options.section - else: - section = "default" - - if options.define_variable: - m = re.search('([\S]+)=([\S]+)', options.define_variable) - if not m: - raise ValueError("--define-variable option should be of " \ - "the form --define-variable=foo=bar") - else: - name = m.group(1) - value = m.group(2) - info.vars[name] = value - - if options.cflags: - print(info.cflags(section)) - if options.libs: - print(info.libs(section)) - if options.version: - print(info.version) - if options.min_version: - print(info.version >= options.min_version) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/numpy_distribution.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/numpy_distribution.py deleted file mode 100644 index 6ae19d16b18f3..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/numpy_distribution.py +++ /dev/null @@ -1,19 +0,0 @@ -# XXX: Handle setuptools ? -from __future__ import division, absolute_import, print_function - -from distutils.core import Distribution - -# This class is used because we add new files (sconscripts, and so on) with the -# scons command -class NumpyDistribution(Distribution): - def __init__(self, attrs = None): - # A list of (sconscripts, pre_hook, post_hook, src, parent_names) - self.scons_data = [] - # A list of installable libraries - self.installed_libraries = [] - # A dict of pkg_config files to generate/install - self.installed_pkg_config = {} - Distribution.__init__(self, attrs) - - def has_scons_scripts(self): - return bool(self.scons_data) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/pathccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/pathccompiler.py deleted file mode 100644 index fc9872db34da8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/pathccompiler.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from distutils.unixccompiler import UnixCCompiler - -class PathScaleCCompiler(UnixCCompiler): - - """ - PathScale compiler compatible with an gcc built Python. 
- """ - - compiler_type = 'pathcc' - cc_exe = 'pathcc' - cxx_exe = 'pathCC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler, - compiler_so=cc_compiler, - compiler_cxx=cxx_compiler, - linker_exe=cc_compiler, - linker_so=cc_compiler + ' -shared') diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/setup.py deleted file mode 100644 index 82a53bd08dbe3..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/setup.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('distutils', parent_package, top_path) - config.add_subpackage('command') - config.add_subpackage('fcompiler') - config.add_data_dir('tests') - config.add_data_files('site.cfg') - config.add_data_files('mingw/gfortran_vs2003_hack.c') - config.make_config_py() - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/system_info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/system_info.py deleted file mode 100644 index 48c92c5482248..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/system_info.py +++ /dev/null @@ -1,2242 +0,0 @@ -#!/bin/env python -""" -This file defines a set of system_info classes for getting -information about various resources (libraries, library directories, -include directories, etc.) in the system. Currently, the following -classes are available: - - atlas_info - atlas_threads_info - atlas_blas_info - atlas_blas_threads_info - lapack_atlas_info - blas_info - lapack_info - openblas_info - blas_opt_info # usage recommended - lapack_opt_info # usage recommended - fftw_info,dfftw_info,sfftw_info - fftw_threads_info,dfftw_threads_info,sfftw_threads_info - djbfft_info - x11_info - lapack_src_info - blas_src_info - numpy_info - numarray_info - numpy_info - boost_python_info - agg2_info - wx_info - gdk_pixbuf_xlib_2_info - gdk_pixbuf_2_info - gdk_x11_2_info - gtkp_x11_2_info - gtkp_2_info - xft_info - freetype2_info - umfpack_info - -Usage: - info_dict = get_info() - where is a string 'atlas','x11','fftw','lapack','blas', - 'lapack_src', 'blas_src', etc. For a complete list of allowed names, - see the definition of get_info() function below. - - Returned info_dict is a dictionary which is compatible with - distutils.setup keyword arguments. If info_dict == {}, then the - asked resource is not available (system_info could not find it). - - Several *_info classes specify an environment variable to specify - the locations of software. When setting the corresponding environment - variable to 'None' then the software will be ignored, even when it - is available in system. - -Global parameters: - system_info.search_static_first - search static libraries (.a) - in precedence to shared ones (.so, .sl) if enabled. - system_info.verbosity - output the results to stdout if enabled. - -The file 'site.cfg' is looked for in - -1) Directory of main setup.py file being run. -2) Home directory of user running the setup.py file as ~/.numpy-site.cfg -3) System wide directory (location of this file...) 
- -The first one found is used to get system configuration options The -format is that used by ConfigParser (i.e., Windows .INI style). The -section ALL has options that are the default for each section. The -available sections are fftw, atlas, and x11. Appropiate defaults are -used if nothing is specified. - -The order of finding the locations of resources is the following: - 1. environment variable - 2. section in site.cfg - 3. ALL section in site.cfg -Only the first complete match is returned. - -Example: ----------- -[ALL] -library_dirs = /usr/lib:/usr/local/lib:/opt/lib -include_dirs = /usr/include:/usr/local/include:/opt/include -src_dirs = /usr/local/src:/opt/src -# search static libraries (.a) in preference to shared ones (.so) -search_static_first = 0 - -[fftw] -fftw_libs = rfftw, fftw -fftw_opt_libs = rfftw_threaded, fftw_threaded -# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs - -[atlas] -library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas -# for overriding the names of the atlas libraries -atlas_libs = lapack, f77blas, cblas, atlas - -[x11] -library_dirs = /usr/X11R6/lib -include_dirs = /usr/X11R6/include ----------- - -Authors: - Pearu Peterson , February 2002 - David M. Cooke , April 2002 - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import re -import copy -import warnings -from glob import glob -from functools import reduce -if sys.version_info[0] < 3: - from ConfigParser import NoOptionError, ConfigParser -else: - from configparser import NoOptionError, ConfigParser - -from distutils.errors import DistutilsError -from distutils.dist import Distribution -import distutils.sysconfig -from distutils import log -from distutils.util import get_platform - -from numpy.distutils.exec_command import \ - find_executable, exec_command, get_pythonexe -from numpy.distutils.misc_util import is_sequence, is_string, \ - get_shared_lib_extension -from numpy.distutils.command.config import config as cmd_config -from numpy.distutils.compat import get_exception -import distutils.ccompiler -import tempfile -import shutil - - -# Determine number of bits -import platform -_bits = {'32bit': 32, '64bit': 64} -platform_bits = _bits[platform.architecture()[0]] - - -def libpaths(paths, bits): - """Return a list of library paths valid on 32 or 64 bit systems. - - Inputs: - paths : sequence - A sequence of strings (typically paths) - bits : int - An integer, the only valid values are 32 or 64. A ValueError exception - is raised otherwise. 
- - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits == 32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p + '64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(distutils.sysconfig.EXEC_PREFIX, - 'libs')] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] -else: - default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'], platform_bits) - default_include_dirs = ['/usr/local/include', - '/opt/include', '/usr/include', - # path of umfpack under macports - '/opt/local/include/ufsparse', - '/opt/local/include', '/sw/include', - '/usr/include/suitesparse'] - default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] - - default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', - '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include', - '/usr/include'] - - if os.path.exists('/usr/lib/X11'): - globbed_x11_dir = glob('/usr/lib/*/libX11.so') - if globbed_x11_dir: - x11_so_dir = os.path.split(globbed_x11_dir[0])[0] - default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) - default_x11_include_dirs.extend(['/usr/lib/X11/include', - '/usr/include/X11']) - - import subprocess as sp - tmp = None - try: - # Explicitly open/close file to avoid ResourceWarning when - # tests are run in debug mode Python 3. - tmp = open(os.devnull, 'w') - p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE, - stderr=tmp) - except (OSError, DistutilsError): - # OSError if gcc is not installed, or SandboxViolation (DistutilsError - # subclass) if an old setuptools bug is triggered (see gh-3160). 
- pass - else: - triplet = str(p.communicate()[0].decode().strip()) - if p.returncode == 0: - # gcc supports the "-print-multiarch" option - default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] - default_lib_dirs += [os.path.join("/usr/lib/", triplet)] - finally: - if tmp is not None: - tmp.close() - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] -default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] -default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] - -so_ext = get_shared_lib_extension() - - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - else: - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.path.expanduser('~') - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - - -def get_info(name, notfound_action=0): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads': atlas_threads_info, # ditto - 'atlas_blas': atlas_blas_info, - 'atlas_blas_threads': atlas_blas_threads_info, - 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto - 'mkl': mkl_info, - # openblas which may or may not have embedded lapack - 'openblas': openblas_info, # use blas_opt instead - # openblas with embedded lapack - 'openblas_lapack': openblas_lapack_info, # use blas_opt instead - 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead - 'blas_mkl': blas_mkl_info, # use blas_opt instead - 'x11': x11_info, - 'fft_opt': fft_opt_info, - 'fftw': fftw_info, - 'fftw2': fftw2_info, - 'fftw3': fftw3_info, - 'dfftw': dfftw_info, - 'sfftw': sfftw_info, - 'fftw_threads': fftw_threads_info, - 'dfftw_threads': dfftw_threads_info, - 'sfftw_threads': sfftw_threads_info, - 'djbfft': djbfft_info, - 'blas': blas_info, # use blas_opt instead - 'lapack': lapack_info, # use lapack_opt instead - 'lapack_src': lapack_src_info, - 'blas_src': blas_src_info, - 'numpy': numpy_info, - 'f2py': f2py_info, - 'Numeric': Numeric_info, - 'numeric': Numeric_info, - 'numarray': numarray_info, - 'numerix': numerix_info, - 'lapack_opt': lapack_opt_info, - 'blas_opt': blas_opt_info, - 'boost_python': boost_python_info, - 'agg2': agg2_info, - 'wx': wx_info, - 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2': gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, - 'gdk': gdk_info, - 'gdk_2': gdk_2_info, - 'gdk-2.0': gdk_2_info, - 'gdk_x11_2': gdk_x11_2_info, - 'gdk-x11-2.0': gdk_x11_2_info, - 'gtkp_x11_2': gtkp_x11_2_info, - 'gtk+-x11-2.0': gtkp_x11_2_info, - 'gtkp_2': 
gtkp_2_info, - 'gtk+-2.0': gtkp_2_info, - 'xft': xft_info, - 'freetype2': freetype2_info, - 'umfpack': umfpack_info, - 'amd': amd_info, - }.get(name.lower(), system_info) - return cl().get_info(notfound_action) - - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://math-atlas.sourceforge.net/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (http://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - - -class NumericNotFoundError(NotFoundError): - """ - Numeric (http://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - - -class system_info: - - """ get_info() is the only public method. Don't use others. - """ - section = 'ALL' - dir_env_var = None - search_static_first = 0 # XXX: disabled by default, may disappear in - # future unless it is proved to be useful. 
- verbosity = 1 - saved_results = {} - - notfounderror = NotFoundError - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity=1, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {} - defaults['library_dirs'] = os.pathsep.join(default_lib_dirs) - defaults['include_dirs'] = os.pathsep.join(default_include_dirs) - defaults['src_dirs'] = os.pathsep.join(default_src_dirs) - defaults['search_static_first'] = str(self.search_static_first) - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - if self.section is not None: - self.search_static_first = self.cp.getboolean( - self.section, 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - info = {} - for lib in libs: - i = self.check_libs(dirs, [lib]) - if i is not None: - dict_append(info, **i) - else: - log.info('Library %s was not found. Ignoring' % (lib)) - return info - - def set_info(self, **info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info, **lib_info) - self.saved_results[self.__class__.__name__] = info - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def get_info(self, notfound_action=0): - """ Return a dictionary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action == 1: - warnings.warn(self.notfounderror.__doc__) - elif notfound_action == 2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if self.verbosity > 0 and flag: - for k, v in res.items(): - v = str(v) - if k in ['sources', 'libraries'] and len(v) > 270: - v = v[:120] + '...\n...\n...'
+ v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0] == e0: - log.info('Setting %s=%s' % (env_var[0], e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d == 'None': - log.info('Disabled %s: %s', - self.__class__.__name__, '(%s is None)' - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self, '_lib_names', []) - if len(l) == 1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3] == 'lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include', 'lib']: - d1 = os.path.join(d, dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if not os.path.isdir(d): - warnings.warn('Specified path %s is invalid.' % d) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - return self.get_libs(key, '') - - def library_extensions(self): - static_exts = ['.a'] - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - # Debian and Ubuntu added a 3gf suffix to shared libraries to deal with - # the g77 -> gfortran ABI transition - # XXX: disabled, it hides more problems than it solves. - #if sys.platform[:5] == 'linux': - # exts.append('.so.3gf') - return exts - - def check_libs(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). - """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def check_libs2(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static.
- """ - exts = self.library_extensions() - info = self._check_libs(lib_dirs, libs, opt_libs, exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def _lib_list(self, lib_dir, libs, exts): - assert is_string(lib_dir) - liblist = [] - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for l in libs: - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix + l + ext) - if p: - break - if p: - assert len(p) == 1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - l += '.dll' - liblist.append(l) - break - return liblist - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Find mandatory and optional libs in expected paths. - - Missing optional libraries are silently forgotten. - """ - # First, try to find the mandatory libraries - if is_sequence(lib_dirs): - found_libs, found_dirs = [], [] - for dir_ in lib_dirs: - found_libs1 = self._lib_list(dir_, libs, exts) - # It's possible that we'll find the same library in multiple - # directories. It's also possible that we'll find some - # libraries on in directory, and some in another. So the - # obvious thing would be to use a set instead of a list, but I - # don't know if preserving order matters (does it?). - for found_lib in found_libs1: - if found_lib not in found_libs: - found_libs.append(found_lib) - if dir_ not in found_dirs: - found_dirs.append(dir_) - else: - found_libs = self._lib_list(lib_dirs, libs, exts) - found_dirs = [lib_dirs] - if len(found_libs) > 0 and len(found_libs) == len(libs): - info = {'libraries': found_libs, 'library_dirs': found_dirs} - # Now, check for optional libraries - if is_sequence(lib_dirs): - for dir_ in lib_dirs: - opt_found_libs = self._lib_list(dir_, opt_libs, exts) - if opt_found_libs: - if dir_ not in found_dirs: - found_dirs.extend(dir_) - found_libs.extend(opt_found_libs) - else: - opt_found_libs = self._lib_list(lib_dirs, opt_libs, exts) - if opt_found_libs: - found_libs.extend(opt_found_libs) - return info - else: - return None - - def combine_paths(self, *args): - """Return a list of existing paths composed by all combinations - of items from the arguments. 
- """ - return combine_paths(*args, **{'verbosity': self.verbosity}) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info, **fftw_info) - if djbfft_info: - dict_append(info, **djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - {'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]}] - - def calc_ver_info(self, ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - incl_dir = None - libs = self.get_libs(self.section + '_libs', ver_param['libs']) - info = self.check_libs(lib_dirs, libs) - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d, ver_param['includes'])) \ - == len(ver_param['includes']): - dict_append(info, include_dirs=[d]) - flag = 1 - incl_dirs = [d] - break - if flag: - dict_append(info, define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]} - ] - - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - ] - - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw', - 'libs':['drfftw', 'dfftw'], - 'includes':['dfftw.h', 'drfftw.h'], - 'macros':[('SCIPY_DFFTW_H', None)]}] - - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw', - 'libs':['srfftw', 'sfftw'], - 'includes':['sfftw.h', 'srfftw.h'], - 'macros':[('SCIPY_SFFTW_H', None)]}] - - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'fftw threads', - 'libs':['rfftw_threads', 'fftw_threads'], - 'includes':['fftw_threads.h', 'rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] - - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw threads', - 'libs':['drfftw_threads', 'dfftw_threads'], - 'includes':['dfftw_threads.h', 'drfftw_threads.h'], - 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] - - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw threads', - 'libs':['srfftw_threads', 'sfftw_threads'], - 'includes':['sfftw_threads.h', 'srfftw_threads.h'], - 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] - - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = 
system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths(d, ['djbfft.a']) - if p: - info = {'extra_objects': p} - break - p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) - if p: - info = {'libraries': ['djbfft'], 'library_dirs': [d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: - dict_append(info, include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H', None)]) - self.set_info(**info) - return - return - - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKL' - _lib_mkl = ['mkl', 'vml', 'guide'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT', None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - for d in open(ld_so_conf, 'r'): - d = d.strip() - if d: - paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d, 'mkl', '*')) - dirs += glob(os.path.join(d, 'mkl*')) - for d in dirs: - if os.path.isdir(os.path.join(d, 'lib')): - return d - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from .cpuinfo import cpu - l = 'mkl' # use shared library - if cpu.is_Itanium(): - plt = '64' - #l = 'mkl_ipf' - elif cpu.is_Xeon(): - plt = 'em64t' - #l = 'mkl_em64t' - else: - plt = '32' - #l = 'mkl_ia32' - if l not in self._lib_mkl: - self._lib_mkl.insert(0, l) - system_info.__init__( - self, - default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], - default_include_dirs=[os.path.join(mklroot, 'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - mkl_libs = self.get_libs('mkl_libs', self._lib_mkl) - info = self.check_libs2(lib_dirs, mkl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None)], - include_dirs=incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - - -class lapack_mkl_info(mkl_info): - - def calc_info(self): - mkl = get_info('mkl') - if not mkl: - return - if sys.platform == 'win32': - lapack_libs = self.get_libs('lapack_libs', ['mkl_lapack']) - else: - lapack_libs = self.get_libs('lapack_libs', - ['mkl_lapack32', 'mkl_lapack64']) - - info = {'libraries': lapack_libs} - dict_append(info, **mkl) - self.set_info(**info) - - -class blas_mkl_info(mkl_info): - pass - - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas', 'cblas'] - if sys.platform[:7] == 'freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - _lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', - 'sse', '3dnow', 'sse2']) + [d]) - 
return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - atlas_libs = self.get_libs('atlas_libs', - self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - atlas = self.check_libs2(d, atlas_libs, []) - lapack_atlas = self.check_libs2(d, ['lapack_atlas'], []) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) - lapack = self.check_libs2(lib_dirs2, lapack_libs, []) - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info, **lapack) - dict_append(info, **atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info, **atlas) - dict_append(info, - define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) - self.set_info(**info) - return - else: - dict_append(info, **atlas) - dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) - message = """ -********************************************************************* - Could not find lapack library within the ATLAS installation. -********************************************************************* -""" - warnings.warn(message) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. - lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir, prefix + lapack_name + e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000 * 1024: - message = """ -********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. 
-********************************************************************* -""" % (lapack_lib, sz / 1024) - warnings.warn(message) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas', 'cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - atlas_libs = self.get_libs('atlas_libs', - self._lib_names + self._lib_atlas) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - lapack_libs = self.get_libs('lapack_libs', self._lib_names) - info = self.check_libs(lib_dirs, lapack_libs, []) - if info is None: - return - info['language'] = 'f77' - self.set_info(**info) - - -class lapack_src_info(system_info): - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. Maybe ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility.
- allaux = ''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ - + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ - + ['c%s.f' % f for f in (clasrc).split()] \ - + ['z%s.f' % f for f in (zlasrc).split()] \ - + ['%s.f' % f for f 
in (allaux + oclasrc + ozlasrc).split()] - sources = [os.path.join(src_dir, f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir, '..', 'INSTALL') - sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: - sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} - - -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs, - use_tee=(system_info.verbosity > 0)) - if s and re.search(r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], - library_dirs=library_dirs, - use_tee=(system_info.verbosity > 0)) - if not s: - warnings.warn(""" -***************************************************** -Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... - -when building extension libraries that use ATLAS. -Make sure that -lgfortran is used for C++ extensions. 
-***************************************************** -""") - dict_append(info, language='f90', - define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) - except Exception: # failed to get version from file -- maybe on Windows - # look at directory name - for o in library_dirs: - m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o) - if m: - atlas_version = m.group('version') - if atlas_version is not None: - break - - # final choice --- look at ATLAS_VERSION environment - # variable - if atlas_version is None: - atlas_version = os.environ.get('ATLAS_VERSION', None) - if atlas_version: - dict_append(info, define_macros=[( - 'ATLAS_INFO', '"\\"%s\\""' % atlas_version) - ]) - else: - dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) - return atlas_version or '?.?.?', info - - if not s: - m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o) - if m: - atlas_version = m.group('version') - if atlas_version is None: - if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): - atlas_version = '3.2.1_pre3.3.6' - else: - log.info('Status: %d', s) - log.info('Output: %s', o) - - if atlas_version == '3.2.1_pre3.3.6': - dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) - else: - dict_append(info, define_macros=[( - 'ATLAS_INFO', '"\\"%s\\""' % atlas_version) - ]) - result = _cached_atlas_version[key] = atlas_version, info - return result - - - -class lapack_opt_info(system_info): - - notfounderror = LapackNotFoundError - - def calc_info(self): - - openblas_info = get_info('openblas_lapack') - if openblas_info: - self.set_info(**openblas_info) - return - - lapack_mkl_info = get_info('lapack_mkl') - if lapack_mkl_info: - self.set_info(**lapack_mkl_info) - return - - atlas_info = get_info('atlas_threads') - if not atlas_info: - atlas_info = get_info('atlas') - - if sys.platform == 'darwin' and not atlas_info: - # Use the system lapack from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3)]) - return - - #atlas_info = {} ## uncomment for testing - need_lapack = 0 - need_blas = 0 - info = {} - if atlas_info: - l = atlas_info.get('define_macros', []) - if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ - or ('ATLAS_WITHOUT_LAPACK', None) in l: - need_lapack = 1 - info = atlas_info - - else: - warnings.warn(AtlasNotFoundError.__doc__) - need_blas = 1 - need_lapack = 1 - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - - if need_lapack: - lapack_info = get_info('lapack') - #lapack_info = {} ## uncomment for testing - if lapack_info: - dict_append(info, **lapack_info) - else: - warnings.warn(LapackNotFoundError.__doc__) - lapack_src_info = get_info('lapack_src') - if not lapack_src_info: - warnings.warn(LapackSrcNotFoundError.__doc__) - return - dict_append(info, libraries=[('flapack_src', lapack_src_info)]) - - if need_blas: - blas_info = get_info('blas') - #blas_info = {} ## uncomment for testing -
if blas_info: - dict_append(info, **blas_info) - else: - warnings.warn(BlasNotFoundError.__doc__) - blas_src_info = get_info('blas_src') - if not blas_src_info: - warnings.warn(BlasSrcNotFoundError.__doc__) - return - dict_append(info, libraries=[('fblas_src', blas_src_info)]) - - self.set_info(**info) - return - - -class blas_opt_info(system_info): - - notfounderror = BlasNotFoundError - - def calc_info(self): - - blas_mkl_info = get_info('blas_mkl') - if blas_mkl_info: - self.set_info(**blas_mkl_info) - return - - openblas_info = get_info('openblas') - if openblas_info: - self.set_info(**openblas_info) - return - - atlas_info = get_info('atlas_blas_threads') - if not atlas_info: - atlas_info = get_info('atlas_blas') - - if sys.platform == 'darwin' and not atlas_info: - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/'): - if intel: - args.extend(['-msse3']) - else: - args.extend(['-faltivec']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - if args: - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=[('NO_ATLAS_INFO', 3)]) - return - - need_blas = 0 - info = {} - if atlas_info: - info = atlas_info - else: - warnings.warn(AtlasNotFoundError.__doc__) - need_blas = 1 - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - - if need_blas: - blas_info = get_info('blas') - if blas_info: - dict_append(info, **blas_info) - else: - warnings.warn(BlasNotFoundError.__doc__) - blas_src_info = get_info('blas_src') - if not blas_src_info: - warnings.warn(BlasSrcNotFoundError.__doc__) - return - dict_append(info, libraries=[('fblas_src', blas_src_info)]) - - self.set_info(**info) - return - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - blas_libs = self.get_libs('blas_libs', self._lib_names) - info = self.check_libs(lib_dirs, blas_libs, []) - if info is None: - return - info['language'] = 'f77' # XXX: is it generally true? - self.set_info(**info) - - -class openblas_info(blas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - notfounderror = BlasNotFoundError - - def check_embedded_lapack(self, info): - return True - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - openblas_libs = self.get_libs('libraries', self._lib_names) - if openblas_libs == self._lib_names: # backward compat with 1.8.0 - openblas_libs = self.get_libs('openblas_libs', self._lib_names) - info = self.check_libs(lib_dirs, openblas_libs, []) - if info is None: - return - - if not self.check_embedded_lapack(info): - return None - - info['language'] = 'f77' # XXX: is it generally true? 
- self.set_info(**info) - - -class openblas_lapack_info(openblas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - notfounderror = BlasNotFoundError - - def check_embedded_lapack(self, info): - res = False - c = distutils.ccompiler.new_compiler() - tmpdir = tempfile.mkdtemp() - s = """void zungqr_(); - int main(int argc, const char *argv[]) - { - zungqr_(); - return 0; - }""" - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - try: - with open(src, 'wt') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs']) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) - return res - - -class blas_src_info(system_info): - section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['blas'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. Maybe ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir, f + '.f') \ - for f in (blas1 + blas2 + blas3).split()] - #XXX: should we check here actual existence of source files?
- sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - x11_libs = self.get_libs('x11_libs', ['X11']) - info = self.check_libs(lib_dirs, x11_libs, []) - if info is None: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name == 'lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting - # anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(distutils.sysconfig.get_python_inc( - prefix=os.sep.join(prefix))) - except ImportError: - pass - py_incl_dir = distutils.sysconfig.get_python_inc() - include_dirs.append(py_incl_dir) - py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) - if py_pincl_dir not in include_dirs: - include_dirs.append(py_pincl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except ImportError: - return - info = {} - macros = [] - for v in ['__version__', 'version']: - vrs = getattr(module, v, None) - if vrs is None: - continue - macros = [(self.modulename.upper() + '_VERSION', - '"\\"%s\\""' % (vrs)), - (self.modulename.upper(), None)] - break -## try: -## macros.append( -## (self.modulename.upper()+'_VERSION_HEX', -## hex(vstr2hex(module.__version__))), -## ) -## except Exception as msg: -## print msg - dict_append(info, define_macros=macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - - -class numerix_info(system_info): - section = 'numerix' - - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. 
- if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy - which = "numpy", "defaulted" - except ImportError: - msg1 = str(get_exception()) - try: - import Numeric - which = "numeric", "defaulted" - except ImportError: - msg2 = str(get_exception()) - try: - import numarray - which = "numarray", "defaulted" - except ImportError: - msg3 = str(get_exception()) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." % (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') - self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], - include_dirs=[f2py_dir]) - return - - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['boost*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', - 'module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dirs = [distutils.sysconfig.get_python_inc()] - py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) - if py_pincl_dir not in py_incl_dirs: - py_incl_dirs.append(py_pincl_dir) - srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') - bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) - info = {'libraries': [('boost_python_src', - {'include_dirs': [src_dir] + py_incl_dirs, - 'sources':bpl_srcs} - )], - 'include_dirs': [src_dir], - } - if info: - self.set_info(**info) - return - - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['agg2*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): - src_dir = d - break - if not src_dir: - return - if sys.platform == 'win32': - agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', - 'win32', 'agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) - agg2_srcs += [os.path.join(src_dir, 'src', 'platform', - 'X11', - 'agg_platform_support.cpp')] - - info = {'libraries': - [('agg2_src', - {'sources': agg2_srcs, - 'include_dirs': [os.path.join(src_dir, 'include')], - } - )], - 'include_dirs': [os.path.join(src_dir, 'include')], - } - if info: - self.set_info(**info) - return - - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - 
return os.environ[self.config_env_var] - return self.default_config_exe - - def get_config_output(self, config_exe, option): - cmd = config_exe + ' ' + self.append_config_exe + ' ' + option - s, o = exec_command(cmd, use_tee=0) - if not s: - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' \ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe, self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - '"\\"%s\\""' % (version))) - if self.version_macro_name: - macros.append((self.version_macro_name + '_%s' - % (version.replace('.', '_')), None)) - if self.release_macro_name: - release = self.get_config_output(config_exe, '--release') - if release: - macros.append((self.release_macro_name + '_%s' - % (release.replace('.', '_')), None)) - opts = self.get_config_output(config_exe, '--libs') - if opts: - for opt in opts.split(): - if opt[:2] == '-l': - libraries.append(opt[2:]) - elif opt[:2] == '-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe, self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2] == '-I': - include_dirs.append(opt[2:]) - elif opt[:2] == '-D': - if '=' in opt: - n, v = opt[2:].split('=') - macros.append((n, v)) - else: - macros.append((opt[2:], None)) - else: - extra_compile_args.append(opt) - if macros: - dict_append(info, define_macros=macros) - if libraries: - dict_append(info, libraries=libraries) - if library_dirs: - dict_append(info, library_dirs=library_dirs) - if include_dirs: - dict_append(info, include_dirs=include_dirs) - if extra_link_args: - dict_append(info, extra_link_args=extra_link_args) - if extra_compile_args: - dict_append(info, extra_compile_args=extra_compile_args) - if info: - self.set_info(**info) - return - - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - - -class gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - version_macro_name = 'XFT_VERSION' - - -class freetype2_info(_pkg_config_info): - section = 
'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - amd_libs = self.get_libs('amd_libs', self._lib_names) - info = self.check_libs(lib_dirs, amd_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, 'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H', None)], - swig_opts=['-I' + inc_dir]) - - self.set_info(**info) - return - - -class umfpack_info(system_info): - section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - umfpack_libs = self.get_libs('umfpack_libs', self._lib_names) - info = self.check_libs(lib_dirs, umfpack_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H', None)], - swig_opts=['-I' + inc_dir]) - - amd = get_info('amd') - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - -## def vstr2hex(version): -## bits = [] -## n = [24,16,8,4,0] -## r = 0 -## for s in version.split('.'): -## r |= int(s) << n[0] -## del n[0] -## return r - -#-------------------------------------------------------------------- - - -def combine_paths(*args, **kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. 
- """ - r = [] - for a in args: - if not a: - continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: - return [] - if len(args) == 1: - result = reduce(lambda a, b: a + b, map(glob, args[0]), []) - elif len(args) == 2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0, a1))) - else: - result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) - verbosity = kws.get('verbosity', 1) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} -inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} - - -def dict_append(d, **kws): - languages = [] - for k, v in kws.items(): - if k == 'language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs', 'include_dirs', 'define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l, 0) for l in languages])] - d['language'] = l - return - - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.items(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - r = conf.get_info() - if show_only: - log.info('Info classes not defined: %s', ','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8fa..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/setup.py deleted file mode 100644 index bb7d4bc1c8c8d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_ext', parent_package, top_path) - config.add_extension('fib2', ['src/fib2.pyf', 'src/fib1.f']) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib1.f b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib1.f deleted file 
mode 100644 index cfbb1eea0df7a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib1.f +++ /dev/null @@ -1,18 +0,0 @@ -C FILE: FIB1.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB1.F diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib2.pyf b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib2.pyf deleted file mode 100644 index 90a8cf00cb47e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/src/fib2.pyf +++ /dev/null @@ -1,9 +0,0 @@ -! -*- f90 -*- -python module fib2 - interface - subroutine fib(a,n) - real*8 dimension(n),intent(out),depend(n) :: a - integer intent(in) :: n - end subroutine fib - end interface -end python module fib2 diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/tests/test_fib2.py deleted file mode 100644 index 5252db2830d1b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_ext/tests/test_fib2.py +++ /dev/null @@ -1,13 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.testing import * -from f2py_ext import fib2 - -class TestFib2(TestCase): - - def test_fib(self): - assert_array_equal(fib2.fib(6), [0, 1, 1, 2, 3, 5]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8fa..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/include/body.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/include/body.f90 deleted file mode 100644 index 90b44e29dc850..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/include/body.f90 +++ /dev/null @@ -1,5 +0,0 @@ - subroutine bar13(a) - !f2py intent(out) a - integer a - a = 13 - end subroutine bar13 diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/setup.py deleted file mode 100644 index 7cca81637c578..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_f90_ext', parent_package, top_path) - config.add_extension('foo', - ['src/foo_free.f90'], - include_dirs=['include'], - f2py_options=['--include_paths', - config.paths('include')[0]] - ) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 deleted file mode 100644 index c7713be59e169..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 +++ /dev/null @@ -1,6 +0,0 @@ -module foo_free -contains - -include "body.f90" - -end module foo_free diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py deleted file mode 100644 index 9653b9023cd2b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.testing import * -from f2py_f90_ext import foo - -class TestFoo(TestCase): - def test_foo_free(self): - assert_equal(foo.foo_free.bar13(), 13) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8fa..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/setup.py deleted file mode 100644 index de6b941e07f03..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/setup.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -fib3_f = ''' -C FILE: FIB3.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) -Cf2py intent(in) n -Cf2py intent(out) a -Cf2py depend(n) a - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB3.F -''' - -def source_func(ext, build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir, 'fib3.f') - if newer(__file__, target): - f = open(target, 'w') - f.write(fib3_f) - f.close() - return [target] - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('gen_ext', parent_package, top_path) - config.add_extension('fib3', - [source_func] - ) - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/tests/test_fib3.py deleted file mode 100644 index 5fd9be439485a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/gen_ext/tests/test_fib3.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.testing import * -from gen_ext import fib3 - -class TestFib3(TestCase): - def test_fib(self): - assert_array_equal(fib3.fib(6), [0, 1, 1, 2, 3, 5]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/__init__.py 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8fa..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/primes.pyx b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/primes.pyx deleted file mode 100644 index 2ada0c5a08d4f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/primes.pyx +++ /dev/null @@ -1,22 +0,0 @@ -# -# Calculate prime numbers -# - -def primes(int kmax): - cdef int n, k, i - cdef int p[1000] - result = [] - if kmax > 1000: - kmax = 1000 - k = 0 - n = 2 - while k < kmax: - i = 0 - while i < k and n % p[i] <> 0: - i = i + 1 - if i == k: - p[k] = n - k = k + 1 - result.append(n) - n = n + 1 - return result diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/setup.py deleted file mode 100644 index 819dd3154a11b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('pyrex_ext', parent_package, top_path) - config.add_extension('primes', - ['primes.pyx']) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/tests/test_primes.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/tests/test_primes.py deleted file mode 100644 index c9fdd6c6d5c4d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/pyrex_ext/tests/test_primes.py +++ /dev/null @@ -1,14 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.testing import * -from pyrex_ext.primes import primes - -class TestPrimes(TestCase): - def test_simple(self, level=1): - l = primes(10) - assert_equal(l, [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/setup.py deleted file mode 100644 index 135de7c470d5c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/setup.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('testnumpydistutils', parent_package, top_path) - config.add_subpackage('pyrex_ext') - config.add_subpackage('f2py_ext') - #config.add_subpackage('f2py_f90_ext') - config.add_subpackage('swig_ext') - config.add_subpackage('gen_ext') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/__init__.py deleted file mode 100644 
index 1d0f69b67d8fa..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/setup.py deleted file mode 100644 index f6e07303bea64..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('swig_ext', parent_package, top_path) - config.add_extension('_example', - ['src/example.i', 'src/example.c'] - ) - config.add_extension('_example2', - ['src/zoo.i', 'src/zoo.cc'], - depends=['src/zoo.h'], - include_dirs=['src'] - ) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/example.i b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/example.i deleted file mode 100644 index f4fc11e663701..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/example.i +++ /dev/null @@ -1,14 +0,0 @@ -/* -*- c -*- */ - -/* File : example.i */ -%module example -%{ -/* Put headers and other declarations here */ -extern double My_variable; -extern int fact(int); -extern int my_mod(int n, int m); -%} - -extern double My_variable; -extern int fact(int); -extern int my_mod(int n, int m); diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.cc b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.cc deleted file mode 100644 index 0a643d1e5d4f2..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.cc +++ /dev/null @@ -1,23 +0,0 @@ -#include "zoo.h" -#include -#include - -Zoo::Zoo() -{ - n = 0; -} - -void Zoo::shut_up(char *animal) -{ - if (n < 10) { - strcpy(animals[n], animal); - n++; - } -} - -void Zoo::display() -{ - int i; - for(i = 0; i < n; i++) - printf("%s\n", animals[i]); -} diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.h deleted file mode 100644 index cb26e6ceff5df..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.h +++ /dev/null @@ -1,9 +0,0 @@ - -class Zoo{ - int n; - char animals[10][50]; -public: - Zoo(); - void shut_up(char *animal); - void display(); -}; diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.i b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.i deleted file mode 100644 index a029c03e844b6..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/src/zoo.i +++ /dev/null @@ -1,10 +0,0 @@ -// -*- c++ -*- -// Example copied from http://linuxgazette.net/issue49/pramode.html - -%module example2 - -%{ -#include "zoo.h" -%} - -%include "zoo.h" diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example.py deleted file mode 100644 index 
e81f98b1de200..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.testing import * -from swig_ext import example - -class TestExample(TestCase): - def test_fact(self): - assert_equal(example.fact(10), 3628800) - - def test_cvar(self): - assert_equal(example.cvar.My_variable, 3.0) - example.cvar.My_variable = 5 - assert_equal(example.cvar.My_variable, 5.0) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example2.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example2.py deleted file mode 100644 index 82daed72894f9..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/swig_ext/tests/test_example2.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.testing import * -from swig_ext import example2 - -class TestExample2(TestCase): - def test_zoo(self): - z = example2.Zoo() - z.shut_up('Tiger') - z.shut_up('Lion') - z.display() - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_exec_command.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_exec_command.py deleted file mode 100644 index 0931f749b39c2..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_exec_command.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -from tempfile import TemporaryFile - -from numpy.distutils import exec_command - -# In python 3 stdout, stderr are text (unicode compliant) devices, so to -# emulate them import StringIO from the io module. -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -class redirect_stdout(object): - """Context manager to redirect stdout for exec_command test.""" - def __init__(self, stdout=None): - self._stdout = stdout or sys.stdout - - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self._stdout - - def __exit__(self, exc_type, exc_value, traceback): - self._stdout.flush() - sys.stdout = self.old_stdout - # note: closing sys.stdout won't close it. - self._stdout.close() - -class redirect_stderr(object): - """Context manager to redirect stderr for exec_command test.""" - def __init__(self, stderr=None): - self._stderr = stderr or sys.stderr - - def __enter__(self): - self.old_stderr = sys.stderr - sys.stderr = self._stderr - - def __exit__(self, exc_type, exc_value, traceback): - self._stderr.flush() - sys.stderr = self.old_stderr - # note: closing sys.stderr won't close it. - self._stderr.close() - -class emulate_nonposix(object): - """Context manager to emulate os.name != 'posix' """ - def __init__(self, osname='non-posix'): - self._new_name = osname - - def __enter__(self): - self._old_name = os.name - os.name = self._new_name - - def __exit__(self, exc_type, exc_value, traceback): - os.name = self._old_name - - -def test_exec_command_stdout(): - # Regression test for gh-2999 and gh-2915. - # There are several packages (nose, scipy.weave.inline, Sage inline - # Fortran) that replace stdout, in which case it doesn't have a fileno - # method. 
This is tested here, with a do-nothing command that fails if the - # presence of fileno() is assumed in exec_command. - - # The code has a special case for posix systems, so if we are on posix test - # both that the special case works and that the generic code works. - - # Test posix version: - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - exec_command.exec_command("cd '.'") - -def test_exec_command_stderr(): - # Test posix version: - with redirect_stdout(TemporaryFile(mode='w+')): - with redirect_stderr(StringIO()): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(TemporaryFile()): - with redirect_stderr(StringIO()): - exec_command.exec_command("cd '.'") diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index a0d191819cc10..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), - ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), -] - -class TestG77Versions(TestCase): - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - -class TestGortranVersions(TestCase): - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - - -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_intel.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index eda209ebe060c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,36 +0,0 @@ -from __future__ import division, absolute_import, 
print_function - -from numpy.testing import * - -import numpy.distutils.fcompiler - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"\ - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications"\ - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"\ - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions(TestCase): - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions(TestCase): - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_misc_util.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index fd6af638fb416..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy.distutils.misc_util import appendpath, minrelpath, \ - gpaths, get_shared_lib_extension -from os.path import join, sep, dirname - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath(TestCase): - - def test_1(self): - assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) - assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) - assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) - assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub', 'name'), - join('prefix', 'sub', 'name')) - assert_equal(appendpath('prefix/sub', 'sup/name'), - join('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub', '/prefix/name'), - ajoin('prefix', 'sub', 'name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), - ajoin('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) - -class TestMinrelpath(TestCase): - - def test_1(self): - n = lambda path: path.replace('/', sep) - assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) - assert_equal(minrelpath('..'), '..') - assert_equal(minrelpath(n('aa/..')), '') - assert_equal(minrelpath(n('aa/../bb')), 'bb') - assert_equal(minrelpath(n('aa/bb/..')), 'aa') - assert_equal(minrelpath(n('aa/bb/../..')), '') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) - assert_equal(minrelpath(n('.././..')), n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) - -class TestGpaths(TestCase): - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__), '..')) - ls = gpaths('command/*.py', local_path) - assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) - f = gpaths('system_info.py', local_path) - 
assert_(join(local_path, 'system_info.py')==f[0], repr(f)) - -class TestSharedExtension(TestCase): - - def test_get_shared_lib_extension(self): - import sys - ext = get_shared_lib_extension(is_python_ext=False) - if sys.platform.startswith('linux'): - assert_equal(ext, '.so') - elif sys.platform.startswith('gnukfreebsd'): - assert_equal(ext, '.so') - elif sys.platform.startswith('darwin'): - assert_equal(ext, '.dylib') - elif sys.platform.startswith('win'): - assert_equal(ext, '.dll') - # just check for no crash - assert_(get_shared_lib_extension(is_python_ext=True)) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_npy_pkg_config.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index 5443ece485b2a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,98 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -from tempfile import mkstemp - -from numpy.testing import * -from numpy.distutils.npy_pkg_config import read_config, parse_flags - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo(TestCase): - def test_simple(self): - fd, filename = mkstemp('foo.ini') - try: - pkg = os.path.splitext(filename)[0] - try: - os.write(fd, simple.encode('ascii')) - finally: - os.close(fd) - - out = read_config(pkg) - self.assertTrue(out.cflags() == simple_d['cflags']) - self.assertTrue(out.libs() == simple_d['libflags']) - self.assertTrue(out.name == simple_d['name']) - self.assertTrue(out.version == simple_d['version']) - finally: - os.remove(filename) - - def test_simple_variable(self): - fd, filename = mkstemp('foo.ini') - try: - pkg = os.path.splitext(filename)[0] - try: - os.write(fd, simple_variable.encode('ascii')) - finally: - os.close(fd) - - out = read_config(pkg) - self.assertTrue(out.cflags() == simple_variable_d['cflags']) - self.assertTrue(out.libs() == simple_variable_d['libflags']) - self.assertTrue(out.name == simple_variable_d['name']) - self.assertTrue(out.version == simple_variable_d['version']) - - out.vars['prefix'] = '/Users/david' - self.assertTrue(out.cflags() == '-I/Users/david/include') - finally: - os.remove(filename) - -class TestParseFlags(TestCase): - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - self.assertTrue(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - self.assertTrue(d['include_dirs'] == ['/usr/include']) - self.assertTrue(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - self.assertTrue(d['include_dirs'] == ['/usr/include']) - self.assertTrue(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - self.assertTrue(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L 
/usr/lib -lfoo -L/usr/lib -lbar") - self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - self.assertTrue(d['libraries'] == ['foo', 'bar']) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/unixccompiler.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/unixccompiler.py deleted file mode 100644 index 955407aa0384f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. - -""" -from __future__ import division, absolute_import, print_function - -import os - -from distutils.errors import DistutilsExecError, CompileError -from distutils.unixccompiler import * -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.compat import get_exception - -if sys.version_info[0] < 3: - from . import log -else: - from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + - extra_postargs, display = display) - except DistutilsExecError: - msg = str(get_exception()) - raise CompileError(msg) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. - - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. - - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. - # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except (IOError, OSError): - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. 
Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. - if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError: - msg = str(get_exception()) - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/__init__.py deleted file mode 100644 index b6f1fa71c54a1..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os - -ref_dir = os.path.join(os.path.dirname(__file__)) - -__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and - not f.startswith('__')) - -for f in __all__: - __import__(__name__ + '.' + f) - -del f, ref_dir - -__doc__ = """\ -Topical documentation -===================== - -The following topics are available: -%s - -You can view them by - ->>> help(np.doc.TOPIC) #doctest: +SKIP - -""" % '\n- '.join([''] + __all__) - -__all__.extend(['__doc__']) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/basics.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/basics.py deleted file mode 100644 index 86a3984c27e23..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/basics.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -============ -Array basics -============ - -Array types and conversions between types -========================================= - -Numpy supports a much greater variety of numerical types than Python does. -This section shows which are available, and how to modify an array's data-type. - -========== ========================================================== -Data type Description -========== ========================================================== -bool_ Boolean (True or False) stored as a byte -int_ Default integer type (same as C ``long``; normally either - ``int64`` or ``int32``) -intc Identical to C ``int`` (normally ``int32`` or ``int64``) -intp Integer used for indexing (same as C ``ssize_t``; normally - either ``int32`` or ``int64``) -int8 Byte (-128 to 127) -int16 Integer (-32768 to 32767) -int32 Integer (-2147483648 to 2147483647) -int64 Integer (-9223372036854775808 to 9223372036854775807) -uint8 Unsigned integer (0 to 255) -uint16 Unsigned integer (0 to 65535) -uint32 Unsigned integer (0 to 4294967295) -uint64 Unsigned integer (0 to 18446744073709551615) -float_ Shorthand for ``float64``. -float16 Half precision float: sign bit, 5 bits exponent, - 10 bits mantissa -float32 Single precision float: sign bit, 8 bits exponent, - 23 bits mantissa -float64 Double precision float: sign bit, 11 bits exponent, - 52 bits mantissa -complex_ Shorthand for ``complex128``. -complex64 Complex number, represented by two 32-bit floats (real - and imaginary components) -complex128 Complex number, represented by two 64-bit floats (real - and imaginary components) -========== ========================================================== - -Additionally to ``intc`` the platform dependent C integer types ``short``, -``long``, ``longlong`` and their unsigned versions are defined. 
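Since the widths of these platform-dependent aliases vary between builds, the quickest check is to ask NumPy directly. A minimal sketch (assuming only a standard NumPy install; the choice of aliases inspected is illustrative)::

    import numpy as np

    # Report the bit width of each platform-dependent integer alias.
    # On a typical 64-bit Linux build np.int_ maps to int64; on 32-bit
    # builds (and 64-bit Windows) it maps to int32.
    for t in (np.short, np.intc, np.int_, np.longlong, np.intp):
        print(t.__name__, np.dtype(t).itemsize * 8, 'bits')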
- -Numpy numerical types are instances of ``dtype`` (data-type) objects, each -having unique characteristics. Once you have imported NumPy using - - :: - - >>> import numpy as np - -the dtypes are available as ``np.bool_``, ``np.float32``, etc. - -Advanced types, not listed in the table above, are explored in -section :ref:`structured_arrays`. - -There are 5 basic numerical types representing booleans (bool), integers (int), -unsigned integers (uint) floating point (float) and complex. Those with numbers -in their name indicate the bitsize of the type (i.e. how many bits are needed -to represent a single value in memory). Some types, such as ``int`` and -``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit -vs. 64-bit machines). This should be taken into account when interfacing -with low-level code (such as C or Fortran) where the raw memory is addressed. - -Data-types can be used as functions to convert python numbers to array scalars -(see the array scalar section for an explanation), python sequences of numbers -to arrays of that type, or as arguments to the dtype keyword that many numpy -functions or methods accept. Some examples:: - - >>> import numpy as np - >>> x = np.float32(1.0) - >>> x - 1.0 - >>> y = np.int_([1,2,4]) - >>> y - array([1, 2, 4]) - >>> z = np.arange(3, dtype=np.uint8) - >>> z - array([0, 1, 2], dtype=uint8) - -Array types can also be referred to by character codes, mostly to retain -backward compatibility with older packages such as Numeric. Some -documentation may still refer to these, for example:: - - >>> np.array([1, 2, 3], dtype='f') - array([ 1., 2., 3.], dtype=float32) - -We recommend using dtype objects instead. - -To convert the type of an array, use the .astype() method (preferred) or -the type itself as a function. For example: :: - - >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE - array([ 0., 1., 2.]) - >>> np.int8(z) - array([0, 1, 2], dtype=int8) - -Note that, above, we use the *Python* float object as a dtype. NumPy knows -that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``, -that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``. -The other data-types do not have Python equivalents. - -To determine the type of an array, look at the dtype attribute:: - - >>> z.dtype - dtype('uint8') - -dtype objects also contain information about the type, such as its bit-width -and its byte-order. The data type can also be used indirectly to query -properties of the type, such as whether it is an integer:: - - >>> d = np.dtype(int) - >>> d - dtype('int32') - - >>> np.issubdtype(d, int) - True - - >>> np.issubdtype(d, float) - False - - -Array Scalars -============= - -Numpy generally returns elements of arrays as array scalars (a scalar -with an associated dtype). Array scalars differ from Python scalars, but -for the most part they can be used interchangeably (the primary -exception is for versions of Python older than v2.x, where integer array -scalars cannot act as indices for lists and tuples). There are some -exceptions, such as when code requires very specific attributes of a scalar -or when it checks specifically whether a value is a Python scalar. Generally, -problems are easily fixed by explicitly converting array scalars -to Python scalars, using the corresponding Python type function -(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``). - -The primary advantage of using array scalars is that -they preserve the array type (Python may not have a matching scalar type -available, e.g. ``int16``). 
Therefore, the use of array scalars ensures -identical behaviour between arrays and scalars, irrespective of whether the -value is inside an array or not. NumPy scalars also have many of the same -methods arrays do. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/broadcasting.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/broadcasting.py deleted file mode 100644 index 717914cda28c5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/broadcasting.py +++ /dev/null @@ -1,178 +0,0 @@ -""" -======================== -Broadcasting over arrays -======================== - -The term broadcasting describes how numpy treats arrays with different -shapes during arithmetic operations. Subject to certain constraints, -the smaller array is "broadcast" across the larger array so that they -have compatible shapes. Broadcasting provides a means of vectorizing -array operations so that looping occurs in C instead of Python. It does -this without making needless copies of data and usually leads to -efficient algorithm implementations. There are, however, cases where -broadcasting is a bad idea because it leads to inefficient use of memory -that slows computation. - -NumPy operations are usually done on pairs of arrays on an -element-by-element basis. In the simplest case, the two arrays must -have exactly the same shape, as in the following example: - - >>> a = np.array([1.0, 2.0, 3.0]) - >>> b = np.array([2.0, 2.0, 2.0]) - >>> a * b - array([ 2., 4., 6.]) - -NumPy's broadcasting rule relaxes this constraint when the arrays' -shapes meet certain constraints. The simplest broadcasting example occurs -when an array and a scalar value are combined in an operation: - ->>> a = np.array([1.0, 2.0, 3.0]) ->>> b = 2.0 ->>> a * b -array([ 2., 4., 6.]) - -The result is equivalent to the previous example where ``b`` was an array. -We can think of the scalar ``b`` being *stretched* during the arithmetic -operation into an array with the same shape as ``a``. The new elements in -``b`` are simply copies of the original scalar. The stretching analogy is -only conceptual. NumPy is smart enough to use the original scalar value -without actually making copies, so that broadcasting operations are as -memory and computationally efficient as possible. - -The code in the second example is more efficient than that in the first -because broadcasting moves less memory around during the multiplication -(``b`` is a scalar rather than an array). - -General Broadcasting Rules -========================== -When operating on two arrays, NumPy compares their shapes element-wise. -It starts with the trailing dimensions, and works its way forward. Two -dimensions are compatible when - -1) they are equal, or -2) one of them is 1 - -If these conditions are not met, a -``ValueError: frames are not aligned`` exception is thrown, indicating that -the arrays have incompatible shapes. The size of the resulting array -is the maximum size along each dimension of the input arrays. - -Arrays do not need to have the same *number* of dimensions. For example, -if you have a ``256x256x3`` array of RGB values, and you want to scale -each color in the image by a different value, you can multiply the image -by a one-dimensional array with 3 values. 
Lining up the sizes of the -trailing axes of these arrays according to the broadcast rules, shows that -they are compatible:: - - Image (3d array): 256 x 256 x 3 - Scale (1d array): 3 - Result (3d array): 256 x 256 x 3 - -When either of the dimensions compared is one, the other is -used. In other words, dimensions with size 1 are stretched or "copied" -to match the other. - -In the following example, both the ``A`` and ``B`` arrays have axes with -length one that are expanded to a larger size during the broadcast -operation:: - - A (4d array): 8 x 1 x 6 x 1 - B (3d array): 7 x 1 x 5 - Result (4d array): 8 x 7 x 6 x 5 - -Here are some more examples:: - - A (2d array): 5 x 4 - B (1d array): 1 - Result (2d array): 5 x 4 - - A (2d array): 5 x 4 - B (1d array): 4 - Result (2d array): 5 x 4 - - A (3d array): 15 x 3 x 5 - B (3d array): 15 x 1 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 5 - Result (3d array): 15 x 3 x 5 - - A (3d array): 15 x 3 x 5 - B (2d array): 3 x 1 - Result (3d array): 15 x 3 x 5 - -Here are examples of shapes that do not broadcast:: - - A (1d array): 3 - B (1d array): 4 # trailing dimensions do not match - - A (2d array): 2 x 1 - B (3d array): 8 x 4 x 3 # second from last dimensions mismatched - -An example of broadcasting in practice:: - - >>> x = np.arange(4) - >>> xx = x.reshape(4,1) - >>> y = np.ones(5) - >>> z = np.ones((3,4)) - - >>> x.shape - (4,) - - >>> y.shape - (5,) - - >>> x + y - <type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape - - >>> xx.shape - (4, 1) - - >>> y.shape - (5,) - - >>> (xx + y).shape - (4, 5) - - >>> xx + y - array([[ 1., 1., 1., 1., 1.], - [ 2., 2., 2., 2., 2.], - [ 3., 3., 3., 3., 3.], - [ 4., 4., 4., 4., 4.]]) - - >>> x.shape - (4,) - - >>> z.shape - (3, 4) - - >>> (x + z).shape - (3, 4) - - >>> x + z - array([[ 1., 2., 3., 4.], - [ 1., 2., 3., 4.], - [ 1., 2., 3., 4.]]) - -Broadcasting provides a convenient way of taking the outer product (or -any other outer operation) of two arrays. The following example shows an -outer addition operation of two 1-d arrays:: - - >>> a = np.array([0.0, 10.0, 20.0, 30.0]) - >>> b = np.array([1.0, 2.0, 3.0]) - >>> a[:, np.newaxis] + b - array([[ 1., 2., 3.], - [ 11., 12., 13.], - [ 21., 22., 23.], - [ 31., 32., 33.]]) - -Here the ``newaxis`` index operator inserts a new axis into ``a``, -making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array -with ``b``, which has shape ``(3,)``, yields a ``4x3`` array. - -See `this article <http://wiki.scipy.org/EricsBroadcastingDoc>`_ -for illustrations of broadcasting concepts. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/byteswapping.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/byteswapping.py deleted file mode 100644 index 430683d308d4a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/byteswapping.py +++ /dev/null @@ -1,147 +0,0 @@ -""" - -============================= - Byteswapping and byte order -============================= - -Introduction to byte ordering and ndarrays -========================================== - -The ``ndarray`` is an object that provides a python array interface to data -in memory. - -It often happens that the memory that you want to view with an array is -not of the same byte ordering as the computer on which you are running -Python.
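Which order a machine uses natively, and which order a given dtype assumes, can both be queried up front. A doctest-style sketch for orientation (the native order shown is for an x86 machine and will differ elsewhere)::

    >>> import sys
    >>> sys.byteorder        # native order of the running machine
    'little'
    >>> import numpy as np
    >>> np.dtype('>i2').byteorder   # '>' marks an explicitly big-endian dtype
    '>'
    >>> np.dtype('<i2').byteorder   # '<' marks an explicitly little-endian dtype
    '<'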
- -For example, I might be working on a computer with a little-endian CPU - -such as an Intel Pentium, but I have loaded some data from a file -written by a computer that is big-endian. Let's say I have loaded 4 -bytes from a file written by a Sun (big-endian) computer. I know that -these 4 bytes represent two 16-bit integers. On a big-endian machine, a -two-byte integer is stored with the Most Significant Byte (MSB) first, -and then the Least Significant Byte (LSB). Thus the bytes are, in memory order: - -#. MSB integer 1 -#. LSB integer 1 -#. MSB integer 2 -#. LSB integer 2 - -Let's say the two integers were in fact 1 and 770. Because 770 = 256 * -3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2. -The bytes I have loaded from the file would have these contents: - ->>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2) ->>> big_end_str -'\\x00\\x01\\x03\\x02' - -We might want to use an ``ndarray`` to access these integers. In that -case, we can create an array around this memory, and tell numpy that -there are two integers, and that they are 16 bit and big-endian: - ->>> import numpy as np ->>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str) ->>> big_end_arr[0] -1 ->>> big_end_arr[1] -770 - -Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian' -(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For -example, if our data represented a single unsigned 4-byte little-endian -integer, the dtype string would be ``<u4``: - ->>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str) ->>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3 -True - -Returning to our ``big_end_arr`` - in this case our underlying data is -big-endian (data endianness) and we've set the dtype to match (the dtype -is also big-endian). However, sometimes you need to flip these around. - -Changing byte ordering -====================== - -As you can imagine from the introduction, there are two ways you can -affect the relationship between the byte ordering of the array and the -underlying memory it is looking at: - -* Change the byte-ordering information in the array dtype so that it - interprets the underlying data as being in a different byte order. - This is the role of ``arr.newbyteorder()`` -* Change the byte-ordering of the underlying data, leaving the dtype - interpretation as it was. This is what ``arr.byteswap()`` does. - -The common situations in which you need to change byte ordering are: - -#. Your data and dtype endianness don't match, and you want to change - the dtype so that it matches the data. -#. Your data and dtype endianness don't match, and you want to swap the - data so that they match the dtype -#.
Your data and dtype endianness match, but you want the data swapped - and the dtype to reflect this - -Data and dtype endianness don't match, change dtype to match data ------------------------------------------------------------------ - -We make something where they don't match: - ->>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str) ->>> wrong_end_dtype_arr[0] -256 - -The obvious fix for this situation is to change the dtype so it gives -the correct endianness: - ->>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder() ->>> fixed_end_dtype_arr[0] -1 - -Note that the array has not changed in memory: - ->>> fixed_end_dtype_arr.tobytes() == big_end_str -True - -Data and type endianness don't match, change data to match dtype ----------------------------------------------------------------- - -You might want to do this if you need the data in memory to be a certain -ordering. For example you might be writing the memory out to a file -that needs a certain byte ordering. - ->>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap() ->>> fixed_end_mem_arr[0] -1 - -Now the array *has* changed in memory: - ->>> fixed_end_mem_arr.tobytes() == big_end_str -False - -Data and dtype endianness match, swap data and dtype ----------------------------------------------------- - -You may have a correctly specified array dtype, but you need the array -to have the opposite byte order in memory, and you want the dtype to -match so the array values make sense. In this case you just do both of -the previous operations: - ->>> swapped_end_arr = big_end_arr.byteswap().newbyteorder() ->>> swapped_end_arr[0] -1 ->>> swapped_end_arr.tobytes() == big_end_str -False - -An easier way of casting the data to a specific dtype and byte ordering -can be achieved with the ndarray astype method: - ->>> swapped_end_arr = big_end_arr.astype('<i2') ->>> swapped_end_arr[0] -1 ->>> swapped_end_arr.tobytes() == big_end_str -False - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/constants.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/constants.py deleted file mode 100644 index 36f94d3070517..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/constants.py +++ /dev/null @@ -1,393 +0,0 @@ -""" -========= -Constants -========= - -Numpy includes several constants: - -%(constant_list)s -""" -# -# Note: the docstring is autogenerated. -# -from __future__ import division, absolute_import, print_function - -import textwrap, re - -# Maintain same format as in numpy.add_newdocs -constants = [] -def add_newdoc(module, name, doc): - constants.append((name, doc)) - -add_newdoc('numpy', 'Inf', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'Infinity', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'NAN', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - `NaN` and `NAN` are equivalent definitions of `nan`. Please use - `nan` instead of `NAN`. - - See Also - -------- - nan - - """) - -add_newdoc('numpy', 'NINF', - """ - IEEE 754 floating point representation of negative infinity.
- - Returns - ------- - y : float - A floating point representation of negative infinity. - - See Also - -------- - isinf : Shows which elements are positive or negative infinity - - isposinf : Shows which elements are positive infinity - - isneginf : Shows which elements are negative infinity - - isnan : Shows which elements are Not a Number - - isfinite : Shows which elements are finite (not one of Not a Number, - positive infinity and negative infinity) - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - Examples - -------- - >>> np.NINF - -inf - >>> np.log(0) - -inf - - """) - -add_newdoc('numpy', 'NZERO', - """ - IEEE 754 floating point representation of negative zero. - - Returns - ------- - y : float - A floating point representation of negative zero. - - See Also - -------- - PZERO : Defines positive zero. - - isinf : Shows which elements are positive or negative infinity. - - isposinf : Shows which elements are positive infinity. - - isneginf : Shows which elements are negative infinity. - - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite - not one of - Not a Number, positive infinity and negative infinity. - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). Negative zero is considered to be a finite number. - - Examples - -------- - >>> np.NZERO - -0.0 - >>> np.PZERO - 0.0 - - >>> np.isfinite([np.NZERO]) - array([ True], dtype=bool) - >>> np.isnan([np.NZERO]) - array([False], dtype=bool) - >>> np.isinf([np.NZERO]) - array([False], dtype=bool) - - """) - -add_newdoc('numpy', 'NaN', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - `NaN` and `NAN` are equivalent definitions of `nan`. Please use - `nan` instead of `NaN`. - - See Also - -------- - nan - - """) - -add_newdoc('numpy', 'PINF', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'PZERO', - """ - IEEE 754 floating point representation of positive zero. - - Returns - ------- - y : float - A floating point representation of positive zero. - - See Also - -------- - NZERO : Defines negative zero. - - isinf : Shows which elements are positive or negative infinity. - - isposinf : Shows which elements are positive infinity. - - isneginf : Shows which elements are negative infinity. - - isnan : Shows which elements are Not a Number. - - isfinite : Shows which elements are finite - not one of - Not a Number, positive infinity and negative infinity. - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). Positive zero is considered to be a finite number. - - Examples - -------- - >>> np.PZERO - 0.0 - >>> np.NZERO - -0.0 - - >>> np.isfinite([np.PZERO]) - array([ True], dtype=bool) - >>> np.isnan([np.PZERO]) - array([False], dtype=bool) - >>> np.isinf([np.PZERO]) - array([False], dtype=bool) - - """) - -add_newdoc('numpy', 'e', - """ - Euler's constant, base of natural logarithms, Napier's constant. 
- - ``e = 2.71828182845904523536028747135266249775724709369995...`` - - See Also - -------- - exp : Exponential function - log : Natural logarithm - - References - ---------- - .. [1] http://en.wikipedia.org/wiki/Napier_constant - - """) - -add_newdoc('numpy', 'inf', - """ - IEEE 754 floating point representation of (positive) infinity. - - Returns - ------- - y : float - A floating point representation of positive infinity. - - See Also - -------- - isinf : Shows which elements are positive or negative infinity - - isposinf : Shows which elements are positive infinity - - isneginf : Shows which elements are negative infinity - - isnan : Shows which elements are Not a Number - - isfinite : Shows which elements are finite (not one of Not a Number, - positive infinity and negative infinity) - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. - - Examples - -------- - >>> np.inf - inf - >>> np.array([1]) / 0. - array([ Inf]) - - """) - -add_newdoc('numpy', 'infty', - """ - IEEE 754 floating point representation of (positive) infinity. - - Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for - `inf`. For more details, see `inf`. - - See Also - -------- - inf - - """) - -add_newdoc('numpy', 'nan', - """ - IEEE 754 floating point representation of Not a Number (NaN). - - Returns - ------- - y : A floating point representation of Not a Number. - - See Also - -------- - isnan : Shows which elements are Not a Number. - isfinite : Shows which elements are finite (not one of - Not a Number, positive infinity and negative infinity) - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - - `NaN` and `NAN` are aliases of `nan`. - - Examples - -------- - >>> np.nan - nan - >>> np.log(-1) - nan - >>> np.log([-1, 1, 2]) - array([ NaN, 0. , 0.69314718]) - - """) - -add_newdoc('numpy', 'newaxis', - """ - A convenient alias for None, useful for indexing arrays. - - See Also - -------- - `numpy.doc.indexing` - - Examples - -------- - >>> newaxis is None - True - >>> x = np.arange(3) - >>> x - array([0, 1, 2]) - >>> x[:, newaxis] - array([[0], - [1], - [2]]) - >>> x[:, newaxis, newaxis] - array([[[0]], - [[1]], - [[2]]]) - >>> x[:, newaxis] * x - array([[0, 0, 0], - [0, 1, 2], - [0, 2, 4]]) - - Outer product, same as ``outer(x, y)``: - - >>> y = np.arange(3, 6) - >>> x[:, newaxis] * y - array([[ 0, 0, 0], - [ 3, 4, 5], - [ 6, 8, 10]]) - - ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: - - >>> x[newaxis, :].shape - (1, 3) - >>> x[newaxis].shape - (1, 3) - >>> x[None].shape - (1, 3) - >>> x[:, newaxis].shape - (3, 1) - - """) - -if __doc__: - constants_str = [] - constants.sort() - for name, doc in constants: - s = textwrap.dedent(doc).replace("\n", "\n ") - - # Replace sections by rubrics - lines = s.split("\n") - new_lines = [] - for line in lines: - m = re.match(r'^(\s+)[-=]+\s*$', line) - if m and new_lines: - prev = textwrap.dedent(new_lines.pop()) - new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) - new_lines.append('') - else: - new_lines.append(line) - s = "\n".join(new_lines) - - # Done. - constants_str.append(""".. 
const:: %s\n %s""" % (name, s)) - constants_str = "\n".join(constants_str) - - __doc__ = __doc__ % dict(constant_list=constants_str) - del constants_str, name, doc - del line, lines, new_lines, m, s, prev - -del constants, add_newdoc diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/creation.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/creation.py deleted file mode 100644 index 7979b51aabdc7..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/creation.py +++ /dev/null @@ -1,144 +0,0 @@ -""" -============== -Array Creation -============== - -Introduction -============ - -There are 5 general mechanisms for creating arrays: - -1) Conversion from other Python structures (e.g., lists, tuples) -2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros, - etc.) -3) Reading arrays from disk, either from standard or custom formats -4) Creating arrays from raw bytes through the use of strings or buffers -5) Use of special library functions (e.g., random) - -This section will not cover means of replicating, joining, or otherwise -expanding or mutating existing arrays. Nor will it cover creating object -arrays or record arrays. Both of those are covered in their own sections. - -Converting Python array_like Objects to Numpy Arrays -==================================================== - -In general, numerical data arranged in an array-like structure in Python can -be converted to arrays through the use of the array() function. The most -obvious examples are lists and tuples. See the documentation for array() for -details for its use. Some objects may support the array-protocol and allow -conversion to arrays this way. A simple way to find out if the object can be -converted to a numpy array using array() is simply to try it interactively and -see if it works! (The Python Way). - -Examples: :: - - >>> x = np.array([2,3,1,0]) - >>> x = np.array([2, 3, 1, 0]) - >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, - and types - >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]]) - -Intrinsic Numpy Array Creation -============================== - -Numpy has built-in functions for creating arrays from scratch: - -zeros(shape) will create an array filled with 0 values with the specified -shape. The default dtype is float64. - -``>>> np.zeros((2, 3)) -array([[ 0., 0., 0.], [ 0., 0., 0.]])`` - -ones(shape) will create an array filled with 1 values. It is identical to -zeros in all other respects. - -arange() will create arrays with regularly incrementing values. Check the -docstring for complete information on the various ways it can be used. A few -examples will be given here: :: - - >>> np.arange(10) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=np.float) - array([ 2., 3., 4., 5., 6., 7., 8., 9.]) - >>> np.arange(2, 3, 0.1) - array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) - -Note that there are some subtleties regarding the last usage that the user -should be aware of that are described in the arange docstring. - -linspace() will create arrays with a specified number of elements, and -spaced equally between the specified beginning and end values. For -example: :: - - >>> np.linspace(1., 4., 6) - array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ]) - -The advantage of this creation function is that one can guarantee the -number of elements and the starting and end point, which arange() -generally will not do for arbitrary start, stop, and step values. 
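The difference is easy to see side by side. A small sketch (values chosen only for illustration; the exact arange output depends on floating point rounding, which is the point)::

    import numpy as np

    # arange derives the length from ceil((stop - start) / step), so
    # rounding error decides whether a value numerically equal to the
    # stop point lands in the result.
    a = np.arange(1.0, 1.3, 0.1)   # often array([ 1. ,  1.1,  1.2,  1.3])

    # linspace pins the endpoints and the element count instead.
    b = np.linspace(1.0, 1.3, 4)   # always array([ 1. ,  1.1,  1.2,  1.3])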
- -indices() will create a set of arrays (stacked as a one-higher dimensioned -array), one per dimension with each representing variation in that dimension. -An example illustrates much better than a verbal description: :: - - >>> np.indices((3,3)) - array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]) - -This is particularly useful for evaluating functions of multiple dimensions on -a regular grid. - -Reading Arrays From Disk -======================== - -This is presumably the most common case of large array creation. The details, -of course, depend greatly on the format of data on disk and so this section -can only give general pointers on how to handle various formats. - -Standard Binary Formats ------------------------ - -Various fields have standard formats for array data. The following lists the -ones with known python libraries to read them and return numpy arrays (there -may be others for which it is possible to read and convert to numpy arrays so -check the last section as well) -:: - - HDF5: PyTables - FITS: PyFITS - -Examples of formats that cannot be read directly but for which it is not hard to -convert are those formats supported by libraries like PIL (able to read and -write many image formats such as jpg, png, etc). - -Common ASCII Formats ------------------------- - -Comma Separated Value files (CSV) are widely used (and an export and import -option for programs like Excel). There are a number of ways of reading these -files in Python. There are CSV functions in Python and functions in pylab -(part of matplotlib). - -More generic ascii files can be read using the io package in scipy. - -Custom Binary Formats ---------------------- - -There are a variety of approaches one can use. If the file has a relatively -simple format then one can write a simple I/O library and use the numpy -fromfile() function and .tofile() method to read and write numpy arrays -directly (mind your byteorder though!) If a good C or C++ library exists that -read the data, one can wrap that library with a variety of techniques though -that certainly is much more work and requires significantly more advanced -knowledge to interface with C or C++. - -Use of Special Libraries ------------------------- - -There are libraries that can be used to generate arrays for special purposes -and it isn't possible to enumerate all of them. The most common uses are use -of the many array generation functions in random that can generate arrays of -random values, and some utility functions to generate special matrices (e.g. -diagonal). - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/glossary.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/glossary.py deleted file mode 100644 index 3770f5761f2b4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/glossary.py +++ /dev/null @@ -1,418 +0,0 @@ -""" -======== -Glossary -======== - -.. glossary:: - - along an axis - Axes are defined for arrays with more than one dimension. A - 2-dimensional array has two corresponding axes: the first running - vertically downwards across rows (axis 0), and the second running - horizontally across columns (axis 1). - - Many operation can take place along one of these axes. 
For example,
-     we can sum each row of an array, in which case we operate along
-     columns, or axis 1::
-
-       >>> x = np.arange(12).reshape((3,4))
-
-       >>> x
-       array([[ 0,  1,  2,  3],
-              [ 4,  5,  6,  7],
-              [ 8,  9, 10, 11]])
-
-       >>> x.sum(axis=1)
-       array([ 6, 22, 38])
-
-   array
-     A homogeneous container of numerical elements. Each element in the
-     array occupies a fixed amount of memory (hence homogeneous), and
-     can be a numerical element of a single type (such as float, int
-     or complex) or a combination (such as ``(float, int, float)``). Each
-     array has an associated data-type (or ``dtype``), which describes
-     the numerical type of its elements::
-
-       >>> x = np.array([1, 2, 3], float)
-
-       >>> x
-       array([ 1.,  2.,  3.])
-
-       >>> x.dtype # floating point number, 64 bits of memory per element
-       dtype('float64')
-
-
-       # More complicated data type: each array element is a combination of
-       # an integer and a floating point number
-       >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
-       array([(1, 2.0), (3, 4.0)],
-             dtype=[('x', '<i8'), ('y', '<f8')])
-
-   attribute
-     A property of an object that can be accessed using ``obj.attribute``,
-     e.g., ``shape`` is an attribute of an array::
-
-       >>> x = np.array([1, 2, 3])
-       >>> x.shape
-       (3,)
-
-   BLAS
-     Basic Linear Algebra Subprograms
-
-   broadcast
-     NumPy can do operations on arrays whose shapes are mismatched::
-
-       >>> x = np.array([1, 2])
-       >>> y = np.array([[3], [4]])
-
-       >>> x
-       array([1, 2])
-
-       >>> y
-       array([[3],
-              [4]])
-
-       >>> x + y
-       array([[4, 5],
-              [5, 6]])
-
-     See `doc.broadcasting`_ for more information.
-
-   C order
-     See `row-major`
-
-   column-major
-     A way to represent items in an N-dimensional array in the 1-dimensional
-     computer memory. In column-major order, the leftmost index "varies the
-     fastest": for example the array::
-
-          [[1, 2, 3],
-           [4, 5, 6]]
-
-     is represented in the column-major order as::
-
-         [1, 4, 2, 5, 3, 6]
-
-     Column-major order is also known as the Fortran order, as the Fortran
-     programming language uses it.
-
-   decorator
-     An operator that transforms a function. For example, a ``log``
-     decorator may be defined to print debugging information upon
-     function execution::
-
-       >>> def log(f):
-       ...     def new_logging_func(*args, **kwargs):
-       ...         print "Logging call with parameters:", args, kwargs
-       ...         return f(*args, **kwargs)
-       ...
-       ...     return new_logging_func
-
-     Now, when we define a function, we can "decorate" it using ``log``::
-
-       >>> @log
-       ... def add(a, b):
-       ...     return a + b
-
-     Calling ``add`` then yields:
-
-     >>> add(1, 2)
-     Logging call with parameters: (1, 2) {}
-     3
-
-   dictionary
-     Resembling a language dictionary, which provides a mapping between
-     words and descriptions thereof, a Python dictionary is a mapping
-     between two objects::
-
-       >>> x = {1: 'one', 'two': [1, 2]}
-
-     Here, `x` is a dictionary mapping keys to values, in this case
-     the integer 1 to the string "one", and the string "two" to
-     the list ``[1, 2]``. The values may be accessed using their
-     corresponding keys::
-
-       >>> x[1]
-       'one'
-
-       >>> x['two']
-       [1, 2]
-
-     Note that dictionaries are not stored in any specific order. Also,
-     most mutable (see *immutable* below) objects, such as lists, may not
-     be used as keys.
-
-     For more information on dictionaries, read the Python tutorial.
-
-   Fortran order
-     See `column-major`
-
-   flattened
-     Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
-
-   immutable
-     An object that cannot be modified after execution is called
-     immutable. Two common examples are strings and tuples.
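-     For instance, a small illustration of tuple immutability (the exact
-     traceback text may vary by Python version)::
-
-       >>> t = (1, 2)
-       >>> t[0] = 3
-       Traceback (most recent call last):
-           ...
-       TypeError: 'tuple' object does not support item assignment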
-   instance
-     A class definition gives the blueprint for constructing an object::
-
-       >>> class House(object):
-       ...     wall_colour = 'white'
-
-     Yet, we have to *build* a house before it exists::
-
-       >>> h = House() # build a house
-
-     Now, ``h`` is called a ``House`` instance. An instance is therefore
-     a specific realisation of a class.
-
-   iterable
-     A sequence that allows "walking" (iterating) over items, typically
-     using a loop such as::
-
-       >>> x = [1, 2, 3]
-       >>> [item**2 for item in x]
-       [1, 4, 9]
-
-     It is often used in combination with ``enumerate``::
-
-       >>> keys = ['a','b','c']
-       >>> for n, k in enumerate(keys):
-       ...     print "Key %d: %s" % (n, k)
-       ...
-       Key 0: a
-       Key 1: b
-       Key 2: c
-
-   list
-     A Python container that can hold any number of objects or items.
-     The items do not have to be of the same type, and can even be
-     lists themselves::
-
-       >>> x = [2, 2.0, "two", [2, 2.0]]
-
-     The list `x` contains 4 items, each of which can be accessed
-     individually::
-
-       >>> x[2] # the string 'two'
-       'two'
-
-       >>> x[3] # a list, containing an integer 2 and a float 2.0
-       [2, 2.0]
-
-     It is also possible to select more than one item at a time,
-     using *slicing*::
-
-       >>> x[0:2] # or, equivalently, x[:2]
-       [2, 2.0]
-
-     In code, arrays are often conveniently expressed as nested lists::
-
-       >>> np.array([[1, 2], [3, 4]])
-       array([[1, 2],
-              [3, 4]])
-
-     For more information, read the section on lists in the Python
-     tutorial. For a mapping type (key-value), see *dictionary*.
-
-   mask
-     A boolean array, used to select only certain elements for an operation::
-
-       >>> x = np.arange(5)
-       >>> x
-       array([0, 1, 2, 3, 4])
-
-       >>> mask = (x > 2)
-       >>> mask
-       array([False, False, False, True, True], dtype=bool)
-
-       >>> x[mask] = -1
-       >>> x
-       array([ 0, 1, 2, -1, -1])
-
-   masked array
-     An array that suppresses the values indicated by a mask::
-
-       >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
-       >>> x
-       masked_array(data = [-- 2.0 --],
-                    mask = [ True False True],
-              fill_value = 1e+20)
-
-
-       >>> x + [1, 2, 3]
-       masked_array(data = [-- 4.0 --],
-                    mask = [ True False True],
-              fill_value = 1e+20)
-
-
-
-     Masked arrays are often used when operating on arrays containing
-     missing or invalid entries.
-
-   matrix
-     A 2-dimensional ndarray that preserves its two-dimensional nature
-     throughout operations. It has certain special operations, such as ``*``
-     (matrix multiplication) and ``**`` (matrix power), defined::
-
-       >>> x = np.mat([[1, 2], [3, 4]])
-
-       >>> x
-       matrix([[1, 2],
-               [3, 4]])
-
-       >>> x**2
-       matrix([[ 7, 10],
-               [15, 22]])
-
-   method
-     A function associated with an object. For example, each ndarray has a
-     method called ``repeat``::
-
-       >>> x = np.array([1, 2, 3])
-
-       >>> x.repeat(2)
-       array([1, 1, 2, 2, 3, 3])
-
-   ndarray
-     See *array*.
-
-   reference
-     If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
-     ``a`` and ``b`` are different names for the same Python object.
-
-   row-major
-     A way to represent items in an N-dimensional array in the 1-dimensional
-     computer memory. In row-major order, the rightmost index "varies
-     the fastest": for example the array::
-
-          [[1, 2, 3],
-           [4, 5, 6]]
-
-     is represented in the row-major order as::
-
-         [1, 2, 3, 4, 5, 6]
-
-     Row-major order is also known as the C order, as the C programming
-     language uses it. New Numpy arrays are by default in row-major order.
-
-   self
-     Often seen in method signatures, ``self`` refers to the instance
-     of the associated class. For example:
-
-     >>> class Paintbrush(object):
-     ...     color = 'blue'
-     ...
-     ...     def paint(self):
-     ...         print "Painting the city %s!" % self.color
-     ...
- >>> p = Paintbrush() - >>> p.color = 'red' - >>> p.paint() # self refers to 'p' - Painting the city red! - - slice - Used to select only certain elements from a sequence:: - - >>> x = range(5) - >>> x - [0, 1, 2, 3, 4] - - >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) - [1, 2] - - >>> x[1:5:2] # slice from 1 to 5, but skipping every second element - [1, 3] - - >>> x[::-1] # slice a sequence in reverse - [4, 3, 2, 1, 0] - - Arrays may have more than one dimension, each which can be sliced - individually:: - - >>> x = np.array([[1, 2], [3, 4]]) - >>> x - array([[1, 2], - [3, 4]]) - - >>> x[:, 1] - array([2, 4]) - - tuple - A sequence that may contain a variable number of types of any - kind. A tuple is immutable, i.e., once constructed it cannot be - changed. Similar to a list, it can be indexed and sliced:: - - >>> x = (1, 'one', [1, 2]) - - >>> x - (1, 'one', [1, 2]) - - >>> x[0] - 1 - - >>> x[:2] - (1, 'one') - - A useful concept is "tuple unpacking", which allows variables to - be assigned to the contents of a tuple:: - - >>> x, y = (1, 2) - >>> x, y = 1, 2 - - This is often used when a function returns multiple values: - - >>> def return_many(): - ... return 1, 'alpha', None - - >>> a, b, c = return_many() - >>> a, b, c - (1, 'alpha', None) - - >>> a - 1 - >>> b - 'alpha' - - ufunc - Universal function. A fast element-wise array operation. Examples include - ``add``, ``sin`` and ``logical_or``. - - view - An array that does not own its data, but refers to another array's - data instead. For example, we may create a view that only shows - every second element of another array:: - - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - - >>> y = x[::2] - >>> y - array([0, 2, 4]) - - >>> x[0] = 3 # changing x changes y as well, since y is a view on x - >>> y - array([3, 2, 4]) - - wrapper - Python is a high-level (highly abstracted, or English-like) language. - This abstraction comes at a price in execution speed, and sometimes - it becomes necessary to use lower level languages to do fast - computations. A wrapper is code that provides a bridge between - high and the low level languages, allowing, e.g., Python to execute - code written in C or Fortran. - - Examples include ctypes, SWIG and Cython (which wraps C and C++) - and f2py (which wraps Fortran). - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/howtofind.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/howtofind.py deleted file mode 100644 index e080d263a2791..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/howtofind.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -================= -How to Find Stuff -================= - -How to find things in NumPy. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/indexing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/indexing.py deleted file mode 100644 index d3f442c212e1b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/indexing.py +++ /dev/null @@ -1,437 +0,0 @@ -""" -============== -Array indexing -============== - -Array indexing refers to any use of the square brackets ([]) to index -array values. There are many options to indexing, which give numpy -indexing great power, but with power comes some complexity and the -potential for confusion. This section is just an overview of the -various options and issues related to indexing. 
Aside from single
-element indexing, the details on most of these options are to be
-found in related sections.
-
-Assignment vs referencing
-=========================
-
-Most of the following examples show the use of indexing when
-referencing data in an array. The examples work just as well
-when assigning to an array. See the section at the end for
-specific examples and explanations on how assignments work.
-
-Single element indexing
-=======================
-
-Single element indexing for a 1-D array is what one expects. It works
-exactly like that for other standard Python sequences. It is 0-based,
-and accepts negative indices for indexing from the end of the array. ::
-
-    >>> x = np.arange(10)
-    >>> x[2]
-    2
-    >>> x[-2]
-    8
-
-Unlike lists and tuples, numpy arrays support multidimensional indexing
-for multidimensional arrays. That means that it is not necessary to
-separate each dimension's index into its own set of square brackets. ::
-
-    >>> x.shape = (2,5) # now x is 2-dimensional
-    >>> x[1,3]
-    8
-    >>> x[1,-1]
-    9
-
-Note that if one indexes a multidimensional array with fewer indices
-than dimensions, one gets a subdimensional array. For example: ::
-
-    >>> x[0]
-    array([0, 1, 2, 3, 4])
-
-That is, each index specified selects the array corresponding to the
-rest of the dimensions selected. In the above example, choosing 0
-means that the remaining dimension of length 5 is being left unspecified,
-and that what is returned is an array of that dimensionality and size.
-It must be noted that the returned array is not a copy of the original,
-but points to the same values in memory as does the original array.
-In this case, the 1-D array at the first position (0) is returned.
-So using a single index on the returned array results in a single
-element being returned. That is: ::
-
-    >>> x[0][2]
-    2
-
-So note that ``x[0,2] == x[0][2]``, though the second case is less
-efficient: a new temporary array is created after the first index,
-and it is subsequently indexed by 2.
-
-A note to those used to IDL or Fortran memory order as it relates to
-indexing: Numpy uses C-order indexing. That means that the last
-index usually represents the most rapidly changing memory location,
-unlike Fortran or IDL, where the first index represents the most
-rapidly changing location in memory. This difference represents a
-great potential for confusion.
-
-Other indexing options
-======================
-
-It is possible to slice and stride arrays to extract arrays of the
-same number of dimensions, but of different sizes than the original.
-The slicing and striding works exactly the same way it does for lists
-and tuples except that they can be applied to multiple dimensions as
-well. A few examples illustrate it best: ::
-
-    >>> x = np.arange(10)
-    >>> x[2:5]
-    array([2, 3, 4])
-    >>> x[:-7]
-    array([0, 1, 2])
-    >>> x[1:7:2]
-    array([1, 3, 5])
-    >>> y = np.arange(35).reshape(5,7)
-    >>> y[1:5:2,::3]
-    array([[ 7, 10, 13],
-           [21, 24, 27]])
-
-Note that slices of arrays do not copy the internal array data; they
-only produce new views of the original data.
-
-It is possible to index arrays with other arrays for the purposes of
-selecting lists of values out of arrays into new arrays. There are
-two different ways of accomplishing this, both illustrated in the
-sketch below. One uses one or more arrays
-of index values. The other involves giving a boolean array of the proper
-shape to indicate the values to be selected.
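-A minimal sketch of the two styles (the arrays here are chosen only to
-make the selections easy to follow)::
-
-    >>> x = np.arange(10) * 10
-    >>> x[np.array([1, 3, 5])]    # an array of index values
-    array([10, 30, 50])
-    >>> x[x > 70]                 # a boolean array of the proper shape
-    array([80, 90])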
-Index arrays are a very
-powerful tool that allow one to avoid looping over individual elements in
-arrays and thus greatly improve performance.
-
-It is possible to use special features to effectively increase the
-number of dimensions in an array through indexing so the resulting
-array acquires the shape needed for use in an expression or with a
-specific function.
-
-Index arrays
-============
-
-Numpy arrays may be indexed with other arrays (or any other sequence-
-like object that can be converted to an array, such as lists, with the
-exception of tuples; see the end of this document for why this is). The
-use of index arrays ranges from simple, straightforward cases to
-complex, hard-to-understand cases. For all cases of index arrays, what
-is returned is a copy of the original data, not a view as one gets for
-slices.
-
-Index arrays must be of integer type. Each value in the index array
-indicates which value of the indexed array to use in place of the
-index. To illustrate: ::
-
-    >>> x = np.arange(10,1,-1)
-    >>> x
-    array([10,  9,  8,  7,  6,  5,  4,  3,  2])
-    >>> x[np.array([3, 3, 1, 8])]
-    array([7, 7, 9, 2])
-
-
-The index array consisting of the values 3, 3, 1 and 8 correspondingly
-creates an array of length 4 (the same as the index array) where each
-index is replaced by the value the index array has in the array being
-indexed.
-
-Negative values are permitted and work as they do with single indices
-or slices: ::
-
-    >>> x[np.array([3,3,-3,8])]
-    array([7, 7, 4, 2])
-
-It is an error to have index values out of bounds: ::
-
-    >>> x[np.array([3, 3, 20, 8])]
-    <type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
-
-Generally speaking, what is returned when index arrays are used is
-an array with the same shape as the index array, but with the type
-and values of the array being indexed. As an example, we can use a
-multidimensional index array instead: ::
-
-    >>> x[np.array([[1,1],[2,3]])]
-    array([[9, 9],
-           [8, 7]])
-
-Indexing Multi-dimensional arrays
-=================================
-
-Things become more complex when multidimensional arrays are indexed,
-particularly with multidimensional index arrays. These tend to be
-more unusual uses, but they are permitted, and they are useful for some
-problems. We'll start with the simplest multidimensional case (using
-the array y from the previous examples): ::
-
-    >>> y[np.array([0,2,4]), np.array([0,1,2])]
-    array([ 0, 15, 30])
-
-In this case, if the index arrays have a matching shape, and there is
-an index array for each dimension of the array being indexed, the
-resultant array has the same shape as the index arrays, and the values
-correspond to the index set for each position in the index arrays. In
-this example, the first index value is 0 for both index arrays, and
-thus the first value of the resultant array is y[0,0]. The next value
-is y[2,1], and the last is y[4,2].
-
-If the index arrays do not have the same shape, there is an attempt to
-broadcast them to the same shape. If they cannot be broadcast to the
-same shape, an exception is raised: ::
-
-    >>> y[np.array([0,2,4]), np.array([0,1])]
-    <type 'exceptions.ValueError'>: shape mismatch: objects cannot be
-    broadcast to a single shape
-
-The broadcasting mechanism permits index arrays to be combined with
-scalars for other indices. The effect is that the scalar value is used
-for all the corresponding values of the index arrays: ::
-
-    >>> y[np.array([0,2,4]), 1]
-    array([ 1, 15, 29])
-
-Jumping to the next level of complexity, it is possible to only
-partially index an array with index arrays.
-It takes a bit of thought
-to understand what happens in such cases. For example, if we just use
-one index array with y: ::
-
-    >>> y[np.array([0,2,4])]
-    array([[ 0,  1,  2,  3,  4,  5,  6],
-           [14, 15, 16, 17, 18, 19, 20],
-           [28, 29, 30, 31, 32, 33, 34]])
-
-What results is the construction of a new array where each value of
-the index array selects one row from the array being indexed and the
-resultant array has the resulting shape (number of index elements,
-size of row).
-
-An example of where this may be useful is for a color lookup table
-where we want to map the values of an image into RGB triples for
-display. The lookup table could have a shape (nlookup, 3). Indexing
-such an array with an image with shape (ny, nx) with dtype=np.uint8
-(or any integer type so long as values are within the bounds of the
-lookup table) will result in an array of shape (ny, nx, 3) where a
-triple of RGB values is associated with each pixel location.
-
-In general, the shape of the resultant array will be the concatenation
-of the shape of the index array (or the shape that all the index arrays
-were broadcast to) with the shape of any unused dimensions (those not
-indexed) in the array being indexed.
-
-Boolean or "mask" index arrays
-==============================
-
-Boolean arrays used as indices are treated in a different manner
-entirely than index arrays. Boolean arrays must be of the same shape
-as the initial dimensions of the array being indexed. In the
-most straightforward case, the boolean array has the same shape: ::
-
-    >>> b = y>20
-    >>> y[b]
-    array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
-
-The result is a 1-D array containing all the elements in the indexed
-array corresponding to all the true elements in the boolean array. As
-with index arrays, what is returned is a copy of the data, not a view
-as one gets with slices.
-
-The result will be multidimensional if y has more dimensions than b.
-For example: ::
-
-    >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
-    array([False, False, False,  True,  True], dtype=bool)
-    >>> y[b[:,5]]
-    array([[21, 22, 23, 24, 25, 26, 27],
-           [28, 29, 30, 31, 32, 33, 34]])
-
-Here the 4th and 5th rows are selected from the indexed array and
-combined to make a 2-D array.
-
-In general, when the boolean array has fewer dimensions than the array
-being indexed, this is equivalent to y[b, ...], which means
-y is indexed by b followed by as many : as are needed to fill
-out the rank of y.
-Thus the shape of the result is one dimension containing the number
-of True elements of the boolean array, followed by the remaining
-dimensions of the array being indexed.
-
-For example, using a 2-D boolean array of shape (2,3)
-with four True elements to select rows from a 3-D array of shape
-(2,3,5) results in a 2-D result of shape (4,5): ::
-
-    >>> x = np.arange(30).reshape(2,3,5)
-    >>> x
-    array([[[ 0,  1,  2,  3,  4],
-            [ 5,  6,  7,  8,  9],
-            [10, 11, 12, 13, 14]],
-           [[15, 16, 17, 18, 19],
-            [20, 21, 22, 23, 24],
-            [25, 26, 27, 28, 29]]])
-    >>> b = np.array([[True, True, False], [False, True, True]])
-    >>> x[b]
-    array([[ 0,  1,  2,  3,  4],
-           [ 5,  6,  7,  8,  9],
-           [20, 21, 22, 23, 24],
-           [25, 26, 27, 28, 29]])
-
-For further details, consult the numpy reference documentation on array
-indexing.
-
-Combining index arrays with slices
-==================================
-
-Index arrays may be combined with slices.
For example: :: - - >>> y[np.array([0,2,4]),1:3] - array([[ 1, 2], - [15, 16], - [29, 30]]) - -In effect, the slice is converted to an index array -np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array -to produce a resultant array of shape (3,2). - -Likewise, slicing can be combined with broadcasted boolean indices: :: - - >>> y[b[:,5],1:3] - array([[22, 23], - [29, 30]]) - -Structural indexing tools -========================= - -To facilitate easy matching of array shapes with expressions and in -assignments, the np.newaxis object can be used within array indices -to add new dimensions with a size of 1. For example: :: - - >>> y.shape - (5, 7) - >>> y[:,np.newaxis,:].shape - (5, 1, 7) - -Note that there are no new elements in the array, just that the -dimensionality is increased. This can be handy to combine two -arrays in a way that otherwise would require explicitly reshaping -operations. For example: :: - - >>> x = np.arange(5) - >>> x[:,np.newaxis] + x[np.newaxis,:] - array([[0, 1, 2, 3, 4], - [1, 2, 3, 4, 5], - [2, 3, 4, 5, 6], - [3, 4, 5, 6, 7], - [4, 5, 6, 7, 8]]) - -The ellipsis syntax maybe used to indicate selecting in full any -remaining unspecified dimensions. For example: :: - - >>> z = np.arange(81).reshape(3,3,3,3) - >>> z[1,...,2] - array([[29, 32, 35], - [38, 41, 44], - [47, 50, 53]]) - -This is equivalent to: :: - - >>> z[1,:,:,2] - array([[29, 32, 35], - [38, 41, 44], - [47, 50, 53]]) - -Assigning values to indexed arrays -================================== - -As mentioned, one can select a subset of an array to assign to using -a single index, slices, and index and mask arrays. The value being -assigned to the indexed array must be shape consistent (the same shape -or broadcastable to the shape the index produces). For example, it is -permitted to assign a constant to a slice: :: - - >>> x = np.arange(10) - >>> x[2:7] = 1 - -or an array of the right size: :: - - >>> x[2:7] = np.arange(5) - -Note that assignments may result in changes if assigning -higher types to lower types (like floats to ints) or even -exceptions (assigning complex to floats or ints): :: - - >>> x[1] = 1.2 - >>> x[1] - 1 - >>> x[1] = 1.2j - : can't convert complex to long; use - long(abs(z)) - - -Unlike some of the references (such as array and mask indices) -assignments are always made to the original data in the array -(indeed, nothing else would make sense!). Note though, that some -actions may not work as one may naively expect. This particular -example is often surprising to people: :: - - >>> x = np.arange(0, 50, 10) - >>> x - array([ 0, 10, 20, 30, 40]) - >>> x[np.array([1, 1, 3, 1])] += 1 - >>> x - array([ 0, 11, 20, 31, 40]) - -Where people expect that the 1st location will be incremented by 3. -In fact, it will only be incremented by 1. The reason is because -a new array is extracted from the original (as a temporary) containing -the values at 1, 1, 3, 1, then the value 1 is added to the temporary, -and then the temporary is assigned back to the original array. Thus -the value of the array at x[1]+1 is assigned to x[1] three times, -rather than being incremented 3 times. - -Dealing with variable numbers of indices within programs -======================================================== - -The index syntax is very powerful but limiting when dealing with -a variable number of indices. 
For example, if you want to write -a function that can handle arguments with various numbers of -dimensions without having to write special case code for each -number of possible dimensions, how can that be done? If one -supplies to the index a tuple, the tuple will be interpreted -as a list of indices. For example (using the previous definition -for the array z): :: - - >>> indices = (1,1,1,1) - >>> z[indices] - 40 - -So one can use code to construct tuples of any number of indices -and then use these within an index. - -Slices can be specified within programs by using the slice() function -in Python. For example: :: - - >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2] - >>> z[indices] - array([39, 40]) - -Likewise, ellipsis can be specified by code by using the Ellipsis -object: :: - - >>> indices = (1, Ellipsis, 1) # same as [1,...,1] - >>> z[indices] - array([[28, 31, 34], - [37, 40, 43], - [46, 49, 52]]) - -For this reason it is possible to use the output from the np.where() -function directly as an index since it always returns a tuple of index -arrays. - -Because the special treatment of tuples, they are not automatically -converted to an array as a list would be. As an example: :: - - >>> z[[1,1,1,1]] # produces a large array - array([[[[27, 28, 29], - [30, 31, 32], ... - >>> z[(1,1,1,1)] # returns a single value - 40 - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/internals.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/internals.py deleted file mode 100644 index 6bd6b1ae9474e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/internals.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -=============== -Array Internals -=============== - -Internal organization of numpy arrays -===================================== - -It helps to understand a bit about how numpy arrays are handled under the covers to help understand numpy better. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to Numpy". - -Numpy arrays consist of two major components, the raw array data (from now on, -referred to as the data buffer), and the information about the raw array data. -The data buffer is typically what people think of as arrays in C or Fortran, -a contiguous (and fixed) block of memory containing fixed sized data items. -Numpy also contains a significant set of data that describes how to interpret -the data in the data buffer. This extra information contains (among other things): - - 1) The basic data element's size in bytes - 2) The start of the data within the data buffer (an offset relative to the - beginning of the data buffer). - 3) The number of dimensions and the size of each dimension - 4) The separation between elements for each dimension (the 'stride'). This - does not have to be a multiple of the element size - 5) The byte order of the data (which may not be the native byte order) - 6) Whether the buffer is read-only - 7) Information (via the dtype object) about the interpretation of the basic - data element. The basic data element may be as simple as a int or a float, - or it may be a compound object (e.g., struct-like), a fixed character field, - or Python object pointers. - 8) Whether the array is to interpreted as C-order or Fortran-order. - -This arrangement allow for very flexible use of arrays. One thing that it allows -is simple changes of the metadata to change the interpretation of the array buffer. 
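-A quick sketch of such a metadata-only change (the dtype chosen here is
-only illustrative; .view() reinterprets the same buffer without copying)::
-
-    >>> x = np.arange(4, dtype=np.int16)
-    >>> y = x.view(np.uint8)   # same data buffer, 1-byte elements
-    >>> y.base is x            # y does not own its data
-    True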
-
-Changing the byteorder of the array is a simple change involving no
-rearrangement of the data. The shape of the array can be changed very easily
-without changing anything in the data buffer or doing any data copying at all.
-
-Among other things that this makes possible, one can create a new array
-metadata object that uses the same data buffer
-to create a new view of that data buffer that has a different interpretation
-of the buffer (e.g., different shape, offset, byte order, strides, etc) but
-shares the same data bytes. Many operations in numpy do just this, such as
-slicing. Other operations, such as transpose, don't move data elements
-around in the array, but rather change the information about the shape and
-strides so that the indexing of the array changes, but the data in the data
-buffer doesn't move.
-
-Typically these new arrays, which carry their own metadata but share the
-original data buffer, are new 'views' into the data buffer. There is a
-different ndarray object, but it
-uses the same data buffer. This is why it is necessary to force copies through
-use of the .copy() method if one really wants to make a new and independent
-copy of the data buffer.
-
-New views into arrays mean that the object reference count for the data
-buffer increases. Simply doing away with the original array object will not
-remove the data buffer if other views of it still exist.
-
-Multidimensional Array Indexing Order Issues
-============================================
-
-What is the right way to index
-multi-dimensional arrays? Before you jump to conclusions about the one and
-true way to index multi-dimensional arrays, it pays to understand why this is
-a confusing issue. This section will try to explain in detail how numpy
-indexing works and why we adopt the convention we do for images, and when it
-may be appropriate to adopt other conventions.
-
-The first thing to understand is
-that there are two conflicting conventions for indexing 2-dimensional arrays.
-Matrix notation uses the first index to indicate which row is being selected and
-the second index to indicate which column is selected. This is opposite the
-geometrically oriented convention for images, where people generally think the
-first index represents x position (i.e., column) and the second represents y
-position (i.e., row). This alone is the source of much confusion;
-matrix-oriented users and image-oriented users expect two different things with
-regard to indexing.
-
-The second issue to understand is how indices correspond
-to the order in which the array is stored in memory. In Fortran the first
-index is the most rapidly varying index when moving through the elements of a
-two-dimensional array as it is stored in memory. If you adopt the matrix
-convention for indexing, then this means the matrix is stored one column at a
-time (since the first index moves to the next row as it changes). Thus Fortran
-is considered a Column-major language. C has just the opposite convention. In
-C, the last index changes most rapidly as one moves through the array as
-stored in memory. Thus C is a Row-major language. The matrix is stored by
-rows. Note that in both cases it presumes that the matrix convention for
-indexing is being used, i.e., for both Fortran and C, the first index is the
-row. Note this convention implies that the indexing convention is invariant
-and that the data order changes to keep that so.
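-As a concrete illustration of the two memory orders (a brief sketch;
-ravel() returns the elements in the requested index order)::
-
-    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
-    >>> a.ravel(order='C')     # row-major: rows are contiguous
-    array([1, 2, 3, 4, 5, 6])
-    >>> a.ravel(order='F')     # column-major: columns are contiguous
-    array([1, 4, 2, 5, 3, 6])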
-But that's not the only way
-to look at it. Suppose one has large two-dimensional arrays (images or
-matrices) stored in data files. Suppose the data are stored by rows rather
-than by columns. If we are to preserve our index convention (whether matrix
-or image) that means that depending on the language we use, we may be forced
-to reorder the data if it is read into memory to preserve our indexing
-convention. For example if we read row-ordered data into memory without
-reordering, it will match the matrix indexing convention for C, but not for
-Fortran. Conversely, it will match the image indexing convention for Fortran,
-but not for C. For C, if one is using data stored in row order, and one wants
-to preserve the image index convention, the data must be reordered when
-reading into memory.
-
-In the end, which you do for Fortran or C depends on
-which is more important, not reordering data or preserving the indexing
-convention. For large images, reordering data is potentially expensive, and
-often the indexing convention is inverted to avoid that.
-
-The situation with
-numpy makes this issue yet more complicated. The internal machinery of numpy
-arrays is flexible enough to accept any ordering of indices. One can simply
-reorder indices by manipulating the internal stride information for arrays
-without reordering the data at all. Numpy will know how to map the new index
-order to the data without moving the data.
-
-So if this is true, why not choose
-the index order that matches what you most expect? In particular, why not define
-row-ordered images to use the image convention? (This is sometimes referred
-to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
-order options for array ordering in numpy.) The drawback of doing this is
-potential performance penalties. It's common to access the data sequentially,
-either implicitly in array operations or explicitly by looping over rows of an
-image. When that is done, then the data will be accessed in non-optimal order.
-As the first index is incremented, what is actually happening is that elements
-spaced far apart in memory are being sequentially accessed, with usually poor
-memory access speeds. For example, consider a two-dimensional image 'im'
-defined so that im[0, 10] represents the value at x=0, y=10. To be consistent
-with usual Python behavior, im[0] would then represent a column at x=0. Yet
-that data would be spread over the whole array, since the data are stored in
-row order. Despite the flexibility of numpy's indexing, it can't really paper
-over the fact that basic operations are rendered inefficient because of data
-order, or that getting contiguous subarrays is still awkward (e.g., im[:,0]
-for the first row, vs im[0]). An idiom such as ``for row in im`` does work,
-but iterating over columns this way doesn't yield contiguous column data.
-
-As it turns out, numpy is
-smart enough when dealing with ufuncs to determine which index is the most
-rapidly varying one in memory and uses that for the innermost loop. Thus for
-ufuncs there is no large intrinsic advantage to either approach in most cases.
-On the other hand, use of .flat with a FORTRAN-ordered array will lead to
-non-optimal memory access as adjacent elements in the flattened array
-(iterator, actually) are not contiguous in memory.
-
-Indeed, the fact is that Python
-indexing on lists and other sequences naturally leads to an outside-to-inside
-ordering (the first index gets the largest grouping, the next the next
-largest, and the last gets the smallest element). Since image data are
-normally stored by rows, this corresponds to position within rows being the
-last item indexed.
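-Arrays in either order can be created directly (a small sketch; the
-flags attribute reports the memory layout)::
-
-    >>> a = np.ones((2, 3), order='F')
-    >>> a.flags['F_CONTIGUOUS']
-    True
-    >>> np.ones((2, 3)).flags['C_CONTIGUOUS']   # the default
-    True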
- -If you do want to use Fortran ordering realize that -there are two approaches to consider: 1) accept that the first index is just not -the most rapidly changing in memory and have all your I/O routines reorder -your data when going from memory to disk or visa versa, or use numpy's -mechanism for mapping the first index to the most rapidly varying data. We -recommend the former if possible. The disadvantage of the latter is that many -of numpy's functions will yield arrays without Fortran ordering unless you are -careful to use the 'order' keyword. Doing this would be highly inconvenient. - -Otherwise we recommend simply learning to reverse the usual order of indices -when accessing elements of an array. Granted, it goes against the grain, but -it is more in line with Python semantics and the natural order of the data. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/io.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/io.py deleted file mode 100644 index e45bfc9b32110..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/io.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -========= -Array I/O -========= - -Placeholder for array I/O documentation. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/jargon.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/jargon.py deleted file mode 100644 index 3fcbc7d23f2f8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/jargon.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -====== -Jargon -====== - -Placeholder for computer science, engineering and other jargon. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/methods_vs_functions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/methods_vs_functions.py deleted file mode 100644 index 4149000bc80ac..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/methods_vs_functions.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -===================== -Methods vs. Functions -===================== - -Placeholder for Methods vs. Functions documentation. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/misc.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/misc.py deleted file mode 100644 index 1709ad66da7a8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/misc.py +++ /dev/null @@ -1,226 +0,0 @@ -""" -============= -Miscellaneous -============= - -IEEE 754 Floating Point Special Values --------------------------------------- - -Special values defined in numpy: nan, inf, - -NaNs can be used as a poor-man's mask (if you don't care what the -original value was) - -Note: cannot use equality to test NaNs. E.g.: :: - - >>> myarr = np.array([1., 0., np.nan, 3.]) - >>> np.where(myarr == np.nan) - >>> np.nan == np.nan # is always False! Use special numpy functions instead. - False - >>> myarr[myarr == np.nan] = 0. # doesn't work - >>> myarr - array([ 1., 0., NaN, 3.]) - >>> myarr[np.isnan(myarr)] = 0. 
# use this instead find - >>> myarr - array([ 1., 0., 0., 3.]) - -Other related special value functions: :: - - isinf(): True if value is inf - isfinite(): True if not nan or inf - nan_to_num(): Map nan to 0, inf to max float, -inf to min float - -The following corresponds to the usual functions except that nans are excluded -from the results: :: - - nansum() - nanmax() - nanmin() - nanargmax() - nanargmin() - - >>> x = np.arange(10.) - >>> x[3] = np.nan - >>> x.sum() - nan - >>> np.nansum(x) - 42.0 - -How numpy handles numerical exceptions --------------------------------------- - -The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` -and ``'ignore'`` for ``underflow``. But this can be changed, and it can be -set individually for different kinds of exceptions. The different behaviors -are: - - - 'ignore' : Take no action when the exception occurs. - - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - - 'raise' : Raise a `FloatingPointError`. - - 'call' : Call a function specified using the `seterrcall` function. - - 'print' : Print a warning directly to ``stdout``. - - 'log' : Record error in a Log object specified by `seterrcall`. - -These behaviors can be set for all kinds of errors or specific ones: - - - all : apply to all numeric exceptions - - invalid : when NaNs are generated - - divide : divide by zero (for integers as well!) - - overflow : floating point overflows - - underflow : floating point underflows - -Note that integer divide-by-zero is handled by the same machinery. -These behaviors are set on a per-thread basis. - -Examples --------- - -:: - - >>> oldsettings = np.seterr(all='warn') - >>> np.zeros(5,dtype=np.float32)/0. - invalid value encountered in divide - >>> j = np.seterr(under='ignore') - >>> np.array([1.e-100])**10 - >>> j = np.seterr(invalid='raise') - >>> np.sqrt(np.array([-1.])) - FloatingPointError: invalid value encountered in sqrt - >>> def errorhandler(errstr, errflag): - ... print "saw stupid error!" - >>> np.seterrcall(errorhandler) - - >>> j = np.seterr(all='call') - >>> np.zeros(5, dtype=np.int32)/0 - FloatingPointError: invalid value encountered in divide - saw stupid error! - >>> j = np.seterr(**oldsettings) # restore previous - ... # error-handling settings - -Interfacing to C ----------------- -Only a survey of the choices. Little detail on how each works. - -1) Bare metal, wrap your own C-code manually. - - - Plusses: - - - Efficient - - No dependencies on other tools - - - Minuses: - - - Lots of learning overhead: - - - need to learn basics of Python C API - - need to learn basics of numpy C API - - need to learn how to handle reference counting and love it. - - - Reference counting often difficult to get right. - - - getting it wrong leads to memory leaks, and worse, segfaults - - - API will change for Python 3.0! 
- -2) Cython - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in pseudo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - has become the de-facto standard within the scientific Python community - - fast indexing support for arrays - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - -4) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing sharable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data a.ctypes.get_strides - a.ctypes.data_as a.ctypes.shape - a.ctypes.get_as_parameter a.ctypes.shape_as - a.ctypes.get_data a.ctypes.strides - a.ctypes.get_shape a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. - -5) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -7) scipy.weave - - - Plusses: - - - can turn many numpy expressions into C code - - dynamic compiling and loading of generated C code - - can embed pure C code in Python module and have weave extract, generate - interfaces and compile, etc. - - - Minuses: - - - Future very uncertain: it's the only part of Scipy not ported to Python 3 - and is effectively deprecated in favor of Cython. - -8) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -The clear choice to wrap Fortran code is -`f2py `_. - -Pyfort is an older alternative, but not supported any longer. -Fwrap is a newer project that looked promising but isn't being developed any -longer. - -Interfacing to C++: -------------------- - 1) Cython - 2) CXX - 3) Boost.python - 4) SWIG - 5) SIP (used mainly in PyQT) - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/performance.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/performance.py deleted file mode 100644 index b0c158bf33c20..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/performance.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -=========== -Performance -=========== - -Placeholder for Improving Performance documentation. 
- -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/structured_arrays.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/structured_arrays.py deleted file mode 100644 index 0444bdf90c0c2..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/structured_arrays.py +++ /dev/null @@ -1,223 +0,0 @@ -""" -===================================== -Structured Arrays (and Record Arrays) -===================================== - -Introduction -============ - -Numpy provides powerful capabilities to create arrays of structs or records. -These arrays permit one to manipulate the data by the structs or by fields of -the struct. A simple example will show what is meant.: :: - - >>> x = np.zeros((2,),dtype=('i4,f4,a10')) - >>> x[:] = [(1,2.,'Hello'),(2,3.,"World")] - >>> x - array([(1, 2.0, 'Hello'), (2, 3.0, 'World')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - -Here we have created a one-dimensional array of length 2. Each element of -this array is a record that contains three items, a 32-bit integer, a 32-bit -float, and a string of length 10 or less. If we index this array at the second -position we get the second record: :: - - >>> x[1] - (2,3.,"World") - -Conveniently, one can access any field of the array by indexing using the -string that names that field. In this case the fields have received the -default names 'f0', 'f1' and 'f2'. :: - - >>> y = x['f1'] - >>> y - array([ 2., 3.], dtype=float32) - >>> y[:] = 2*y - >>> y - array([ 4., 6.], dtype=float32) - >>> x - array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - -In these examples, y is a simple float array consisting of the 2nd field -in the record. But, rather than being a copy of the data in the structured -array, it is a view, i.e., it shares exactly the same memory locations. -Thus, when we updated this array by doubling its values, the structured -array shows the corresponding values as doubled as well. Likewise, if one -changes the record, the field view also changes: :: - - >>> x[1] = (-1,-1.,"Master") - >>> x - array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')], - dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')]) - >>> y - array([ 4., -1.], dtype=float32) - -Defining Structured Arrays -========================== - -One defines a structured array through the dtype object. There are -**several** alternative ways to define the fields of a record. Some of -these variants provide backward compatibility with Numeric, numarray, or -another module, and should not be used except for such purposes. These -will be so noted. One specifies record structure in -one of four alternative ways, using an argument (as supplied to a dtype -function keyword or a dtype object constructor itself). This -argument must be one of the following: 1) string, 2) tuple, 3) list, or -4) dictionary. Each of these is briefly described below. - -1) String argument (as used in the above examples). -In this case, the constructor expects a comma-separated list of type -specifiers, optionally with extra shape information. -The type specifiers can take 4 different forms: :: - - a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a - (representing bytes, ints, unsigned ints, floats, complex and - fixed length strings of specified byte lengths) - b) int8,...,uint8,...,float16, float32, float64, complex64, complex128 - (this time with bit sizes) - c) older Numeric/numarray type specifications (e.g. Float32). 
- Don't use these in new code! - d) Single character type specifiers (e.g H for unsigned short ints). - Avoid using these unless you must. Details can be found in the - Numpy book - -These different styles can be mixed within the same string (but why would you -want to do that?). Furthermore, each type specifier can be prefixed -with a repetition number, or a shape. In these cases an array -element is created, i.e., an array within a record. That array -is still referred to as a single field. An example: :: - - >>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64') - >>> x - array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), - ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), - ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])], - dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))]) - -By using strings to define the record structure, it precludes being -able to name the fields in the original definition. The names can -be changed as shown later, however. - -2) Tuple argument: The only relevant tuple case that applies to record -structures is when a structure is mapped to an existing data type. This -is done by pairing in a tuple, the existing data type with a matching -dtype definition (using any of the variants being described here). As -an example (using a definition using a list, so see 3) for further -details): :: - - >>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')])) - >>> x - array([0, 0, 0]) - >>> x['r'] - array([0, 0, 0], dtype=uint8) - -In this case, an array is produced that looks and acts like a simple int32 array, -but also has definitions for fields that use only one byte of the int32 (a bit -like Fortran equivalencing). - -3) List argument: In this case the record structure is defined with a list of -tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field -('' is permitted), 2) the type of the field, and 3) the shape (optional). -For example:: - - >>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))]) - >>> x - array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), - (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), - (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])], - dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))]) - -4) Dictionary argument: two different forms are permitted. The first consists -of a dictionary with two required keys ('names' and 'formats'), each having an -equal sized list of values. The format list contains any type/shape specifier -allowed in other contexts. The names must be strings. There are two optional -keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to -the required two where offsets contain integer offsets for each field, and -titles are objects containing metadata for each field (these do not have -to be strings), where the value of None is permitted. As an example: :: - - >>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']}) - >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[('col1', '>i4'), ('col2', '>f4')]) - -The other dictionary form permitted is a dictionary of name keys with tuple -values specifying type, offset, and an optional title. :: - - >>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')}) - >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')]) - -Accessing and modifying field names -=================================== - -The field names are an attribute of the dtype object defining the record structure. 
-For the last example: ::
-
- >>> x.dtype.names
- ('col1', 'col2')
- >>> x.dtype.names = ('x', 'y')
- >>> x
- array([(0, 0.0), (0, 0.0), (0, 0.0)],
-      dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
- >>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
- <type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
-
-Accessing field titles
-======================
-
-The field titles provide a standard place to put associated info for fields.
-They do not have to be strings. ::
-
- >>> x.dtype.fields['x'][2]
- 'title 1'
-
-Accessing multiple fields at once
-=================================
-
-You can access multiple fields at once using a list of field names: ::
-
- >>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
-         dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
-
-Notice that `x` is created with a list of tuples. ::
-
- >>> x[['x','y']]
- array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
-      dtype=[('x', '<f4'), ('y', '<f4')])
- >>> x[['x','value']]
- array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
-        (1.0, [[2.0, 6.0], [2.0, 6.0]])],
-      dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
- >>> x[['y','x']]
- array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
-      dtype=[('y', '<f4'), ('x', '<f4')])
-
-Filling structured arrays
-=========================
-
-Structured arrays can be filled by field or row by row. ::
-
- >>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
- >>> arr['var1'] = np.arange(5)
-
-If you fill it in row by row, it takes a tuple
-(but not a list or array!)::
-
- >>> arr[0] = (10,20)
- >>> arr
- array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
-       dtype=[('var1', '<f8'), ('var2', '<f8')])
-
-More information
-================
-
-You can find more information on recarrays and structured arrays
-(including the difference between the two) in the numpy user guide.
-
-"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/subclassing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/subclassing.py
deleted file mode 100644
index a62fc2d6de922..0000000000000
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/subclassing.py
+++ /dev/null
@@ -1,560 +0,0 @@
-"""
-=============================
-Subclassing ndarray in python
-=============================
-
-Credits
--------
-
-This page is based with thanks on the wiki page on subclassing by Pierre
-Gerard-Marchant - http://www.scipy.org/Subclasses.
-
-Introduction
-------------
-
-Subclassing ndarray is relatively simple, but it has some complications
-compared to other Python objects. On this page we explain the machinery
-that allows you to subclass ndarray, and the implications for
-implementing a subclass.
-
-ndarrays and object creation
-============================
-
-Subclassing ndarray is complicated by the fact that new instances of
-ndarray classes can come about in three different ways. These are:
-
-#. Explicit constructor call - as in ``MySubClass(params)``. This is
-   the usual route to Python instance creation.
-#. View casting - casting an existing ndarray as a given subclass
-#. New from template - creating a new instance from a template
-   instance. Examples include returning slices from a subclassed array,
-   creating return types from ufuncs, and copying arrays. See
-   :ref:`new-from-template` for more details
-
-The last two are characteristics of ndarrays - in order to support
-things like array slicing. The complications of subclassing ndarray are
-due to the mechanisms numpy has to support these latter two routes of
-instance creation.
-
-.. _view-casting:
-
-View casting
-------------
-
-*View casting* is the standard ndarray mechanism by which you take an
-ndarray of any subclass, and return a view of the array as another
-(specified) subclass:
-
->>> import numpy as np
->>> # create a completely useless ndarray subclass
->>> class C(np.ndarray): pass
->>> # create a standard ndarray
->>> arr = np.zeros((3,))
->>> # take a view of it, as our useless subclass
->>> c_arr = arr.view(C)
->>> type(c_arr)
-<class 'C'>
-
-.. _new-from-template:
-
-Creating new from template
---------------------------
-
-New instances of an ndarray subclass can also come about by a very
-similar mechanism to :ref:`view-casting`, when numpy finds it needs to
-create a new instance from a template instance. The most obvious place
-this has to happen is when you are taking slices of subclassed arrays.
-For example:
-
->>> v = c_arr[1:]
->>> type(v) # the view is of type 'C'
-<class 'C'>
->>> v is c_arr # but it's a new instance
-False
-
-The slice is a *view* onto the original ``c_arr`` data. So, when we
-take a view from the ndarray, we return a new ndarray, of the same
-class, that points to the data in the original.
-
-There are other points in the use of ndarrays where we need such views,
-such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
-(see also :ref:`array-wrap`), and reducing methods (like
-``c_arr.mean()``).
-
-Relationship of view casting and new-from-template
---------------------------------------------------
-
-These paths both use the same machinery. We make the distinction here,
-because they result in different input to your methods. Specifically,
-:ref:`view-casting` means you have created a new instance of your array
-type from any potential subclass of ndarray. :ref:`new-from-template`
-means you have created a new instance of your class from a pre-existing
-instance, allowing you - for example - to copy across attributes that
-are particular to your subclass.
-
-Implications for subclassing
-----------------------------
-
-If we subclass ndarray, we need to deal not only with explicit
-construction of our array type, but also :ref:`view-casting` or
-:ref:`new-from-template`. Numpy has the machinery to do this, and it is
-this machinery that makes subclassing slightly non-standard.
-
-There are two aspects to the machinery that ndarray uses to support
-views and new-from-template in subclasses.
-
-The first is the use of the ``ndarray.__new__`` method for the main work
-of object initialization, rather than the more usual ``__init__``
-method. The second is the use of the ``__array_finalize__`` method to
-allow subclasses to clean up after the creation of views and new
-instances from templates.
-
-A brief Python primer on ``__new__`` and ``__init__``
-=====================================================
-
-``__new__`` is a standard Python method, and, if present, is called
-before ``__init__`` when we create a class instance. See the python
-``__new__`` documentation for more detail.
-
-For example, consider the following Python code:
-
-.. testcode::
testcode:: - - class C(object): - def __new__(cls, *args): - print 'Cls in __new__:', cls - print 'Args in __new__:', args - return object.__new__(cls, *args) - - def __init__(self, *args): - print 'type(self) in __init__:', type(self) - print 'Args in __init__:', args - -meaning that we get: - ->>> c = C('hello') -Cls in __new__: -Args in __new__: ('hello',) -type(self) in __init__: -Args in __init__: ('hello',) - -When we call ``C('hello')``, the ``__new__`` method gets its own class -as first argument, and the passed argument, which is the string -``'hello'``. After python calls ``__new__``, it usually (see below) -calls our ``__init__`` method, with the output of ``__new__`` as the -first argument (now a class instance), and the passed arguments -following. - -As you can see, the object can be initialized in the ``__new__`` -method or the ``__init__`` method, or both, and in fact ndarray does -not have an ``__init__`` method, because all the initialization is -done in the ``__new__`` method. - -Why use ``__new__`` rather than just the usual ``__init__``? Because -in some cases, as for ndarray, we want to be able to return an object -of some other class. Consider the following: - -.. testcode:: - - class D(C): - def __new__(cls, *args): - print 'D cls is:', cls - print 'D args in __new__:', args - return C.__new__(C, *args) - - def __init__(self, *args): - # we never get here - print 'In D __init__' - -meaning that: - ->>> obj = D('hello') -D cls is: -D args in __new__: ('hello',) -Cls in __new__: -Args in __new__: ('hello',) ->>> type(obj) - - -The definition of ``C`` is the same as before, but for ``D``, the -``__new__`` method returns an instance of class ``C`` rather than -``D``. Note that the ``__init__`` method of ``D`` does not get -called. In general, when the ``__new__`` method returns an object of -class other than the class in which it is defined, the ``__init__`` -method of that class is not called. - -This is how subclasses of the ndarray class are able to return views -that preserve the class type. When taking a view, the standard -ndarray machinery creates the new ndarray object with something -like:: - - obj = ndarray.__new__(subtype, shape, ... - -where ``subdtype`` is the subclass. Thus the returned view is of the -same class as the subclass, rather than being of class ``ndarray``. - -That solves the problem of returning views of the same type, but now -we have a new problem. The machinery of ndarray can set the class -this way, in its standard methods for taking views, but the ndarray -``__new__`` method knows nothing of what we have done in our own -``__new__`` method in order to set attributes, and so on. (Aside - -why not call ``obj = subdtype.__new__(...`` then? Because we may not -have a ``__new__`` method with the same call signature). - -The role of ``__array_finalize__`` -================================== - -``__array_finalize__`` is the mechanism that numpy provides to allow -subclasses to handle the various ways that new instances get created. - -Remember that subclass instances can come about in these three ways: - -#. explicit constructor call (``obj = MySubClass(params)``). This will - call the usual sequence of ``MySubClass.__new__`` then (if it exists) - ``MySubClass.__init__``. -#. :ref:`view-casting` -#. :ref:`new-from-template` - -Our ``MySubClass.__new__`` method only gets called in the case of the -explicit constructor call, so we can't rely on ``MySubClass.__new__`` or -``MySubClass.__init__`` to deal with the view casting and -new-from-template. 
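-
-A quick check makes this concrete (a minimal, throwaway sketch; class
-``E`` exists only for this illustration):
-
->>> class E(np.ndarray):
-...     def __new__(cls, *args, **kwargs):
-...         print 'In E.__new__'
-...         return np.ndarray.__new__(cls, *args, **kwargs)
-...     def __init__(self, *args, **kwargs):
-...         print 'In E.__init__'
->>> e_arr = np.arange(3).view(E)   # view casting: neither method prints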
It turns out that ``MySubClass.__array_finalize__`` -*does* get called for all three methods of object creation, so this is -where our object creation housekeeping usually goes. - -* For the explicit constructor call, our subclass will need to create a - new ndarray instance of its own class. In practice this means that - we, the authors of the code, will need to make a call to - ``ndarray.__new__(MySubClass,...)``, or do view casting of an existing - array (see below) -* For view casting and new-from-template, the equivalent of - ``ndarray.__new__(MySubClass,...`` is called, at the C level. - -The arguments that ``__array_finalize__`` recieves differ for the three -methods of instance creation above. - -The following code allows us to look at the call sequences and arguments: - -.. testcode:: - - import numpy as np - - class C(np.ndarray): - def __new__(cls, *args, **kwargs): - print 'In __new__ with class %s' % cls - return np.ndarray.__new__(cls, *args, **kwargs) - - def __init__(self, *args, **kwargs): - # in practice you probably will not need or want an __init__ - # method for your subclass - print 'In __init__ with class %s' % self.__class__ - - def __array_finalize__(self, obj): - print 'In array_finalize:' - print ' self type is %s' % type(self) - print ' obj type is %s' % type(obj) - - -Now: - ->>> # Explicit constructor ->>> c = C((10,)) -In __new__ with class -In array_finalize: - self type is - obj type is -In __init__ with class ->>> # View casting ->>> a = np.arange(10) ->>> cast_a = a.view(C) -In array_finalize: - self type is - obj type is ->>> # Slicing (example of new-from-template) ->>> cv = c[:1] -In array_finalize: - self type is - obj type is - -The signature of ``__array_finalize__`` is:: - - def __array_finalize__(self, obj): - -``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our -own class (``self``) as well as the object from which the view has been -taken (``obj``). As you can see from the output above, the ``self`` is -always a newly created instance of our subclass, and the type of ``obj`` -differs for the three instance creation methods: - -* When called from the explicit constructor, ``obj`` is ``None`` -* When called from view casting, ``obj`` can be an instance of any - subclass of ndarray, including our own. -* When called in new-from-template, ``obj`` is another instance of our - own subclass, that we might use to update the new ``self`` instance. - -Because ``__array_finalize__`` is the only method that always sees new -instances being created, it is the sensible place to fill in instance -defaults for new object attributes, among other tasks. - -This may be clearer with an example. - -Simple example - adding an extra attribute to ndarray ------------------------------------------------------ - -.. testcode:: - - import numpy as np - - class InfoArray(np.ndarray): - - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, - strides=None, order=None, info=None): - # Create the ndarray instance of our type, given the usual - # ndarray input arguments. This will call the standard - # ndarray constructor, but return an object of our type. 
- # It also triggers a call to InfoArray.__array_finalize__ - obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides, - order) - # set the new 'info' attribute to the value passed - obj.info = info - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # ``self`` is a new object resulting from - # ndarray.__new__(InfoArray, ...), therefore it only has - # attributes that the ndarray.__new__ constructor gave it - - # i.e. those of a standard ndarray. - # - # We could have got to the ndarray.__new__ call in 3 ways: - # From an explicit constructor - e.g. InfoArray(): - # obj is None - # (we're in the middle of the InfoArray.__new__ - # constructor, and self.info will be set when we return to - # InfoArray.__new__) - if obj is None: return - # From view casting - e.g arr.view(InfoArray): - # obj is arr - # (type(obj) can be InfoArray) - # From new-from-template - e.g infoarr[:3] - # type(obj) is InfoArray - # - # Note that it is here, rather than in the __new__ method, - # that we set the default value for 'info', because this - # method sees all creation of default objects - with the - # InfoArray.__new__ constructor, but also with - # arr.view(InfoArray). - self.info = getattr(obj, 'info', None) - # We do not need to return anything - - -Using the object looks like this: - - >>> obj = InfoArray(shape=(3,)) # explicit constructor - >>> type(obj) - - >>> obj.info is None - True - >>> obj = InfoArray(shape=(3,), info='information') - >>> obj.info - 'information' - >>> v = obj[1:] # new-from-template - here - slicing - >>> type(v) - - >>> v.info - 'information' - >>> arr = np.arange(10) - >>> cast_arr = arr.view(InfoArray) # view casting - >>> type(cast_arr) - - >>> cast_arr.info is None - True - -This class isn't very useful, because it has the same constructor as the -bare ndarray object, including passing in buffers and shapes and so on. -We would probably prefer the constructor to be able to take an already -formed ndarray from the usual numpy calls to ``np.array`` and return an -object. - -Slightly more realistic example - attribute added to existing array -------------------------------------------------------------------- - -Here is a class that takes a standard ndarray that already exists, casts -as our type, and adds an extra attribute. - -.. testcode:: - - import numpy as np - - class RealisticInfoArray(np.ndarray): - - def __new__(cls, input_array, info=None): - # Input array is an already formed ndarray instance - # We first cast to be our class type - obj = np.asarray(input_array).view(cls) - # add the new attribute to the created instance - obj.info = info - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # see InfoArray.__array_finalize__ for comments - if obj is None: return - self.info = getattr(obj, 'info', None) - - -So: - - >>> arr = np.arange(5) - >>> obj = RealisticInfoArray(arr, info='information') - >>> type(obj) - - >>> obj.info - 'information' - >>> v = obj[1:] - >>> type(v) - - >>> v.info - 'information' - -.. _array-wrap: - -``__array_wrap__`` for ufuncs -------------------------------------------------------- - -``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy -functions, to allow a subclass to set the type of the return value -and update attributes and metadata. Let's show how this works with an example. -First we make the same subclass as above, but with a different name and -some print statements: - -.. 
testcode:: - - import numpy as np - - class MySubClass(np.ndarray): - - def __new__(cls, input_array, info=None): - obj = np.asarray(input_array).view(cls) - obj.info = info - return obj - - def __array_finalize__(self, obj): - print 'In __array_finalize__:' - print ' self is %s' % repr(self) - print ' obj is %s' % repr(obj) - if obj is None: return - self.info = getattr(obj, 'info', None) - - def __array_wrap__(self, out_arr, context=None): - print 'In __array_wrap__:' - print ' self is %s' % repr(self) - print ' arr is %s' % repr(out_arr) - # then just call the parent - return np.ndarray.__array_wrap__(self, out_arr, context) - -We run a ufunc on an instance of our new array: - ->>> obj = MySubClass(np.arange(5), info='spam') -In __array_finalize__: - self is MySubClass([0, 1, 2, 3, 4]) - obj is array([0, 1, 2, 3, 4]) ->>> arr2 = np.arange(5)+1 ->>> ret = np.add(arr2, obj) -In __array_wrap__: - self is MySubClass([0, 1, 2, 3, 4]) - arr is array([1, 3, 5, 7, 9]) -In __array_finalize__: - self is MySubClass([1, 3, 5, 7, 9]) - obj is MySubClass([0, 1, 2, 3, 4]) ->>> ret -MySubClass([1, 3, 5, 7, 9]) ->>> ret.info -'spam' - -Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the -input with the highest ``__array_priority__`` value, in this case -``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and -``out_arr`` as the (ndarray) result of the addition. In turn, the -default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the -result to class ``MySubClass``, and called ``__array_finalize__`` - -hence the copying of the ``info`` attribute. This has all happened at the C level. - -But, we could do anything we wanted: - -.. testcode:: - - class SillySubClass(np.ndarray): - - def __array_wrap__(self, arr, context=None): - return 'I lost your data' - ->>> arr1 = np.arange(5) ->>> obj = arr1.view(SillySubClass) ->>> arr2 = np.arange(5) ->>> ret = np.multiply(obj, arr2) ->>> ret -'I lost your data' - -So, by defining a specific ``__array_wrap__`` method for our subclass, -we can tweak the output from ufuncs. The ``__array_wrap__`` method -requires ``self``, then an argument - which is the result of the ufunc - -and an optional parameter *context*. This parameter is returned by some -ufuncs as a 3-element tuple: (name of the ufunc, argument of the ufunc, -domain of the ufunc). ``__array_wrap__`` should return an instance of -its containing class. See the masked array subclass for an -implementation. - -In addition to ``__array_wrap__``, which is called on the way out of the -ufunc, there is also an ``__array_prepare__`` method which is called on -the way into the ufunc, after the output arrays are created but before any -computation has been performed. The default implementation does nothing -but pass through the array. ``__array_prepare__`` should not attempt to -access the array data or resize the array, it is intended for setting the -output array type, updating attributes and metadata, and performing any -checks based on the input that may be desired before computation begins. -Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or -subclass thereof or raise an error. - -Extra gotchas - custom ``__del__`` methods and ndarray.base ------------------------------------------------------------ - -One of the problems that ndarray solves is keeping track of memory -ownership of ndarrays and their views. Consider the case where we have -created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``. 
-The two objects are looking at the same memory. Numpy keeps track of -where the data came from for a particular array or view, with the -``base`` attribute: - ->>> # A normal ndarray, that owns its own data ->>> arr = np.zeros((4,)) ->>> # In this case, base is None ->>> arr.base is None -True ->>> # We take a view ->>> v1 = arr[1:] ->>> # base now points to the array that it derived from ->>> v1.base is arr -True ->>> # Take a view of a view ->>> v2 = v1[1:] ->>> # base points to the view it derived from ->>> v2.base is v1 -True - -In general, if the array owns its own memory, as for ``arr`` in this -case, then ``arr.base`` will be None - there are some exceptions to this -- see the numpy book for more details. - -The ``base`` attribute is useful in being able to tell whether we have -a view or the original array. This in turn can be useful if we need -to know whether or not to do some specific cleanup when the subclassed -array is deleted. For example, we may only want to do the cleanup if -the original array is deleted, but not the views. For an example of -how this can work, have a look at the ``memmap`` class in -``numpy.core``. - - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/ufuncs.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/ufuncs.py deleted file mode 100644 index 0132202adc55c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/doc/ufuncs.py +++ /dev/null @@ -1,138 +0,0 @@ -""" -=================== -Universal Functions -=================== - -Ufuncs are, generally speaking, mathematical functions or operations that are -applied element-by-element to the contents of an array. That is, the result -in each output array element only depends on the value in the corresponding -input array (or arrays) and on no other array elements. Numpy comes with a -large suite of ufuncs, and scipy extends that suite substantially. The simplest -example is the addition operator: :: - - >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) - array([1, 3, 2, 6]) - -The unfunc module lists all the available ufuncs in numpy. Documentation on -the specific ufuncs may be found in those modules. This documentation is -intended to address the more general aspects of unfuncs common to most of -them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.) -have equivalent functions defined (e.g. add() for +) - -Type coercion -============= - -What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of -two different types? What is the type of the result? Typically, the result is -the higher of the two types. For example: :: - - float32 + float64 -> float64 - int8 + int32 -> int32 - int16 + float32 -> float32 - float32 + complex64 -> complex64 - -There are some less obvious cases generally involving mixes of types -(e.g. uints, ints and floats) where equal bit sizes for each are not -capable of saving all the information in a different type of equivalent -bit size. Some examples are int32 vs float32 or uint32 vs int32. -Generally, the result is the higher type of larger size than both -(if available). So: :: - - int32 + float32 -> float64 - uint32 + int32 -> int64 - -Finally, the type coercion behavior when expressions involve Python -scalars is different than that seen for arrays. Since Python has a -limited number of types, combining a Python int with a dtype=np.int8 -array does not coerce to the higher type but instead, the type of the -array prevails. 
So the rule for Python scalars combined with arrays is
-that the result will be that of the array type equivalent to the Python
-scalar if the Python scalar is of a higher 'kind' than the array (e.g.,
-float vs. int); otherwise the resultant type will be that of the array.
-For example: ::
-
-  Python int + int8 -> int8
-  Python float + int8 -> float64
-
-ufunc methods
-=============
-
-Binary ufuncs support 4 methods.
-
-**.reduce(arr)** applies the binary operator to elements of the array in
-  sequence. For example: ::
-
-  >>> np.add.reduce(np.arange(10)) # adds all elements of array
-  45
-
-For multidimensional arrays, the first dimension is reduced by default: ::
-
-  >>> np.add.reduce(np.arange(10).reshape(2,5))
-  array([ 5,  7,  9, 11, 13])
-
-The axis keyword can be used to specify different axes to reduce: ::
-
-  >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
-  array([10, 35])
-
-**.accumulate(arr)** applies the binary operator and generates an
-equivalently shaped array that includes the accumulated amount for each
-element of the array. A couple of examples: ::
-
-  >>> np.add.accumulate(np.arange(10))
-  array([ 0,  1,  3,  6, 10, 15, 21, 28, 36, 45])
-  >>> np.multiply.accumulate(np.arange(1,9))
-  array([    1,     2,     6,    24,   120,   720,  5040, 40320])
-
-The behavior for multidimensional arrays is the same as for .reduce(),
-as is the use of the axis keyword.
-
-**.reduceat(arr,indices)** allows one to apply reduce to selected parts
-  of an array. It is a difficult method to understand; see its
-  documentation for the details.
-
-**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
-  arr2. It will work on multidimensional arrays (the shape of the result is
-  the concatenation of the two input shapes): ::
-
-  >>> np.multiply.outer(np.arange(3),np.arange(4))
-  array([[0, 0, 0, 0],
-         [0, 1, 2, 3],
-         [0, 2, 4, 6]])
-
-Output arguments
-================
-
-All ufuncs accept an optional output array. The array must be of the expected
-output shape. Beware that if the type of the output array is of a different
-(and lower) type than the output result, the results may be silently truncated
-or otherwise corrupted in the downcast to the lower type. This usage is useful
-when one wants to avoid creating large temporary arrays and instead allows one
-to reuse the same array memory repeatedly (at the expense of not being able to
-use more convenient operator notation in expressions). Note that when the
-output argument is used, the ufunc still returns a reference to the result.
-
-  >>> x = np.arange(2)
-  >>> np.add(np.arange(2),np.arange(2.),x)
-  array([0, 2])
-  >>> x
-  array([0, 2])
-
-and & or as ufuncs
-==================
-
-Invariably people try to use the Python 'and' and 'or' as logical operators
-(and quite understandably). But these operators do not behave as normal
-operators since Python treats them quite differently. They cannot be
-overloaded with array equivalents. Thus using 'and' or 'or' with an array
-results in an error. There are two alternatives:
-
- 1) use the ufunc functions logical_and() and logical_or().
- 2) use the bitwise operators & and \\|. The drawback of these is that if
-    the arguments to these operators are not boolean arrays, the result is
-    likely incorrect. On the other hand, most usages of logical_and and
-    logical_or are with boolean arrays. As long as one is careful, this is
-    a convenient way to apply these operators.
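-
-As a quick illustration of both alternatives (a minimal sketch; the array
-values here are arbitrary): ::
-
-  >>> a = np.array([True, True, False])
-  >>> b = np.array([True, False, False])
-  >>> np.logical_and(a, b)
-  array([ True, False, False], dtype=bool)
-  >>> a & b                    # same result, since both are boolean arrays
-  array([ True, False, False], dtype=bool)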
- -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/dual.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/dual.py deleted file mode 100644 index 1517d8421345c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/dual.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -Aliases for functions which may be accelerated by Scipy. - -Scipy_ can be built to use accelerated or otherwise improved libraries -for FFTs, linear algebra, and special functions. This module allows -developers to transparently support these accelerated functions when -scipy is available but still support users who have only installed -Numpy. - -.. _Scipy : http://www.scipy.org - -""" -from __future__ import division, absolute_import, print_function - -# This module should be used for functions both in numpy and scipy if -# you want to use the numpy version if available but the scipy version -# otherwise. -# Usage --- from numpy.dual import fft, inv - -__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2', - 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals', - 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0'] - -import numpy.linalg as linpkg -import numpy.fft as fftpkg -from numpy.lib import i0 -import sys - - -fft = fftpkg.fft -ifft = fftpkg.ifft -fftn = fftpkg.fftn -ifftn = fftpkg.ifftn -fft2 = fftpkg.fft2 -ifft2 = fftpkg.ifft2 - -norm = linpkg.norm -inv = linpkg.inv -svd = linpkg.svd -solve = linpkg.solve -det = linpkg.det -eig = linpkg.eig -eigvals = linpkg.eigvals -eigh = linpkg.eigh -eigvalsh = linpkg.eigvalsh -lstsq = linpkg.lstsq -pinv = linpkg.pinv -cholesky = linpkg.cholesky - -_restore_dict = {} - -def register_func(name, func): - if name not in __all__: - raise ValueError("%s not a dual function." % name) - f = sys._getframe(0).f_globals - _restore_dict[name] = f[name] - f[name] = func - -def restore_func(name): - if name not in __all__: - raise ValueError("%s not a dual function." % name) - try: - val = _restore_dict[name] - except KeyError: - return - else: - sys._getframe(0).f_globals[name] = val - -def restore_all(): - for name in _restore_dict.keys(): - restore_func(name) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__init__.py deleted file mode 100644 index fcfd1853e2392..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, absolute_import, print_function - -__all__ = ['run_main', 'compile', 'f2py_testing'] - -import os -import sys -import subprocess - -from . import f2py2e -from . import f2py_testing -from . import diagnose - -from .info import __doc__ - -run_main = f2py2e.run_main -main = f2py2e.main - -def compile(source, - modulename = 'untitled', - extra_args = '', - verbose = 1, - source_fn = None - ): - ''' Build extension module from processing source with f2py. - Read the source of this function for more information. 
- ''' - from numpy.distutils.exec_command import exec_command - import tempfile - if source_fn is None: - f = tempfile.NamedTemporaryFile(suffix='.f') - else: - f = open(source_fn, 'w') - - try: - f.write(source) - f.flush() - - args = ' -c -m %s %s %s'%(modulename, f.name, extra_args) - c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' % \ - (sys.executable, args) - s, o = exec_command(c) - finally: - f.close() - return s - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__version__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__version__.py deleted file mode 100644 index 49a2199bf38b0..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/__version__.py +++ /dev/null @@ -1,10 +0,0 @@ -from __future__ import division, absolute_import, print_function - -major = 2 - -try: - from __svn_version__ import version - version_info = (major, version) - version = '%s_%s' % version_info -except (ImportError, ValueError): - version = str(major) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/auxfuncs.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/auxfuncs.py deleted file mode 100644 index 2e016e18656ac..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/auxfuncs.py +++ /dev/null @@ -1,711 +0,0 @@ -#!/usr/bin/env python -""" - -Auxiliary functions for f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) LICENSE. - - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/24 19:01:55 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import pprint -import sys -import types -from functools import reduce - -from . import __version__ -from . import cfuncs - -f2py_version = __version__.version - - -errmess=sys.stderr.write -#outmess=sys.stdout.write -show=pprint.pprint - -options={} -debugoptions=[] -wrapfuncs = 1 - - -def outmess(t): - if options.get('verbose', 1): - sys.stdout.write(t) - -def debugcapi(var): - return 'capi' in debugoptions - -def _isstring(var): - return 'typespec' in var and var['typespec']=='character' and (not isexternal(var)) - -def isstring(var): - return _isstring(var) and not isarray(var) - -def ischaracter(var): - return isstring(var) and 'charselector' not in var - -def isstringarray(var): - return isarray(var) and _isstring(var) - -def isarrayofstrings(var): - # leaving out '*' for now so that - # `character*(*) a(m)` and `character a(m,*)` - # are treated differently. Luckily `character**` is illegal. 
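-    # (i.e. return true only for string arrays whose *last* dimension is
-    # the assumed-size '(*)', the `character a(m,*)` form above)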
- return isstringarray(var) and var['dimension'][-1]=='(*)' - -def isarray(var): - return 'dimension' in var and (not isexternal(var)) - -def isscalar(var): - return not (isarray(var) or isstring(var) or isexternal(var)) - -def iscomplex(var): - return isscalar(var) and var.get('typespec') in ['complex', 'double complex'] - -def islogical(var): - return isscalar(var) and var.get('typespec')=='logical' - -def isinteger(var): - return isscalar(var) and var.get('typespec')=='integer' - -def isreal(var): - return isscalar(var) and var.get('typespec')=='real' - -def get_kind(var): - try: - return var['kindselector']['*'] - except KeyError: - try: - return var['kindselector']['kind'] - except KeyError: - pass - -def islong_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') not in ['integer', 'logical']: - return 0 - return get_kind(var)=='8' - -def isunsigned_char(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-1' - -def isunsigned_short(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-2' - -def isunsigned(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-4' - -def isunsigned_long_long(var): - if not isscalar(var): - return 0 - if var.get('typespec') != 'integer': - return 0 - return get_kind(var)=='-8' - -def isdouble(var): - if not isscalar(var): - return 0 - if not var.get('typespec')=='real': - return 0 - return get_kind(var)=='8' - -def islong_double(var): - if not isscalar(var): - return 0 - if not var.get('typespec')=='real': - return 0 - return get_kind(var)=='16' - -def islong_complex(var): - if not iscomplex(var): - return 0 - return get_kind(var)=='32' - -def iscomplexarray(var): - return isarray(var) and var.get('typespec') in ['complex', 'double complex'] - -def isint1array(var): - return isarray(var) and var.get('typespec')=='integer' \ - and get_kind(var)=='1' - -def isunsigned_chararray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='-1' - -def isunsigned_shortarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='-2' - -def isunsignedarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='-4' - -def isunsigned_long_longarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='-8' - -def issigned_chararray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='1' - -def issigned_shortarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='2' - -def issigned_array(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='4' - -def issigned_long_longarray(var): - return isarray(var) and var.get('typespec') in ['integer', 'logical']\ - and get_kind(var)=='8' - -def isallocatable(var): - return 'attrspec' in var and 'allocatable' in var['attrspec'] - -def ismutable(var): - return not (not 'dimension' in var or isstring(var)) - -def ismoduleroutine(rout): - return 'modulename' in rout - -def ismodule(rout): - return ('block' in rout and 'module'==rout['block']) - -def isfunction(rout): - return ('block' in rout and 'function'==rout['block']) - -#def isfunction_wrap(rout): -# return wrapfuncs and (iscomplexfunction(rout) or 
isstringfunction(rout)) and (not isexternal(rout)) - -def isfunction_wrap(rout): - if isintent_c(rout): - return 0 - return wrapfuncs and isfunction(rout) and (not isexternal(rout)) - -def issubroutine(rout): - return ('block' in rout and 'subroutine'==rout['block']) - -def issubroutine_wrap(rout): - if isintent_c(rout): - return 0 - return issubroutine(rout) and hasassumedshape(rout) - -def hasassumedshape(rout): - if rout.get('hasassumedshape'): - return True - for a in rout['args']: - for d in rout['vars'].get(a, {}).get('dimension', []): - if d==':': - rout['hasassumedshape'] = True - return True - return False - -def isroutine(rout): - return isfunction(rout) or issubroutine(rout) - -def islogicalfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islogical(rout['vars'][a]) - return 0 - -def islong_longfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islong_long(rout['vars'][a]) - return 0 - -def islong_doublefunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return islong_double(rout['vars'][a]) - return 0 - -def iscomplexfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return iscomplex(rout['vars'][a]) - return 0 - -def iscomplexfunction_warn(rout): - if iscomplexfunction(rout): - outmess("""\ - ************************************************************** - Warning: code with a function returning complex value - may not work correctly with your Fortran compiler. - Run the following test before using it in your applications: - $(f2py install dir)/test-site/{b/runme_scalar,e/runme} - When using GNU gcc/g77 compilers, codes should work correctly. 
- **************************************************************\n""") - return 1 - return 0 - -def isstringfunction(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return isstring(rout['vars'][a]) - return 0 - -def hasexternals(rout): - return 'externals' in rout and rout['externals'] - -def isthreadsafe(rout): - return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements'] - -def hasvariables(rout): - return 'vars' in rout and rout['vars'] - -def isoptional(var): - return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var) - -def isexternal(var): - return ('attrspec' in var and 'external' in var['attrspec']) - -def isrequired(var): - return not isoptional(var) and isintent_nothide(var) - -def isintent_in(var): - if 'intent' not in var: - return 1 - if 'hide' in var['intent']: - return 0 - if 'inplace' in var['intent']: - return 0 - if 'in' in var['intent']: - return 1 - if 'out' in var['intent']: - return 0 - if 'inout' in var['intent']: - return 0 - if 'outin' in var['intent']: - return 0 - return 1 - -def isintent_inout(var): - return 'intent' in var and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent'] - -def isintent_out(var): - return 'out' in var.get('intent', []) - -def isintent_hide(var): - return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout, isintent_inplace)(var))))) - -def isintent_nothide(var): - return not isintent_hide(var) - -def isintent_c(var): - return 'c' in var.get('intent', []) - -# def isintent_f(var): -# return not isintent_c(var) - -def isintent_cache(var): - return 'cache' in var.get('intent', []) - -def isintent_copy(var): - return 'copy' in var.get('intent', []) - -def isintent_overwrite(var): - return 'overwrite' in var.get('intent', []) - -def isintent_callback(var): - return 'callback' in var.get('intent', []) - -def isintent_inplace(var): - return 'inplace' in var.get('intent', []) - -def isintent_aux(var): - return 'aux' in var.get('intent', []) - -def isintent_aligned4(var): - return 'aligned4' in var.get('intent', []) -def isintent_aligned8(var): - return 'aligned8' in var.get('intent', []) -def isintent_aligned16(var): - return 'aligned16' in var.get('intent', []) - -isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', - isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', - isintent_cache: 'INTENT_CACHE', - isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', - isintent_inplace: 'INTENT_INPLACE', - isintent_aligned4: 'INTENT_ALIGNED4', - isintent_aligned8: 'INTENT_ALIGNED8', - isintent_aligned16: 'INTENT_ALIGNED16', - } - -def isprivate(var): - return 'attrspec' in var and 'private' in var['attrspec'] - -def hasinitvalue(var): - return '=' in var - -def hasinitvalueasstring(var): - if not hasinitvalue(var): - return 0 - return var['='][0] in ['"', "'"] - -def hasnote(var): - return 'note' in var - -def hasresultnote(rout): - if not isfunction(rout): - return 0 - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if a in rout['vars']: - return hasnote(rout['vars'][a]) - return 0 - -def hascommon(rout): - return 'common' in rout - -def containscommon(rout): - if hascommon(rout): - return 1 - if hasbody(rout): - for b in rout['body']: - if containscommon(b): - 
return 1 - return 0 - -def containsmodule(block): - if ismodule(block): - return 1 - if not hasbody(block): - return 0 - for b in block['body']: - if containsmodule(b): - return 1 - return 0 - -def hasbody(rout): - return 'body' in rout - -def hascallstatement(rout): - return getcallstatement(rout) is not None - -def istrue(var): - return 1 - -def isfalse(var): - return 0 - -class F2PYError(Exception): - pass - -class throw_error: - def __init__(self, mess): - self.mess = mess - def __call__(self, var): - mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) - raise F2PYError(mess) - -def l_and(*f): - l, l2='lambda v', [] - for i in range(len(f)): - l='%s,f%d=f[%d]'%(l, i, i) - l2.append('f%d(v)'%(i)) - return eval('%s:%s'%(l, ' and '.join(l2))) - -def l_or(*f): - l, l2='lambda v', [] - for i in range(len(f)): - l='%s,f%d=f[%d]'%(l, i, i) - l2.append('f%d(v)'%(i)) - return eval('%s:%s'%(l, ' or '.join(l2))) - -def l_not(f): - return eval('lambda v,f=f:not f(v)') - -def isdummyroutine(rout): - try: - return rout['f2pyenhancements']['fortranname']=='' - except KeyError: - return 0 - -def getfortranname(rout): - try: - name = rout['f2pyenhancements']['fortranname'] - if name=='': - raise KeyError - if not name: - errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements'])) - raise KeyError - except KeyError: - name = rout['name'] - return name - -def getmultilineblock(rout,blockname,comment=1,counter=0): - try: - r = rout['f2pyenhancements'].get(blockname) - except KeyError: - return - if not r: return - if counter > 0 and isinstance(r, str): - return - if isinstance(r, list): - if counter>=len(r): return - r = r[counter] - if r[:3]=="'''": - if comment: - r = '\t/* start ' + blockname + ' multiline ('+repr(counter)+') */\n' + r[3:] - else: - r = r[3:] - if r[-3:]=="'''": - if comment: - r = r[:-3] + '\n\t/* end multiline ('+repr(counter)+')*/' - else: - r = r[:-3] - else: - errmess("%s multiline block should end with `'''`: %s\n" \ - % (blockname, repr(r))) - return r - -def getcallstatement(rout): - return getmultilineblock(rout, 'callstatement') - -def getcallprotoargument(rout,cb_map={}): - r = getmultilineblock(rout, 'callprotoargument', comment=0) - if r: return r - if hascallstatement(rout): - outmess('warning: callstatement is defined without callprotoargument\n') - return - from .capi_maps import getctype - arg_types, arg_types2 = [], [] - if l_and(isstringfunction, l_not(isfunction_wrap))(rout): - arg_types.extend(['char*', 'size_t']) - for n in rout['args']: - var = rout['vars'][n] - if isintent_callback(var): - continue - if n in cb_map: - ctype = cb_map[n]+'_typedef' - else: - ctype = getctype(var) - if l_and(isintent_c, l_or(isscalar, iscomplex))(var): - pass - elif isstring(var): - pass - #ctype = 'void*' - else: - ctype = ctype+'*' - if isstring(var) or isarrayofstrings(var): - arg_types2.append('size_t') - arg_types.append(ctype) - - proto_args = ','.join(arg_types+arg_types2) - if not proto_args: - proto_args = 'void' - #print proto_args - return proto_args - -def getusercode(rout): - return getmultilineblock(rout, 'usercode') - -def getusercode1(rout): - return getmultilineblock(rout, 'usercode', counter=1) - -def getpymethoddef(rout): - return getmultilineblock(rout, 'pymethoddef') - -def getargs(rout): - sortargs, args=[], [] - if 'args' in rout: - args=rout['args'] - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: sortargs=rout['args'] - return 
args, sortargs - -def getargs2(rout): - sortargs, args=[], rout.get('args', []) - auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\ - and a not in args] - args = auxvars + args - if 'sortvars' in rout: - for a in rout['sortvars']: - if a in args: sortargs.append(a) - for a in args: - if a not in sortargs: - sortargs.append(a) - else: sortargs=auxvars + rout['args'] - return args, sortargs - -def getrestdoc(rout): - if 'f2pymultilines' not in rout: - return None - k = None - if rout['block']=='python module': - k = rout['block'], rout['name'] - return rout['f2pymultilines'].get(k, None) - -def gentitle(name): - l=(80-len(name)-6)//2 - return '/*%s %s %s*/'%(l*'*', name, l*'*') - -def flatlist(l): - if isinstance(l, list): - return reduce(lambda x,y,f=flatlist:x+f(y), l, []) - return [l] - -def stripcomma(s): - if s and s[-1]==',': return s[:-1] - return s - -def replace(str,d,defaultsep=''): - if isinstance(d, list): - return [replace(str, _m, defaultsep) for _m in d] - if isinstance(str, list): - return [replace(_m, d, defaultsep) for _m in str] - for k in 2*list(d.keys()): - if k=='separatorsfor': - continue - if 'separatorsfor' in d and k in d['separatorsfor']: - sep=d['separatorsfor'][k] - else: - sep=defaultsep - if isinstance(d[k], list): - str=str.replace('#%s#'%(k), sep.join(flatlist(d[k]))) - else: - str=str.replace('#%s#'%(k), d[k]) - return str - -def dictappend(rd, ar): - if isinstance(ar, list): - for a in ar: - rd=dictappend(rd, a) - return rd - for k in ar.keys(): - if k[0]=='_': - continue - if k in rd: - if isinstance(rd[k], str): - rd[k]=[rd[k]] - if isinstance(rd[k], list): - if isinstance(ar[k], list): - rd[k]=rd[k]+ar[k] - else: - rd[k].append(ar[k]) - elif isinstance(rd[k], dict): - if isinstance(ar[k], dict): - if k=='separatorsfor': - for k1 in ar[k].keys(): - if k1 not in rd[k]: - rd[k][k1]=ar[k][k1] - else: - rd[k]=dictappend(rd[k], ar[k]) - else: - rd[k]=ar[k] - return rd - -def applyrules(rules,d,var={}): - ret={} - if isinstance(rules, list): - for r in rules: - rr=applyrules(r, d, var) - ret=dictappend(ret, rr) - if '_break' in rr: - break - return ret - if '_check' in rules and (not rules['_check'](var)): - return ret - if 'need' in rules: - res = applyrules({'needs':rules['need']}, d, var) - if 'needs' in res: - cfuncs.append_needs(res['needs']) - - for k in rules.keys(): - if k=='separatorsfor': - ret[k]=rules[k]; continue - if isinstance(rules[k], str): - ret[k]=replace(rules[k], d) - elif isinstance(rules[k], list): - ret[k]=[] - for i in rules[k]: - ar=applyrules({k:i}, d, var) - if k in ar: - ret[k].append(ar[k]) - elif k[0]=='_': - continue - elif isinstance(rules[k], dict): - ret[k]=[] - for k1 in rules[k].keys(): - if isinstance(k1, types.FunctionType) and k1(var): - if isinstance(rules[k][k1], list): - for i in rules[k][k1]: - if isinstance(i, dict): - res=applyrules({'supertext':i}, d, var) - if 'supertext' in res: - i=res['supertext'] - else: i='' - ret[k].append(replace(i, d)) - else: - i=rules[k][k1] - if isinstance(i, dict): - res=applyrules({'supertext':i}, d) - if 'supertext' in res: - i=res['supertext'] - else: i='' - ret[k].append(replace(i, d)) - else: - errmess('applyrules: ignoring rule %s.\n'%repr(rules[k])) - if isinstance(ret[k], list): - if len(ret[k])==1: - ret[k]=ret[k][0] - if ret[k]==[]: - del ret[k] - return ret diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/capi_maps.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/capi_maps.py deleted file mode 100644 index 536a576dd101a..0000000000000 
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/capi_maps.py +++ /dev/null @@ -1,773 +0,0 @@ -#!/usr/bin/env python -""" - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.60 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -import copy -import re -import os -import sys -from .auxfuncs import * -from .crackfortran import markoutercomma -from . import cb_rules - -# Numarray and Numeric users should set this False -using_newcore = True - -depargs=[] -lcb_map={} -lcb2_map={} -# forced casting: mainly caused by the fact that Python or Numeric -# C/APIs do not support the corresponding C types. -c2py_map={'double': 'float', - 'float': 'float', # forced casting - 'long_double': 'float', # forced casting - 'char': 'int', # forced casting - 'signed_char': 'int', # forced casting - 'unsigned_char': 'int', # forced casting - 'short': 'int', # forced casting - 'unsigned_short': 'int', # forced casting - 'int': 'int', # (forced casting) - 'long': 'int', - 'long_long': 'long', - 'unsigned': 'int', # forced casting - 'complex_float': 'complex', # forced casting - 'complex_double': 'complex', - 'complex_long_double': 'complex', # forced casting - 'string': 'string', - } -c2capi_map={'double':'NPY_DOUBLE', - 'float':'NPY_FLOAT', - 'long_double':'NPY_DOUBLE', # forced casting - 'char':'NPY_CHAR', - 'unsigned_char':'NPY_UBYTE', - 'signed_char':'NPY_BYTE', - 'short':'NPY_SHORT', - 'unsigned_short':'NPY_USHORT', - 'int':'NPY_INT', - 'unsigned':'NPY_UINT', - 'long':'NPY_LONG', - 'long_long':'NPY_LONG', # forced casting - 'complex_float':'NPY_CFLOAT', - 'complex_double':'NPY_CDOUBLE', - 'complex_long_double':'NPY_CDOUBLE', # forced casting - 'string':'NPY_CHAR'} - -#These new maps aren't used anyhere yet, but should be by default -# unless building numeric or numarray extensions. 
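-# Since `using_newcore` is set True above, the maps below are the ones
-# actually in effect; they use the properly sized NPY_* codes (e.g.
-# NPY_LONGDOUBLE, NPY_LONGLONG) instead of the forced casts above.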
-if using_newcore: - c2capi_map={'double': 'NPY_DOUBLE', - 'float': 'NPY_FLOAT', - 'long_double': 'NPY_LONGDOUBLE', - 'char': 'NPY_BYTE', - 'unsigned_char': 'NPY_UBYTE', - 'signed_char': 'NPY_BYTE', - 'short': 'NPY_SHORT', - 'unsigned_short': 'NPY_USHORT', - 'int': 'NPY_INT', - 'unsigned': 'NPY_UINT', - 'long': 'NPY_LONG', - 'unsigned_long': 'NPY_ULONG', - 'long_long': 'NPY_LONGLONG', - 'unsigned_long_long': 'NPY_ULONGLONG', - 'complex_float': 'NPY_CFLOAT', - 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', - 'string': 'NPY_CHAR', # f2py 2e is not ready for NPY_STRING (must set itemisize etc) - #'string':'NPY_STRING' - - } -c2pycode_map={'double':'d', - 'float':'f', - 'long_double':'d', # forced casting - 'char':'1', - 'signed_char':'1', - 'unsigned_char':'b', - 'short':'s', - 'unsigned_short':'w', - 'int':'i', - 'unsigned':'u', - 'long':'l', - 'long_long':'L', - 'complex_float':'F', - 'complex_double':'D', - 'complex_long_double':'D', # forced casting - 'string':'c' - } -if using_newcore: - c2pycode_map={'double':'d', - 'float':'f', - 'long_double':'g', - 'char':'b', - 'unsigned_char':'B', - 'signed_char':'b', - 'short':'h', - 'unsigned_short':'H', - 'int':'i', - 'unsigned':'I', - 'long':'l', - 'unsigned_long':'L', - 'long_long':'q', - 'unsigned_long_long':'Q', - 'complex_float':'F', - 'complex_double':'D', - 'complex_long_double':'G', - 'string':'S'} -c2buildvalue_map={'double':'d', - 'float':'f', - 'char':'b', - 'signed_char':'b', - 'short':'h', - 'int':'i', - 'long':'l', - 'long_long':'L', - 'complex_float':'N', - 'complex_double':'N', - 'complex_long_double':'N', - 'string':'z'} - -if sys.version_info[0] >= 3: - # Bytes, not Unicode strings - c2buildvalue_map['string'] = 'y' - -if using_newcore: - #c2buildvalue_map=??? - pass - -f2cmap_all={'real':{'':'float','4':'float','8':'double','12':'long_double','16':'long_double'}, - 'integer':{'':'int','1':'signed_char','2':'short','4':'int','8':'long_long', - '-1':'unsigned_char','-2':'unsigned_short','-4':'unsigned', - '-8':'unsigned_long_long'}, - 'complex':{'':'complex_float','8':'complex_float', - '16':'complex_double','24':'complex_long_double', - '32':'complex_long_double'}, - 'complexkind':{'':'complex_float','4':'complex_float', - '8':'complex_double','12':'complex_long_double', - '16':'complex_long_double'}, - 'logical':{'':'int','1':'char','2':'short','4':'int','8':'long_long'}, - 'double complex':{'':'complex_double'}, - 'double precision':{'':'double'}, - 'byte':{'':'char'}, - 'character':{'':'string'} - } - -if os.path.isfile('.f2py_f2cmap'): - # User defined additions to f2cmap_all. - # .f2py_f2cmap must contain a dictionary of dictionaries, only. - # For example, {'real':{'low':'float'}} means that Fortran 'real(low)' is - # interpreted as C 'float'. - # This feature is useful for F90/95 users if they use PARAMETERSs - # in type specifications. 
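-    # A hypothetical example: a file containing {'real': {'real64': 'double'}}
-    # would map Fortran `real(real64)` to C `double`.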
- try: - outmess('Reading .f2py_f2cmap ...\n') - f = open('.f2py_f2cmap', 'r') - d = eval(f.read(), {}, {}) - f.close() - for k, d1 in d.items(): - for k1 in d1.keys(): - d1[k1.lower()] = d1[k1] - d[k.lower()] = d[k] - for k in d.keys(): - if k not in f2cmap_all: - f2cmap_all[k]={} - for k1 in d[k].keys(): - if d[k][k1] in c2py_map: - if k1 in f2cmap_all[k]: - outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k, k1, f2cmap_all[k][k1], d[k][k1])) - f2cmap_all[k][k1] = d[k][k1] - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, d[k][k1])) - else: - errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) - outmess('Succesfully applied user defined changes from .f2py_f2cmap\n') - except Exception as msg: - errmess('Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg)) -cformat_map={'double': '%g', - 'float': '%g', - 'long_double': '%Lg', - 'char': '%d', - 'signed_char': '%d', - 'unsigned_char': '%hhu', - 'short': '%hd', - 'unsigned_short': '%hu', - 'int': '%d', - 'unsigned': '%u', - 'long': '%ld', - 'unsigned_long': '%lu', - 'long_long': '%ld', - 'complex_float': '(%g,%g)', - 'complex_double': '(%g,%g)', - 'complex_long_double': '(%Lg,%Lg)', - 'string': '%s', - } - -############### Auxiliary functions -def getctype(var): - """ - Determines C type - """ - ctype='void' - if isfunction(var): - if 'result' in var: - a=var['result'] - else: - a=var['name'] - if a in var['vars']: - return getctype(var['vars'][a]) - else: - errmess('getctype: function %s has no return value?!\n'%a) - elif issubroutine(var): - return ctype - elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: - typespec = var['typespec'].lower() - f2cmap=f2cmap_all[typespec] - ctype=f2cmap[''] # default type - if 'kindselector' in var: - if '*' in var['kindselector']: - try: - ctype=f2cmap[var['kindselector']['*']] - except KeyError: - errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'], '*', var['kindselector']['*'])) - elif 'kind' in var['kindselector']: - if typespec+'kind' in f2cmap_all: - f2cmap=f2cmap_all[typespec+'kind'] - try: - ctype=f2cmap[var['kindselector']['kind']] - except KeyError: - if typespec in f2cmap_all: - f2cmap=f2cmap_all[typespec] - try: - ctype=f2cmap[str(var['kindselector']['kind'])] - except KeyError: - errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n'\ - %(typespec, var['kindselector']['kind'], ctype, - typespec, var['kindselector']['kind'], os.getcwd())) - - else: - if not isexternal(var): - errmess('getctype: No C-type found in "%s", assuming void.\n'%var) - return ctype - -def getstrlength(var): - if isstringfunction(var): - if 'result' in var: - a=var['result'] - else: - a=var['name'] - if a in var['vars']: - return getstrlength(var['vars'][a]) - else: - errmess('getstrlength: function %s has no return value?!\n'%a) - if not isstring(var): - errmess('getstrlength: expected a signature of a string but got: %s\n'%(repr(var))) - len='1' - if 'charselector' in var: - a=var['charselector'] - if '*' in a: - len=a['*'] - elif 'len' in a: - len=a['len'] - if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): - #if len in ['(*)','*','(:)',':']: - if isintent_hide(var): - errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(repr(var))) - len='-1' - return len - -def getarrdims(a,var,verbose=0): - global depargs - ret={} - if isstring(var) and not 
isarray(var): - ret['dims']=getstrlength(var) - ret['size']=ret['dims'] - ret['rank']='1' - elif isscalar(var): - ret['size']='1' - ret['rank']='0' - ret['dims']='' - elif isarray(var): -# if not isintent_c(var): -# var['dimension'].reverse() - dim=copy.copy(var['dimension']) - ret['size']='*'.join(dim) - try: ret['size']=repr(eval(ret['size'])) - except: pass - ret['dims']=','.join(dim) - ret['rank']=repr(len(dim)) - ret['rank*[-1]']=repr(len(dim)*[-1])[1:-1] - for i in range(len(dim)): # solve dim for dependecies - v=[] - if dim[i] in depargs: v=[dim[i]] - else: - for va in depargs: - if re.match(r'.*?\b%s\b.*'%va, dim[i]): - v.append(va) - for va in v: - if depargs.index(va)>depargs.index(a): - dim[i]='*' - break - ret['setdims'], i='', -1 - for d in dim: - i=i+1 - if d not in ['*', ':', '(*)', '(:)']: - ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'], i, d) - if ret['setdims']: ret['setdims']=ret['setdims'][:-1] - ret['cbsetdims'], i='', -1 - for d in var['dimension']: - i=i+1 - if d not in ['*', ':', '(*)', '(:)']: - ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, d) - elif isintent_in(var): - outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' \ - % (d)) - ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, 0) - elif verbose : - errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(repr(a), repr(d))) - if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1] -# if not isintent_c(var): -# var['dimension'].reverse() - return ret - -def getpydocsign(a, var): - global lcb_map - if isfunction(var): - if 'result' in var: - af=var['result'] - else: - af=var['name'] - if af in var['vars']: - return getpydocsign(af, var['vars'][af]) - else: - errmess('getctype: function %s has no return value?!\n'%af) - return '', '' - sig, sigout=a, a - opt='' - if isintent_in(var): opt='input' - elif isintent_inout(var): opt='in/output' - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4]=='out=': - out_a = k[4:] - break - init='' - ctype=getctype(var) - - if hasinitvalue(var): - init, showinit=getinit(a, var) - init = ', optional\\n Default: %s' % showinit - if isscalar(var): - if isintent_inout(var): - sig='%s : %s rank-0 array(%s,\'%s\')%s'%(a, opt, c2py_map[ctype], - c2pycode_map[ctype], init) - else: - sig='%s : %s %s%s'%(a, opt, c2py_map[ctype], init) - sigout='%s : %s'%(out_a, c2py_map[ctype]) - elif isstring(var): - if isintent_inout(var): - sig='%s : %s rank-0 array(string(len=%s),\'c\')%s'%(a, opt, getstrlength(var), init) - else: - sig='%s : %s string(len=%s)%s'%(a, opt, getstrlength(var), init) - sigout='%s : string(len=%s)'%(out_a, getstrlength(var)) - elif isarray(var): - dim=var['dimension'] - rank=repr(len(dim)) - sig='%s : %s rank-%s array(\'%s\') with bounds (%s)%s'%(a, opt, rank, - c2pycode_map[ctype], - ','.join(dim), init) - if a==out_a: - sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\ - %(a, rank, c2pycode_map[ctype], ','.join(dim)) - else: - sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - %(out_a, rank, c2pycode_map[ctype], ','.join(dim), a) - elif isexternal(var): - ua='' - if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: - ua=lcb2_map[lcb_map[a]]['argname'] - if not ua==a: ua=' => %s'%ua - else: ua='' - sig='%s : call-back function%s'%(a, ua) - sigout=sig - else: - errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a) - return sig, sigout - -def getarrdocsign(a, 
var): - ctype=getctype(var) - if isstring(var) and (not isarray(var)): - sig='%s : rank-0 array(string(len=%s),\'c\')'%(a, getstrlength(var)) - elif isscalar(var): - sig='%s : rank-0 array(%s,\'%s\')'%(a, c2py_map[ctype], - c2pycode_map[ctype],) - elif isarray(var): - dim=var['dimension'] - rank=repr(len(dim)) - sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a, rank, - c2pycode_map[ctype], - ','.join(dim)) - return sig - -def getinit(a, var): - if isstring(var): init, showinit='""', "''" - else: init, showinit='', '' - if hasinitvalue(var): - init=var['='] - showinit=init - if iscomplex(var) or iscomplexarray(var): - ret={} - - try: - v = var["="] - if ',' in v: - ret['init.r'], ret['init.i']=markoutercomma(v[1:-1]).split('@,@') - else: - v = eval(v, {}, {}) - ret['init.r'], ret['init.i']=str(v.real), str(v.imag) - except: - raise ValueError('getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) - if isarray(var): - init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'], ret['init.i']) - elif isstring(var): - if not init: init, showinit='""', "''" - if init[0]=="'": - init='"%s"'%(init[1:-1].replace('"', '\\"')) - if init[0]=='"': showinit="'%s'"%(init[1:-1]) - return init, showinit - -def sign2map(a, var): - """ - varname,ctype,atype - init,init.r,init.i,pytype - vardebuginfo,vardebugshowvalue,varshowvalue - varrfromat - intent - """ - global lcb_map, cb_map - out_a = a - if isintent_out(var): - for k in var['intent']: - if k[:4]=='out=': - out_a = k[4:] - break - ret={'varname':a,'outvarname':out_a} - ret['ctype']=getctype(var) - intent_flags = [] - for f, s in isintent_dict.items(): - if f(var): intent_flags.append('F2PY_%s'%s) - if intent_flags: - #XXX: Evaluate intent_flags here. - ret['intent'] = '|'.join(intent_flags) - else: - ret['intent'] = 'F2PY_INTENT_IN' - if isarray(var): ret['varrformat']='N' - elif ret['ctype'] in c2buildvalue_map: - ret['varrformat']=c2buildvalue_map[ret['ctype']] - else: ret['varrformat']='O' - ret['init'], ret['showinit']=getinit(a, var) - if hasinitvalue(var) and iscomplex(var) and not isarray(var): - ret['init.r'], ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@') - if isexternal(var): - ret['cbnamekey']=a - if a in lcb_map: - ret['cbname']=lcb_map[a] - ret['maxnofargs']=lcb2_map[lcb_map[a]]['maxnofargs'] - ret['nofoptargs']=lcb2_map[lcb_map[a]]['nofoptargs'] - ret['cbdocstr']=lcb2_map[lcb_map[a]]['docstr'] - ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr'] - else: - ret['cbname']=a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a, list(lcb_map.keys()))) - if isstring(var): - ret['length']=getstrlength(var) - if isarray(var): - ret=dictappend(ret, getarrdims(a, var)) - dim=copy.copy(var['dimension']) - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - # Debug info - if debugcapi(var): - il=[isintent_in, 'input', isintent_out, 'output', - isintent_inout, 'inoutput', isrequired, 'required', - isoptional, 'optional', isintent_hide, 'hidden', - iscomplex, 'complex scalar', - l_and(isscalar, l_not(iscomplex)), 'scalar', - isstring, 'string', isarray, 'array', - iscomplexarray, 'complex array', isstringarray, 'string array', - iscomplexfunction, 'complex function', - l_and(isfunction, l_not(iscomplexfunction)), 'function', - isexternal, 'callback', - isintent_callback, 'callback', - isintent_aux, 'auxiliary', - #ismutable,'mutable',l_not(ismutable),'immutable', - ] - rl=[] - for i in range(0, len(il), 2): - if il[i](var): rl.append(il[i+1]) - if 
isstring(var): - rl.append('slen(%s)=%s'%(a, ret['length'])) - if isarray(var): -# if not isintent_c(var): -# var['dimension'].reverse() - ddim=','.join(map(lambda x, y:'%s|%s'%(x, y), var['dimension'], dim)) - rl.append('dims(%s)'%ddim) -# if not isintent_c(var): -# var['dimension'].reverse() - if isexternal(var): - ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a, ret['cbname'], ','.join(rl)) - else: - ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'], a, ret['showinit'], ','.join(rl)) - if isscalar(var): - if ret['ctype'] in cformat_map: - ret['vardebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']]) - if isstring(var): - ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) - if isexternal(var): - ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a) - if ret['ctype'] in cformat_map: - ret['varshowvalue']='#name#:%s=%s'%(a, cformat_map[ret['ctype']]) - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isstring(var): - ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) - ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) - if hasnote(var): - ret['note']=var['note'] - return ret - -def routsign2map(rout): - """ - name,NAME,begintitle,endtitle - rname,ctype,rformat - routdebugshowvalue - """ - global lcb_map - name = rout['name'] - fname = getfortranname(rout) - ret={'name': name, - 'texname': name.replace('_', '\\_'), - 'name_lower': name.lower(), - 'NAME': name.upper(), - 'begintitle': gentitle(name), - 'endtitle': gentitle('end of %s'%name), - 'fortranname': fname, - 'FORTRANNAME': fname.upper(), - 'callstatement': getcallstatement(rout) or '', - 'usercode': getusercode(rout) or '', - 'usercode1': getusercode1(rout) or '', - } - if '_' in fname: - ret['F_FUNC'] = 'F_FUNC_US' - else: - ret['F_FUNC'] = 'F_FUNC' - if '_' in name: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' - else: - ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' - lcb_map={} - if 'use' in rout: - for u in rout['use'].keys(): - if u in cb_rules.cb_map: - for un in cb_rules.cb_map[u]: - ln=un[0] - if 'map' in rout['use'][u]: - for k in rout['use'][u]['map'].keys(): - if rout['use'][u]['map'][k]==un[0]: ln=k;break - lcb_map[ln]=un[1] - #else: - # errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u)) - elif 'externals' in rout and rout['externals']: - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'], repr(rout['externals']))) - ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' - if isfunction(rout): - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - ret['rname']=a - ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout) - ret['ctype']=getctype(rout['vars'][a]) - if hasresultnote(rout): - ret['resultnote']=rout['vars'][a]['note'] - rout['vars'][a]['note']=['See elsewhere.'] - if ret['ctype'] in c2buildvalue_map: - ret['rformat']=c2buildvalue_map[ret['ctype']] - else: - ret['rformat']='O' - errmess('routsign2map: no c2buildvalue key for type %s\n'%(repr(ret['ctype']))) - if debugcapi(rout): - if ret['ctype'] in cformat_map: - ret['routdebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) - if isstringfunction(rout): - ret['rlength']=getstrlength(rout['vars'][a]) - if ret['rlength']=='-1': - errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 
10.\n'%(repr(rout['name']))) - ret['rlength']='10' - if hasnote(rout): - ret['note']=rout['note'] - rout['note']=['See elsewhere.'] - return ret - -def modsign2map(m): - """ - modulename - """ - if ismodule(m): - ret={'f90modulename':m['name'], - 'F90MODULENAME':m['name'].upper(), - 'texf90modulename':m['name'].replace('_', '\\_')} - else: - ret={'modulename':m['name'], - 'MODULENAME':m['name'].upper(), - 'texmodulename':m['name'].replace('_', '\\_')} - ret['restdoc'] = getrestdoc(m) or [] - if hasnote(m): - ret['note']=m['note'] - #m['note']=['See elsewhere.'] - ret['usercode'] = getusercode(m) or '' - ret['usercode1'] = getusercode1(m) or '' - if m['body']: - ret['interface_usercode'] = getusercode(m['body'][0]) or '' - else: - ret['interface_usercode'] = '' - ret['pymethoddef'] = getpymethoddef(m) or '' - if 'coutput' in m: - ret['coutput'] = m['coutput'] - if 'f2py_wrapper_output' in m: - ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] - return ret - -def cb_sign2map(a,var,index=None): - ret={'varname':a} - if index is None or 1: # disable 7712 patch - ret['varname_i'] = ret['varname'] - else: - ret['varname_i'] = ret['varname'] + '_' + str(index) - ret['ctype']=getctype(var) - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isarray(var): - ret=dictappend(ret, getarrdims(a, var)) - ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) - if hasnote(var): - ret['note']=var['note'] - var['note']=['See elsewhere.'] - return ret - -def cb_routsign2map(rout, um): - """ - name,begintitle,endtitle,argname - ctype,rctype,maxnofargs,nofoptargs,returncptr - """ - ret={'name':'cb_%s_in_%s'%(rout['name'], um), - 'returncptr':''} - if isintent_callback(rout): - if '_' in rout['name']: - F_FUNC='F_FUNC_US' - else: - F_FUNC='F_FUNC' - ret['callbackname'] = '%s(%s,%s)' \ - % (F_FUNC, - rout['name'].lower(), - rout['name'].upper(), - ) - ret['static'] = 'extern' - else: - ret['callbackname'] = ret['name'] - ret['static'] = 'static' - ret['argname']=rout['name'] - ret['begintitle']=gentitle(ret['name']) - ret['endtitle']=gentitle('end of %s'%ret['name']) - ret['ctype']=getctype(rout) - ret['rctype']='void' - if ret['ctype']=='string': ret['rctype']='void' - else: - ret['rctype']=ret['ctype'] - if ret['rctype']!='void': - if iscomplexfunction(rout): - ret['returncptr'] = """ -#ifdef F2PY_CB_RETURNCOMPLEX -return_value= -#endif -""" - else: - ret['returncptr'] = 'return_value=' - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isstringfunction(rout): - ret['strlength']=getstrlength(rout) - if isfunction(rout): - if 'result' in rout: - a=rout['result'] - else: - a=rout['name'] - if hasnote(rout['vars'][a]): - ret['note']=rout['vars'][a]['note'] - rout['vars'][a]['note']=['See elsewhere.'] - ret['rname']=a - ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout) - if iscomplexfunction(rout): - ret['rctype']=""" -#ifdef F2PY_CB_RETURNCOMPLEX -#ctype# -#else -void -#endif -""" - else: - if hasnote(rout): - ret['note']=rout['note'] - rout['note']=['See elsewhere.'] - nofargs=0 - nofoptargs=0 - if 'args' in rout and 'vars' in rout: - for a in rout['args']: - var=rout['vars'][a] - if l_or(isintent_in, isintent_inout)(var): - nofargs=nofargs+1 - if isoptional(var): - nofoptargs=nofoptargs+1 - ret['maxnofargs']=repr(nofargs) - ret['nofoptargs']=repr(nofoptargs) - if hasnote(rout) and isfunction(rout) and 'result' in rout: - 
ret['routnote']=rout['note'] - rout['note']=['See elsewhere.'] - return ret - -def common_sign2map(a, var): # obsolute - ret={'varname':a} - ret['ctype']=getctype(var) - if isstringarray(var): - ret['ctype']='char' - if ret['ctype'] in c2capi_map: - ret['atype']=c2capi_map[ret['ctype']] - if ret['ctype'] in cformat_map: - ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) - if isarray(var): - ret=dictappend(ret, getarrdims(a, var)) - elif isstring(var): - ret['size']=getstrlength(var) - ret['rank']='1' - ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) - if hasnote(var): - ret['note']=var['note'] - var['note']=['See elsewhere.'] - ret['arrdocstr']=getarrdocsign(a, var) # for strings this returns 0-rank but actually is 1-rank - return ret diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cb_rules.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cb_rules.py deleted file mode 100644 index f3bf848a74b2e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cb_rules.py +++ /dev/null @@ -1,539 +0,0 @@ -#!/usr/bin/env python -""" - -Build call-back mechanism for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/07/20 11:27:58 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import pprint -import sys - -from . import __version__ -from .auxfuncs import * -from . import cfuncs - -f2py_version = __version__.version - -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - - -################## Rules for callback function ############## - -cb_routine_rules={ - 'cbtypedefs':'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', - 'body':""" -#begintitle# -PyObject *#name#_capi = NULL;/*was Py_None*/ -PyTupleObject *#name#_args_capi = NULL; -int #name#_nofargs = 0; -jmp_buf #name#_jmpbuf; -/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ -#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { -\tPyTupleObject *capi_arglist = #name#_args_capi; -\tPyObject *capi_return = NULL; -\tPyObject *capi_tmp = NULL; -\tint capi_j,capi_i = 0; -\tint capi_longjmp_ok = 1; -#decl# -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_clock(); -#endif -\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); -\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi); -\tif (#name#_capi==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); -\t} -\tif (#name#_capi==NULL) { -\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); -\t\tgoto capi_fail; -\t} -\tif (F2PyCapsule_Check(#name#_capi)) { -\t#name#_typedef #name#_cptr; -\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi); -\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); -\t#return# -\t} -\tif (capi_arglist==NULL) { -\t\tcapi_longjmp_ok = 0; -\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); -\t\tif (capi_tmp) { -\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); -\t\t\tif (capi_arglist==NULL) { -\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t} else { 
-\t\t\tPyErr_Clear(); -\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); -\t\t} -\t} -\tif (capi_arglist == NULL) { -\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); -\t\tgoto capi_fail; -\t} -#setdims# -#pyobjfrom# -\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); -\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_start_call_clock(); -#endif -\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_call_clock(); -#endif -\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return); -\tif (capi_return == NULL) { -\t\tfprintf(stderr,\"capi_return is NULL\\n\"); -\t\tgoto capi_fail; -\t} -\tif (capi_return == Py_None) { -\t\tPy_DECREF(capi_return); -\t\tcapi_return = Py_BuildValue(\"()\"); -\t} -\telse if (!PyTuple_Check(capi_return)) { -\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return); -\t} -\tcapi_j = PyTuple_Size(capi_return); -\tcapi_i = 0; -#frompyobj# -\tCFUNCSMESS(\"cb:#name#:successful\\n\"); -\tPy_DECREF(capi_return); -#ifdef F2PY_REPORT_ATEXIT -f2py_cb_stop_clock(); -#endif -\tgoto capi_return_pt; -capi_fail: -\tfprintf(stderr,\"Call-back #name# failed.\\n\"); -\tPy_XDECREF(capi_return); -\tif (capi_longjmp_ok) -\t\tlongjmp(#name#_jmpbuf,-1); -capi_return_pt: -\t; -#return# -} -#endtitle# -""", - 'need':['setjmp.h', 'CFUNCSMESS'], - 'maxnofargs':'#maxnofargs#', - 'nofoptargs':'#nofoptargs#', - 'docstr':"""\ -\tdef #argname#(#docsignature#): return #docreturn#\\n\\ -#docstrsigns#""", - 'latexdocstr':""" -{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} -#routnote# - -#latexdocstrsigns#""", - 'docstrshort':'def #argname#(#docsignature#): return #docreturn#' - } -cb_rout_rules=[ - {# Init - 'separatorsfor': {'decl': '\n', - 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', - 'args_td': ',', 'optargs_td': '', - 'args_nm': ',', 'optargs_nm': '', - 'frompyobj': '\n', 'setdims': '\n', - 'docstrsigns': '\\n"\n"', - 'latexdocstrsigns': '\n', - 'latexdocstrreq': '\n', 'latexdocstropt': '\n', - 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', - }, - 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', - 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', - 'args_td': [], 'optargs_td': '', 'strarglens_td': '', - 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', - 'noargs': '', - 'setdims': '/*setdims*/', - 'docstrsigns': '', 'latexdocstrsigns': '', - 'docstrreq': '\tRequired arguments:', - 'docstropt': '\tOptional arguments:', - 'docstrout': '\tReturn objects:', - 'docstrcbs': '\tCall-back functions:', - 'docreturn': '', 'docsign': '', 'docsignopt': '', - 'latexdocstrreq': '\\noindent Required arguments:', - 'latexdocstropt': '\\noindent Optional arguments:', - 'latexdocstrout': '\\noindent Return objects:', - 'latexdocstrcbs': '\\noindent Call-back functions:', - 'routnote': {hasnote:'--- #note#',l_not(hasnote):''}, - }, { # Function - 'decl':'\t#ctype# return_value;', - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, - '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', - {debugcapi:'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} - ], - 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], - 'return':'\treturn return_value;', - 
'_check':l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) - }, - {# String function - 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, - 'args':'#ctype# return_value,int return_value_len', - 'args_nm':'return_value,&return_value_len', - 'args_td':'#ctype# ,int', - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", - {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} - ], - 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, - 'string.h', 'GETSTRFROMPYTUPLE'], - 'return':'return;', - '_check':isstringfunction - }, - {# Complex function - 'optargs':""" -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# *return_value -#endif -""", - 'optargs_nm':""" -#ifndef F2PY_CB_RETURNCOMPLEX -return_value -#endif -""", - 'optargs_td':""" -#ifndef F2PY_CB_RETURNCOMPLEX -#ctype# * -#endif -""", - 'decl':""" -#ifdef F2PY_CB_RETURNCOMPLEX -\t#ctype# return_value; -#endif -""", - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, - """\ -\tif (capi_j>capi_i) -#ifdef F2PY_CB_RETURNCOMPLEX -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#else -\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); -#endif -""", - {debugcapi:""" -#ifdef F2PY_CB_RETURNCOMPLEX -\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); -#else -\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); -#endif - -"""} - ], - 'return':""" -#ifdef F2PY_CB_RETURNCOMPLEX -\treturn return_value; -#else -\treturn; -#endif -""", - 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, - 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], - '_check':iscomplexfunction - }, - {'docstrout':'\t\t#pydocsignout#', - 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasnote:'--- #note#'}], - 'docreturn':'#rname#,', - '_check':isfunction}, - {'_check':issubroutine,'return':'return;'} - ] - -cb_arg_rules=[ - { # Doc - 'docstropt':{l_and(isoptional, isintent_nothide):'\t\t#pydocsign#'}, - 'docstrreq':{l_and(isrequired, isintent_nothide):'\t\t#pydocsign#'}, - 'docstrout':{isintent_out:'\t\t#pydocsignout#'}, - 'latexdocstropt':{l_and(isoptional, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote:'--- #note#'}]}, - 'latexdocstrreq':{l_and(isrequired, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', - {hasnote:'--- #note#'}]}, - 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote, isintent_hide):'--- #note#', - l_and(hasnote, isintent_nothide):'--- See above.'}]}, - 'docsign':{l_and(isrequired, isintent_nothide):'#varname#,'}, - 'docsignopt':{l_and(isoptional, isintent_nothide):'#varname#,'}, - 'depend':'' - }, - { - 'args': { - l_and (isscalar, isintent_c):'#ctype# #varname_i#', - l_and (isscalar, l_not(isintent_c)):'#ctype# *#varname_i#_cb_capi', - isarray:'#ctype# *#varname_i#', - isstring:'#ctype# #varname_i#' - }, - 'args_nm': { - l_and (isscalar, isintent_c):'#varname_i#', - l_and (isscalar, l_not(isintent_c)):'#varname_i#_cb_capi', - isarray:'#varname_i#', - isstring:'#varname_i#' - }, - 'args_td': { - l_and (isscalar, isintent_c):'#ctype#', - l_and (isscalar, 
l_not(isintent_c)):'#ctype# *', - isarray:'#ctype# *', - isstring:'#ctype#' - }, - 'strarglens': {isstring:',int #varname_i#_cb_len'}, # untested with multiple args - 'strarglens_td': {isstring:',int'}, # untested with multiple args - 'strarglens_nm': {isstring:',#varname_i#_cb_len'}, # untested with multiple args - }, - { # Scalars - 'decl':{l_not(isintent_c):'\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'}, - 'error': {l_and(isintent_c, isintent_out, - throw_error('intent(c,out) is forbidden for callback scalar arguments')):\ - ''}, - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, - {isintent_out:'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, - {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, - {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, - {l_and(debugcapi, l_and(iscomplex, isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, - {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, - ], - 'need':[{isintent_out:['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, - {debugcapi:'CFUNCSMESS'}], - '_check':isscalar - }, { - 'pyobjfrom':[{isintent_in:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) -\t\t\tgoto capi_fail;"""}], - 'need':[{isintent_in:'pyobj_from_#ctype#1'}, - {isintent_inout:'pyarr_from_p_#ctype#1'}, - {iscomplex:'#ctype#'}], - '_check':l_and(isscalar, isintent_nothide), - '_optional':'' - }, {# String - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, - """\tif (capi_j>capi_i) -\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", - {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, - ], - 'need':['#ctype#', 'GETSTRFROMPYTUPLE', - {debugcapi:'CFUNCSMESS'}, 'string.h'], - '_check':l_and(isstring, isintent_out) - }, { - 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, - {isintent_in:"""\ -\tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) -\t\t\tgoto capi_fail;"""}, - {isintent_inout:"""\ -\tif (#name#_nofargs>capi_i) { -\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len}; -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) -\t\t\tgoto capi_fail; -\t}"""}], - 'need':[{isintent_in:'pyobj_from_#ctype#1size'}, - {isintent_inout:'pyarr_from_p_#ctype#1'}], - '_check':l_and(isstring, isintent_nothide), - '_optional':'' - }, -# Array ... 
- { - 'decl':'\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', - 'setdims':'\t#cbsetdims#;', - '_check':isarray, - '_depend':'' - }, - { - 'pyobjfrom': [{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, - {isintent_c: """\ -\tif (#name#_nofargs>capi_i) { -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ -""", - l_not(isintent_c): """\ -\tif (#name#_nofargs>capi_i) { -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ -""", - }, - """ -\t\tif (tmp_arr==NULL) -\t\t\tgoto capi_fail; -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr)) -\t\t\tgoto capi_fail; -}"""], - '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), - '_optional': '', - }, { - 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, - """\tif (capi_j>capi_i) { -\t\tPyArrayObject *rv_cb_arr = NULL; -\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; -\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", - {isintent_c:'|F2PY_INTENT_C'}, - """,capi_tmp); -\t\tif (rv_cb_arr == NULL) { -\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tMEMCOPY(#varname_i#,rv_cb_arr->data,PyArray_NBYTES(rv_cb_arr)); -\t\tif (capi_tmp != (PyObject *)rv_cb_arr) { -\t\t\tPy_DECREF(rv_cb_arr); -\t\t} -\t}""", - {debugcapi:'\tfprintf(stderr,"<-.\\n");'}, - ], - 'need':['MEMCOPY', {iscomplexarray:'#ctype#'}], - '_check':l_and(isarray, isintent_out) - }, { - 'docreturn':'#varname#,', - '_check':isintent_out - } - ] - -################## Build call-back module ############# -cb_map={} -def buildcallbacks(m): - global cb_map - cb_map[m['name']]=[] - for bi in m['body']: - if bi['block']=='interface': - for b in bi['body']: - if b: - buildcallback(b, m['name']) - else: - errmess('warning: empty body for %s\n' % (m['name'])) - -def buildcallback(rout, um): - global cb_map - from . 
import capi_maps - - outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'], um)) - args, depargs=getargs(rout) - capi_maps.depargs=depargs - var=rout['vars'] - vrd=capi_maps.cb_routsign2map(rout, um) - rd=dictappend({}, vrd) - cb_map[um].append([rout['name'], rd['name']]) - for r in cb_rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar=applyrules(r, vrd, rout) - rd=dictappend(rd, ar) - savevrd={} - for i, a in enumerate(args): - vrd=capi_maps.cb_sign2map(a, var[a], index=i) - savevrd[a]=vrd - for r in cb_arg_rules: - if '_depend' in r: - continue - if '_optional' in r and isoptional(var[a]): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r, vrd, var[a]) - rd=dictappend(rd, ar) - if '_break' in r: - break - for a in args: - vrd=savevrd[a] - for r in cb_arg_rules: - if '_depend' in r: - continue - if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r, vrd, var[a]) - rd=dictappend(rd, ar) - if '_break' in r: - break - for a in depargs: - vrd=savevrd[a] - for r in cb_arg_rules: - if '_depend' not in r: - continue - if '_optional' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r, vrd, var[a]) - rd=dictappend(rd, ar) - if '_break' in r: - break - if 'args' in rd and 'optargs' in rd: - if isinstance(rd['optargs'], list): - rd['optargs']=rd['optargs']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_nm']=rd['optargs_nm']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - rd['optargs_td']=rd['optargs_td']+[""" -#ifndef F2PY_CB_RETURNCOMPLEX -, -#endif -"""] - if isinstance(rd['docreturn'], list): - rd['docreturn']=stripcomma(replace('#docreturn#', {'docreturn':rd['docreturn']})) - optargs=stripcomma(replace('#docsignopt#', - {'docsignopt':rd['docsignopt']} - )) - if optargs=='': - rd['docsignature']=stripcomma(replace('#docsign#', {'docsign':rd['docsign']})) - else: - rd['docsignature']=replace('#docsign#[#docsignopt#]', - {'docsign': rd['docsign'], - 'docsignopt': optargs, - }) - rd['latexdocsignature']=rd['docsignature'].replace('_', '\\_') - rd['latexdocsignature']=rd['latexdocsignature'].replace(',', ', ') - rd['docstrsigns']=[] - rd['latexdocstrsigns']=[] - for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: - if k in rd and isinstance(rd[k], list): - rd['docstrsigns']=rd['docstrsigns']+rd[k] - k='latex'+k - if k in rd and isinstance(rd[k], list): - rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ - ['\\begin{description}']+rd[k][1:]+\ - ['\\end{description}'] - if 'args' not in rd: - rd['args']='' - rd['args_td']='' - rd['args_nm']='' - if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): - rd['noargs'] = 'void' - - ar=applyrules(cb_routine_rules, rd) - cfuncs.callbacks[rd['name']]=ar['body'] - if isinstance(ar['need'], str): - ar['need']=[ar['need']] - - if 'need' in rd: - for t in cfuncs.typedefs.keys(): - if t in rd['need']: - ar['need'].append(t) - - cfuncs.typedefs_generated[rd['name']+'_typedef'] = ar['cbtypedefs'] - ar['need'].append(rd['name']+'_typedef') - cfuncs.needs[rd['name']]=ar['need'] - - capi_maps.lcb2_map[rd['name']]={'maxnofargs':ar['maxnofargs'], - 'nofoptargs':ar['nofoptargs'], - 'docstr':ar['docstr'], - 'latexdocstr':ar['latexdocstr'], - 'argname':rd['argname'] - } - outmess('\t %s\n'%(ar['docstrshort'])) - #print ar['body'] - return 
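
Note on the rule machinery removed above: buildcallback walks cb_rout_rules and cb_arg_rules, keeps each rule whose '_check' predicate accepts the routine or variable, and folds the matching fragments together with dictappend. Below is a minimal, self-contained sketch of that pattern; apply_matching_rules and the toy predicates are hypothetical stand-ins for illustration, not f2py's actual helpers.

    # Simplified sketch (not f2py's implementation) of predicate-gated rules:
    # each rule is a dict, an optional '_check' predicate gates it, and the
    # matching rules are merged into one result dict, with repeated keys
    # accumulated into lists (dictappend-style).
    def apply_matching_rules(rules, var):
        merged = {}
        for rule in rules:
            check = rule.get('_check')
            if check is not None and not check(var):
                continue                  # rule does not apply to this variable
            for key, value in rule.items():
                if key.startswith('_'):
                    continue              # skip control keys like '_check'
                if key in merged:
                    if not isinstance(merged[key], list):
                        merged[key] = [merged[key]]
                    merged[key].append(value)
                else:
                    merged[key] = value
        return merged

    # Hypothetical usage with toy predicates standing in for isscalar/isarray:
    rules = [
        {'decl': 'scalar decl', '_check': lambda v: v.get('kind') == 'scalar'},
        {'decl': 'array decl',  '_check': lambda v: v.get('kind') == 'array'},
        {'docsign': '#varname#,'},        # unconditional rule, always merged
    ]
    print(apply_matching_rules(rules, {'kind': 'array'}))
    # {'decl': 'array decl', 'docsign': '#varname#,'}
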
-################## Build call-back function ############# diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cfuncs.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cfuncs.py deleted file mode 100644 index 7fb630697fe9b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/cfuncs.py +++ /dev/null @@ -1,1224 +0,0 @@ -#!/usr/bin/env python -""" - -C declarations, CPP macros, and C functions for f2py2e. -Only required declarations/macros/functions will be used. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 11:42:34 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import sys -import copy - -from . import __version__ - -f2py_version = __version__.version -errmess = sys.stderr.write - -##################### Definitions ################## - -outneeds={'includes0':[],'includes':[],'typedefs':[],'typedefs_generated':[], - 'userincludes':[], - 'cppmacros':[],'cfuncs':[],'callbacks':[],'f90modhooks':[], - 'commonhooks':[]} -needs={} -includes0={'includes0':'/*need_includes0*/'} -includes={'includes':'/*need_includes*/'} -userincludes={'userincludes':'/*need_userincludes*/'} -typedefs={'typedefs':'/*need_typedefs*/'} -typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'} -cppmacros={'cppmacros':'/*need_cppmacros*/'} -cfuncs={'cfuncs':'/*need_cfuncs*/'} -callbacks={'callbacks':'/*need_callbacks*/'} -f90modhooks={'f90modhooks': '/*need_f90modhooks*/', - 'initf90modhooksstatic': '/*initf90modhooksstatic*/', - 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', - } -commonhooks={'commonhooks': '/*need_commonhooks*/', - 'initcommonhooks': '/*need_initcommonhooks*/', - } - -############ Includes ################### - -includes0['math.h']='#include ' -includes0['string.h']='#include ' -includes0['setjmp.h']='#include ' - -includes['Python.h']='#include "Python.h"' -needs['arrayobject.h']=['Python.h'] -includes['arrayobject.h']='''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API -#include "arrayobject.h"''' - -includes['arrayobject.h']='#include "fortranobject.h"' -includes['stdarg.h']='#include ' - -############# Type definitions ############### - -typedefs['unsigned_char']='typedef unsigned char unsigned_char;' -typedefs['unsigned_short']='typedef unsigned short unsigned_short;' -typedefs['unsigned_long']='typedef unsigned long unsigned_long;' -typedefs['signed_char']='typedef signed char signed_char;' -typedefs['long_long']="""\ -#ifdef _WIN32 -typedef __int64 long_long; -#else -typedef long long long_long; -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['unsigned_long_long']="""\ -#ifdef _WIN32 -typedef __uint64 long_long; -#else -typedef unsigned long long unsigned_long_long; -#endif -""" -typedefs['long_double']="""\ -#ifndef _LONG_DOUBLE -typedef long double long_double; -#endif -""" -typedefs['complex_long_double']='typedef struct {long double r,i;} complex_long_double;' -typedefs['complex_float']='typedef struct {float r,i;} complex_float;' -typedefs['complex_double']='typedef struct {double r,i;} complex_double;' -typedefs['string']="""typedef char * string;""" - - -############### CPP macros #################### -cppmacros['CFUNCSMESS']="""\ -#ifdef DEBUGCFUNCS -#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); -#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ 
-\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ -\tfprintf(stderr,\"\\n\"); -#else -#define CFUNCSMESS(mess) -#define CFUNCSMESSPY(mess,obj) -#endif -""" -cppmacros['F_FUNC']="""\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F -#else -#define F_FUNC(f,F) _##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) _##F##_ -#else -#define F_FUNC(f,F) _##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F -#else -#define F_FUNC(f,F) f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_FUNC(f,F) F##_ -#else -#define F_FUNC(f,F) f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) -#else -#define F_FUNC_US(f,F) F_FUNC(f,F) -#endif -""" -cppmacros['F_WRAPPEDFUNC']="""\ -#if defined(PREPEND_FORTRAN) -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ -#endif -#endif -#else -#if defined(NO_APPEND_FORTRAN) -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f -#endif -#else -#if defined(UPPERCASE_FORTRAN) -#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ -#else -#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ -#endif -#endif -#endif -#if defined(UNDERSCORE_G77) -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) -#else -#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) -#endif -""" -cppmacros['F_MODFUNC']="""\ -#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f -#else -#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f -#else -#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ -#endif -#endif - -#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ -#if defined(NO_APPEND_FORTRAN) -#define F_MODFUNCNAME(m,f) f ## .in. ## m -#else -#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ -#endif -#endif -/* -#if defined(UPPERCASE_FORTRAN) -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) -#else -#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) -#endif -*/ - -#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) -""" -cppmacros['SWAPUNSAFE']="""\ -#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ - (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) -""" -cppmacros['SWAP']="""\ -#define SWAP(a,b,t) {\\ -\tt *c;\\ -\tc = a;\\ -\ta = b;\\ -\tb = c;} -""" -#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)' -cppmacros['PRINTPYOBJERR']="""\ -#define PRINTPYOBJERR(obj)\\ -\tfprintf(stderr,\"#modulename#.error is related to \");\\ -\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ -\tfprintf(stderr,\"\\n\"); -""" -cppmacros['MINMAX']="""\ -#ifndef max -#define max(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef min -#define min(a,b) ((a < b) ? (a) : (b)) -#endif -#ifndef MAX -#define MAX(a,b) ((a > b) ? (a) : (b)) -#endif -#ifndef MIN -#define MIN(a,b) ((a < b) ? 
(a) : (b)) -#endif -""" -needs['len..']=['f2py_size'] -cppmacros['len..']="""\ -#define rank(var) var ## _Rank -#define shape(var,dim) var ## _Dims[dim] -#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd) -#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim]) -#define fshape(var,dim) shape(var,rank(var)-dim-1) -#define len(var) shape(var,0) -#define flen(var) fshape(var,0) -#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) -/* #define index(i) capi_i ## i */ -#define slen(var) capi_ ## var ## _len -#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) -""" -needs['f2py_size']=['stdarg.h'] -cfuncs['f2py_size']="""\ -static int f2py_size(PyArrayObject* var, ...) -{ - npy_int sz = 0; - npy_int dim; - npy_int rank; - va_list argp; - va_start(argp, var); - dim = va_arg(argp, npy_int); - if (dim==-1) - { - sz = PyArray_SIZE(var); - } - else - { - rank = PyArray_NDIM(var); - if (dim>=1 && dim<=rank) - sz = PyArray_DIM(var, dim-1); - else - fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); - } - va_end(argp); - return sz; -} -""" - -cppmacros['pyobj_from_char1']='#define pyobj_from_char1(v) (PyInt_FromLong(v))' -cppmacros['pyobj_from_short1']='#define pyobj_from_short1(v) (PyInt_FromLong(v))' -needs['pyobj_from_int1']=['signed_char'] -cppmacros['pyobj_from_int1']='#define pyobj_from_int1(v) (PyInt_FromLong(v))' -cppmacros['pyobj_from_long1']='#define pyobj_from_long1(v) (PyLong_FromLong(v))' -needs['pyobj_from_long_long1']=['long_long'] -cppmacros['pyobj_from_long_long1']="""\ -#ifdef HAVE_LONG_LONG -#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) -#else -#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. 
-#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) -#endif -""" -needs['pyobj_from_long_double1']=['long_double'] -cppmacros['pyobj_from_long_double1']='#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' -cppmacros['pyobj_from_double1']='#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' -cppmacros['pyobj_from_float1']='#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' -needs['pyobj_from_complex_long_double1']=['complex_long_double'] -cppmacros['pyobj_from_complex_long_double1']='#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_double1']=['complex_double'] -cppmacros['pyobj_from_complex_double1']='#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_complex_float1']=['complex_float'] -cppmacros['pyobj_from_complex_float1']='#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' -needs['pyobj_from_string1']=['string'] -cppmacros['pyobj_from_string1']='#define pyobj_from_string1(v) (PyString_FromString((char *)v))' -needs['pyobj_from_string1size']=['string'] -cppmacros['pyobj_from_string1size']='#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))' -needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR'] -cppmacros['TRYPYARRAYTEMPLATE']="""\ -/* New SciPy */ -#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(arr->data)=*v; break; -#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(arr->data)=*v; break; -#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break; - -#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\ - switch (arr->descr->type_num) {\\ - case NPY_DOUBLE: *(double *)(arr->data)=*v; break;\\ - case NPY_INT: *(int *)(arr->data)=*v; break;\\ - case NPY_LONG: *(long *)(arr->data)=*v; break;\\ - case NPY_FLOAT: *(float *)(arr->data)=*v; break;\\ - case NPY_CDOUBLE: *(double *)(arr->data)=*v; break;\\ - case NPY_CFLOAT: *(float *)(arr->data)=*v; break;\\ - case NPY_BOOL: *(npy_bool *)(arr->data)=(*v!=0); break;\\ - case NPY_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\ - case NPY_BYTE: *(signed char *)(arr->data)=*v; break;\\ - case NPY_SHORT: *(short *)(arr->data)=*v; break;\\ - case NPY_USHORT: *(npy_ushort *)(arr->data)=*v; break;\\ - case NPY_UINT: *(npy_uint *)(arr->data)=*v; break;\\ - case NPY_ULONG: *(npy_ulong *)(arr->data)=*v; break;\\ - case NPY_LONGLONG: *(npy_longlong *)(arr->data)=*v; break;\\ - case NPY_ULONGLONG: *(npy_ulonglong *)(arr->data)=*v; break;\\ - case NPY_LONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\ - case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\ - default: return -2;\\ - };\\ - return 1 -""" - -needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR'] -cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\ -#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break; -#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ - PyArrayObject *arr = NULL;\\ - if (!obj) return -2;\\ - if (!PyArray_Check(obj)) return -1;\\ - if (!(arr=(PyArrayObject *)obj)) 
{fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (arr->descr->type==typecode) {\\ - *(ctype *)(arr->data)=(*v).r;\\ - *(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\ - return 1;\\ - }\\ - switch (arr->descr->type_num) {\\ - case NPY_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\ - case NPY_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\ - case NPY_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\ - case NPY_LONG: *(long *)(arr->data)=(*v).r; break;\\ - case NPY_FLOAT: *(float *)(arr->data)=(*v).r; break;\\ - case NPY_INT: *(int *)(arr->data)=(*v).r; break;\\ - case NPY_SHORT: *(short *)(arr->data)=(*v).r; break;\\ - case NPY_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\ - case NPY_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\ - case NPY_BOOL: *(npy_bool *)(arr->data)=((*v).r!=0 && (*v).i!=0); break;\\ - case NPY_USHORT: *(npy_ushort *)(arr->data)=(*v).r; break;\\ - case NPY_UINT: *(npy_uint *)(arr->data)=(*v).r; break;\\ - case NPY_ULONG: *(npy_ulong *)(arr->data)=(*v).r; break;\\ - case NPY_LONGLONG: *(npy_longlong *)(arr->data)=(*v).r; break;\\ - case NPY_ULONGLONG: *(npy_ulonglong *)(arr->data)=(*v).r; break;\\ - case NPY_LONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r;*(npy_longdouble *)(arr->data+sizeof(npy_longdouble))=(*v).i;break;\\ - case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\ - default: return -2;\\ - };\\ - return -1; -""" -## cppmacros['NUMFROMARROBJ']="""\ -## #define NUMFROMARROBJ(typenum,ctype) \\ -## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -## \tif (arr) {\\ -## \t\tif (arr->descr->type_num==NPY_OBJECT) {\\ -## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} else {\\ -## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\ -## \t\t}\\ -## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -## \t\treturn 1;\\ -## \t} -## """ -## #XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ -## cppmacros['CNUMFROMARROBJ']="""\ -## #define CNUMFROMARROBJ(typenum,ctype) \\ -## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -## \tif (arr) {\\ -## \t\tif (arr->descr->type_num==NPY_OBJECT) {\\ -## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} else {\\ -## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\ -## \t\t}\\ -## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -## \t\treturn 1;\\ -## \t} -## """ - - -needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN', 'PRINTPYOBJERR'] -cppmacros['GETSTRFROMPYTUPLE']="""\ -#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ -\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ -\t\tif (rv_cb_str == NULL)\\ -\t\t\tgoto capi_fail;\\ -\t\tif (PyString_Check(rv_cb_str)) {\\ -\t\t\tstr[len-1]='\\0';\\ -\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ -\t\t} else {\\ -\t\t\tPRINTPYOBJERR(rv_cb_str);\\ -\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\ -\t\t\tgoto capi_fail;\\ -\t\t}\\ -\t} -""" -cppmacros['GETSCALARFROMPYTUPLE']="""\ -#define 
GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ -\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ -\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ -\t\t\tgoto capi_fail;\\ -\t} -""" - -cppmacros['FAILNULL']="""\\ -#define FAILNULL(p) do { \\ - if ((p) == NULL) { \\ - PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ - goto capi_fail; \\ - } \\ -} while (0) -""" -needs['MEMCOPY']=['string.h', 'FAILNULL'] -cppmacros['MEMCOPY']="""\ -#define MEMCOPY(to,from,n)\\ - do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) -""" -cppmacros['STRINGMALLOC']="""\ -#define STRINGMALLOC(str,len)\\ -\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ -\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ -\t\tgoto capi_fail;\\ -\t} else {\\ -\t\t(str)[len] = '\\0';\\ -\t} -""" -cppmacros['STRINGFREE']="""\ -#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) -""" -needs['STRINGCOPYN']=['string.h', 'FAILNULL'] -cppmacros['STRINGCOPYN']="""\ -#define STRINGCOPYN(to,from,buf_size) \\ - do { \\ - int _m = (buf_size); \\ - char *_to = (to); \\ - char *_from = (from); \\ - FAILNULL(_to); FAILNULL(_from); \\ - (void)strncpy(_to, _from, sizeof(char)*_m); \\ - _to[_m-1] = '\\0'; \\ - /* Padding with spaces instead of nulls */ \\ - for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ - _to[_m] = ' '; \\ - } \\ - } while (0) -""" -needs['STRINGCOPY']=['string.h', 'FAILNULL'] -cppmacros['STRINGCOPY']="""\ -#define STRINGCOPY(to,from)\\ - do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) -""" -cppmacros['CHECKGENERIC']="""\ -#define CHECKGENERIC(check,tcheck,name) \\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKARRAY']="""\ -#define CHECKARRAY(check,tcheck,name) \\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKSTRING']="""\ -#define CHECKSTRING(check,tcheck,name,show,var)\\ -\tif (!(check)) {\\ -\t\tchar errstring[256];\\ -\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ -\t\tPyErr_SetString(#modulename#_error, errstring);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -cppmacros['CHECKSCALAR']="""\ -#define CHECKSCALAR(check,tcheck,name,show,var)\\ -\tif (!(check)) {\\ -\t\tchar errstring[256];\\ -\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ -\t\tPyErr_SetString(#modulename#_error,errstring);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ -## cppmacros['CHECKDIMS']="""\ -## #define CHECKDIMS(dims,rank) \\ -## \tfor (int i=0;i<(rank);i++)\\ -## \t\tif (dims[i]<0) {\\ -## \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ -## \t\t\tgoto capi_fail;\\ -## \t\t} -## """ -cppmacros['ARRSIZE']='#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' -cppmacros['OLDPYNUM']="""\ -#ifdef OLDPYNUM -#error You need to intall Numeric Python version 13 or higher. 
Get it from http:/sourceforge.net/project/?group_id=1369 -#endif -""" -################# C functions ############### - -cfuncs['calcarrindex']="""\ -static int calcarrindex(int *i,PyArrayObject *arr) { -\tint k,ii = i[0]; -\tfor (k=1; k < arr->nd; k++) -\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */ -\treturn ii; -}""" -cfuncs['calcarrindextr']="""\ -static int calcarrindextr(int *i,PyArrayObject *arr) { -\tint k,ii = i[arr->nd-1]; -\tfor (k=1; k < arr->nd; k++) -\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */ -\treturn ii; -}""" -cfuncs['forcomb']="""\ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { - int k; - if (dims==NULL) return 0; - if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - for (k=1;kdata,str,PyArray_NBYTES(arr)); } -\treturn 1; -capi_fail: -\tPRINTPYOBJERR(obj); -\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\"); -\treturn 0; -} -""" -needs['string_from_pyobj']=['string', 'STRINGMALLOC', 'STRINGCOPYN'] -cfuncs['string_from_pyobj']="""\ -static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) { -\tPyArrayObject *arr = NULL; -\tPyObject *tmp = NULL; -#ifdef DEBUGCFUNCS -fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj); -#endif -\tif (obj == Py_None) { -\t\tif (*len == -1) -\t\t\t*len = strlen(inistr); /* Will this cause problems? */ -\t\tSTRINGMALLOC(*str,*len); -\t\tSTRINGCOPYN(*str,inistr,*len+1); -\t\treturn 1; -\t} -\tif (PyArray_Check(obj)) { -\t\tif ((arr = (PyArrayObject *)obj) == NULL) -\t\t\tgoto capi_fail; -\t\tif (!ISCONTIGUOUS(arr)) { -\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tif (*len == -1) -\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr); -\t\tSTRINGMALLOC(*str,*len); -\t\tSTRINGCOPYN(*str,arr->data,*len+1); -\t\treturn 1; -\t} -\tif (PyString_Check(obj)) { -\t\ttmp = obj; -\t\tPy_INCREF(tmp); -\t} -#if PY_VERSION_HEX >= 0x03000000 -\telse if (PyUnicode_Check(obj)) { -\t\ttmp = PyUnicode_AsASCIIString(obj); -\t} -\telse { -\t\tPyObject *tmp2; -\t\ttmp2 = PyObject_Str(obj); -\t\tif (tmp2) { -\t\t\ttmp = PyUnicode_AsASCIIString(tmp2); -\t\t\tPy_DECREF(tmp2); -\t\t} -\t\telse { -\t\t\ttmp = NULL; -\t\t} -\t} -#else -\telse { -\t\ttmp = PyObject_Str(obj); -\t} -#endif -\tif (tmp == NULL) goto capi_fail; -\tif (*len == -1) -\t\t*len = PyString_GET_SIZE(tmp); -\tSTRINGMALLOC(*str,*len); -\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); -\tPy_DECREF(tmp); -\treturn 1; -capi_fail: -\tPy_XDECREF(tmp); -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['char_from_pyobj']=['int_from_pyobj'] -cfuncs['char_from_pyobj']="""\ -static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (char)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['signed_char_from_pyobj']=['int_from_pyobj', 'signed_char'] -cfuncs['signed_char_from_pyobj']="""\ -static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif 
(int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (signed_char)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['short_from_pyobj']=['int_from_pyobj'] -cfuncs['short_from_pyobj']="""\ -static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (short)i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -cfuncs['int_from_pyobj']="""\ -static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyInt_Check(obj)) { -\t\t*v = (int)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Int(obj); -\tif (tmp) { -\t\t*v = PyInt_AS_LONG(tmp); -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -cfuncs['long_from_pyobj']="""\ -static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyInt_Check(obj)) { -\t\t*v = PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Int(obj); -\tif (tmp) { -\t\t*v = PyInt_AS_LONG(tmp); -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['long_long_from_pyobj']=['long_long'] -cfuncs['long_long_from_pyobj']="""\ -static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyLong_Check(obj)) { -\t\t*v = PyLong_AsLongLong(obj); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PyInt_Check(obj)) { -\t\t*v = (long_long)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Long(obj); -\tif (tmp) { -\t\t*v = PyLong_AsLongLong(tmp); -\t\tPy_DECREF(tmp); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['long_double_from_pyobj']=['double_from_pyobj', 'long_double'] -cfuncs['long_double_from_pyobj']="""\ -static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { -\tdouble d=0; -\tif (PyArray_CheckScalar(obj)){ -\t\tif PyArray_IsScalar(obj, LongDouble) { -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t\treturn 1; -\t\t} -\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) { -\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj)); -\t\t\treturn 1; -\t\t} -\t} -\tif 
(double_from_pyobj(&d,obj,errmess)) { -\t\t*v = (long_double)d; -\t\treturn 1; -\t} -\treturn 0; -} -""" -cfuncs['double_from_pyobj']="""\ -static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyFloat_Check(obj)) { -#ifdef __sgi -\t\t*v = PyFloat_AsDouble(obj); -#else -\t\t*v = PyFloat_AS_DOUBLE(obj); -#endif -\t\treturn 1; -\t} -\ttmp = PyNumber_Float(obj); -\tif (tmp) { -#ifdef __sgi -\t\t*v = PyFloat_AsDouble(tmp); -#else -\t\t*v = PyFloat_AS_DOUBLE(tmp); -#endif -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['float_from_pyobj']=['double_from_pyobj'] -cfuncs['float_from_pyobj']="""\ -static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { -\tdouble d=0.0; -\tif (double_from_pyobj(&d,obj,errmess)) { -\t\t*v = (float)d; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['complex_long_double_from_pyobj']=['complex_long_double', 'long_double', - 'complex_double_from_pyobj'] -cfuncs['complex_long_double_from_pyobj']="""\ -static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { -\tcomplex_double cd={0.0,0.0}; -\tif (PyArray_CheckScalar(obj)){ -\t\tif PyArray_IsScalar(obj, CLongDouble) { -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t\treturn 1; -\t\t} -\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { -\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; -\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; -\t\t\treturn 1; -\t\t} -\t} -\tif (complex_double_from_pyobj(&cd,obj,errmess)) { -\t\t(*v).r = (long_double)cd.r; -\t\t(*v).i = (long_double)cd.i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['complex_double_from_pyobj']=['complex_double'] -cfuncs['complex_double_from_pyobj']="""\ -static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { -\tPy_complex c; -\tif (PyComplex_Check(obj)) { -\t\tc=PyComplex_AsCComplex(obj); -\t\t(*v).r=c.real, (*v).i=c.imag; -\t\treturn 1; -\t} -\tif (PyArray_IsScalar(obj, ComplexFloating)) { -\t\tif (PyArray_IsScalar(obj, CFloat)) { -\t\t\tnpy_cfloat new; -\t\t\tPyArray_ScalarAsCtype(obj, &new); -\t\t\t(*v).r = (double)new.real; -\t\t\t(*v).i = (double)new.imag; -\t\t} -\t\telse if (PyArray_IsScalar(obj, CLongDouble)) { -\t\t\tnpy_clongdouble new; -\t\t\tPyArray_ScalarAsCtype(obj, &new); -\t\t\t(*v).r = (double)new.real; -\t\t\t(*v).i = (double)new.imag; -\t\t} -\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */ -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t} -\t\treturn 1; -\t} -\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ -\t\tPyObject *arr; -\t\tif (PyArray_Check(obj)) { -\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); -\t\t} -\t\telse { -\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); -\t\t} -\t\tif (arr==NULL) return 0; -\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; -\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; -\t\treturn 1; -\t} -\t/* Python does not provide PyNumber_Complex function :-( 
*/ -\t(*v).i=0.0; -\tif (PyFloat_Check(obj)) { -#ifdef __sgi -\t\t(*v).r = PyFloat_AsDouble(obj); -#else -\t\t(*v).r = PyFloat_AS_DOUBLE(obj); -#endif -\t\treturn 1; -\t} -\tif (PyInt_Check(obj)) { -\t\t(*v).r = (double)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\tif (PyLong_Check(obj)) { -\t\t(*v).r = PyLong_AsDouble(obj); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { -\t\tPyObject *tmp = PySequence_GetItem(obj,0); -\t\tif (tmp) { -\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) { -\t\t\t\tPy_DECREF(tmp); -\t\t\t\treturn 1; -\t\t\t} -\t\t\tPy_DECREF(tmp); -\t\t} -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) -\t\t\terr = PyExc_TypeError; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; -} -""" -needs['complex_float_from_pyobj']=['complex_float', 'complex_double_from_pyobj'] -cfuncs['complex_float_from_pyobj']="""\ -static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { -\tcomplex_double cd={0.0,0.0}; -\tif (complex_double_from_pyobj(&cd,obj,errmess)) { -\t\t(*v).r = (float)cd.r; -\t\t(*v).i = (float)cd.i; -\t\treturn 1; -\t} -\treturn 0; -} -""" -needs['try_pyarr_from_char']=['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n' -needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE', 'unsigned_char'] -cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' -needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE', 'signed_char'] -cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' -needs['try_pyarr_from_short']=['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n' -needs['try_pyarr_from_int']=['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n' -needs['try_pyarr_from_long']=['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n' -needs['try_pyarr_from_long_long']=['pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] -cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' -needs['try_pyarr_from_float']=['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n' -needs['try_pyarr_from_double']=['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] -cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n' -needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] -cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' -needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] 
-cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' - -needs['create_cb_arglist']=['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] -cfuncs['create_cb_arglist']="""\ -static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { -\tPyObject *tmp = NULL; -\tPyObject *tmp_fun = NULL; -\tint tot,opt,ext,siz,i,di=0; -\tCFUNCSMESS(\"create_cb_arglist\\n\"); -\ttot=opt=ext=siz=0; -\t/* Get the total number of arguments */ -\tif (PyFunction_Check(fun)) -\t\ttmp_fun = fun; -\telse { -\t\tdi = 1; -\t\tif (PyObject_HasAttrString(fun,\"im_func\")) { -\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\"); -\t\t} -\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) { -\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\"); -\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\")) -\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); -\t\t\telse { -\t\t\t\ttmp_fun = fun; /* built-in function */ -\t\t\t\ttot = maxnofargs; -\t\t\t\tif (xa != NULL) -\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa); -\t\t\t} -\t\t\tPy_XDECREF(tmp); -\t\t} -\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { -\t\t\ttot = maxnofargs; -\t\t\tif (xa != NULL) -\t\t\t\ttot += PyTuple_Size((PyObject *)xa); -\t\t\ttmp_fun = fun; -\t\t} -\t\telse if (F2PyCapsule_Check(fun)) { -\t\t\ttot = maxnofargs; -\t\t\tif (xa != NULL) -\t\t\t\text = PyTuple_Size((PyObject *)xa); -\t\t\tif(ext>0) { -\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t\ttmp_fun = fun; -\t\t} -\t} -if (tmp_fun==NULL) { -fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); -goto capi_fail; -} -#if PY_VERSION_HEX >= 0x03000000 -\tif (PyObject_HasAttrString(tmp_fun,\"__code__\")) { -\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) -#else -\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) { -\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) -#endif -\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; -\t\tPy_XDECREF(tmp); -\t} -\t/* Get the number of optional arguments */ -#if PY_VERSION_HEX >= 0x03000000 -\tif (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { -\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) -#else -\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { -\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) -#endif -\t\t\topt = PyTuple_Size(tmp); -\t\tPy_XDECREF(tmp); -\t} -\t/* Get the number of extra arguments */ -\tif (xa != NULL) -\t\text = PyTuple_Size((PyObject *)xa); -\t/* Calculate the size of call-backs argument list */ -\tsiz = MIN(maxnofargs+ext,tot); -\t*nofargs = MAX(0,siz-ext); -#ifdef DEBUGCFUNCS -\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); -#endif -\tif (siz0: - if outneeds[n][0] not in needs: - out.append(outneeds[n][0]) - del outneeds[n][0] - else: - flag=0 - for k in outneeds[n][1:]: - if k in needs[outneeds[n][0]]: - flag=1 - break - if flag: - outneeds[n]=outneeds[n][1:]+[outneeds[n][0]] - else: - out.append(outneeds[n][0]) 
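The `create_cb_arglist` helper above sizes the argument tuple handed to a Python callback: it reads `co_argcount` from the function's code object (skipping `self` for bound methods), counts optional arguments from the defaults tuple, and caps the total against what the Fortran side can supply plus the user's extra-args tuple. A rough Python rendering of just the sizing arithmetic (hypothetical `cb_arglist_size`; assumes ordinary CPython functions without `*args`):

    import types

    def cb_arglist_size(fun, maxnofargs, extra_args=()):
        """Return how many arguments f2py would fill in from Fortran
        when calling back `fun` (sketch of create_cb_arglist's MIN/MAX
        sizing, not the full tuple construction)."""
        di = 1 if isinstance(fun, types.MethodType) else 0   # skip `self`
        code = getattr(fun, '__code__', None)
        if code is not None:
            tot = code.co_argcount - di
        else:
            tot = maxnofargs + len(extra_args)   # built-in: assume the maximum
        ext = len(extra_args)
        siz = min(maxnofargs + ext, tot)         # size of the call tuple
        return max(0, siz - ext)                 # slots filled from Fortran

    def f(x, y, z=None):
        pass

    assert cb_arglist_size(f, maxnofargs=5) == 3
    assert cb_arglist_size(f, maxnofargs=5, extra_args=(1,)) == 2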
- del outneeds[n][0] - if saveout and (0 not in map(lambda x, y:x==y, saveout, outneeds[n])) \ - and outneeds[n] != []: - print(n, saveout) - errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') - out=out+saveout - break - saveout=copy.copy(outneeds[n]) - if out==[]: - out=[n] - res[n]=out - return res diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/common_rules.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/common_rules.py deleted file mode 100644 index d3b7f6dc2ae8e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/common_rules.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -""" - -Build common block mechanism for f2py2e. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 10:57:33 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.19 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -import pprint -import sys -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from .auxfuncs import * -from . import capi_maps -from . import func2subr -from .crackfortran import rmbadname -############## - -def findcommonblocks(block,top=1): - ret = [] - if hascommon(block): - for n in block['common'].keys(): - vars={} - for v in block['common'][n]: - vars[v]=block['vars'][v] - ret.append((n, block['common'][n], vars)) - elif hasbody(block): - for b in block['body']: - ret=ret+findcommonblocks(b, 0) - if top: - tret=[] - names=[] - for t in ret: - if t[0] not in names: - names.append(t[0]) - tret.append(t) - return tret - return ret - -def buildhooks(m): - ret = {'commonhooks':[],'initcommonhooks':[],'docs':['"COMMON blocks:\\n"']} - fwrap = [''] - def fadd(line,s=fwrap): s[0] = '%s\n %s'%(s[0], line) - chooks = [''] - def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0], line) - ihooks = [''] - def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0], line) - doc = [''] - def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0], line) - for (name, vnames, vars) in findcommonblocks(m): - lower_name = name.lower() - hnames, inames = [], [] - for n in vnames: - if isintent_hide(vars[n]): hnames.append(n) - else: inames.append(n) - if hnames: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n'%(name, ','.join(inames), ','.join(hnames))) - else: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n'%(name, ','.join(inames))) - fadd('subroutine f2pyinit%s(setupfunc)'%name) - fadd('external setupfunc') - for n in vnames: - fadd(func2subr.var2fixfortran(vars, n)) - if name=='_BLNK_': - fadd('common %s'%(','.join(vnames))) - else: - fadd('common /%s/ %s'%(name, ','.join(vnames))) - fadd('call setupfunc(%s)'%(','.join(inames))) - fadd('end\n') - cadd('static FortranDataDef f2py_%s_def[] = {'%(name)) - idims=[] - for n in inames: - ct = capi_maps.getctype(vars[n]) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n, vars[n]) - if dm['dims']: idims.append('(%s)'%(dm['dims'])) - else: idims.append('') - dms=dm['dims'].strip() - if not dms: dms='-1' - cadd('\t{\"%s\",%s,{{%s}},%s},'%(n, dm['rank'], dms, at)) - cadd('\t{NULL}\n};') - inames1 = rmbadname(inames) - inames1_tps = ','.join(['char *'+s for s in inames1]) - cadd('static void f2py_setup_%s(%s) {'%(name, inames1_tps)) - 
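The `get_needs` loop at the top of this hunk orders the helper snippets so that each one is emitted after its dependencies: a name is rotated to the back of the queue while any of its dependencies is still pending, and the loop gives up with a warning when a full pass makes no progress (a circular dependence). A simplified sketch of that rotation strategy (hypothetical `order_needs`):

    def order_needs(names, needs):
        """Order `names` so dependencies (per `needs`) come first;
        on a cycle, append the remainder unsorted, like get_needs."""
        pending, out = list(names), []
        stall = 0
        while pending:
            head, rest = pending[0], pending[1:]
            if any(dep in rest for dep in needs.get(head, [])):
                pending = rest + [head]    # rotate: a dependency is pending
                stall += 1
                if stall > len(pending):   # no progress: circular dependence
                    out.extend(pending)
                    break
            else:
                out.append(head)
                pending = rest
                stall = 0
        return out

    deps = {'float_from_pyobj': ['double_from_pyobj']}
    assert order_needs(['float_from_pyobj', 'double_from_pyobj'], deps) == \
           ['double_from_pyobj', 'float_from_pyobj']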
cadd('\tint i_f2py=0;') - for n in inames1: - cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name, n)) - cadd('}') - if '_' in lower_name: - F_FUNC='F_FUNC_US' - else: - F_FUNC='F_FUNC' - cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\ - %(F_FUNC, lower_name, name.upper(), - ','.join(['char*']*len(inames1)))) - cadd('static void f2py_init_%s(void) {'%name) - cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ - %(F_FUNC, lower_name, name.upper(), name)) - cadd('}\n') - iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name, name, name)) - tname = name.replace('_', '\\_') - dadd('\\subsection{Common block \\texttt{%s}}\n'%(tname)) - dadd('\\begin{description}') - for n in inames: - dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n, vars[n]))) - if hasnote(vars[n]): - note = vars[n]['note'] - if isinstance(note, list): note='\n'.join(note) - dadd('--- %s'%(note)) - dadd('\\end{description}') - ret['docs'].append('"\t/%s/ %s\\n"'%(name, ','.join(map(lambda v, d:v+d, inames, idims)))) - ret['commonhooks']=chooks - ret['initcommonhooks']=ihooks - ret['latexdoc']=doc[0] - if len(ret['docs'])<=1: ret['docs']='' - return ret, fwrap[0] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/crackfortran.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/crackfortran.py deleted file mode 100644 index 8930811269c9b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/crackfortran.py +++ /dev/null @@ -1,2868 +0,0 @@ -#!/usr/bin/env python -""" -crackfortran --- read fortran (77,90) code and extract declaration information. - -Copyright 1999-2004 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/09/27 07:13:49 $ -Pearu Peterson - - -Usage of crackfortran: -====================== -Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h - -m ,--ignore-contains -Functions: crackfortran, crack2fortran -The following Fortran statements/constructions are supported -(or will be if needed): - block data,byte,call,character,common,complex,contains,data, - dimension,double complex,double precision,end,external,function, - implicit,integer,intent,interface,intrinsic, - logical,module,optional,parameter,private,public, - program,real,(sequence?),subroutine,type,use,virtual, - include,pythonmodule -Note: 'virtual' is mapped to 'dimension'. -Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). -Note: code after 'contains' will be ignored until its scope ends. -Note: 'common' statement is extended: dimensions are moved to variable definitions -Note: f2py directive: f2py is read as -Note: pythonmodule is introduced to represent Python module - -Usage: - `postlist=crackfortran(files,funcs)` - `postlist` contains declaration information read from the list of files `files`. 
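Before the crackfortran structure is spelled out below, note how `buildhooks` in common_rules.py above wires a COMMON block: it emits a tiny Fortran shim, `f2pyinit<name>`, that declares the same COMMON block and passes its members to a C setup callback, which records their addresses in the `FortranDataDef` table. A sketch of the Fortran text that the `fadd` calls above assemble, for a hypothetical `common /cblk/ x,n` with no hidden members:

    def fortran_init_for_common(name, members):
        """Emit the f2pyinit shim the way buildhooks' fadd calls do
        (simplified sketch: no hidden members, no type declarations,
        no _BLNK_ handling)."""
        lines = ['subroutine f2pyinit%s(setupfunc)' % name,
                 'external setupfunc',
                 'common /%s/ %s' % (name, ','.join(members)),
                 'call setupfunc(%s)' % ','.join(members),
                 'end']
        return '\n'.join('      ' + line for line in lines)   # fixed form, col 7

    print(fortran_init_for_common('cblk', ['x', 'n']))
    #       subroutine f2pyinitcblk(setupfunc)
    #       external setupfunc
    #       common /cblk/ x,n
    #       call setupfunc(x,n)
    #       end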
- `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
-
- `postlist` has the following structure:
- *** it is a list of dictionaries containing `blocks':
-     B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
-          'implicit','externals','interfaced','common','sortvars',
-          'commonvars','note']}
-     B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
-                  'program' | 'block data' | 'type' | 'pythonmodule'
-     B['body'] --- list containing `subblocks' with the same structure as `blocks'
-     B['parent_block'] --- dictionary of a parent block:
-                           C['body'][<index>]['parent_block'] is C
-     B['vars'] --- dictionary of variable definitions
-     B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
-     B['name'] --- name of the block (not if B['block']=='interface')
-     B['prefix'] --- prefix string (only if B['block']=='function')
-     B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
-     B['result'] --- name of the return value (only if B['block']=='function')
-     B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
-     B['externals'] --- list of variables being external
-     B['interfaced'] --- list of variables being external and defined
-     B['common'] --- dictionary of common blocks (list of objects)
-     B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
-     B['from'] --- string showing the 'parents' of the current block
-     B['use'] --- dictionary of modules used in current block:
-         {<modulename>:{['only':<0|1>],['map':{<local_name>:<use_name>,...}]}}
-     B['note'] --- list of LaTeX comments on the block
-     B['f2pyenhancements'] --- optional dictionary
-          {'threadsafe':'','fortranname':<name>,
-           'callstatement':<C-expr>|<multi-line block>,
-           'callprotoargument':<C-expr-list>,
-           'usercode':<multi-line block>|<list of multi-line blocks>,
-           'pymethoddef:<multi-line block>'
-           }
-     B['entry'] --- dictionary {entryname:argslist,..}
-     B['varnames'] --- list of variable names given in the order of reading the
-                       Fortran code, useful for derived types.
-     B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
- *** Variable definition is a dictionary
-     D = B['vars'][<name>] =
-     {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
-     D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
-                     'double precision' | 'integer' | 'logical' | 'real' | 'type'
-     D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
-       'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
-       'optional','required', etc)
-     K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
-         'complex' | 'integer' | 'logical' | 'real' )
-     C = D['charselector'] = {['*','len','kind']}
-                             (only if D['typespec']=='character')
-     D['='] --- initialization expression string
-     D['typename'] --- name of the type if D['typespec']=='type'
-     D['dimension'] --- list of dimension bounds
-     D['intent'] --- list of intent specifications
-     D['depend'] --- list of variable names on which current variable depends on
-     D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
-     D['note'] --- list of LaTeX comments on the variable
- *** Meaning of kind/char selectors (few examples):
-     D['typespec>']*K['*']
-     D['typespec'](kind=K['kind'])
-     character*C['*']
-     character(len=C['len'],kind=C['kind'])
-     (see also fortran type declaration statement formats below)
-
-Fortran 90 type declaration statement format (F77 is subset of F90)
-====================================================================
-(Main source: IBM XL Fortran 5.1 Language Reference Manual)
-type declaration = <typespec> [[<attrspec>]::] <entitydecl>
-<typespec> = byte                          |
-             character[<charselector>]     |
-             complex[<kindselector>]       |
-             double complex                |
-             double precision              |
-             integer[<kindselector>]       |
-             logical[<kindselector>]       |
-             real[<kindselector>]          |
-             type(<typename>)
-<charselector> = * <charlen>               |
-             ([len=]<len>[,[kind=]<kind>]) |
-             (kind=<kind>[,len=<len>])
-<kindselector> = * <intlen>                |
-             ([kind=]<kind>)
-<attrspec> = comma separated list of attributes.
-             Only the following attributes are used in
-             building up the interface:
-                external
-                (parameter --- affects '=' key)
-                optional
-                intent
-             Other attributes are ignored.
-<intentspec> = in | out | inout
-<arrayspec> = comma separated list of dimension bounds.
-<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
-                      [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
-
-In addition, the following attributes are used: check,depend,note
-
-TODO:
-    * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-      -> 'real x(2)')
-      The above may be solved by creating appropriate preprocessor program, for example.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import sys
-import string
-import fileinput
-import re
-import pprint
-import os
-import copy
-import platform
-
-from . import __version__
-from .auxfuncs import *
-
-f2py_version = __version__.version
-
-# Global flags:
-strictf77=1              # Ignore `!' comments unless line[0]=='!'
-sourcecodeform='fix'     # 'fix','free'
-quiet=0                  # Be verbose if 0 (Obsolete: not used any more)
-verbose=1                # Be quiet if 0, extra verbose if > 1.
-tabchar=4*' ' -pyffilename='' -f77modulename='' -skipemptyends=0 # for old F77 programs without 'program' statement -ignorecontains=1 -dolowercase=1 -debug=[] - -# Global variables -groupcounter=0 -grouplist={groupcounter:[]} -neededmodule=-1 -expectbegin=1 -skipblocksuntil=-1 -usermodules=[] -f90modulevars={} -gotnextfile=1 -filepositiontext='' -currentfilename='' -skipfunctions=[] -skipfuncs=[] -onlyfuncs=[] -include_paths=[] -previous_context = None - - -def reset_global_f2py_vars(): - global groupcounter, grouplist, neededmodule, expectbegin, \ - skipblocksuntil, usermodules, f90modulevars, gotnextfile, \ - filepositiontext, currentfilename, skipfunctions, skipfuncs, \ - onlyfuncs, include_paths, previous_context, \ - strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename, \ - f77modulename, skipemptyends, ignorecontains, dolowercase, debug - - # flags - strictf77 = 1 - sourcecodeform = 'fix' - quiet = 0 - verbose = 1 - tabchar = 4*' ' - pyffilename = '' - f77modulename = '' - skipemptyends = 0 - ignorecontains = 1 - dolowercase = 1 - debug = [] - # variables - groupcounter = 0 - grouplist = {groupcounter:[]} - neededmodule =-1 - expectbegin = 1 - skipblocksuntil = -1 - usermodules = [] - f90modulevars = {} - gotnextfile = 1 - filepositiontext = '' - currentfilename = '' - skipfunctions = [] - skipfuncs = [] - onlyfuncs = [] - include_paths = [] - previous_context = None - - -###### Some helper functions -def show(o,f=0):pprint.pprint(o) -errmess=sys.stderr.write -def outmess(line,flag=1): - global filepositiontext - if not verbose: return - if not quiet: - if flag:sys.stdout.write(filepositiontext) - sys.stdout.write(line) -re._MAXCACHE=50 -defaultimplicitrules={} -for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'} -for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'} -del c -badnames={} -invbadnames={} -for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', - 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', - 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', - 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', - 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', - 'max', 'min', - 'flen', 'fshape', - 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', - 'type', 'default']: - badnames[n]=n+'_bn' - invbadnames[n+'_bn']=n - -def rmbadname1(name): - if name in badnames: - errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name, badnames[name])) - return badnames[name] - return name - -def rmbadname(names): return [rmbadname1(_m) for _m in names] - -def undo_rmbadname1(name): - if name in invbadnames: - errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\ - %(name, invbadnames[name])) - return invbadnames[name] - return name - -def undo_rmbadname(names): return [undo_rmbadname1(_m) for _m in names] - -def getextension(name): - i=name.rfind('.') - if i==-1: return '' - if '\\' in name[i:]: return '' - if '/' in name[i:]: return '' - return name[i+1:] - -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search -_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # 
signs of free format are detected. - result = 0 - f = open(file, 'r') - line = f.readline() - n = 15 # the number of non-comment lines to scan for hints - if _has_f_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - if line[0]!='!' and line.strip(): - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-2:-1]=='&': - result = 1 - break - line = f.readline() - f.close() - return result - - -####### Read fortran (77,90) code -def readfortrancode(ffile,dowithline=show,istop=1): - """ - Read fortran codes from files and - 1) Get rid of comments, line continuations, and empty lines; lower cases. - 2) Call dowithline(line) on every line. - 3) Recursively call itself when statement \"include ''\" is met. - """ - global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ - beginpattern, quiet, verbose, dolowercase, include_paths - if not istop: - saveglobals=gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ - beginpattern, quiet, verbose, dolowercase - if ffile==[]: return - localdolowercase = dolowercase - cont=0 - finalline='' - ll='' - commentline=re.compile(r'(?P([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P.*)') - includeline=re.compile(r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) - cont1=re.compile(r'(?P.*)&\s*\Z') - cont2=re.compile(r'(\s*&|)(?P.*)') - mline_mark = re.compile(r".*?'''") - if istop: dowithline('', -1) - ll, l1='', '' - spacedigits=[' '] + [str(_m) for _m in range(10)] - filepositiontext='' - fin=fileinput.FileInput(ffile) - while True: - l=fin.readline() - if not l: break - if fin.isfirstline(): - filepositiontext='' - currentfilename=fin.filename() - gotnextfile=1 - l1=l - strictf77=0 - sourcecodeform='fix' - ext = os.path.splitext(currentfilename)[1] - if is_f_file(currentfilename) and \ - not (_has_f90_header(l) or _has_fix_header(l)): - strictf77=1 - elif is_free_format(currentfilename) and not _has_fix_header(l): - sourcecodeform='free' - if strictf77: beginpattern=beginpattern77 - else: beginpattern=beginpattern90 - outmess('\tReading file %s (format:%s%s)\n'\ - %(repr(currentfilename), sourcecodeform, - strictf77 and ',strict' or '')) - - l=l.expandtabs().replace('\xa0', ' ') - while not l=='': # Get rid of newline characters - if l[-1] not in "\n\r\f": break - l=l[:-1] - if not strictf77: - r=commentline.match(l) - if r: - l=r.group('line')+' ' # Strip comments starting with `!' - rl=r.group('rest') - if rl[:4].lower()=='f2py': # f2py directive - l = l + 4*' ' - r=commentline.match(rl[4:]) - if r: l=l+r.group('line') - else: l = l + rl[4:] - if l.strip()=='': # Skip empty line - cont=0 - continue - if sourcecodeform=='fix': - if l[0] in ['*', 'c', '!', 'C', '#']: - if l[1:5].lower()=='f2py': # f2py directive - l=' '+l[5:] - else: # Skip comment line - cont=0 - continue - elif strictf77: - if len(l)>72: l=l[:72] - if not (l[0] in spacedigits): - raise Exception('readfortrancode: Found non-(space,digit) char ' - 'in the first column.\n\tAre you sure that ' - 'this code is in fix form?\n\tline=%s' % repr(l)) - - if (not cont or strictf77) and (len(l)>5 and not l[5]==' '): - # Continuation of a previous line - ll=ll+l[6:] - finalline='' - origfinalline='' - else: - if not strictf77: - # F90 continuation - r=cont1.match(l) - if r: l=r.group('line') # Continuation follows .. - if cont: - ll=ll+cont2.match(l).group('line') - finalline='' - origfinalline='' - else: - l=' '+l[5:] # clean up line beginning from possible digits. 
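The `is_free_format` check above is a heuristic: a file is assumed to be fixed form unless, within the first 15 non-comment lines, some line carries statement text in columns 1-5 or ends with the free-form continuation marker `&` (explicit `-*- fortran -*-` / `-*- f90 -*-` header lines short-circuit the scan). A compact re-implementation of the same idea (simplified sketch: no header handling, operates on a string rather than a file):

    import io

    def looks_free_format(text, limit=15):
        """Heuristic in the spirit of is_free_format above: fixed form
        unless code starts before column 6 or a line ends with '&'."""
        for n, line in enumerate(io.StringIO(text)):
            if n >= limit:
                break
            if not line.strip() or line.lstrip().startswith('!'):
                continue                          # blank or bang comment
            if line.rstrip().endswith('&'):
                return True                       # free-form continuation
            head = line[:5]
            if head.strip() and not head.lstrip()[0].isdigit() \
               and line[0] not in 'cC*!':
                return True                       # statement text in cols 1-5
        return False

    assert looks_free_format("module m\nend module m\n")
    assert not looks_free_format("      program p\n      end\n")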
- if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline=ll - ll=l - cont=(r is not None) - else: - l=' '+l[5:] # clean up line beginning from possible digits. - if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline =ll - ll=l - - elif sourcecodeform=='free': - if not cont and ext=='.pyf' and mline_mark.match(l): - l = l + '\n' - while True: - lc = fin.readline() - if not lc: - errmess('Unexpected end of file when reading multiline\n') - break - l = l + lc - if mline_mark.match(lc): - break - l = l.rstrip() - r=cont1.match(l) - if r: l=r.group('line') # Continuation follows .. - if cont: - ll=ll+cont2.match(l).group('line') - finalline='' - origfinalline='' - else: - if localdolowercase: finalline=ll.lower() - else: finalline=ll - origfinalline =ll - ll=l - cont=(r is not None) - else: - raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%repr(sourcecodeform)) - filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1) - m=includeline.match(origfinalline) - if m: - fn=m.group('name') - if os.path.isfile(fn): - readfortrancode(fn, dowithline=dowithline, istop=0) - else: - include_dirs = [os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir, fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1, dowithline=dowithline, istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs))) - else: - dowithline(finalline) - l1=ll - if localdolowercase: - finalline=ll.lower() - else: finalline=ll - origfinalline = ll - filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1) - m=includeline.match(origfinalline) - if m: - fn=m.group('name') - if os.path.isfile(fn): - readfortrancode(fn, dowithline=dowithline, istop=0) - else: - include_dirs = [os.path.dirname(currentfilename)] + include_paths - foundfile = 0 - for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir, fn) - if os.path.isfile(fn1): - foundfile = 1 - readfortrancode(fn1, dowithline=dowithline, istop=0) - break - if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs)))
-        else:
-            dowithline(finalline)
-    filepositiontext=''
-    fin.close()
-    if istop: dowithline('', 1)
-    else:
-        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
-           beginpattern, quiet, verbose, dolowercase=saveglobals
-
-########### Crack line
-beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \
-                r'\s*(?P<this>(\b(%s)\b))'+ \
-                r'\s*(?P<after>%s)\s*\Z'
-##
-fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
-typespattern=re.compile(beforethisafter%('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
-typespattern4implicit=re.compile(beforethisafter%('', fortrantypes+'|static|automatic|undefined', fortrantypes+'|static|automatic|undefined', '.*'), re.I)
-#
-functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
-subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
-#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
-#
-groupbegins77=r'program|block\s*data'
-beginpattern77=re.compile(beforethisafter%('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
-groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
-beginpattern90=re.compile(beforethisafter%('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
-groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
-endpattern=re.compile(beforethisafter%('', groupends, groupends, '[\w\s]*'), re.I), 'end'
-#endifs='end\s*(if|do|where|select|while|forall)'
-endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
-endifpattern=re.compile(beforethisafter%('[\w]*?', endifs, endifs, '[\w\s]*'), re.I), 'endif'
-#
-implicitpattern=re.compile(beforethisafter%('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
-dimensionpattern=re.compile(beforethisafter%('', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
-externalpattern=re.compile(beforethisafter%('', 'external', 'external', '.*'), re.I), 'external'
-optionalpattern=re.compile(beforethisafter%('', 'optional', 'optional', '.*'), re.I), 'optional'
-requiredpattern=re.compile(beforethisafter%('', 'required', 'required', '.*'), re.I), 'required'
-publicpattern=re.compile(beforethisafter%('', 'public', 'public', '.*'), re.I), 'public'
-privatepattern=re.compile(beforethisafter%('', 'private', 'private', '.*'), re.I), 'private'
-intrisicpattern=re.compile(beforethisafter%('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic'
-intentpattern=re.compile(beforethisafter%('', 'intent|depend|note|check', 'intent|depend|note|check', '\s*\(.*?\).*'), re.I), 'intent'
-parameterpattern=re.compile(beforethisafter%('', 'parameter', 'parameter', '\s*\(.*'), re.I), 'parameter'
-datapattern=re.compile(beforethisafter%('', 'data', 'data', '.*'), re.I), 'data'
-callpattern=re.compile(beforethisafter%('', 'call', 'call', '.*'), re.I), 'call'
-entrypattern=re.compile(beforethisafter%('', 'entry', 'entry', '.*'), re.I), 'entry'
-callfunpattern=re.compile(beforethisafter%('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
-commonpattern=re.compile(beforethisafter%('', 'common', 'common', '.*'), re.I), 'common'
-usepattern=re.compile(beforethisafter%('', 'use', 'use', '.*'), re.I), 'use'
-containspattern=re.compile(beforethisafter%('', 'contains', 'contains', ''), re.I), 'contains'
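Every statement pattern above is stamped out of the `beforethisafter` template, whose three named groups split a line into the text before the keyword, the keyword itself, and the remainder; `crackline` further below then dispatches on whichever pattern matched. A self-contained demonstration with the 'dimension' keyword (the template string is the one restored above):

    import re

    beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' \
                      r'\s*(?P<this>(\b(%s)\b))' \
                      r'\s*(?P<after>%s)\s*\Z'
    # Same recipe as dimensionpattern above, but for a single keyword:
    dimensionpat = re.compile(beforethisafter % ('', 'dimension', 'dimension', '.*'), re.I)

    m = dimensionpat.match('dimension a(10), b(n)')
    assert m.group('this').lower() == 'dimension'
    assert m.group('after') == 'a(10), b(n)'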
-formatpattern=re.compile(beforethisafter%('', 'format', 'format', '.*'), re.I), 'format'
-## Non-fortran and f2py-specific statements
-f2pyenhancementspattern=re.compile(beforethisafter%('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I|re.S), 'f2pyenhancements'
-multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
-##
-
-def _simplifyargs(argsline):
-    a = []
-    for n in markoutercomma(argsline).split('@,@'):
-        for r in '(),':
-            n = n.replace(r, '_')
-        a.append(n)
-    return ','.join(a)
-
-crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
-def crackline(line,reset=0):
-    """
-    reset=-1  --- initialize
-    reset=0   --- crack the line
-    reset=1   --- final check if mismatch of blocks occured
-
-    Cracked data is saved in grouplist[0].
-    """
-    global beginpattern, groupcounter, groupname, groupcache, grouplist, gotnextfile,\
-        filepositiontext, currentfilename, neededmodule, expectbegin, skipblocksuntil,\
-        skipemptyends, previous_context
-    if ';' in line and not (f2pyenhancementspattern[0].match(line) or
-                            multilinepattern[0].match(line)):
-        for l in line.split(';'):
-            assert reset==0, repr(reset) # XXX: non-zero reset values need testing
-            crackline(l, reset)
-        return
-    if reset<0:
-        groupcounter=0
-        groupname={groupcounter:''}
-        groupcache={groupcounter:{}}
-        grouplist={groupcounter:[]}
-        groupcache[groupcounter]['body']=[]
-        groupcache[groupcounter]['vars']={}
-        groupcache[groupcounter]['block']=''
-        groupcache[groupcounter]['name']=''
-        neededmodule=-1
-        skipblocksuntil=-1
-        return
-    if reset>0:
-        fl=0
-        if f77modulename and neededmodule==groupcounter: fl=2
-        while groupcounter>fl:
-            outmess('crackline: groupcounter=%s groupname=%s\n'%(repr(groupcounter), repr(groupname)))
-            outmess('crackline: Mismatch of blocks encountered.
Trying to fix it by assuming "end" statement.\n') - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 - if f77modulename and neededmodule==groupcounter: - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end interface - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end module - neededmodule=-1 - return - if line=='': return - flag=0 - for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, - requiredpattern, - parameterpattern, datapattern, publicpattern, privatepattern, - intrisicpattern, - endifpattern, endpattern, - formatpattern, - beginpattern, functionpattern, subroutinepattern, - implicitpattern, typespattern, commonpattern, - callpattern, usepattern, containspattern, - entrypattern, - f2pyenhancementspattern, - multilinepattern - ]: - m = pat[0].match(line) - if m: - break - flag=flag+1 - if not m: - re_1 = crackline_re_1 - if 0<=skipblocksuntil<=groupcounter:return - if 'externals' in groupcache[groupcounter]: - for name in groupcache[groupcounter]['externals']: - if name in invbadnames: - name=invbadnames[name] - if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: - continue - m1=re.match(r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z'%name, markouterparen(line), re.I) - if m1: - m2 = re_1.match(m1.group('before')) - a = _simplifyargs(m1.group('args')) - if m2: - line='callfun %s(%s) result (%s)'%(name, a, m2.group('result')) - else: line='callfun %s(%s)'%(name, a) - m = callfunpattern[0].match(line) - if not m: - outmess('crackline: could not resolve function call for line=%s.\n'%repr(line)) - return - analyzeline(m, 'callfun', line) - return - if verbose>1 or (verbose==1 and currentfilename.lower().endswith('.pyf')): - previous_context = None - outmess('crackline:%d: No pattern for line\n'%(groupcounter)) - return - elif pat[1]=='end': - if 0<=skipblocksuntil(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) -nameargspattern=re.compile(r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P.*)\s*@\)@))*\s*\Z', re.I) -callnameargspattern=re.compile(r'\s*(?P\b[\w$]+\b)\s*@\(@\s*(?P.*)\s*@\)@\s*\Z', re.I) -real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') -real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') - -_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) -def _is_intent_callback(vdecl): - for a in vdecl.get('attrspec', []): - if _intentcallbackpattern.match(a): - return 1 - return 0 - -def _resolvenameargspattern(line): - line = markouterparen(line) - m1=nameargspattern.match(line) - if m1: - return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') - m1=callnameargspattern.match(line) - if m1: - return m1.group('name'), m1.group('args'), None, None - return None, [], None, None - -def analyzeline(m, case, line): - global groupcounter, groupname, groupcache, grouplist, filepositiontext,\ - currentfilename, f77modulename, neededinterface, neededmodule, expectbegin,\ - gotnextfile, previous_context - block=m.group('this') - if 
case != 'multiline': - previous_context = None - if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ - and not skipemptyends and groupcounter<1: - newname=os.path.basename(currentfilename).split('.')[0] - outmess('analyzeline: no group yet. Creating program group with name "%s".\n'%newname) - gotnextfile=0 - groupcounter=groupcounter+1 - groupname[groupcounter]='program' - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['vars']={} - groupcache[groupcounter]['block']='program' - groupcache[groupcounter]['name']=newname - groupcache[groupcounter]['from']='fromsky' - expectbegin=0 - if case in ['begin', 'call', 'callfun']: - # Crack line => block,name,args,result - block = block.lower() - if re.match(r'block\s*data', block, re.I): block='block data' - if re.match(r'python\s*module', block, re.I): block='python module' - name, args, result, bind = _resolvenameargspattern(m.group('after')) - if name is None: - if block=='block data': - name = '_BLOCK_DATA_' - else: - name = '' - if block not in ['interface', 'block data']: - outmess('analyzeline: No name/args pattern found for line.\n') - - previous_context = (block, name, groupcounter) - if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) - else: args=[] - if '' in args: - while '' in args: - args.remove('') - outmess('analyzeline: argument list is malformed (missing argument).\n') - - # end of crack line => block,name,args,result - needmodule=0 - needinterface=0 - - if case in ['call', 'callfun']: - needinterface=1 - if 'args' not in groupcache[groupcounter]: - return - if name not in groupcache[groupcounter]['args']: - return - for it in grouplist[groupcounter]: - if it['name']==name: - return - if name in groupcache[groupcounter]['interfaced']: - return - block={'call':'subroutine','callfun':'function'}[case] - if f77modulename and neededmodule==-1 and groupcounter<=1: - neededmodule=groupcounter+2 - needmodule=1 - if block != 'interface': - needinterface=1 - # Create new block(s) - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - if needmodule: - if verbose>1: - outmess('analyzeline: Creating module block %s\n'%repr(f77modulename), 0) - groupname[groupcounter]='module' - groupcache[groupcounter]['block']='python module' - groupcache[groupcounter]['name']=f77modulename - groupcache[groupcounter]['from']='' - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - groupcache[groupcounter]['vars']={} - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - if needinterface: - if verbose>1: - outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter), 0) - groupname[groupcounter]='interface' - groupcache[groupcounter]['block']='interface' - groupcache[groupcounter]['name']='unknown_interface' - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name']) - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - groupcache[groupcounter]['vars']={} - groupcounter=groupcounter+1 - groupcache[groupcounter]={} - grouplist[groupcounter]=[] - groupname[groupcounter]=block - groupcache[groupcounter]['block']=block - if not name: name='unknown_'+block - groupcache[groupcounter]['prefix']=m.group('before') - 
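Throughout `analyzeline`, lists are split with `markoutercomma(...).split('@,@')`. The helper itself is not visible in this hunk (part of the surrounding context was lost in extraction), but its contract is clear from the call sites: mark only the commas at parenthesis depth zero, so array bounds and nested argument lists survive the split. A stand-in with that contract (an assumption about the real helper, which may handle more delimiters):

    def markoutercomma(line, comma=','):
        """Tag depth-zero commas as '@,@' so 'a,b(1,2),c' splits into
        ['a', 'b(1,2)', 'c']. Illustrative stand-in, not f2py's code."""
        out, depth = '', 0
        for c in line:
            if c == comma and depth == 0:
                out += '@' + comma + '@'
                continue
            if c == '(':
                depth += 1
            elif c == ')':
                depth -= 1
            out += c
        assert depth == 0, 'unbalanced parenthesis in %r' % (line,)
        return out

    assert markoutercomma('a,b(1,2),c').split('@,@') == ['a', 'b(1,2)', 'c']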
groupcache[groupcounter]['name']=rmbadname1(name) - groupcache[groupcounter]['result']=result - if groupcounter==1: - groupcache[groupcounter]['from']=currentfilename - else: - if f77modulename and groupcounter==3: - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], currentfilename) - else: - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name']) - for k in list(groupcache[groupcounter].keys()): - if not groupcache[groupcounter][k]: - del groupcache[groupcounter][k] - - groupcache[groupcounter]['args']=args - groupcache[groupcounter]['body']=[] - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['interfaced']=[] - groupcache[groupcounter]['vars']={} - groupcache[groupcounter]['entry']={} - # end of creation - if block=='type': - groupcache[groupcounter]['varnames'] = [] - - if case in ['call', 'callfun']: # set parents variables - if name not in groupcache[groupcounter-2]['externals']: - groupcache[groupcounter-2]['externals'].append(name) - groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars']) - #try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']] - #except: pass - try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] - except: pass - if block in ['function', 'subroutine']: # set global attributes - try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter-2]['vars']['']) - except: pass - if case=='callfun': # return type - if result and result in groupcache[groupcounter]['vars']: - if not name==result: - groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) - #if groupcounter>1: # name is interfaced - try: groupcache[groupcounter-2]['interfaced'].append(name) - except: pass - if block=='function': - t=typespattern[0].match(m.group('before')+' '+name) - if t: - typespec, selector, attr, edecl=cracktypespec0(t.group('this'), t.group('after')) - updatevars(typespec, selector, attr, edecl) - - if case in ['call', 'callfun']: - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end routine - grouplist[groupcounter-1].append(groupcache[groupcounter]) - grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] - del grouplist[groupcounter] - groupcounter=groupcounter-1 # end interface - - elif case=='entry': - name, args, result, bind=_resolvenameargspattern(m.group('after')) - if name is not None: - if args: - args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) - else: args=[] - assert result is None, repr(result) - groupcache[groupcounter]['entry'][name] = args - previous_context = ('entry', name, groupcounter) - elif case=='type': - typespec, selector, attr, edecl=cracktypespec0(block, m.group('after')) - last_name = updatevars(typespec, selector, attr, edecl) - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']: - edecl=groupcache[groupcounter]['vars'] - ll=m.group('after').strip() - i=ll.find('::') - if i<0 and case=='intent': - i=markouterparen(ll).find('@)@')-2 - ll=ll[:i+1]+'::'+ll[i+1:] - i=ll.find('::') - if ll[i:]=='::' and 'args' in 
groupcache[groupcounter]: - outmess('All arguments will have attribute %s%s\n'%(m.group('this'), ll[:i])) - ll = ll + ','.join(groupcache[groupcounter]['args']) - if i<0:i=0;pl='' - else: pl=ll[:i].strip();ll=ll[i+2:] - ch = markoutercomma(pl).split('@,@') - if len(ch)>1: - pl = ch[0] - outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:]))) - last_name = None - - for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: - m1=namepattern.match(e) - if not m1: - if case in ['public', 'private']: k='' - else: - print(m.groupdict()) - outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case, repr(e))) - continue - else: - k=rmbadname1(m1.group('name')) - if k not in edecl: - edecl[k]={} - if case=='dimension': - ap=case+m1.group('after') - if case=='intent': - ap=m.group('this')+pl - if _intentcallbackpattern.match(ap): - if k not in groupcache[groupcounter]['args']: - if groupcounter>1: - if '__user__' not in groupcache[groupcounter-2]['name']: - outmess('analyzeline: missing __user__ module (could be nothing)\n') - if k!=groupcache[groupcounter]['name']: # fixes ticket 1693 - outmess('analyzeline: appending intent(callback) %s'\ - ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) - groupcache[groupcounter]['args'].append(k) - else: - errmess('analyzeline: intent(callback) %s is ignored' % (k)) - else: - errmess('analyzeline: intent(callback) %s is already'\ - ' in argument list' % (k)) - if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']: - ap=case - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append(ap) - else: - edecl[k]['attrspec']=[ap] - if case=='external': - if groupcache[groupcounter]['block']=='program': - outmess('analyzeline: ignoring program arguments\n') - continue - if k not in groupcache[groupcounter]['args']: - #outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`)) - continue - if 'externals' not in groupcache[groupcounter]: - groupcache[groupcounter]['externals']=[] - groupcache[groupcounter]['externals'].append(k) - last_name = k - groupcache[groupcounter]['vars']=edecl - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case=='parameter': - edecl=groupcache[groupcounter]['vars'] - ll=m.group('after').strip()[1:-1] - last_name = None - for e in markoutercomma(ll).split('@,@'): - try: - k, initexpr=[x.strip() for x in e.split('=')] - except: - outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e, ll));continue - params = get_parameters(edecl) - k=rmbadname1(k) - if k not in edecl: - edecl[k]={} - if '=' in edecl[k] and (not edecl[k]['=']==initexpr): - outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k, edecl[k]['='], initexpr)) - t = determineexprtype(initexpr, params) - if t: - if t.get('typespec')=='real': - tt = list(initexpr) - for m in real16pattern.finditer(initexpr): - tt[m.start():m.end()] = list(\ - initexpr[m.start():m.end()].lower().replace('d', 'e')) - initexpr = ''.join(tt) - elif t.get('typespec')=='complex': - initexpr = initexpr[1:].lower().replace('d', 'e').\ - replace(',', '+1j*(') - try: - v = eval(initexpr, {}, params) - except (SyntaxError, NameError, TypeError) as msg: - errmess('analyzeline: Failed to evaluate %r. 
Ignoring: %s\n'\ - % (initexpr, msg)) - continue - edecl[k]['='] = repr(v) - if 'attrspec' in edecl[k]: - edecl[k]['attrspec'].append('parameter') - else: edecl[k]['attrspec']=['parameter'] - last_name = k - groupcache[groupcounter]['vars']=edecl - if last_name is not None: - previous_context = ('variable', last_name, groupcounter) - elif case=='implicit': - if m.group('after').strip().lower()=='none': - groupcache[groupcounter]['implicit']=None - elif m.group('after'): - if 'implicit' in groupcache[groupcounter]: - impl=groupcache[groupcounter]['implicit'] - else: impl={} - if impl is None: - outmess('analyzeline: Overwriting earlier "implicit none" statement.\n') - impl={} - for e in markoutercomma(m.group('after')).split('@,@'): - decl={} - m1=re.match(r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) - if not m1: - outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue - m2=typespattern4implicit.match(m1.group('this')) - if not m2: - outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue - typespec, selector, attr, edecl=cracktypespec0(m2.group('this'), m2.group('after')) - kindselect, charselect, typename=cracktypespec(typespec, selector) - decl['typespec']=typespec - decl['kindselector']=kindselect - decl['charselector']=charselect - decl['typename']=typename - for k in list(decl.keys()): - if not decl[k]: del decl[k] - for r in markoutercomma(m1.group('after')).split('@,@'): - if '-' in r: - try: begc, endc=[x.strip() for x in r.split('-')] - except: - outmess('analyzeline: expected "-" instead of "%s" in range list of implicit statement\n'%r);continue - else: begc=endc=r.strip() - if not len(begc)==len(endc)==1: - outmess('analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n'%r);continue - for o in range(ord(begc), ord(endc)+1): - impl[chr(o)]=decl - groupcache[groupcounter]['implicit']=impl - elif case=='data': - ll=[] - dl='';il='';f=0;fc=1;inp=0 - for c in m.group('after'): - if not inp: - if c=="'": fc=not fc - if c=='/' and fc: f=f+1;continue - if c=='(': inp = inp + 1 - elif c==')': inp = inp - 1 - if f==0: dl=dl+c - elif f==1: il=il+c - elif f==2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl, il]) - dl=c;il='';f=0 - if f==2: - dl = dl.strip() - if dl.startswith(','): - dl = dl[1:].strip() - ll.append([dl, il]) - vars={} - if 'vars' in groupcache[groupcounter]: - vars=groupcache[groupcounter]['vars'] - last_name = None - for l in ll: - l=[x.strip() for x in l] - if l[0][0]==',':l[0]=l[0][1:] - if l[0][0]=='(': - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0]) - continue - #if '(' in l[0]: - # #outmess('analyzeline: ignoring this data statement.\n') - # continue - i=0;j=0;llen=len(l[1]) - for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): - if v[0]=='(': - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%v) - # XXX: subsequent init expressions may get wrong values. - # Ignoring since data statements are irrelevant for wrapping. 
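The 'parameter' branch above cannot `eval` Fortran literals directly: `1.5d0` is not valid Python, so `real16pattern` rewrites the `d`/`D` exponent marker to `e` before the expression is evaluated against the parameters collected so far. The same trick in isolation (simplified sketch: complex initializers and optional exponent signs are ignored):

    import re

    # Fortran double-precision exponent marker -> Python float syntax.
    _dexp = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]([-+]?\d+)')

    def eval_fortran_expr(expr, params):
        """Evaluate a Fortran parameter expression with known
        parameters as the namespace (sketch of the branch above)."""
        expr = _dexp.sub(r'\1e\2', expr)
        return eval(expr, {}, dict(params))

    assert eval_fortran_expr('1.5d0', {}) == 1.5
    assert eval_fortran_expr('2*n+1', {'n': 3}) == 7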
- continue - fc=0 - while (i=3: - bn = bn.strip() - if not bn: bn='_BLNK_' - cl.append([bn, ol]) - f=f-2;bn='';ol='' - if f%2: bn=bn+c - else: ol=ol+c - bn = bn.strip() - if not bn: bn='_BLNK_' - cl.append([bn, ol]) - commonkey={} - if 'common' in groupcache[groupcounter]: - commonkey=groupcache[groupcounter]['common'] - for c in cl: - if c[0] in commonkey: - outmess('analyzeline: previously defined common block encountered. Skipping.\n') - continue - commonkey[c[0]]=[] - for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: - if i: commonkey[c[0]].append(i) - groupcache[groupcounter]['common']=commonkey - previous_context = ('common', bn, groupcounter) - elif case=='use': - m1=re.match(r'\A\s*(?P\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) - if m1: - mm=m1.groupdict() - if 'use' not in groupcache[groupcounter]: - groupcache[groupcounter]['use']={} - name=m1.group('name') - groupcache[groupcounter]['use'][name]={} - isonly=0 - if 'list' in mm and mm['list'] is not None: - if 'notonly' in mm and mm['notonly'] is None: - isonly=1 - groupcache[groupcounter]['use'][name]['only']=isonly - ll=[x.strip() for x in mm['list'].split(',')] - rl={} - for l in ll: - if '=' in l: - m2=re.match(r'\A\s*(?P\b[\w]+\b)\s*=\s*>\s*(?P\b[\w]+\b)\s*\Z', l, re.I) - if m2: rl[m2.group('local').strip()]=m2.group('use').strip() - else: - outmess('analyzeline: Not local=>use pattern found in %s\n'%repr(l)) - else: - rl[l]=l - groupcache[groupcounter]['use'][name]['map']=rl - else: - pass - else: - print(m.groupdict()) - outmess('analyzeline: Could not crack the use statement.\n') - elif case in ['f2pyenhancements']: - if 'f2pyenhancements' not in groupcache[groupcounter]: - groupcache[groupcounter]['f2pyenhancements'] = {} - d = groupcache[groupcounter]['f2pyenhancements'] - if m.group('this')=='usercode' and 'usercode' in d: - if isinstance(d['usercode'], str): - d['usercode'] = [d['usercode']] - d['usercode'].append(m.group('after')) - else: - d[m.group('this')] = m.group('after') - elif case=='multiline': - if previous_context is None: - if verbose: - outmess('analyzeline: No context for multiline block.\n') - return - gc = groupcounter - #gc = previous_context[2] - appendmultiline(groupcache[gc], - previous_context[:2], - m.group('this')) - else: - if verbose>1: - print(m.groupdict()) - outmess('analyzeline: No code implemented for line.\n') - -def appendmultiline(group, context_name, ml): - if 'f2pymultilines' not in group: - group['f2pymultilines'] = {} - d = group['f2pymultilines'] - if context_name not in d: - d[context_name] = [] - d[context_name].append(ml) - return - -def cracktypespec0(typespec, ll): - selector=None - attr=None - if re.match(r'double\s*complex', typespec, re.I): typespec='double complex' - elif re.match(r'double\s*precision', typespec, re.I): typespec='double precision' - else: typespec=typespec.strip().lower() - m1=selectpattern.match(markouterparen(ll)) - if not m1: - outmess('cracktypespec0: no kind/char_selector pattern found for line.\n') - return - d=m1.groupdict() - for k in list(d.keys()): d[k]=unmarkouterparen(d[k]) - if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: - selector=d['this'] - ll=d['after'] - i=ll.find('::') - if i>=0: - attr=ll[:i].strip() - ll=ll[i+2:] - return typespec, selector, attr, ll -##### -namepattern=re.compile(r'\s*(?P\b[\w]+\b)\s*(?P.*)\s*\Z', re.I) -kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|[*]\s*(?P.*?))\s*\Z', re.I) 
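The `kindselector` pattern just defined, together with its `charselector` sibling below, feeds `cracktypespec`, which extracts the kind or length from selectors such as `(kind=8)`, `*8`, or `(len=20)`. A deliberately simplified version of the kind extraction (the real patterns accept far more spellings):

    import re

    _kindsel = re.compile(r'\(\s*(?:kind\s*=\s*)?(?P<kind>\w+)\s*\)'
                          r'|[*]\s*(?P<kind2>\w+)', re.I)

    def crack_kind(selector):
        """Return the kind value from '(kind=8)'-, '(8)'- or '*8'-style
        selectors (sketch of cracktypespec's kindselector path)."""
        m = _kindsel.match(selector.strip())
        if not m:
            return None
        return m.group('kind') or m.group('kind2')

    assert crack_kind('(kind=8)') == '8'
    assert crack_kind('(8)') == '8'
    assert crack_kind('*16') == '16'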
-charselector=re.compile(r'\s*(\((?P.*)\)|[*]\s*(?P.*))\s*\Z', re.I) -lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)|))\s*\Z', re.I) -lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*[*]\s*(?P.*?)|([*]\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) -def removespaces(expr): - expr=expr.strip() - if len(expr)<=1: return expr - expr2=expr[0] - for i in range(1, len(expr)-1): - if expr[i]==' ' and \ - ((expr[i+1] in "()[]{}=+-/* ") or (expr[i-1] in "()[]{}=+-/* ")): continue - expr2=expr2+expr[i] - expr2=expr2+expr[-1] - return expr2 -def markinnerspaces(line): - l='';f=0 - cc='\'' - cc1='"' - cb='' - for c in line: - if cb=='\\' and c in ['\\', '\'', '"']: - l=l+c; - cb=c - continue - if f==0 and c in ['\'', '"']: cc=c; cc1={'\'':'"','"':'\''}[c] - if c==cc:f=f+1 - elif c==cc:f=f-1 - elif c==' ' and f==1: l=l+'@_@'; continue - l=l+c;cb=c - return l -def updatevars(typespec, selector, attrspec, entitydecl): - global groupcache, groupcounter - last_name = None - kindselect, charselect, typename=cracktypespec(typespec, selector) - if attrspec: - attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')] - l = [] - c = re.compile(r'(?P[a-zA-Z]+)') - for a in attrspec: - if not a: - continue - m = c.match(a) - if m: - s = m.group('start').lower() - a = s + a[len(s):] - l.append(a) - attrspec = l - el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')] - el1=[] - for e in el: - for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: - if e1: el1.append(e1.replace('@_@', ' ')) - for e in el1: - m=namepattern.match(e) - if not m: - outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(repr(e))) - continue - ename=rmbadname1(m.group('name')) - edecl={} - if ename in groupcache[groupcounter]['vars']: - edecl=groupcache[groupcounter]['vars'][ename].copy() - not_has_typespec = 'typespec' not in edecl - if not_has_typespec: - edecl['typespec']=typespec - elif typespec and (not typespec==edecl['typespec']): - outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typespec'], typespec)) - if 'kindselector' not in edecl: - edecl['kindselector']=copy.copy(kindselect) - elif kindselect: - for k in list(kindselect.keys()): - if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]): - outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['kindselector'][k], kindselect[k])) - else: edecl['kindselector'][k]=copy.copy(kindselect[k]) - if 'charselector' not in edecl and charselect: - if not_has_typespec: - edecl['charselector']=charselect - else: - errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \ - %(ename, charselect)) - elif charselect: - for k in list(charselect.keys()): - if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]): - outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['charselector'][k], charselect[k])) - else: edecl['charselector'][k]=copy.copy(charselect[k]) - if 'typename' not in edecl: - edecl['typename']=typename - elif typename and (not edecl['typename']==typename): - outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % (ename, edecl['typename'], typename)) - if 'attrspec' not in edecl: - edecl['attrspec']=copy.copy(attrspec) - elif attrspec: - for a in attrspec: - if a not in edecl['attrspec']: - edecl['attrspec'].append(a) - else: - edecl['typespec']=copy.copy(typespec) - edecl['kindselector']=copy.copy(kindselect) - edecl['charselector']=copy.copy(charselect) - edecl['typename']=typename - edecl['attrspec']=copy.copy(attrspec) - if m.group('after'): - m1=lenarraypattern.match(markouterparen(m.group('after'))) - if m1: - d1=m1.groupdict() - for lk in ['len', 'array', 'init']: - if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2'] - for k in list(d1.keys()): - if d1[k] is not None: d1[k]=unmarkouterparen(d1[k]) - else: del d1[k] - if 'len' in d1 and 'array' in d1: - if d1['len']=='': - d1['len']=d1['array'] - del d1['array'] - else: - d1['array']=d1['array']+','+d1['len'] - del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec, e, typespec, ename, d1['array'])) - if 'array' in d1: - dm = 'dimension(%s)'%d1['array'] - if 'attrspec' not in edecl or (not edecl['attrspec']): - edecl['attrspec']=[dm] - else: - edecl['attrspec'].append(dm) - for dm1 in edecl['attrspec']: - if dm1[:9]=='dimension' and dm1!=dm: - del edecl['attrspec'][-1] - errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \ - % (ename, dm1, dm)) - break - - if 'len' in d1: - if typespec in ['complex', 'integer', 'logical', 'real']: - if ('kindselector' not in edecl) or (not edecl['kindselector']): - edecl['kindselector']={} - edecl['kindselector']['*']=d1['len'] - elif typespec == 'character': - if ('charselector' not in edecl) or (not edecl['charselector']): - edecl['charselector']={} - if 'len' in edecl['charselector']: - del edecl['charselector']['len'] - edecl['charselector']['*']=d1['len'] - if 'init' in d1: - if '=' in edecl and (not edecl['=']==d1['init']): - outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['='], d1['init'])) - else: - edecl['=']=d1['init'] - else: - outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n'%(ename+m.group('after')))
-        for k in list(edecl.keys()):
-            if not edecl[k]:
-                del edecl[k]
-        groupcache[groupcounter]['vars'][ename]=edecl
-        if 'varnames' in groupcache[groupcounter]:
-            groupcache[groupcounter]['varnames'].append(ename)
-        last_name = ename
-    return last_name
-
-def cracktypespec(typespec, selector):
-    kindselect=None
-    charselect=None
-    typename=None
-    if selector:
-        if typespec in ['complex', 'integer', 'logical', 'real']:
-            kindselect=kindselector.match(selector)
-            if not kindselect:
-                outmess('cracktypespec: no kindselector pattern found for %s\n'%(repr(selector)))
-                return
-            kindselect=kindselect.groupdict()
-            kindselect['*']=kindselect['kind2']
-            del kindselect['kind2']
-            for k in list(kindselect.keys()):
-                if not kindselect[k]: del kindselect[k]
-            for k, i in list(kindselect.items()):
-                kindselect[k] = rmbadname1(i)
-        elif typespec=='character':
-            charselect=charselector.match(selector)
-            if not charselect:
-                outmess('cracktypespec: no charselector pattern found for %s\n'%(repr(selector)))
-                return
-            charselect=charselect.groupdict()
-            charselect['*']=charselect['charlen']
-            del charselect['charlen']
-            if charselect['lenkind']:
-                lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind']))
-                lenkind=lenkind.groupdict()
-                for lk in ['len', 'kind']:
-                    if lenkind[lk+'2']:
-                        lenkind[lk]=lenkind[lk+'2']
-                    charselect[lk]=lenkind[lk]
-                    del lenkind[lk+'2']
-                del charselect['lenkind']
-            for k in list(charselect.keys()):
-                if not charselect[k]: del charselect[k]
-            for k, i in list(charselect.items()):
-                charselect[k] = rmbadname1(i)
-        elif typespec=='type':
-            typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
-            if typename: typename=typename.group('name')
-            else: outmess('cracktypespec: no typename found in %s\n'%(repr(typespec+selector)))
-        else:
-            outmess('cracktypespec: no selector used for %s\n'%(repr(selector)))
-    return kindselect, charselect, typename
-######
-def setattrspec(decl,attr,force=0):
-    if not decl:
-        decl={}
-    if not attr:
-        return decl
-    if 'attrspec' not in decl:
-        decl['attrspec']=[attr]
-        return decl
-    if force: decl['attrspec'].append(attr)
-    if attr in decl['attrspec']: return decl
-    if attr=='static' and 'automatic' not in decl['attrspec']:
-        decl['attrspec'].append(attr)
-    elif attr=='automatic' and 'static' not in decl['attrspec']:
-        decl['attrspec'].append(attr)
-    elif attr=='public' and 'private' not in decl['attrspec']:
-        decl['attrspec'].append(attr)
-    elif attr=='private' and 'public' not in decl['attrspec']:
-        decl['attrspec'].append(attr)
-    else:
-        decl['attrspec'].append(attr)
-    return decl
-
-def setkindselector(decl,sel,force=0):
-    if not decl:
-        decl={}
-    if not sel:
-        return decl
-    if 'kindselector' not in decl:
-        decl['kindselector']=sel
-        return decl
-    for k in list(sel.keys()):
-        if force or k not in decl['kindselector']:
-            decl['kindselector'][k]=sel[k]
-    return decl
-
-def setcharselector(decl,sel,force=0):
-    if not decl:
-        decl={}
-    if not sel:
-        return decl
-    if 'charselector' not in decl:
-        decl['charselector']=sel
-        return decl
-    for k in list(sel.keys()):
-        if force or k not in decl['charselector']:
-            decl['charselector'][k]=sel[k]
-    return decl
-
-def getblockname(block,unknown='unknown'):
-    if 'name' in block:
-        return block['name']
-    return unknown
-
-###### post processing
-
-def setmesstext(block):
-    global filepositiontext
-    try:
-        filepositiontext='In: %s:%s\n'%(block['from'], block['name'])
-    except:
-        pass
-
-def get_usedict(block):
-    usedict = {}
-    if 'parent_block' in block:
-        usedict = get_usedict(block['parent_block'])
-    if 'use' in block:
-        usedict.update(block['use'])
-    return usedict
-
-def get_useparameters(block, param_map=None):
-    global f90modulevars
-    if param_map is None:
-        param_map = {}
-    usedict = get_usedict(block)
-    if not usedict:
-        return param_map
-    for usename, mapping in list(usedict.items()):
-        usename = usename.lower()
-        if usename not in f90modulevars:
-            outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name')))
-            continue
-        mvars = f90modulevars[usename]
-        params = get_parameters(mvars)
-        if not params:
-            continue
-        # XXX: apply mapping
-        if mapping:
-            errmess('get_useparameters: mapping for %s not impl.' % (mapping))
-        for k, v in list(params.items()):
-            if k in param_map:
-                outmess('get_useparameters: overriding parameter %s with'\
-                        ' value from module %s' % (repr(k), repr(usename)))
-            param_map[k] = v
-
-    return param_map
-
-def postcrack2(block,tab='',param_map=None):
-    global f90modulevars
-    if not f90modulevars:
-        return block
-    if isinstance(block, list):
-        ret = []
-        for g in block:
-            g = postcrack2(g, tab=tab+'\t', param_map=param_map)
-            ret.append(g)
-        return ret
-    setmesstext(block)
-    outmess('%sBlock: %s\n'%(tab, block['name']), 0)
-
-    if param_map is None:
-        param_map = get_useparameters(block)
-
-    if param_map is not None and 'vars' in block:
-        vars = block['vars']
-        for n in list(vars.keys()):
-            var = vars[n]
-            if 'kindselector' in var:
-                kind = var['kindselector']
-                if 'kind' in kind:
-                    val = kind['kind']
-                    if val in param_map:
-                        kind['kind'] = param_map[val]
-    new_body = []
-    for b in block['body']:
-        b = postcrack2(b, tab=tab+'\t', param_map=param_map)
-        new_body.append(b)
-    block['body'] = new_body
-
-    return block
-
-def postcrack(block,args=None,tab=''):
-    """
-    TODO:
-          function return values
-          determine expression types if in argument list
-    """
-    global usermodules, onlyfunctions
-    if isinstance(block, list):
-        gret=[]
-        uret=[]
-        for g in block:
-            setmesstext(g)
-            g=postcrack(g, tab=tab+'\t')
-            if 'name' in g and '__user__' in g['name']: # sort user routines to appear first
-                uret.append(g)
-            else:
-                gret.append(g)
-        return uret+gret
-    setmesstext(block)
-    if not isinstance(block, dict) and 'block' not in block:
-        raise Exception('postcrack: Expected block dictionary instead of ' + \
-                        str(block))
-    if 'name' in block and not block['name']=='unknown_interface':
-        outmess('%sBlock: %s\n'%(tab, block['name']), 0)
-    blocktype=block['block']
-    block=analyzeargs(block)
-    block=analyzecommon(block)
-    block['vars']=analyzevars(block)
-    block['sortvars']=sortvarnames(block['vars'])
-    if 'args' in block and block['args']:
-        args=block['args']
-    block['body']=analyzebody(block, args, tab=tab)
-
-    userisdefined=[]
-##    fromuser = []
-    if 'use' in block:
-        useblock=block['use']
-        for k in list(useblock.keys()):
-            if '__user__' in k:
-                userisdefined.append(k)
-##            if 'map' in useblock[k]:
-##                for n in useblock[k]['map'].itervalues():
-##                    if n not in fromuser: fromuser.append(n)
-    else: useblock={}
-    name=''
-    if 'name' in block:
-        name=block['name']
-    if 'externals' in block and block['externals']:# and not userisdefined: # Build a __user__ module
-        interfaced=[]
-        if 'interfaced' in block:
-            interfaced=block['interfaced']
-        mvars=copy.copy(block['vars'])
-        if name:
-            mname=name+'__user__routines'
-        else:
-            mname='unknown__user__routines'
-        if mname in userisdefined:
-            i=1
-            while '%s_%i'%(mname, i) in userisdefined: i=i+1
-            mname='%s_%i'%(mname, i)
-        interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'}
-        for e in block['externals']:
-##            if e in fromuser:
-##                outmess('    Skipping %s that is defined explicitly in another use statement\n'%(`e`))
-##                continue
-            if e in interfaced:
-                edef=[]
-                j=-1
-                for b in block['body']:
-                    j=j+1
-                    if b['block']=='interface':
-                        i=-1
-                        for bb in b['body']:
-                            i=i+1
-                            if 'name' in bb and bb['name']==e:
-                                edef=copy.copy(bb)
-                                del b['body'][i]
-                                break
-                        if edef:
-                            if not b['body']: del block['body'][j]
-                            del interfaced[interfaced.index(e)]
-                            break
-                interface['body'].append(edef)
-            else:
-                if e in mvars and not isexternal(mvars[e]):
-                    interface['vars'][e]=mvars[e]
-        if interface['vars'] or interface['body']:
-            block['interfaced']=interfaced
-            mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']}
-            useblock[mname]={}
-            usermodules.append(mblock)
-    if useblock:
-        block['use']=useblock
-    return block
-
-def sortvarnames(vars):
-    indep = []
-    dep = []
-    for v in list(vars.keys()):
-        if 'depend' in vars[v] and vars[v]['depend']:
-            dep.append(v)
-            #print '%s depends on %s'%(v,vars[v]['depend'])
-        else: indep.append(v)
-    n = len(dep)
-    i = 0
-    while dep: #XXX: How to catch dependence cycles correctly?
-        v = dep[0]
-        fl = 0
-        for w in dep[1:]:
-            if w in vars[v]['depend']:
-                fl = 1
-                break
-        if fl:
-            dep = dep[1:]+[v]
-            i = i + 1
-            if i>n:
-                errmess('sortvarnames: failed to compute dependencies because'
-                        ' of cyclic dependencies between '
-                        +', '.join(dep)+'\n')
-                indep = indep + dep
-                break
-        else:
-            indep.append(v)
-            dep = dep[1:]
-            n = len(dep)
-            i = 0
-    #print indep
-    return indep
-
-def analyzecommon(block):
-    if not hascommon(block): return block
-    commonvars=[]
-    for k in list(block['common'].keys()):
-        comvars=[]
-        for e in block['common'][k]:
-            m=re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
-            if m:
-                dims=[]
-                if m.group('dims'):
-                    dims=[x.strip() for x in markoutercomma(m.group('dims')).split('@,@')]
-                n=m.group('name').strip()
-                if n in block['vars']:
-                    if 'attrspec' in block['vars'][n]:
-                        block['vars'][n]['attrspec'].append('dimension(%s)'%(','.join(dims)))
-                    else:
-                        block['vars'][n]['attrspec']=['dimension(%s)'%(','.join(dims))]
-                else:
-                    if dims:
-                        block['vars'][n]={'attrspec':['dimension(%s)'%(','.join(dims))]}
-                    else: block['vars'][n]={}
-                if n not in commonvars: commonvars.append(n)
-            else:
-                n=e
-                errmess('analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n'%(e, k))
-            comvars.append(n)
-        block['common'][k]=comvars
-    if 'commonvars' not in block:
-        block['commonvars']=commonvars
-    else:
-        block['commonvars']=block['commonvars']+commonvars
-    return block
-
-def analyzebody(block,args,tab=''):
-    global usermodules, skipfuncs, onlyfuncs, f90modulevars
-    setmesstext(block)
-    body=[]
-    for b in block['body']:
-        b['parent_block'] = block
-        if b['block'] in ['function', 'subroutine']:
-            if args is not None and b['name'] not in args:
-                continue
-            else:
-                as_=b['args']
-            if b['name'] in skipfuncs:
-                continue
-            if onlyfuncs and b['name'] not in onlyfuncs:
-                continue
-            b['saved_interface'] = crack2fortrangen(b, '\n'+' '*6, as_interface=True)
-
-        else: as_=args
-        b=postcrack(b, as_, tab=tab+'\t')
-        if b['block']=='interface' and not b['body']:
-            if 'f2pyenhancements' not in b:
-                continue
-        if b['block'].replace(' ', '')=='pythonmodule':
-            usermodules.append(b)
-        else:
-            if b['block']=='module':
-                f90modulevars[b['name']] = b['vars']
-            body.append(b)
-    return body
-
-def buildimplicitrules(block):
-    setmesstext(block)
-    implicitrules=defaultimplicitrules
-    attrrules={}
-    if 'implicit' in block:
-        if block['implicit'] is None:
-            implicitrules=None
-            if verbose>1:
-                outmess('buildimplicitrules: no implicit rules for routine %s.\n'%repr(block['name']))
-        else:
-            for k in list(block['implicit'].keys()):
-                if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
-                    implicitrules[k]=block['implicit'][k]
-                else:
-                    attrrules[k]=block['implicit'][k]['typespec']
-    return implicitrules, attrrules
-
-def myeval(e,g=None,l=None):
-    r = eval(e, g, l)
-    if type(r) in [type(0), type(0.0)]:
-        return r
-    raise ValueError('r=%r' % (r))
-
-getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
-def getlincoef(e, xset): # e = a*x+b ; x in xset
-    try:
-        c = int(myeval(e, {}, {}))
-        return 0, c, None
-    except: pass
-    if getlincoef_re_1.match(e):
-        return 1, 0, e
-    len_e = len(e)
-    for x in xset:
-        if len(x)>len_e: continue
-        if re.search(r'\w\s*\([^)]*\b'+x+r'\b', e):
-            # skip function calls having x as an argument, e.g max(1, x)
-            continue
-        re_1 = re.compile(r'(?P<before>.*?)\b'+x+r'\b(?P<after>.*)', re.I)
-        m = re_1.match(e)
-        if m:
-            try:
-                m1 = re_1.match(e)
-                while m1:
-                    ee = '%s(%s)%s'%(m1.group('before'), 0, m1.group('after'))
-                    m1 = re_1.match(ee)
-                b = myeval(ee, {}, {})
-                m1 = re_1.match(e)
-                while m1:
-                    ee = '%s(%s)%s'%(m1.group('before'), 1, m1.group('after'))
-                    m1 = re_1.match(ee)
-                a = myeval(ee, {}, {}) - b
-                m1 = re_1.match(e)
-                while m1:
-                    ee = '%s(%s)%s'%(m1.group('before'), 0.5, m1.group('after'))
-                    m1 = re_1.match(ee)
-                c = myeval(ee, {}, {})
-                # computing another point to be sure that expression is linear
-                m1 = re_1.match(e)
-                while m1:
-                    ee = '%s(%s)%s'%(m1.group('before'), 1.5, m1.group('after'))
-                    m1 = re_1.match(ee)
-                c2 = myeval(ee, {}, {})
-                if (a*0.5+b==c and a*1.5+b==c2):
-                    return a, b, x
-            except: pass
-            break
-    return None, None, None
-
-_varname_match = re.compile(r'\A[a-z]\w*\Z').match
-def getarrlen(dl,args,star='*'):
-    edl = []
-    try: edl.append(myeval(dl[0], {}, {}))
-    except: edl.append(dl[0])
-    try: edl.append(myeval(dl[1], {}, {}))
-    except: edl.append(dl[1])
-    if isinstance(edl[0], int):
-        p1 = 1-edl[0]
-        if p1==0: d = str(dl[1])
-        elif p1<0: d = '%s-%s'%(dl[1], -p1)
-        else: d = '%s+%s'%(dl[1], p1)
-    elif isinstance(edl[1], int):
-        p1 = 1+edl[1]
-        if p1==0: d='-(%s)' % (dl[0])
-        else: d='%s-(%s)' % (p1, dl[0])
-    else: d = '%s-(%s)+1'%(dl[1], dl[0])
-    try: return repr(myeval(d, {}, {})), None, None
-    except: pass
-    d1, d2=getlincoef(dl[0], args), getlincoef(dl[1], args)
-    if None not in [d1[0], d2[0]]:
-        if (d1[0], d2[0])==(0, 0):
-            return repr(d2[1]-d1[1]+1), None, None
-        b = d2[1] - d1[1] + 1
-        d1 = (d1[0], 0, d1[2])
-        d2 = (d2[0], b, d2[2])
-        if d1[0]==0 and d2[2] in args:
-            if b<0: return '%s * %s - %s'%(d2[0], d2[2], -b), d2[2], '+%s)/(%s)'%(-b, d2[0])
-            elif b: return '%s * %s + %s'%(d2[0], d2[2], b), d2[2], '-%s)/(%s)'%(b, d2[0])
-            else: return '%s * %s'%(d2[0], d2[2]), d2[2], ')/(%s)'%(d2[0])
-        if d2[0]==0 and d1[2] in args:
-
-            if b<0: return '%s * %s - %s'%(-d1[0], d1[2], -b), d1[2], '+%s)/(%s)'%(-b, -d1[0])
-            elif b: return '%s * %s + %s'%(-d1[0], d1[2], b), d1[2], '-%s)/(%s)'%(b, -d1[0])
-            else: return '%s * %s'%(-d1[0], d1[2]), d1[2], ')/(%s)'%(-d1[0])
-        if d1[2]==d2[2] and d1[2] in args:
-            a = d2[0] - d1[0]
-            if not a: return repr(b), None, None
-            if b<0: return '%s * %s - %s'%(a, d1[2], -b), d2[2], '+%s)/(%s)'%(-b, a)
-            elif b: return '%s * %s + %s'%(a, d1[2], b), d2[2], '-%s)/(%s)'%(b, a)
-            else: return '%s * %s'%(a, d1[2]), d2[2], ')/(%s)'%(a)
-        if d1[0]==d2[0]==1:
-            c = str(d1[2])
-            if c not in args:
-                if _varname_match(c):
-                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
-                c = '(%s)'%c
-            if b==0: d='%s-%s' % (d2[2], c)
-            elif b<0: d='%s-%s-%s' % (d2[2], c, -b)
-            else: d='%s-%s+%s' % (d2[2], c, b)
-        elif d1[0]==0:
-            c2 = str(d2[2])
-            if c2 not in args:
-                if _varname_match(c2):
-                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
-                c2 = '(%s)'%c2
-            if d2[0]==1: pass
-            elif d2[0]==-1: c2='-%s' %c2
-            else: c2='%s*%s'%(d2[0], c2)
-
-            if b==0: d=c2
-            elif b<0: d='%s-%s' % (c2, -b)
-            else: d='%s+%s' % (c2, b)
-        elif d2[0]==0:
-            c1 = str(d1[2])
-            if c1 not in args:
-                if _varname_match(c1):
-                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
-                c1 = '(%s)'%c1
-            if d1[0]==1: c1='-%s'%c1
-            elif d1[0]==-1: c1='+%s'%c1
-            elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1)
-            else: c1 = '-%s*%s' % (d1[0], c1)
-
-            if b==0: d=c1
-            elif b<0: d='%s-%s' % (c1, -b)
-            else: d='%s+%s' % (c1, b)
-        else:
-            c1 = str(d1[2])
-            if c1 not in args:
-                if _varname_match(c1):
-                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
-                c1 = '(%s)'%c1
-            if d1[0]==1: c1='-%s'%c1
-            elif d1[0]==-1: c1='+%s'%c1
-            elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1)
-            else: c1 = '-%s*%s' % (d1[0], c1)
-
-            c2 = str(d2[2])
-            if c2 not in args:
-                if _varname_match(c2):
-                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
-                c2 = '(%s)'%c2
-            if d2[0]==1: pass
-            elif d2[0]==-1: c2='-%s' %c2
-            else: c2='%s*%s'%(d2[0], c2)
-
-            if b==0: d='%s%s' % (c2, c1)
-            elif b<0: d='%s%s-%s' % (c2, c1, -b)
-            else: d='%s%s+%s' % (c2, c1, b)
-        return d, None, None
-
-word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
-
-def _get_depend_dict(name, vars, deps):
-    if name in vars:
-        words = vars[name].get('depend', [])
-
-        if '=' in vars[name] and not isstring(vars[name]):
-            for word in word_pattern.findall(vars[name]['=']):
-                if word not in words and word in vars:
-                    words.append(word)
-        for word in words[:]:
-            for w in deps.get(word, []) \
-                    or _get_depend_dict(word, vars, deps):
-                if w not in words:
-                    words.append(w)
-    else:
-        outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
-        words = []
-    deps[name] = words
-    return words
-
-def _calc_depend_dict(vars):
-    names = list(vars.keys())
-    depend_dict = {}
-    for n in names:
-        _get_depend_dict(n, vars, depend_dict)
-    return depend_dict
-
-def get_sorted_names(vars):
-    """
-    """
-    depend_dict = _calc_depend_dict(vars)
-    names = []
-    for name in list(depend_dict.keys()):
-        if not depend_dict[name]:
-            names.append(name)
-            del depend_dict[name]
-    while depend_dict:
-        for name, lst in list(depend_dict.items()):
-            new_lst = [n for n in lst if n in depend_dict]
-            if not new_lst:
-                names.append(name)
-                del depend_dict[name]
-            else:
-                depend_dict[name] = new_lst
-    return [name for name in names if name in vars]
-
-def _kind_func(string):
-    #XXX: return something sensible.
-    if string[0] in "'\"":
-        string = string[1:-1]
-    if real16pattern.match(string):
-        return 8
-    elif real8pattern.match(string):
-        return 4
-    return 'kind('+string+')'
-
-def _selected_int_kind_func(r):
-    #XXX: This should be processor dependent
-    m = 10**r
-    if m<=2**8: return 1
-    if m<=2**16: return 2
-    if m<=2**32: return 4
-    if m<=2**63: return 8
-    if m<=2**128: return 16
-    return -1
-
-def _selected_real_kind_func(p, r=0, radix=0):
-    #XXX: This should be processor dependent
-    # This is only good for 0 <= p <= 20
-    if p < 7: return 4
-    if p < 16: return 8
-    if platform.machine().lower().startswith('power'):
-        if p <= 20:
-            return 16
-    else:
-        if p < 19:
-            return 10
-        elif p <= 20:
-            return 16
-    return -1
-
-def get_parameters(vars, global_params={}):
-    params = copy.copy(global_params)
-    g_params = copy.copy(global_params)
-    for name, func in [('kind', _kind_func),
-                       ('selected_int_kind', _selected_int_kind_func),
-                       ('selected_real_kind', _selected_real_kind_func),
-                       ]:
-        if name not in g_params:
-            g_params[name] = func
-    param_names = []
-    for n in get_sorted_names(vars):
-        if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
-            param_names.append(n)
-    kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
-    selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
-    selected_kind_re = re.compile(r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
-    for n in param_names:
-        if '=' in vars[n]:
-            v = vars[n]['=']
-            if islogical(vars[n]):
-                v = v.lower()
-                for repl in [
-                        ('.false.', 'False'),
-                        ('.true.', 'True'),
-                        #TODO: test .eq., .neq., etc replacements.
-                        ]:
-                    v = v.replace(*repl)
-            v = kind_re.sub(r'kind("\1")', v)
-            v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
-            if isinteger(vars[n]) and not selected_kind_re.match(v):
-                v = v.split('_')[0]
-            if isdouble(vars[n]):
-                tt = list(v)
-                for m in real16pattern.finditer(v):
-                    tt[m.start():m.end()] = list(\
-                            v[m.start():m.end()].lower().replace('d', 'e'))
-                v = ''.join(tt)
-            if iscomplex(vars[n]):
-                if v[0]=='(' and v[-1]==')':
-                    l = markoutercomma(v[1:-1]).split('@,@')
-            try:
-                params[n] = eval(v, g_params, params)
-            except Exception as msg:
-                params[n] = v
-                #print params
-                outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
-            if isstring(vars[n]) and isinstance(params[n], int):
-                params[n] = chr(params[n])
-            nl = n.lower()
-            if nl!=n:
-                params[nl] = params[n]
-        else:
-            print(vars[n])
-            outmess('get_parameters:parameter %s does not have value?!\n'%(repr(n)))
-    return params
-
-def _eval_length(length, params):
-    if length in ['(:)', '(*)', '*']:
-        return '(*)'
-    return _eval_scalar(length, params)
-
-_is_kind_number = re.compile(r'\d+_').match
-
-def _eval_scalar(value, params):
-    if _is_kind_number(value):
-        value = value.split('_')[0]
-    try:
-        value = str(eval(value, {}, params))
-    except (NameError, SyntaxError):
-        return value
-    except Exception as msg:
-        errmess('"%s" in evaluating %r '\
-                '(available names: %s)\n' \
-                % (msg, value, list(params.keys())))
-    return value
-
-def analyzevars(block):
-    global f90modulevars
-    setmesstext(block)
-    implicitrules, attrrules=buildimplicitrules(block)
-    vars=copy.copy(block['vars'])
-    if block['block']=='function' and block['name'] not in vars:
-        vars[block['name']]={}
-    if '' in block['vars']:
-        del vars['']
-        if 'attrspec' in block['vars']['']:
-            gen=block['vars']['']['attrspec']
-            for n in list(vars.keys()):
-                for k in ['public', 'private']:
-                    if k in gen:
-                        vars[n]=setattrspec(vars[n], k)
-    svars=[]
-    args = block['args']
-
for a in args: - try: - vars[a] - svars.append(a) - except KeyError: - pass - for n in list(vars.keys()): - if n not in args: svars.append(n) - - params = get_parameters(vars, get_useparameters(block)) - - dep_matches = {} - name_match = re.compile(r'\w[\w\d_$]*').match - for v in list(vars.keys()): - m = name_match(v) - if m: - n = v[m.start():m.end()] - try: - dep_matches[n] - except KeyError: - dep_matches[n] = re.compile(r'.*\b%s\b'%(v), re.I).match - for n in svars: - if n[0] in list(attrrules.keys()): - vars[n]=setattrspec(vars[n], attrrules[n[0]]) - if 'typespec' not in vars[n]: - if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): - if implicitrules: - ln0 = n[0].lower() - for k in list(implicitrules[ln0].keys()): - if k=='typespec' and implicitrules[ln0][k]=='undefined': - continue - if k not in vars[n]: - vars[n][k]=implicitrules[ln0][k] - elif k=='attrspec': - for l in implicitrules[ln0][k]: - vars[n]=setattrspec(vars[n], l) - elif n in block['args']: - outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(repr(n), block['name'])) - - if 'charselector' in vars[n]: - if 'len' in vars[n]['charselector']: - l = vars[n]['charselector']['len'] - try: - l = str(eval(l, {}, params)) - except: - pass - vars[n]['charselector']['len'] = l - - if 'kindselector' in vars[n]: - if 'kind' in vars[n]['kindselector']: - l = vars[n]['kindselector']['kind'] - try: - l = str(eval(l, {}, params)) - except: - pass - vars[n]['kindselector']['kind'] = l - - savelindims = {} - if 'attrspec' in vars[n]: - attr=vars[n]['attrspec'] - attr.reverse() - vars[n]['attrspec']=[] - dim, intent, depend, check, note=None, None, None, None, None - for a in attr: - if a[:9]=='dimension': dim=(a[9:].strip())[1:-1] - elif a[:6]=='intent': intent=(a[6:].strip())[1:-1] - elif a[:6]=='depend': depend=(a[6:].strip())[1:-1] - elif a[:5]=='check': check=(a[5:].strip())[1:-1] - elif a[:4]=='note': note=(a[4:].strip())[1:-1] - else: vars[n]=setattrspec(vars[n], a) - if intent: - if 'intent' not in vars[n]: - vars[n]['intent']=[] - for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: - if not c in vars[n]['intent']: - vars[n]['intent'].append(c) - intent=None - if note: - note=note.replace('\\n\\n', '\n\n') - note=note.replace('\\n ', '\n') - if 'note' not in vars[n]: - vars[n]['note']=[note] - else: - vars[n]['note'].append(note) - note=None - if depend is not None: - if 'depend' not in vars[n]: - vars[n]['depend']=[] - for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): - if c not in vars[n]['depend']: - vars[n]['depend'].append(c) - depend=None - if check is not None: - if 'check' not in vars[n]: - vars[n]['check']=[] - for c in [x.strip() for x in markoutercomma(check).split('@,@')]: - if not c in vars[n]['check']: - vars[n]['check'].append(c) - check=None - if dim and 'dimension' not in vars[n]: - vars[n]['dimension']=[] - for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): - star = '*' - if d==':': - star=':' - if d in params: - d = str(params[d]) - for p in list(params.keys()): - m = re.match(r'(?P.*?)\b'+p+r'\b(?P.*)', d, re.I) - if m: - #outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`)) - d = m.group('before')+str(params[p])+m.group('after') - if d==star: - dl = [star] - else: - dl=markoutercomma(d, ':').split('@:@') - if len(dl)==2 and '*' in dl: # e.g. 
dimension(5:*) - dl = ['*'] - d = '*' - if len(dl)==1 and not dl[0]==star: dl = ['1', dl[0]] - if len(dl)==2: - d, v, di = getarrlen(dl, list(block['vars'].keys())) - if d[:4] == '1 * ': d = d[4:] - if di and di[-4:] == '/(1)': di = di[:-4] - if v: savelindims[d] = v, di - vars[n]['dimension'].append(d) - if 'dimension' in vars[n]: - if isintent_c(vars[n]): - shape_macro = 'shape' - else: - shape_macro = 'shape'#'fshape' - if isstringarray(vars[n]): - if 'charselector' in vars[n]: - d = vars[n]['charselector'] - if '*' in d: - d = d['*'] - errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\ - %(d, n, - ','.join(vars[n]['dimension']), - n, ','.join(vars[n]['dimension']+[d]))) - vars[n]['dimension'].append(d) - del vars[n]['charselector'] - if 'intent' not in vars[n]: - vars[n]['intent'] = [] - if 'c' not in vars[n]['intent']: - vars[n]['intent'].append('c') - else: - errmess("analyzevars: charselector=%r unhandled." % (d)) - if 'check' not in vars[n] and 'args' in block and n in block['args']: - flag = 'depend' not in vars[n] - if flag: - vars[n]['depend']=[] - vars[n]['check']=[] - if 'dimension' in vars[n]: - #/----< no check - #vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension']))) - i=-1; ni=len(vars[n]['dimension']) - for d in vars[n]['dimension']: - ddeps=[] # dependecies of 'd' - ad='' - pd='' - #origd = d - if d not in vars: - if d in savelindims: - pd, ad='(', savelindims[d][1] - d = savelindims[d][0] - else: - for r in block['args']: - #for r in block['vars'].iterkeys(): - if r not in vars: - continue - if re.match(r'.*?\b'+r+r'\b', d, re.I): - ddeps.append(r) - if d in vars: - if 'attrspec' in vars[d]: - for aa in vars[d]['attrspec']: - if aa[:6]=='depend': - ddeps += aa[6:].strip()[1:-1].split(',') - if 'depend' in vars[d]: - ddeps=ddeps+vars[d]['depend'] - i=i+1 - if d in vars and ('depend' not in vars[d]) \ - and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ - and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): - vars[d]['depend']=[n] - if ni>1: - vars[d]['=']='%s%s(%s,%s)%s'% (pd, shape_macro, n, i, ad) - else: - vars[d]['=']='%slen(%s)%s'% (pd, n, ad) - # /---< no check - if 1 and 'check' not in vars[d]: - if ni>1: - vars[d]['check']=['%s%s(%s,%i)%s==%s'\ - %(pd, shape_macro, n, i, ad, d)] - else: - vars[d]['check']=['%slen(%s)%s>=%s'%(pd, n, ad, d)] - if 'attrspec' not in vars[d]: - vars[d]['attrspec']=['optional'] - if ('optional' not in vars[d]['attrspec']) and\ - ('required' not in vars[d]['attrspec']): - vars[d]['attrspec'].append('optional') - elif d not in ['*', ':']: - #/----< no check - #if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d)) - #else: vars[n]['check'].append('len(%s)>=%s'%(n,d)) - if flag: - if d in vars: - if n not in ddeps: - vars[n]['depend'].append(d) - else: - vars[n]['depend'] = vars[n]['depend'] + ddeps - elif isstring(vars[n]): - length='1' - if 'charselector' in vars[n]: - if '*' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['*'], - params) - vars[n]['charselector']['*']=length - elif 'len' in vars[n]['charselector']: - length = _eval_length(vars[n]['charselector']['len'], - params) - del vars[n]['charselector']['len'] - vars[n]['charselector']['*']=length - - if not vars[n]['check']: - del vars[n]['check'] - if flag and not vars[n]['depend']: - del vars[n]['depend'] - if '=' in vars[n]: - if 'attrspec' not in vars[n]: - vars[n]['attrspec']=[] - if ('optional' not in vars[n]['attrspec']) and \ - 
('required' not in vars[n]['attrspec']): - vars[n]['attrspec'].append('optional') - if 'depend' not in vars[n]: - vars[n]['depend']=[] - for v, m in list(dep_matches.items()): - if m(vars[n]['=']): vars[n]['depend'].append(v) - if not vars[n]['depend']: del vars[n]['depend'] - if isscalar(vars[n]): - vars[n]['='] = _eval_scalar(vars[n]['='], params) - - for n in list(vars.keys()): - if n==block['name']: # n is block name - if 'note' in vars[n]: - block['note']=vars[n]['note'] - if block['block']=='function': - if 'result' in block and block['result'] in vars: - vars[n]=appenddecl(vars[n], vars[block['result']]) - if 'prefix' in block: - pr=block['prefix']; ispure=0; isrec=1 - pr1=pr.replace('pure', '') - ispure=(not pr==pr1) - pr=pr1.replace('recursive', '') - isrec=(not pr==pr1) - m=typespattern[0].match(pr) - if m: - typespec, selector, attr, edecl=cracktypespec0(m.group('this'), m.group('after')) - kindselect, charselect, typename=cracktypespec(typespec, selector) - vars[n]['typespec']=typespec - if kindselect: - if 'kind' in kindselect: - try: - kindselect['kind'] = eval(kindselect['kind'], {}, params) - except: - pass - vars[n]['kindselector']=kindselect - if charselect: vars[n]['charselector']=charselect - if typename: vars[n]['typename']=typename - if ispure: vars[n]=setattrspec(vars[n], 'pure') - if isrec: vars[n]=setattrspec(vars[n], 'recursive') - else: - outmess('analyzevars: prefix (%s) were not used\n'%repr(block['prefix'])) - if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: - if 'commonvars' in block: - neededvars=copy.copy(block['args']+block['commonvars']) - else: - neededvars=copy.copy(block['args']) - for n in list(vars.keys()): - if l_or(isintent_callback, isintent_aux)(vars[n]): - neededvars.append(n) - if 'entry' in block: - neededvars.extend(list(block['entry'].keys())) - for k in list(block['entry'].keys()): - for n in block['entry'][k]: - if n not in neededvars: - neededvars.append(n) - if block['block']=='function': - if 'result' in block: - neededvars.append(block['result']) - else: - neededvars.append(block['name']) - if block['block'] in ['subroutine', 'function']: - name = block['name'] - if name in vars and 'intent' in vars[name]: - block['intent'] = vars[name]['intent'] - if block['block'] == 'type': - neededvars.extend(list(vars.keys())) - for n in list(vars.keys()): - if n not in neededvars: - del vars[n] - return vars - -analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) -def expr2name(a, block, args=[]): - orig_a = a - a_is_expr = not analyzeargs_re_1.match(a) - if a_is_expr: # `a` is an expression - implicitrules, attrrules=buildimplicitrules(block) - at=determineexprtype(a, block['vars'], implicitrules) - na='e_' - for c in a: - c = c.lower() - if c not in string.ascii_lowercase+string.digits: c='_' - na=na+c - if na[-1]=='_': na=na+'e' - else: na=na+'_e' - a=na - while a in block['vars'] or a in block['args']: - a=a+'r' - if a in args: - k = 1 - while a + str(k) in args: - k = k + 1 - a = a + str(k) - if a_is_expr: - block['vars'][a]=at - else: - if a not in block['vars']: - if orig_a in block['vars']: - block['vars'][a] = block['vars'][orig_a] - else: - block['vars'][a]={} - if 'externals' in block and orig_a in block['externals']+block['interfaced']: - block['vars'][a]=setattrspec(block['vars'][a], 'external') - return a - -def analyzeargs(block): - setmesstext(block) - implicitrules, attrrules=buildimplicitrules(block) - if 'args' not in block: - block['args']=[] - args=[] - for a in block['args']: - a = 
expr2name(a, block, args)
-        args.append(a)
-    block['args']=args
-    if 'entry' in block:
-        for k, args1 in list(block['entry'].items()):
-            for a in args1:
-                if a not in block['vars']:
-                    block['vars'][a]={}
-
-    for b in block['body']:
-        if b['name'] in args:
-            if 'externals' not in block:
-                block['externals']=[]
-            if b['name'] not in block['externals']:
-                block['externals'].append(b['name'])
-    if 'result' in block and block['result'] not in block['vars']:
-        block['vars'][block['result']]={}
-    return block
-
-determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I)
-determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I)
-determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(?P<name>[\w]+)|)\Z', re.I)
-determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
-determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
-def _ensure_exprdict(r):
-    if isinstance(r, int):
-        return {'typespec':'integer'}
-    if isinstance(r, float):
-        return {'typespec':'real'}
-    if isinstance(r, complex):
-        return {'typespec':'complex'}
-    if isinstance(r, dict):
-        return r
-    raise AssertionError(repr(r))
-
-def determineexprtype(expr,vars,rules={}):
-    if expr in vars:
-        return _ensure_exprdict(vars[expr])
-    expr=expr.strip()
-    if determineexprtype_re_1.match(expr):
-        return {'typespec':'complex'}
-    m=determineexprtype_re_2.match(expr)
-    if m:
-        if 'name' in m.groupdict() and m.group('name'):
-            outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
-        return {'typespec':'integer'}
-    m = determineexprtype_re_3.match(expr)
-    if m:
-        if 'name' in m.groupdict() and m.group('name'):
-            outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
-        return {'typespec':'real'}
-    for op in ['+', '-', '*', '/']:
-        for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@'+op+'@')]:
-            if e in vars:
-                return _ensure_exprdict(vars[e])
-    t={}
-    if determineexprtype_re_4.match(expr): # in parenthesis
-        t=determineexprtype(expr[1:-1], vars, rules)
-    else:
-        m = determineexprtype_re_5.match(expr)
-        if m:
-            rn=m.group('name')
-            t=determineexprtype(m.group('name'), vars, rules)
-            if t and 'attrspec' in t:
-                del t['attrspec']
-            if not t:
-                if rn[0] in rules:
-                    return _ensure_exprdict(rules[rn[0]])
-        if expr[0] in '\'"':
-            return {'typespec':'character','charselector':{'*':'*'}}
-    if not t:
-        outmess('determineexprtype: could not determine expressions (%s) type.\n'%(repr(expr)))
-    return t
-
-######
-def crack2fortrangen(block,tab='\n', as_interface=False):
-    global skipfuncs, onlyfuncs
-    setmesstext(block)
-    ret=''
-    if isinstance(block, list):
-        for g in block:
-            if g and g['block'] in ['function', 'subroutine']:
-                if g['name'] in skipfuncs:
-                    continue
-                if onlyfuncs and g['name'] not in onlyfuncs:
-                    continue
-            ret=ret+crack2fortrangen(g, tab, as_interface=as_interface)
-        return ret
-    prefix=''
-    name=''
-    args=''
-    blocktype=block['block']
-    if blocktype=='program': return ''
-    argsl = []
-    if 'name' in block:
-        name=block['name']
-    if 'args' in block:
-        vars = block['vars']
-        for a in block['args']:
-            a = expr2name(a, block, argsl)
-            if not isintent_callback(vars[a]):
-                argsl.append(a)
-    if block['block']=='function' or argsl:
-        args='(%s)'%','.join(argsl)
-    f2pyenhancements = ''
-    if 'f2pyenhancements' in block:
-        for k in list(block['f2pyenhancements'].keys()):
-            f2pyenhancements = '%s%s%s %s'%(f2pyenhancements, tab+tabchar, k, block['f2pyenhancements'][k])
-    intent_lst = block.get('intent', [])[:]
-    if blocktype=='function' and 'callback'
in intent_lst: - intent_lst.remove('callback') - if intent_lst: - f2pyenhancements = '%s%sintent(%s) %s'%\ - (f2pyenhancements, tab+tabchar, - ','.join(intent_lst), name) - use='' - if 'use' in block: - use=use2fortran(block['use'], tab+tabchar) - common='' - if 'common' in block: - common=common2fortran(block['common'], tab+tabchar) - if name=='unknown_interface': name='' - result='' - if 'result' in block: - result=' result (%s)'%block['result'] - if block['result'] not in argsl: - argsl.append(block['result']) - #if 'prefix' in block: - # prefix=block['prefix']+' ' - body=crack2fortrangen(block['body'], tab+tabchar) - vars=vars2fortran(block, block['vars'], argsl, tab+tabchar, as_interface=as_interface) - mess='' - if 'from' in block and not as_interface: - mess='! in %s'%block['from'] - if 'entry' in block: - entry_stmts = '' - for k, i in list(block['entry'].items()): - entry_stmts = '%s%sentry %s(%s)' \ - % (entry_stmts, tab+tabchar, k, ','.join(i)) - body = body + entry_stmts - if blocktype=='block data' and name=='_BLOCK_DATA_': - name = '' - ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) - return ret - -def common2fortran(common,tab=''): - ret='' - for k in list(common.keys()): - if k=='_BLNK_': - ret='%s%scommon %s'%(ret, tab, ','.join(common[k])) - else: - ret='%s%scommon /%s/ %s'%(ret, tab, k, ','.join(common[k])) - return ret - -def use2fortran(use,tab=''): - ret='' - for m in list(use.keys()): - ret='%s%suse %s,'%(ret, tab, m) - if use[m]=={}: - if ret and ret[-1]==',': ret=ret[:-1] - continue - if 'only' in use[m] and use[m]['only']: - ret='%s only:'%(ret) - if 'map' in use[m] and use[m]['map']: - c=' ' - for k in list(use[m]['map'].keys()): - if k==use[m]['map'][k]: - ret='%s%s%s'%(ret, c, k); c=',' - else: - ret='%s%s%s=>%s'%(ret, c, k, use[m]['map'][k]); c=',' - if ret and ret[-1]==',': ret=ret[:-1] - return ret - -def true_intent_list(var): - lst = var['intent'] - ret = [] - for intent in lst: - try: - c = eval('isintent_%s(var)' % intent) - except NameError: - c = 0 - if c: - ret.append(intent) - return ret - -def vars2fortran(block,vars,args,tab='', as_interface=False): - """ - TODO: - public sub - ... 
- """ - setmesstext(block) - ret='' - nout=[] - for a in args: - if a in block['vars']: - nout.append(a) - if 'commonvars' in block: - for a in block['commonvars']: - if a in vars: - if a not in nout: - nout.append(a) - else: - errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a) - if 'varnames' in block: - nout.extend(block['varnames']) - if not as_interface: - for a in list(vars.keys()): - if a not in nout: - nout.append(a) - for a in nout: - if 'depend' in vars[a]: - for d in vars[a]['depend']: - if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: - errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a, d)) - if 'externals' in block and a in block['externals']: - if isintent_callback(vars[a]): - ret='%s%sintent(callback) %s'%(ret, tab, a) - ret='%s%sexternal %s'%(ret, tab, a) - if isoptional(vars[a]): - ret='%s%soptional %s'%(ret, tab, a) - if a in vars and 'typespec' not in vars[a]: - continue - cont=1 - for b in block['body']: - if a==b['name'] and b['block']=='function': - cont=0;break - if cont: - continue - if a not in vars: - show(vars) - outmess('vars2fortran: No definition for argument "%s".\n'%a) - continue - if a==block['name'] and not block['block']=='function': - continue - if 'typespec' not in vars[a]: - if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: - if a in args: - ret='%s%sexternal %s'%(ret, tab, a) - continue - show(vars[a]) - outmess('vars2fortran: No typespec for argument "%s".\n'%a) - continue - vardef=vars[a]['typespec'] - if vardef=='type' and 'typename' in vars[a]: - vardef='%s(%s)'%(vardef, vars[a]['typename']) - selector={} - if 'kindselector' in vars[a]: - selector=vars[a]['kindselector'] - elif 'charselector' in vars[a]: - selector=vars[a]['charselector'] - if '*' in selector: - if selector['*'] in ['*', ':']: - vardef='%s*(%s)'%(vardef, selector['*']) - else: - vardef='%s*%s'%(vardef, selector['*']) - else: - if 'len' in selector: - vardef='%s(len=%s'%(vardef, selector['len']) - if 'kind' in selector: - vardef='%s,kind=%s)'%(vardef, selector['kind']) - else: - vardef='%s)'%(vardef) - elif 'kind' in selector: - vardef='%s(kind=%s)'%(vardef, selector['kind']) - c=' ' - if 'attrspec' in vars[a]: - attr=[] - for l in vars[a]['attrspec']: - if l not in ['external']: - attr.append(l) - if attr: - vardef='%s, %s'%(vardef, ','.join(attr)) - c=',' - if 'dimension' in vars[a]: -# if not isintent_c(vars[a]): -# vars[a]['dimension'].reverse() - vardef='%s%sdimension(%s)'%(vardef, c, ','.join(vars[a]['dimension'])) - c=',' - if 'intent' in vars[a]: - lst = true_intent_list(vars[a]) - if lst: - vardef='%s%sintent(%s)'%(vardef, c, ','.join(lst)) - c=',' - if 'check' in vars[a]: - vardef='%s%scheck(%s)'%(vardef, c, ','.join(vars[a]['check'])) - c=',' - if 'depend' in vars[a]: - vardef='%s%sdepend(%s)'%(vardef, c, ','.join(vars[a]['depend'])) - c=',' - if '=' in vars[a]: - v = vars[a]['='] - if vars[a]['typespec'] in ['complex', 'double complex']: - try: - v = eval(v) - v = '(%s,%s)' % (v.real, v.imag) - except: - pass - vardef='%s :: %s=%s'%(vardef, a, v) - else: - vardef='%s :: %s'%(vardef, a) - ret='%s%s%s'%(ret, tab, vardef) - return ret -###### - -def crackfortran(files): - global usermodules - outmess('Reading fortran codes...\n', 0) - readfortrancode(files, crackline) - outmess('Post-processing...\n', 0) - usermodules=[] - postlist=postcrack(grouplist[0]) - outmess('Post-processing (stage 2)...\n', 0) - postlist=postcrack2(postlist) - return usermodules+postlist - -def 
crack2fortran(block): - global f2py_version - pyf=crack2fortrangen(block)+'\n' - header="""! -*- f90 -*- -! Note: the context of this file is case sensitive. -""" - footer=""" -! This file was auto-generated with f2py (version:%s). -! See http://cens.ioc.ee/projects/f2py2e/ -"""%(f2py_version) - return header+pyf+footer - -if __name__ == "__main__": - files=[] - funcs=[] - f=1;f2=0;f3=0 - showblocklist=0 - for l in sys.argv[1:]: - if l=='': pass - elif l[0]==':': - f=0 - elif l=='-quiet': - quiet=1 - verbose=0 - elif l=='-verbose': - verbose=2 - quiet=0 - elif l=='-fix': - if strictf77: - outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) - skipemptyends=1 - sourcecodeform='fix' - elif l=='-skipemptyends': - skipemptyends=1 - elif l=='--ignore-contains': - ignorecontains=1 - elif l=='-f77': - strictf77=1 - sourcecodeform='fix' - elif l=='-f90': - strictf77=0 - sourcecodeform='free' - skipemptyends=1 - elif l=='-h': - f2=1 - elif l=='-show': - showblocklist=1 - elif l=='-m': - f3=1 - elif l[0]=='-': - errmess('Unknown option %s\n'%repr(l)) - elif f2: - f2=0 - pyffilename=l - elif f3: - f3=0 - f77modulename=l - elif f: - try: - open(l).close() - files.append(l) - except IOError as detail: - errmess('IOError: %s\n'%str(detail)) - else: - funcs.append(l) - if not strictf77 and f77modulename and not skipemptyends: - outmess("""\ - Warning: You have specifyied module name for non Fortran 77 code - that should not need one (expect if you are scanning F90 code - for non module blocks but then you should use flag -skipemptyends - and also be sure that the files do not contain programs without program statement). -""", 0) - - postlist=crackfortran(files, funcs) - if pyffilename: - outmess('Writing fortran code to file %s\n'%repr(pyffilename), 0) - pyf=crack2fortran(postlist) - f=open(pyffilename, 'w') - f.write(pyf) - f.close() - if showblocklist: - show(postlist) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/diagnose.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/diagnose.py deleted file mode 100644 index 68d7e48d29755..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/diagnose.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, absolute_import, print_function - -import os -import sys -import tempfile - -def run_command(cmd): - print('Running %r:' % (cmd)) - s = os.system(cmd) - print('------') -def run(): - _path = os.getcwd() - os.chdir(tempfile.gettempdir()) - print('------') - print('os.name=%r' % (os.name)) - print('------') - print('sys.platform=%r' % (sys.platform)) - print('------') - print('sys.version:') - print(sys.version) - print('------') - print('sys.prefix:') - print(sys.prefix) - print('------') - print('sys.path=%r' % (':'.join(sys.path))) - print('------') - - try: - import numpy - has_newnumpy = 1 - except ImportError: - print('Failed to import new numpy:', sys.exc_info()[1]) - has_newnumpy = 0 - - try: - from numpy.f2py import f2py2e - has_f2py2e = 1 - except ImportError: - print('Failed to import f2py2e:', sys.exc_info()[1]) - has_f2py2e = 0 - - try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError: - print('Failed to import numpy_distutils:', sys.exc_info()[1]) - has_numpy_distutils = 0 - - if has_newnumpy: - try: - print('Found new numpy version %r in %s' % \ - (numpy.__version__, numpy.__file__)) - except Exception as msg: - print('error:', msg) - print('------') - - if 
has_f2py2e: - try: - print('Found f2py2e version %r in %s' % \ - (f2py2e.__version__.version, f2py2e.__file__)) - except Exception as msg: - print('error:', msg) - print('------') - - if has_numpy_distutils: - try: - if has_numpy_distutils == 2: - print('Found numpy.distutils version %r in %r' % (\ - numpy.distutils.__version__, - numpy.distutils.__file__)) - else: - print('Found numpy_distutils version %r in %r' % (\ - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 1: - print('Importing numpy_distutils.command.build_flib ...', end=' ') - import numpy_distutils.command.build_flib as build_flib - print('ok') - print('------') - try: - print('Checking availability of supported Fortran compilers:') - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print('error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.fcompiler ...', end=' ') - import numpy.distutils.fcompiler as fcompiler - else: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler - print('ok') - print('------') - try: - print('Checking availability of supported Fortran compilers:') - fcompiler.show_fcompilers() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.cpuinfo ...', end=' ') - from numpy.distutils.cpuinfo import cpuinfo - print('ok') - print('------') - else: - try: - print('Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - print('error:', msg, '(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') - cpu = cpuinfo() - print('CPU information:', end=' ') - for name in dir(cpuinfo): - if name[0]=='_' and name[1]!='_' and getattr(cpu, name[1:])(): - print(name[1:], end=' ') - print('------') - except Exception as msg: - print('error:', msg) - print('------') - os.chdir(_path) -if __name__ == "__main__": - run() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py2e.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py2e.py deleted file mode 100644 index 25407d42163a1..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py2e.py +++ /dev/null @@ -1,598 +0,0 @@ -#!/usr/bin/env python -""" - -f2py2e - Fortran to Python C/API generator. 2nd Edition. - See __usage__ below. - -Copyright 1999--2011 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/05/06 08:31:19 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -import sys -import os -import pprint -import re - -from . import crackfortran -from . import rules -from . import cb_rules -from . import auxfuncs -from . import cfuncs -from . 
import f90mod_rules -from . import __version__ - -f2py_version = __version__.version -errmess = sys.stderr.write -#outmess=sys.stdout.write -show = pprint.pprint -outmess = auxfuncs.outmess - -try: - from numpy import __version__ as numpy_version -except ImportError: - numpy_version = 'N/A' - -__usage__ = """\ -Usage: - -1) To construct extension module sources: - - f2py [] [[[only:]||[skip:]] \\ - ] \\ - [: ...] - -2) To compile fortran files and build extension modules: - - f2py -c [, , ] - -3) To generate signature files: - - f2py -h ...< same options as in (1) > - -Description: This program generates a Python C/API file (module.c) - that contains wrappers for given fortran functions so that they - can be called from Python. With the -c option the corresponding - extension modules are built. - -Options: - - --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] - --2d-numeric Use f2py2e tool with Numeric support. - --2d-numarray Use f2py2e tool with Numarray support. - --g3-numpy Use 3rd generation f2py from the separate f2py package. - [NOT AVAILABLE YET] - - -h Write signatures of the fortran routines to file - and exit. You can then edit and use it instead - of . If ==stdout then the - signatures are printed to stdout. - Names of fortran routines for which Python C/API - functions will be generated. Default is all that are found - in . - Paths to fortran/signature files that will be scanned for - in order to determine their signatures. - skip: Ignore fortran functions that follow until `:'. - only: Use only fortran functions that follow until `:'. - : Get back to mode. - - -m Name of the module; f2py generates a Python/C API - file module.c or extension module . - Default is 'untitled'. - - --[no-]lower Do [not] lower the cases in . By default, - --lower is assumed with -h key, and --no-lower without -h key. - - --build-dir All f2py generated files are created in . - Default is tempfile.mkdtemp(). - - --overwrite-signature Overwrite existing signature file. - - --[no-]latex-doc Create (or not) module.tex. - Default is --no-latex-doc. - --short-latex Create 'incomplete' LaTeX document (without commands - \\documentclass, \\tableofcontents, and \\begin{document}, - \\end{document}). - - --[no-]rest-doc Create (or not) module.rst. - Default is --no-rest-doc. - - --debug-capi Create C/API code that reports the state of the wrappers - during runtime. Useful for debugging. - - --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 - functions. --wrap-functions is default because it ensures - maximum portability/compiler independence. - - --include-paths ::... Search include files from the given - directories. - - --help-link [..] List system resources found by system_info.py. See also - --link- switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - - --quiet Run quietly. - --verbose Run with extra verbosity. - -v Print f2py version ID and exit. 
- - -numpy.distutils options (only effective with -c): - - --fcompiler= Specify Fortran compiler type by vendor - --compiler= Specify C compiler type (as defined by distutils) - - --help-fcompiler List available Fortran compilers and exit - --f77exec= Specify the path to F77 compiler - --f90exec= Specify the path to F90 compiler - --f77flags= Specify F77 compiler flags - --f90flags= Specify F90 compiler flags - --opt= Specify optimization flags - --arch= Specify architecture specific optimization flags - --noopt Compile without optimization - --noarch Compile without arch-dependent optimization - --debug Compile with debugging information - -Extra options (only effective with -c): - - --link- Link extension module with as defined - by numpy.distutils/system_info.py. E.g. to link - with optimized LAPACK libraries (vecLib on MacOSX, - ATLAS elsewhere), use --link-lapack_opt. - See also --help-link switch. - - -L/path/to/lib/ -l - -D -U - -I/path/to/include/ - .o .so .a - - Using the following macros may be required with non-gcc Fortran - compilers: - -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN - -DUNDERSCORE_G77 - - When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY - interface is printed out at exit (platforms: Linux). - - When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is - sent to stderr whenever F2PY interface makes a copy of an - array. Integer sets the threshold for array sizes when - a message should be shown. - -Version: %s -numpy Version: %s -Requires: Python 2.3 or higher. -License: NumPy license (see LICENSE.txt in the NumPy source code) -Copyright 1999 - 2011 Pearu Peterson all rights reserved. -http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, numpy_version) - -def scaninputline(inputline): - files, funcs, skipfuncs, onlyfuncs, debug=[], [], [], [], [] - f, f2, f3, f4, f5, f6, f7, f8, f9=1, 0, 0, 0, 0, 0, 0, 0, 0 - verbose = 1 - dolc=-1 - dolatexdoc = 0 - dorestdoc = 0 - wrapfuncs = 1 - buildpath = '.' 
- include_paths = [] - signsfile, modulename=None, None - options = {'buildpath':buildpath, - 'coutput': None, - 'f2py_wrapper_output': None} - for l in inputline: - if l=='': pass - elif l=='only:': f=0 - elif l=='skip:': f=-1 - elif l==':': f=1;f4=0 - elif l[:8]=='--debug-': debug.append(l[8:]) - elif l=='--lower': dolc=1 - elif l=='--build-dir': f6=1 - elif l=='--no-lower': dolc=0 - elif l=='--quiet': verbose = 0 - elif l=='--verbose': verbose += 1 - elif l=='--latex-doc': dolatexdoc=1 - elif l=='--no-latex-doc': dolatexdoc=0 - elif l=='--rest-doc': dorestdoc=1 - elif l=='--no-rest-doc': dorestdoc=0 - elif l=='--wrap-functions': wrapfuncs=1 - elif l=='--no-wrap-functions': wrapfuncs=0 - elif l=='--short-latex': options['shortlatex']=1 - elif l=='--coutput': f8=1 - elif l=='--f2py-wrapper-output': f9=1 - elif l=='--overwrite-signature': options['h-overwrite']=1 - elif l=='-h': f2=1 - elif l=='-m': f3=1 - elif l[:2]=='-v': - print(f2py_version) - sys.exit() - elif l=='--show-compilers': - f5=1 - elif l[:8]=='-include': - cfuncs.outneeds['userincludes'].append(l[9:-1]) - cfuncs.userincludes[l[9:-1]]='#include '+l[8:] - elif l[:15] in '--include_paths': - outmess('f2py option --include_paths is deprecated, use --include-paths instead.\n') - f7=1 - elif l[:15] in '--include-paths': - f7=1 - elif l[0]=='-': - errmess('Unknown option %s\n'%repr(l)) - sys.exit() - elif f2: f2=0;signsfile=l - elif f3: f3=0;modulename=l - elif f6: f6=0;buildpath=l - elif f7: f7=0;include_paths.extend(l.split(os.pathsep)) - elif f8: f8=0;options["coutput"]=l - elif f9: f9=0;options["f2py_wrapper_output"]=l - elif f==1: - try: - open(l).close() - files.append(l) - except IOError as detail: - errmess('IOError: %s. Skipping file "%s".\n'%(str(detail), l)) - elif f==-1: skipfuncs.append(l) - elif f==0: onlyfuncs.append(l) - if not f5 and not files and not modulename: - print(__usage__) - sys.exit() - if not os.path.isdir(buildpath): - if not verbose: - outmess('Creating build directory %s'%(buildpath)) - os.mkdir(buildpath) - if signsfile: - signsfile = os.path.join(buildpath, signsfile) - if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: - errmess('Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n'%(signsfile)) - sys.exit() - - options['debug']=debug - options['verbose']=verbose - if dolc==-1 and not signsfile: options['do-lower']=0 - else: options['do-lower']=dolc - if modulename: options['module']=modulename - if signsfile: options['signsfile']=signsfile - if onlyfuncs: options['onlyfuncs']=onlyfuncs - if skipfuncs: options['skipfuncs']=skipfuncs - options['dolatexdoc'] = dolatexdoc - options['dorestdoc'] = dorestdoc - options['wrapfuncs'] = wrapfuncs - options['buildpath']=buildpath - options['include_paths']=include_paths - return files, options - -def callcrackfortran(files, options): - rules.options=options - funcs=[] - crackfortran.debug=options['debug'] - crackfortran.verbose=options['verbose'] - if 'module' in options: - crackfortran.f77modulename=options['module'] - if 'skipfuncs' in options: - crackfortran.skipfuncs=options['skipfuncs'] - if 'onlyfuncs' in options: - crackfortran.onlyfuncs=options['onlyfuncs'] - crackfortran.include_paths[:]=options['include_paths'] - crackfortran.dolowercase=options['do-lower'] - postlist=crackfortran.crackfortran(files) - if 'signsfile' in options: - outmess('Saving signatures to file "%s"\n'%(options['signsfile'])) - pyf=crackfortran.crack2fortran(postlist) - if options['signsfile'][-6:]=='stdout': - sys.stdout.write(pyf) - else: - f=open(options['signsfile'], 'w') - f.write(pyf) - f.close() - if options["coutput"] is None: - for mod in postlist: - mod["coutput"] = "%smodule.c" % mod["name"] - else: - for mod in postlist: - mod["coutput"] = options["coutput"] - if options["f2py_wrapper_output"] is None: - for mod in postlist: - mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] - else: - for mod in postlist: - mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] - return postlist - -def buildmodules(lst): - cfuncs.buildcfuncs() - outmess('Building modules...\n') - modules, mnames, isusedby=[], [], {} - for i in range(len(lst)): - if '__user__' in lst[i]['name']: - cb_rules.buildcallbacks(lst[i]) - else: - if 'use' in lst[i]: - for u in lst[i]['use'].keys(): - if u not in isusedby: - isusedby[u]=[] - isusedby[u].append(lst[i]['name']) - modules.append(lst[i]) - mnames.append(lst[i]['name']) - ret = {} - for i in range(len(mnames)): - if mnames[i] in isusedby: - outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i], ','.join(['"%s"'%s for s in isusedby[mnames[i]]]))) - else: - um=[] - if 'use' in modules[i]: - for u in modules[i]['use'].keys(): - if u in isusedby and u in mnames: - um.append(modules[mnames.index(u)]) - else: - outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i], u)) - ret[mnames[i]] = {} - dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um)) - return ret - -def dict_append(d_out, d_in): - for (k, v) in d_in.items(): - if k not in d_out: - d_out[k] = [] - if isinstance(v, list): - d_out[k] = d_out[k] + v - else: - d_out[k].append(v) - -def run_main(comline_list): - """Run f2py as if string.join(comline_list,' ') is used as a command line. - In case of using -h flag, return None. 
- """ - crackfortran.reset_global_f2py_vars() - f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__)) - fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') - fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') - files, options=scaninputline(comline_list) - auxfuncs.options=options - postlist=callcrackfortran(files, options) - isusedby={} - for i in range(len(postlist)): - if 'use' in postlist[i]: - for u in postlist[i]['use'].keys(): - if u not in isusedby: - isusedby[u]=[] - isusedby[u].append(postlist[i]['name']) - for i in range(len(postlist)): - if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']: - if postlist[i]['name'] in isusedby: - #if not quiet: - outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'], ','.join(['"%s"'%s for s in isusedby[postlist[i]['name']]]))) - if 'signsfile' in options: - if options['verbose']>1: - outmess('Stopping. Edit the signature file and then run f2py on the signature file: ') - outmess('%s %s\n'%(os.path.basename(sys.argv[0]), options['signsfile'])) - return - for i in range(len(postlist)): - if postlist[i]['block']!='python module': - if 'python module' not in options: - errmess('Tip: If your original code is Fortran source then you must use -m option.\n') - raise TypeError('All blocks must be python module blocks but got %s'%(repr(postlist[i]['block']))) - auxfuncs.debugoptions=options['debug'] - f90mod_rules.options=options - auxfuncs.wrapfuncs=options['wrapfuncs'] - - ret=buildmodules(postlist) - - for mn in ret.keys(): - dict_append(ret[mn], {'csrc':fobjcsrc,'h':fobjhsrc}) - return ret - -def filter_files(prefix,suffix,files,remove_prefix=None): - """ - Filter files by prefix and suffix. - """ - filtered, rest = [], [] - match = re.compile(prefix+r'.*'+suffix+r'\Z').match - if remove_prefix: - ind = len(prefix) - else: - ind = 0 - for file in [x.strip() for x in files]: - if match(file): filtered.append(file[ind:]) - else: rest.append(file) - return filtered, rest - -def get_prefix(module): - p = os.path.dirname(os.path.dirname(module.__file__)) - return p - -def run_compile(): - """ - Do it all in one call! 
- """ - import tempfile - - i = sys.argv.index('-c') - del sys.argv[i] - - remove_build_dir = 0 - try: i = sys.argv.index('--build-dir') - except ValueError: i=None - if i is not None: - build_dir = sys.argv[i+1] - del sys.argv[i+1] - del sys.argv[i] - else: - remove_build_dir = 1 - build_dir = tempfile.mkdtemp() - - _reg1 = re.compile(r'[-][-]link[-]') - sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] - if sysinfo_flags: - sysinfo_flags = [f[7:] for f in sysinfo_flags] - - _reg2 = re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include') - f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] - f2py_flags2 = [] - fl = 0 - for a in sys.argv[1:]: - if a in ['only:', 'skip:']: - fl = 1 - elif a==':': - fl = 0 - if fl or a==':': - f2py_flags2.append(a) - if f2py_flags2 and f2py_flags2[-1]!=':': - f2py_flags2.append(':') - f2py_flags.extend(f2py_flags2) - - sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] - _reg3 = re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)') - flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in flib_flags] - _reg4 = re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))') - fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in fc_flags] - - if 1: - del_list = [] - for s in flib_flags: - v = '--fcompiler=' - if s[:len(v)]==v: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print('Unknown vendor: "%s"' % (s[len(v):])) - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv - continue - for s in del_list: - i = flib_flags.index(s) - del flib_flags[i] - assert len(flib_flags)<=2, repr(flib_flags) - - _reg5 = re.compile(r'[-][-](verbose)') - setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in setup_flags] - - if '--quiet' in f2py_flags: - setup_flags.append('--quiet') - - modulename = 'untitled' - sources = sys.argv[1:] - - for optname in ['--include_paths', '--include-paths']: - if optname in sys.argv: - i = sys.argv.index (optname) - f2py_flags.extend (sys.argv[i:i+2]) - del sys.argv[i+1], sys.argv[i] - sources = sys.argv[1:] - - if '-m' in sys.argv: - i = sys.argv.index('-m') - modulename = sys.argv[i+1] - del sys.argv[i+1], sys.argv[i] - sources = sys.argv[1:] - else: - from numpy.distutils.command.build_src import get_f2py_modulename - pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources) - sources = pyf_files + sources - for f in pyf_files: - modulename = get_f2py_modulename(f) - if modulename: - break - - extra_objects, sources = filter_files('', '[.](o|a|so)', sources) - include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1) - library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) - libraries, sources = filter_files('-l', '', sources, remove_prefix=1) - undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) - define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) - using_numarray = 0 - using_numeric = 0 - for i in 
range(len(define_macros)): - name_value = define_macros[i].split('=', 1) - if len(name_value)==1: - name_value.append(None) - if len(name_value)==2: - define_macros[i] = tuple(name_value) - else: - print('Invalid use of -D:', name_value) - - from numpy.distutils.system_info import get_info - - num_include_dir = None - num_info = {} - #import numpy - #n = 'numpy' - #p = get_prefix(numpy) - #from numpy.distutils.misc_util import get_numpy_include_dirs - #num_info = {'include_dirs': get_numpy_include_dirs()} - - if num_info: - include_dirs.extend(num_info.get('include_dirs', [])) - - from numpy.distutils.core import setup, Extension - ext_args = {'name': modulename, 'sources': sources, - 'include_dirs': include_dirs, - 'library_dirs': library_dirs, - 'libraries': libraries, - 'define_macros': define_macros, - 'undef_macros': undef_macros, - 'extra_objects': extra_objects, - 'f2py_options': f2py_flags, - } - - if sysinfo_flags: - from numpy.distutils.misc_util import dict_append - for n in sysinfo_flags: - i = get_info(n) - if not i: - outmess('No %s resources found in system'\ - ' (try `f2py --help-link`)\n' % (repr(n))) - dict_append(ext_args,**i) - - ext = Extension(**ext_args) - sys.argv = [sys.argv[0]] + setup_flags - sys.argv.extend(['build', - '--build-temp', build_dir, - '--build-base', build_dir, - '--build-platlib', '.']) - if fc_flags: - sys.argv.extend(['config_fc']+fc_flags) - if flib_flags: - sys.argv.extend(['build_ext']+flib_flags) - - setup(ext_modules = [ext]) - - if remove_build_dir and os.path.exists(build_dir): - import shutil - outmess('Removing build directory %s\n'%(build_dir)) - shutil.rmtree(build_dir) - -def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - from numpy.distutils.system_info import show_all - show_all() - return - if '-c' in sys.argv[1:]: - run_compile() - else: - run_main(sys.argv[1:]) - -#if __name__ == "__main__": -# main() - - -# EOF diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py_testing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py_testing.py deleted file mode 100644 index 4cec4baad77bc..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/f2py_testing.py +++ /dev/null @@ -1,46 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re - -from numpy.testing.utils import jiffies, memusage - -def cmdline(): - m=re.compile(r'\A\d+\Z') - args = [] - repeat = 1 - for a in sys.argv[1:]: - if m.match(a): - repeat = eval(a) - else: - args.append(a) - f2py_opts = ' '.join(args) - return repeat, f2py_opts - -def run(runtest,test_functions,repeat=1): - l = [(t, repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] - #l = [(t,'') for t in test_functions] - start_memusage = memusage() - diff_memusage = None - start_jiffies = jiffies() - i = 0 - while i -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/02/03 19:30:23 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.27 $"[10:-1] - -f2py_version='See `f2py -v`' - -import pprint -import sys -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from .auxfuncs import * -import numpy as np -from . import capi_maps -from . 
import func2subr -from .crackfortran import undo_rmbadname, undo_rmbadname1 - -options={} - -def findf90modules(m): - if ismodule(m): return [m] - if not hasbody(m): return [] - ret = [] - for b in m['body']: - if ismodule(b): ret.append(b) - else: ret=ret+findf90modules(b) - return ret - -fgetdims1 = """\ - external f2pysetdata - logical ns - integer r,i,j - integer(%d) s(*) - ns = .FALSE. - if (allocated(d)) then - do i=1,r - if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then - ns = .TRUE. - end if - end do - if (ns) then - deallocate(d) - end if - end if - if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize - -fgetdims2="""\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - end if - flag = 1 - call f2pysetdata(d,allocated(d))""" - -fgetdims2_sa="""\ - end if - if (allocated(d)) then - do i=1,r - s(i) = size(d,i) - end do - !s(r) must be equal to len(d(1)) - end if - flag = 2 - call f2pysetdata(d,allocated(d))""" - - -def buildhooks(pymod): - global fgetdims1, fgetdims2 - from . import rules - ret = {'f90modhooks':[],'initf90modhooks':[],'body':[], - 'need':['F_FUNC', 'arrayobject.h'], - 'separatorsfor':{'includes0':'\n','includes':'\n'}, - 'docs':['"Fortran 90/95 modules:\\n"'], - 'latexdoc':[]} - fhooks=[''] - def fadd(line,s=fhooks): s[0] = '%s\n %s'%(s[0], line) - doc = [''] - def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0], line) - for m in findf90modules(pymod): - sargs, fargs, efargs, modobjs, notvars, onlyvars=[], [], [], [], [m['name']], [] - sargsp = [] - ifargs = [] - mfargs = [] - if hasbody(m): - for b in m['body']: notvars.append(b['name']) - for n in m['vars'].keys(): - var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): - onlyvars.append(n) - mfargs.append(n) - outmess('\t\tConstructing F90 module support for "%s"...\n'%(m['name'])) - if onlyvars: - outmess('\t\t Variables: %s\n'%(' '.join(onlyvars))) - chooks=[''] - def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0], line) - ihooks=[''] - def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0], line) - - vrd=capi_maps.modsign2map(m) - cadd('static FortranDataDef f2py_%s_def[] = {'%(m['name'])) - dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n'%(m['name'])) - if hasnote(m): - note = m['note'] - if isinstance(note, list): note='\n'.join(note) - dadd(note) - if onlyvars: - dadd('\\begin{description}') - for n in onlyvars: - var = m['vars'][n] - modobjs.append(n) - ct = capi_maps.getctype(var) - at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n, var) - dms = dm['dims'].replace('*', '-1').strip() - dms = dms.replace(':', '-1').strip() - if not dms: dms='-1' - use_fgetdims2 = fgetdims2 - if isstringarray(var): - if 'charselector' in var and 'len' in var['charselector']: - cadd('\t{"%s",%s,{{%s,%s}},%s},'\ - %(undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at)) - use_fgetdims2 = fgetdims2_sa - else: - cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n), dm['rank'], dms, at)) - else: - cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n), dm['rank'], dms, at)) - dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n, var))) - if hasnote(var): - note = var['note'] - if isinstance(note, list): note='\n'.join(note) - dadd('--- %s'%(note)) - if isallocatable(var): - fargs.append('f2py_%s_getdims_%s'%(m['name'], n)) - efargs.append(fargs[-1]) - sargs.append('void (*%s)(int*,int*,void(*)(char*,int*),int*)'%(n)) - sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') - iadd('\tf2py_%s_def[i_f2py++].func = %s;'%(m['name'], n)) - 
-                fadd('subroutine %s(r,s,f2pysetdata,flag)'%(fargs[-1]))
-                fadd('use %s, only: d => %s\n'%(m['name'], undo_rmbadname1(n)))
-                fadd('integer flag\n')
-                fhooks[0]=fhooks[0]+fgetdims1
-                dms = eval('range(1,%s+1)'%(dm['rank']))
-                fadd(' allocate(d(%s))\n'%(','.join(['s(%s)'%i for i in dms])))
-                fhooks[0]=fhooks[0]+use_fgetdims2
-                fadd('end subroutine %s'%(fargs[-1]))
-            else:
-                fargs.append(n)
-                sargs.append('char *%s'%(n))
-                sargsp.append('char*')
-                iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'], n))
-        if onlyvars:
-            dadd('\\end{description}')
-        if hasbody(m):
-            for b in m['body']:
-                if not isroutine(b):
-                    print('Skipping', b['block'], b['name'])
-                    continue
-                modobjs.append('%s()'%(b['name']))
-                b['modulename'] = m['name']
-                api, wrap=rules.buildapi(b)
-                if isfunction(b):
-                    fhooks[0]=fhooks[0]+wrap
-                    fargs.append('f2pywrap_%s_%s'%(m['name'], b['name']))
-                    #efargs.append(fargs[-1])
-                    ifargs.append(func2subr.createfuncwrapper(b, signature=1))
-                else:
-                    if wrap:
-                        fhooks[0]=fhooks[0]+wrap
-                        fargs.append('f2pywrap_%s_%s'%(m['name'], b['name']))
-                        ifargs.append(func2subr.createsubrwrapper(b, signature=1))
-                    else:
-                        fargs.append(b['name'])
-                        mfargs.append(fargs[-1])
-                #if '--external-modroutines' in options and options['--external-modroutines']:
-                #    outmess('\t\t\tapplying --external-modroutines for %s\n'%(b['name']))
-                #    efargs.append(fargs[-1])
-                api['externroutines']=[]
-                ar=applyrules(api, vrd)
-                ar['docs']=[]
-                ar['docshort']=[]
-                ret=dictappend(ret, ar)
-                cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},'%(b['name'], m['name'], b['name'], m['name'], b['name']))
-                sargs.append('char *%s'%(b['name']))
-                sargsp.append('char *')
-                iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'], b['name']))
-        cadd('\t{NULL}\n};\n')
-        iadd('}')
-        ihooks[0]='static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s'%(m['name'], ','.join(sargs), ihooks[0])
-        if '_' in m['name']:
-            F_FUNC='F_FUNC_US'
-        else:
-            F_FUNC='F_FUNC'
-        iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'\
-             %(F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp)))
-        iadd('static void f2py_init_%s(void) {'%(m['name']))
-        iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\
-             %(F_FUNC, m['name'], m['name'].upper(), m['name']))
-        iadd('}\n')
-        ret['f90modhooks']=ret['f90modhooks']+chooks+ihooks
-        ret['initf90modhooks']=['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(m['name'], m['name'], m['name'])]+ret['initf90modhooks']
-        fadd('')
-        fadd('subroutine f2pyinit%s(f2pysetupfunc)'%(m['name']))
-        #fadd('use %s'%(m['name']))
-        if mfargs:
-            for a in undo_rmbadname(mfargs):
-                fadd('use %s, only : %s'%(m['name'], a))
-        if ifargs:
-            fadd(' '.join(['interface']+ifargs))
-            fadd('end interface')
-        fadd('external f2pysetupfunc')
-        if efargs:
-            for a in undo_rmbadname(efargs):
-                fadd('external %s'%(a))
-        fadd('call f2pysetupfunc(%s)'%(','.join(undo_rmbadname(fargs))))
-        fadd('end subroutine f2pyinit%s\n'%(m['name']))
-
-        dadd('\n'.join(ret['latexdoc']).replace(r'\subsection{', r'\subsubsection{'))
-
-        ret['latexdoc']=[]
-        ret['docs'].append('"\t%s --- %s"'%(m['name'],
-                                            ','.join(undo_rmbadname(modobjs))))
-
-    ret['routine_defs']=''
-    ret['doc']=[]
-    ret['docshort']=[]
-    ret['latexdoc']=doc[0]
-    if len(ret['docs'])<=1: ret['docs']=''
-    return ret, fhooks[0]
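[Editorial aside: buildhooks accumulates the generated Fortran/C/LaTeX text through tiny closures (fadd/cadd/iadd/dadd) over one-element lists, which act as mutable cells. The same pattern in isolation; names here mirror the deleted code but the snippet is illustrative only:]

# A one-element list works as a mutable cell, so the closure can rebind it.
fhooks = ['']
def fadd(line, s=fhooks):
    s[0] = '%s\n      %s' % (s[0], line)

fadd('subroutine demo(n)')
fadd('end subroutine demo')
assert fhooks[0].splitlines()[1:] == ['      subroutine demo(n)',
                                      '      end subroutine demo']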
a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/func2subr.py +++ /dev/null @@ -1,291 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2004/11/26 11:13:06 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.16 $"[10:-1] - -f2py_version='See `f2py -v`' - -import pprint -import copy -import sys -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from .auxfuncs import * -def var2fixfortran(vars,a,fa=None,f90mode=None): - if fa is None: - fa = a - if a not in vars: - show(vars) - outmess('var2fixfortran: No definition for argument "%s".\n'%a) - return '' - if 'typespec' not in vars[a]: - show(vars[a]) - outmess('var2fixfortran: No typespec for argument "%s".\n'%a) - return '' - vardef=vars[a]['typespec'] - if vardef=='type' and 'typename' in vars[a]: - vardef='%s(%s)'%(vardef, vars[a]['typename']) - selector={} - lk = '' - if 'kindselector' in vars[a]: - selector=vars[a]['kindselector'] - lk = 'kind' - elif 'charselector' in vars[a]: - selector=vars[a]['charselector'] - lk = 'len' - if '*' in selector: - if f90mode: - if selector['*'] in ['*', ':', '(*)']: - vardef='%s(len=*)'%(vardef) - else: - vardef='%s(%s=%s)'%(vardef, lk, selector['*']) - else: - if selector['*'] in ['*', ':']: - vardef='%s*(%s)'%(vardef, selector['*']) - else: - vardef='%s*%s'%(vardef, selector['*']) - else: - if 'len' in selector: - vardef='%s(len=%s'%(vardef, selector['len']) - if 'kind' in selector: - vardef='%s,kind=%s)'%(vardef, selector['kind']) - else: - vardef='%s)'%(vardef) - elif 'kind' in selector: - vardef='%s(kind=%s)'%(vardef, selector['kind']) - - vardef='%s %s'%(vardef, fa) - if 'dimension' in vars[a]: - vardef='%s(%s)'%(vardef, ','.join(vars[a]['dimension'])) - return vardef - -def createfuncwrapper(rout,signature=0): - assert isfunction(rout) - - extra_args = [] - vars = rout['vars'] - for a in rout['args']: - v = rout['vars'][a] - for i, d in enumerate(v.get('dimension', [])): - if d==':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) - extra_args.append(dn) - vars[dn] = dv - v['dimension'][i] = dn - rout['args'].extend(extra_args) - need_interface = bool(extra_args) - - ret = [''] - def add(line,ret=ret): - ret[0] = '%s\n %s'%(ret[0], line) - name = rout['name'] - fortranname = getfortranname(rout) - f90mode = ismoduleroutine(rout) - newname = '%sf2pywrap'%(name) - - if newname not in vars: - vars[newname] = vars[name] - args = [newname]+rout['args'][1:] - else: - args = [newname]+rout['args'] - - l = var2fixfortran(vars, name, newname, f90mode) - return_char_star = 0 - if l[:13]=='character*(*)': - return_char_star = 1 - if f90mode: l = 'character(len=10)'+l[13:] - else: l = 'character*10'+l[13:] - charselect = vars[name]['charselector'] - if charselect.get('*', '')=='(*)': - charselect['*'] = '10' - sargs = ', '.join(args) - if f90mode: - add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'], name, sargs)) - if not signature: - add('use %s, only : %s'%(rout['modulename'], fortranname)) - else: - add('subroutine f2pywrap%s (%s)'%(name, sargs)) - if not need_interface: - add('external %s'%(fortranname)) - l = l + ', '+fortranname - if need_interface: - for 
-def createfuncwrapper(rout,signature=0):
-    assert isfunction(rout)
-
-    extra_args = []
-    vars = rout['vars']
-    for a in rout['args']:
-        v = rout['vars'][a]
-        for i, d in enumerate(v.get('dimension', [])):
-            if d==':':
-                dn = 'f2py_%s_d%s' % (a, i)
-                dv = dict(typespec='integer', intent=['hide'])
-                dv['='] = 'shape(%s, %s)' % (a, i)
-                extra_args.append(dn)
-                vars[dn] = dv
-                v['dimension'][i] = dn
-    rout['args'].extend(extra_args)
-    need_interface = bool(extra_args)
-
-    ret = ['']
-    def add(line,ret=ret):
-        ret[0] = '%s\n      %s'%(ret[0], line)
-    name = rout['name']
-    fortranname = getfortranname(rout)
-    f90mode = ismoduleroutine(rout)
-    newname = '%sf2pywrap'%(name)
-
-    if newname not in vars:
-        vars[newname] = vars[name]
-        args = [newname]+rout['args'][1:]
-    else:
-        args = [newname]+rout['args']
-
-    l = var2fixfortran(vars, name, newname, f90mode)
-    return_char_star = 0
-    if l[:13]=='character*(*)':
-        return_char_star = 1
-        if f90mode: l = 'character(len=10)'+l[13:]
-        else: l = 'character*10'+l[13:]
-        charselect = vars[name]['charselector']
-        if charselect.get('*', '')=='(*)':
-            charselect['*'] = '10'
-    sargs = ', '.join(args)
-    if f90mode:
-        add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'], name, sargs))
-        if not signature:
-            add('use %s, only : %s'%(rout['modulename'], fortranname))
-    else:
-        add('subroutine f2pywrap%s (%s)'%(name, sargs))
-        if not need_interface:
-            add('external %s'%(fortranname))
-            l = l + ', '+fortranname
-    if need_interface:
-        for line in rout['saved_interface'].split('\n'):
-            if line.lstrip().startswith('use '):
-                add(line)
-
-    args = args[1:]
-    dumped_args = []
-    for a in args:
-        if isexternal(vars[a]):
-            add('external %s'%(a))
-            dumped_args.append(a)
-    for a in args:
-        if a in dumped_args: continue
-        if isscalar(vars[a]):
-            add(var2fixfortran(vars, a, f90mode=f90mode))
-            dumped_args.append(a)
-    for a in args:
-        if a in dumped_args: continue
-        if isintent_in(vars[a]):
-            add(var2fixfortran(vars, a, f90mode=f90mode))
-            dumped_args.append(a)
-    for a in args:
-        if a in dumped_args: continue
-        add(var2fixfortran(vars, a, f90mode=f90mode))
-
-    add(l)
-
-    if need_interface:
-        if f90mode:
-            # f90 module already defines needed interface
-            pass
-        else:
-            add('interface')
-            add(rout['saved_interface'].lstrip())
-            add('end interface')
-
-    sargs = ', '.join([a for a in args if a not in extra_args])
-
-    if not signature:
-        if islogicalfunction(rout):
-            add('%s = .not.(.not.%s(%s))'%(newname, fortranname, sargs))
-        else:
-            add('%s = %s(%s)'%(newname, fortranname, sargs))
-    if f90mode:
-        add('end subroutine f2pywrap_%s_%s'%(rout['modulename'], name))
-    else:
-        add('end')
-    #print '**'*10
-    #print ret[0]
-    #print '**'*10
-    return ret[0]
-
-def createsubrwrapper(rout,signature=0):
-    assert issubroutine(rout)
-
-    extra_args = []
-    vars = rout['vars']
-    for a in rout['args']:
-        v = rout['vars'][a]
-        for i, d in enumerate(v.get('dimension', [])):
-            if d==':':
-                dn = 'f2py_%s_d%s' % (a, i)
-                dv = dict(typespec='integer', intent=['hide'])
-                dv['='] = 'shape(%s, %s)' % (a, i)
-                extra_args.append(dn)
-                vars[dn] = dv
-                v['dimension'][i] = dn
-    rout['args'].extend(extra_args)
-    need_interface = bool(extra_args)
-
-    ret = ['']
-    def add(line,ret=ret):
-        ret[0] = '%s\n      %s'%(ret[0], line)
-    name = rout['name']
-    fortranname = getfortranname(rout)
-    f90mode = ismoduleroutine(rout)
-
-    args = rout['args']
-
-    sargs = ', '.join(args)
-    if f90mode:
-        add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'], name, sargs))
-        if not signature:
-            add('use %s, only : %s'%(rout['modulename'], fortranname))
-    else:
-        add('subroutine f2pywrap%s (%s)'%(name, sargs))
-        if not need_interface:
-            add('external %s'%(fortranname))
-
-    if need_interface:
-        for line in rout['saved_interface'].split('\n'):
-            if line.lstrip().startswith('use '):
-                add(line)
-
-    dumped_args = []
-    for a in args:
-        if isexternal(vars[a]):
-            add('external %s'%(a))
-            dumped_args.append(a)
-    for a in args:
-        if a in dumped_args: continue
-        if isscalar(vars[a]):
-            add(var2fixfortran(vars, a, f90mode=f90mode))
-            dumped_args.append(a)
-    for a in args:
-        if a in dumped_args: continue
-        add(var2fixfortran(vars, a, f90mode=f90mode))
-
-    if need_interface:
-        if f90mode:
-            # f90 module already defines needed interface
-            pass
-        else:
-            add('interface')
-            add(rout['saved_interface'].lstrip())
-            add('end interface')
-
-    sargs = ', '.join([a for a in args if a not in extra_args])
-
-    if not signature:
-        add('call %s(%s)'%(fortranname, sargs))
-    if f90mode:
-        add('end subroutine f2pywrap_%s_%s'%(rout['modulename'], name))
-    else:
-        add('end')
-    #print '**'*10
-    #print ret[0]
-    #print '**'*10
-    return ret[0]
-
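[Editorial aside: both wrappers above start by replacing each assumed-shape ':' dimension with a hidden integer argument defaulting to shape(arg, i). The same transformation extracted into a runnable sketch; the routine dict is a made-up miniature of crackfortran's output:]

def hide_assumed_shape(rout):
    # For every ':' dimension of an argument, add a hidden integer size
    # argument whose default is shape(arg, i), as the deleted wrappers do.
    extra = []
    for a in rout['args']:
        v = rout['vars'][a]
        for i, d in enumerate(v.get('dimension', [])):
            if d == ':':
                dn = 'f2py_%s_d%s' % (a, i)
                rout['vars'][dn] = {'typespec': 'integer', 'intent': ['hide'],
                                    '=': 'shape(%s, %s)' % (a, i)}
                v['dimension'][i] = dn
                extra.append(dn)
    rout['args'].extend(extra)
    return rout

r = hide_assumed_shape({'args': ['x'],
                        'vars': {'x': {'typespec': 'real', 'dimension': [':']}}})
assert r['args'] == ['x', 'f2py_x_d0']
assert r['vars']['x']['dimension'] == ['f2py_x_d0']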
isintent_out(fvar): - if 'intent' not in fvar: - fvar['intent']=[] - fvar['intent'].append('out') - flag=1 - for i in fvar['intent']: - if i.startswith('out='): - flag = 0 - break - if flag: - fvar['intent'].append('out=%s' % (rname)) - rout['args'][:] = [fname] + rout['args'] - return rout, createfuncwrapper(rout) - if issubroutine_wrap(rout): - fortranname = getfortranname(rout) - name = rout['name'] - outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n'%(name, fortranname)) - rout = copy.copy(rout) - return rout, createsubrwrapper(rout) - return rout, '' diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/info.py deleted file mode 100644 index c895c5de28d0b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/info.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Fortran to Python Interface Generator. - -""" -from __future__ import division, absolute_import, print_function - -postpone_import = True diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/rules.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/rules.py deleted file mode 100644 index 4c186712c9e35..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/rules.py +++ /dev/null @@ -1,1448 +0,0 @@ -#!/usr/bin/env python -""" - -Rules for building C/API module with f2py2e. - -Here is a skeleton of a new wrapper function (13Dec2001): - -wrapper_function(args) - declarations - get_python_arguments, say, `a' and `b' - - get_a_from_python - if (successful) { - - get_b_from_python - if (successful) { - - callfortran - if (succesful) { - - put_a_to_python - if (succesful) { - - put_b_to_python - if (succesful) { - - buildvalue = ... - - } - - } - - } - - } - cleanup_b - - } - cleanup_a - - return buildvalue - -Copyright 1999,2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2005/08/30 08:58:42 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.129 $"[10:-1] - -from . import __version__ -f2py_version = __version__.version - -import pprint -import sys -import time -import copy - -from .auxfuncs import * -from . import capi_maps -from .capi_maps import * -from . import cfuncs -from . import common_rules -from . import use_rules -from . import f90mod_rules -from . import func2subr - -errmess = sys.stderr.write -outmess = sys.stdout.write -show = pprint.pprint - -options={} -sepdict={} -#for k in ['need_cfuncs']: sepdict[k]=',' -for k in ['decl', - 'frompyobj', - 'cleanupfrompyobj', - 'topyarr', 'method', - 'pyobjfrom', 'closepyobjfrom', - 'freemem', - 'userincludes', - 'includes0', 'includes', 'typedefs', 'typedefs_generated', - 'cppmacros', 'cfuncs', 'callbacks', - 'latexdoc', - 'restdoc', - 'routine_defs', 'externroutines', - 'initf2pywraphooks', - 'commonhooks', 'initcommonhooks', - 'f90modhooks', 'initf90modhooks']: - sepdict[k]='\n' - -#################### Rules for C/API module ################# - -module_rules={ - 'modulebody':"""\ -/* File: #modulename#module.c - * This file is auto-generated with f2py (version:#f2py_version#). - * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, - * written by Pearu Peterson . 
- * See http://cens.ioc.ee/projects/f2py2e/ - * Generation date: """+time.asctime(time.localtime(time.time()))+""" - * $R"""+"""evision:$ - * $D"""+"""ate:$ - * Do not edit this file directly unless you know what you are doing!!! - */ -#ifdef __cplusplus -extern \"C\" { -#endif - -"""+gentitle("See f2py2e/cfuncs.py: includes")+""" -#includes# -#includes0# - -"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+""" -static PyObject *#modulename#_error; -static PyObject *#modulename#_module; - -"""+gentitle("See f2py2e/cfuncs.py: typedefs")+""" -#typedefs# - -"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+""" -#typedefs_generated# - -"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+""" -#cppmacros# - -"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+""" -#cfuncs# - -"""+gentitle("See f2py2e/cfuncs.py: userincludes")+""" -#userincludes# - -"""+gentitle("See f2py2e/capi_rules.py: usercode")+""" -#usercode# - -/* See f2py2e/rules.py */ -#externroutines# - -"""+gentitle("See f2py2e/capi_rules.py: usercode1")+""" -#usercode1# - -"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+""" -#callbacks# - -"""+gentitle("See f2py2e/rules.py: buildapi")+""" -#body# - -"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+""" -#f90modhooks# - -"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+""" - -"""+gentitle("See f2py2e/common_rules.py: buildhooks")+""" -#commonhooks# - -"""+gentitle("See f2py2e/rules.py")+""" - -static FortranDataDef f2py_routine_defs[] = { -#routine_defs# -\t{NULL} -}; - -static PyMethodDef f2py_module_methods[] = { -#pymethoddef# -\t{NULL,NULL} -}; - -#if PY_VERSION_HEX >= 0x03000000 -static struct PyModuleDef moduledef = { -\tPyModuleDef_HEAD_INIT, -\t"#modulename#", -\tNULL, -\t-1, -\tf2py_module_methods, -\tNULL, -\tNULL, -\tNULL, -\tNULL -}; -#endif - -#if PY_VERSION_HEX >= 0x03000000 -#define RETVAL m -PyMODINIT_FUNC PyInit_#modulename#(void) { -#else -#define RETVAL -PyMODINIT_FUNC init#modulename#(void) { -#endif -\tint i; -\tPyObject *m,*d, *s; -#if PY_VERSION_HEX >= 0x03000000 -\tm = #modulename#_module = PyModule_Create(&moduledef); -#else -\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods); -#endif -\tPy_TYPE(&PyFortran_Type) = &PyType_Type; -\timport_array(); -\tif (PyErr_Occurred()) -\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;} -\td = PyModule_GetDict(m); -\ts = PyString_FromString(\"$R"""+"""evision: $\"); -\tPyDict_SetItemString(d, \"__version__\", s); -#if PY_VERSION_HEX >= 0x03000000 -\ts = PyUnicode_FromString( -#else -\ts = PyString_FromString( -#endif -\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); -\tPyDict_SetItemString(d, \"__doc__\", s); -\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); -\tPy_DECREF(s); -\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) -\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i])); -#initf2pywraphooks# -#initf90modhooks# -#initcommonhooks# -#interface_usercode# - -#ifdef F2PY_REPORT_ATEXIT -\tif (! 
PyErr_Occurred()) -\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); -#endif - -\treturn RETVAL; -} -#ifdef __cplusplus -} -#endif -""", - 'separatorsfor':{'latexdoc':'\n\n', - 'restdoc':'\n\n'}, - 'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n', - '#modnote#\n', - '#latexdoc#'], - 'restdoc':['Module #modulename#\n'+'='*80, - '\n#restdoc#'] - } - -defmod_rules=[ - {'body': '/*eof body*/', - 'method': '/*eof method*/', - 'externroutines': '/*eof externroutines*/', - 'routine_defs': '/*eof routine_defs*/', - 'initf90modhooks': '/*eof initf90modhooks*/', - 'initf2pywraphooks': '/*eof initf2pywraphooks*/', - 'initcommonhooks': '/*eof initcommonhooks*/', - 'latexdoc': '', - 'restdoc': '', - 'modnote': {hasnote:'#note#',l_not(hasnote):''}, - } - ] - -routine_rules={ - 'separatorsfor':sepdict, - 'body':""" -#begintitle# -static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; -/* #declfortranroutine# */ -static PyObject *#apiname#(const PyObject *capi_self, - PyObject *capi_args, - PyObject *capi_keywds, - #functype# (*f2py_func)(#callprotoargument#)) { -\tPyObject * volatile capi_buildvalue = NULL; -\tvolatile int f2py_success = 1; -#decl# -\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; -#usercode# -#routdebugenter# -#ifdef F2PY_REPORT_ATEXIT -f2py_start_clock(); -#endif -\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ -\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ -\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; -#frompyobj# -/*end of frompyobj*/ -#ifdef F2PY_REPORT_ATEXIT -f2py_start_call_clock(); -#endif -#callfortranroutine# -if (PyErr_Occurred()) - f2py_success = 0; -#ifdef F2PY_REPORT_ATEXIT -f2py_stop_call_clock(); -#endif -/*end of callfortranroutine*/ -\t\tif (f2py_success) { -#pyobjfrom# -/*end of pyobjfrom*/ -\t\tCFUNCSMESS(\"Building return value.\\n\"); -\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); -/*closepyobjfrom*/ -#closepyobjfrom# -\t\t} /*if (f2py_success) after callfortranroutine*/ -/*cleanupfrompyobj*/ -#cleanupfrompyobj# -\tif (capi_buildvalue == NULL) { -#routdebugfailure# -\t} else { -#routdebugleave# -\t} -\tCFUNCSMESS(\"Freeing memory.\\n\"); -#freemem# -#ifdef F2PY_REPORT_ATEXIT -f2py_stop_clock(); -#endif -\treturn capi_buildvalue; -} -#endtitle# -""", - 'routine_defs':'#routine_def#', - 'initf2pywraphooks':'#initf2pywraphook#', - 'externroutines':'#declfortranroutine#', - 'doc':'#docreturn##name#(#docsignature#)', - 'docshort':'#docreturn##name#(#docsignatureshort#)', - 'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n', - 'need':['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], - 'cppmacros':{debugcapi:'#define DEBUGCFUNCS'}, - 'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n', - """ -\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} -#routnote# - -#latexdocstrsigns# -"""], - 'restdoc':['Wrapped function ``#name#``\n'+'-'*80, - - ] - } - -################## Rules for C/API function ############## - -rout_rules=[ - { # Init - 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', - 'routdebugleave': '\n', 'routdebugfailure': '\n', - 'setjmpbuf': ' || ', - 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', - 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', - 'latexdocstrsigns': '\n', - 'latexdocstrreq': '\n', 'latexdocstropt': '\n', - 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', - }, - 'kwlist': '', 'kwlistopt': '', 
-        'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '',
-        'docsign': '', 'docsignopt': '', 'decl': '/*decl*/',
-        'freemem': '/*freemem*/',
-        'docsignshort': '', 'docsignoptshort': '',
-        'docstrsigns': '', 'latexdocstrsigns': '',
-        'docstrreq': '\\nParameters\\n----------',
-        'docstropt': '\\nOther Parameters\\n----------------',
-        'docstrout': '\\nReturns\\n-------',
-        'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n',
-        'latexdocstrreq': '\\noindent Required arguments:',
-        'latexdocstropt': '\\noindent Optional arguments:',
-        'latexdocstrout': '\\noindent Return objects:',
-        'latexdocstrcbs': '\\noindent Call-back functions:',
-        'args_capi': '', 'keys_capi': '', 'functype': '',
-        'frompyobj': '/*frompyobj*/',
-        'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], #this list will be reversed
-        'pyobjfrom': '/*pyobjfrom*/',
-        'closepyobjfrom': ['/*end of closepyobjfrom*/'], #this list will be reversed
-        'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/',
-        'routdebugenter': '/*routdebugenter*/',
-        'routdebugfailure': '/*routdebugfailure*/',
-        'callfortranroutine': '/*callfortranroutine*/',
-        'argformat': '', 'keyformat': '', 'need_cfuncs': '',
-        'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '',
-        'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '',
-        'initf2pywraphook': '',
-        'routnote': {hasnote:'--- #note#',l_not(hasnote):''},
-    }, {
-        'apiname':'f2py_rout_#modulename#_#name#',
-        'pyname':'#modulename#.#name#',
-        'decl':'',
-        '_check':l_not(ismoduleroutine)
-    }, {
-        'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#',
-        'pyname':'#modulename#.#f90modulename#.#name#',
-        'decl':'',
-        '_check':ismoduleroutine
-    }, { # Subroutine
-        'functype': 'void',
-        'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
-                               l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);',
-                               ismoduleroutine:'',
-                               isdummyroutine:''
-                               },
-        'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
-                        l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
-                        l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
-                        },
-        'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'F_FUNC'},
-        'callfortranroutine': [
-            {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
-            {hasexternals:"""\
-\t\tif (#setjmpbuf#) {
-\t\t\tf2py_success = 0;
-\t\t} else {"""},
-            {isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'},
-            {hascallstatement:'''\t\t\t\t#callstatement#;
-\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
-            {l_not(l_or(hascallstatement, isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'},
-            {isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'},
-            {hasexternals:"""\t\t}"""}
-        ],
-        '_check': l_and(issubroutine, l_not(issubroutine_wrap)),
-    }, { # Wrapped function
-        'functype': 'void',
-        'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
-                               isdummyroutine: '',
-                               },
-
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)):''' - { - extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); - PyObject* o = PyDict_GetItemString(d,"#name#"); - PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); -#if PY_VERSION_HEX >= 0x03000000 - PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); -#else - PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); -#endif - } - '''}, - 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)):['F_WRAPPEDFUNC', 'F_FUNC']}, - 'callfortranroutine': [ - {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, - {hasexternals:"""\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement, isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, - {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, - {hasexternals:'\t}'} - ], - '_check': isfunction_wrap, - }, { # Wrapped subroutine - 'functype': 'void', - 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine: '', - }, - - 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)):''' - { - extern void #F_FUNC#(#name_lower#,#NAME#)(void); - PyObject* o = PyDict_GetItemString(d,"#name#"); - PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); -#if PY_VERSION_HEX >= 0x03000000 - PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); -#else - PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); -#endif - } - '''}, - 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)):['F_WRAPPEDFUNC', 'F_FUNC']}, - 'callfortranroutine': [ - {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, - {hasexternals:"""\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement, isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, - {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, - {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, - {hasexternals:'\t}'} - ], - '_check': issubroutine_wrap, - }, { # Function - 'functype':'#ctype#', - 'docreturn':{l_not(isintent_hide):'#rname#,'}, - 'docstrout':'#pydocsignout#', - 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', - {hasresultnote:'--- #resultnote#'}], - 'callfortranroutine':[{l_and(debugcapi, isstringfunction):"""\ -#ifdef USESCOMPAQFORTRAN -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); -#else -\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); -#endif -"""}, - {l_and(debugcapi, l_not(isstringfunction)):"""\ 
-\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); -"""} - ], - '_check':l_and(isfunction, l_not(isfunction_wrap)) - }, { # Scalar function - 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);', - isdummyroutine:'' - }, - 'routine_def':{l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', - }, - 'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};', - l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'}, - {iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'} - ], - 'callfortranroutine':[ - {hasexternals:"""\ -\tif (#setjmpbuf#) { -\t\tf2py_success = 0; -\t} else {"""}, - {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, - {hascallstatement:'''\t#callstatement#; -/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ -'''}, - {l_not(l_or(hascallstatement, isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'}, - {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, - {hasexternals:'\t}'}, - {l_and(debugcapi, iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, - {l_and(debugcapi, l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], - 'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, - 'need':[{l_not(isdummyroutine):'F_FUNC'}, - {iscomplexfunction:'pyobj_from_#ctype#1'}, - {islong_longfunction:'long_long'}, - {islong_doublefunction:'long_double'}], - 'returnformat':{l_not(isintent_hide):'#rformat#'}, - 'return':{iscomplexfunction:',#name#_return_value_capi', - l_not(l_or(iscomplexfunction, isintent_hide)):',#name#_return_value'}, - '_check':l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) - }, { # String function # in use for --no-wrap - 'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - 'routine_def':{l_not(l_or(ismoduleroutine, isintent_c)): -# '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},', - '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine), isintent_c): -# '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},' - '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' - }, - 'decl':['\t#ctype# #name#_return_value = NULL;', - '\tint #name#_return_value_len = 0;'], - 'callfortran':'#name#_return_value,#name#_return_value_len,', - 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', - '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {', - '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', - '\t\tf2py_success = 0;', - '\t} else {', - "\t\t(#name#_return_value)[#name#_return_value_len] = 
'\\0';", - '\t}', - '\tif (f2py_success) {', - {hasexternals:"""\ -\t\tif (#setjmpbuf#) { -\t\t\tf2py_success = 0; -\t\t} else {"""}, - {isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'}, - """\ -#ifdef USESCOMPAQFORTRAN -\t\t(*f2py_func)(#callcompaqfortran#); -#else -\t\t(*f2py_func)(#callfortran#); -#endif -""", - {isthreadsafe:'\t\tPy_END_ALLOW_THREADS'}, - {hasexternals:'\t\t}'}, - {debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, - '\t} /* if (f2py_success) after (string)malloc */', - ], - 'returnformat':'#rformat#', - 'return':',#name#_return_value', - 'freemem':'\tSTRINGFREE(#name#_return_value);', - 'need':['F_FUNC', '#ctype#', 'STRINGFREE'], - '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete - }, - { # Debugging - 'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', - 'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', - 'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', - '_check':debugcapi - } - ] - -################ Rules for arguments ################## - -typedef_need_dict = {islong_long: 'long_long', - islong_double: 'long_double', - islong_complex: 'complex_long_double', - isunsigned_char: 'unsigned_char', - isunsigned_short: 'unsigned_short', - isunsigned: 'unsigned', - isunsigned_long_long: 'unsigned_long_long', - isunsigned_chararray: 'unsigned_char', - isunsigned_shortarray: 'unsigned_short', - isunsigned_long_longarray: 'unsigned_long_long', - issigned_long_longarray: 'long_long', - } - -aux_rules=[ - { - 'separatorsfor':sepdict - }, - { # Common - 'frompyobj': ['\t/* Processing auxiliary variable #varname# */', - {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], - 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', - 'need': typedef_need_dict, - }, -# Scalars (not complex) - { # Common - 'decl': '\t#ctype# #varname# = 0;', - 'need': {hasinitvalue:'math.h'}, - 'frompyobj': {hasinitvalue:'\t#varname# = #init#;'}, - '_check': l_and(isscalar, l_not(iscomplex)), - }, - { - 'return': ',#varname#', - 'docstrout': '#pydocsignout#', - 'docreturn': '#outvarname#,', - 'returnformat': '#varrformat#', - '_check': l_and(isscalar, l_not(iscomplex), isintent_out), - }, -# Complex scalars - { # Common - 'decl':'\t#ctype# #varname#;', - 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check':iscomplex - }, -# String - { # Common - 'decl':['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - ], - 'need':['len..'], - '_check':isstring - }, -# Array - { # Common - 'decl':['\t#ctype# *#varname# = NULL;', - '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', - '\tconst int #varname#_Rank = #rank#;', - ], - 'need':['len..', {hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}], - '_check':isarray - }, -# Scalararray - { # Common - '_check':l_and(isarray, l_not(iscomplexarray)) - }, { # Not hidden - '_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide) - }, -# Integer*1 array - {'need':'#ctype#', - '_check':isint1array, - '_depend':'' - }, -# Integer*-1 array - {'need':'#ctype#', - '_check':isunsigned_chararray, - '_depend':'' - }, -# Integer*-2 array - {'need':'#ctype#', - '_check':isunsigned_shortarray, - '_depend':'' - }, -# Integer*-8 array - {'need':'#ctype#', - '_check':isunsigned_long_longarray, - '_depend':'' - }, -# Complexarray - {'need':'#ctype#', - 
-
-################ Rules for arguments ##################
-
-typedef_need_dict = {islong_long: 'long_long',
-                     islong_double: 'long_double',
-                     islong_complex: 'complex_long_double',
-                     isunsigned_char: 'unsigned_char',
-                     isunsigned_short: 'unsigned_short',
-                     isunsigned: 'unsigned',
-                     isunsigned_long_long: 'unsigned_long_long',
-                     isunsigned_chararray: 'unsigned_char',
-                     isunsigned_shortarray: 'unsigned_short',
-                     isunsigned_long_longarray: 'unsigned_long_long',
-                     issigned_long_longarray: 'long_long',
-                     }
-
-aux_rules=[
-    {
-        'separatorsfor':sepdict
-    },
-    { # Common
-        'frompyobj': ['\t/* Processing auxiliary variable #varname# */',
-                      {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
-        'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
-        'need': typedef_need_dict,
-    },
-# Scalars (not complex)
-    { # Common
-        'decl': '\t#ctype# #varname# = 0;',
-        'need': {hasinitvalue:'math.h'},
-        'frompyobj': {hasinitvalue:'\t#varname# = #init#;'},
-        '_check': l_and(isscalar, l_not(iscomplex)),
-    },
-    {
-        'return': ',#varname#',
-        'docstrout': '#pydocsignout#',
-        'docreturn': '#outvarname#,',
-        'returnformat': '#varrformat#',
-        '_check': l_and(isscalar, l_not(iscomplex), isintent_out),
-    },
-# Complex scalars
-    { # Common
-        'decl':'\t#ctype# #varname#;',
-        'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
-        '_check':iscomplex
-    },
-# String
-    { # Common
-        'decl':['\t#ctype# #varname# = NULL;',
-                '\tint slen(#varname#);',
-                ],
-        'need':['len..'],
-        '_check':isstring
-    },
-# Array
-    { # Common
-        'decl':['\t#ctype# *#varname# = NULL;',
-                '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
-                '\tconst int #varname#_Rank = #rank#;',
-                ],
-        'need':['len..', {hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}],
-        '_check':isarray
-    },
-# Scalararray
-    { # Common
-        '_check':l_and(isarray, l_not(iscomplexarray))
-    }, { # Not hidden
-        '_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide)
-    },
-# Integer*1 array
-    {'need':'#ctype#',
-     '_check':isint1array,
-     '_depend':''
-     },
-# Integer*-1 array
-    {'need':'#ctype#',
-     '_check':isunsigned_chararray,
-     '_depend':''
-     },
-# Integer*-2 array
-    {'need':'#ctype#',
-     '_check':isunsigned_shortarray,
-     '_depend':''
-     },
-# Integer*-8 array
-    {'need':'#ctype#',
-     '_check':isunsigned_long_longarray,
-     '_depend':''
-     },
-# Complexarray
-    {'need':'#ctype#',
-     '_check':iscomplexarray,
-     '_depend':''
-     },
-# Stringarray
-    {
-        'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
-        'need':'string',
-        '_check':isstringarray
-    }
-    ]
-
-arg_rules=[
-    {
-        'separatorsfor':sepdict
-    },
-    { # Common
-        'frompyobj': ['\t/* Processing variable #varname# */',
-                      {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
-        'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
-        '_depend': '',
-        'need': typedef_need_dict,
-    },
-# Doc signatures
-    {
-        'docstropt':{l_and(isoptional, isintent_nothide):'#pydocsign#'},
-        'docstrreq':{l_and(isrequired, isintent_nothide):'#pydocsign#'},
-        'docstrout':{isintent_out:'#pydocsignout#'},
-        'latexdocstropt':{l_and(isoptional, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
-                                                               {hasnote:'--- #note#'}]},
-        'latexdocstrreq':{l_and(isrequired, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
-                                                               {hasnote:'--- #note#'}]},
-        'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
-                                        {l_and(hasnote, isintent_hide):'--- #note#',
-                                         l_and(hasnote, isintent_nothide):'--- See above.'}]},
-        'depend':''
-    },
-# Required/Optional arguments
-    {
-        'kwlist':'"#varname#",',
-        'docsign':'#varname#,',
-        '_check':l_and(isintent_nothide, l_not(isoptional))
-    },
-    {
-        'kwlistopt':'"#varname#",',
-        'docsignopt':'#varname#=#showinit#,',
-        'docsignoptshort':'#varname#,',
-        '_check':l_and(isintent_nothide, isoptional)
-    },
-# Docstring/BuildValue
-    {
-        'docreturn':'#outvarname#,',
-        'returnformat':'#varrformat#',
-        '_check':isintent_out
-    },
-# Externals (call-back functions)
-    { # Common
-        'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
-        'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
-        'docstropt':{isintent_nothide:'#varname#_extra_args : input tuple, optional\\n    Default: ()'},
-        'docstrcbs':'#cbdocstr#',
-        'latexdocstrcbs':'\\item[] #cblatexdocstr#',
-        'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
-        'decl':['\tPyObject *#varname#_capi = Py_None;',
-                '\tPyTupleObject *#varname#_xa_capi = NULL;',
-                '\tPyTupleObject *#varname#_args_capi = NULL;',
-                '\tint #varname#_nofargs_capi = 0;',
-                {l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'}
-                ],
-        'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'},
-        'argformat':{isrequired:'O'},
-        'keyformat':{isoptional:'O'},
-        'xaformat':{isintent_nothide:'O!'},
-        'args_capi':{isrequired:',&#varname#_capi'},
-        'keys_capi':{isoptional:',&#varname#_capi'},
-        'keys_xa':',&PyTuple_Type,&#varname#_xa_capi',
-        'setjmpbuf':'(setjmp(#cbname#_jmpbuf))',
-        'callfortran':{l_not(isintent_callback):'#varname#_cptr,'},
-        'need':['#cbname#', 'setjmp.h'],
-        '_check':isexternal
-    },
-    {
-        'frompyobj':[{l_not(isintent_callback):"""\
-if(F2PyCapsule_Check(#varname#_capi)) {
-  #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi);
-} else {
-  #varname#_cptr = #cbname#;
-}
-"""}, {isintent_callback:"""\
-if (#varname#_capi==Py_None) {
-  #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
-  if (#varname#_capi) {
-    if (#varname#_xa_capi==NULL) {
-      if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
-        PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
-        if (capi_tmp)
-          #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
-        else
-          #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
-        if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); - return NULL; - } - } - } - } - if (#varname#_capi==NULL) { - PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); - return NULL; - } -} -"""}, -## {l_not(isintent_callback):"""\ -## if (#varname#_capi==Py_None) { -## printf(\"hoi\\n\"); -## } -## """}, -"""\ -\t#varname#_nofargs_capi = #cbname#_nofargs; -\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) { -\t\tjmp_buf #varname#_jmpbuf;""", -{debugcapi:["""\ -\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs); -\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", -{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, - """\ -\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\"); -\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject); -\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject); -\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""", - ], -'cleanupfrompyobj': -"""\ -\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\"); -\t\t#cbname#_capi = #varname#_capi; -\t\tPy_DECREF(#cbname#_args_capi); -\t\t#cbname#_args_capi = #varname#_args_capi; -\t\t#cbname#_nofargs = #varname#_nofargs_capi; -\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf)); -\t}""", - 'need':['SWAP', 'create_cb_arglist'], - '_check':isexternal, - '_depend':'' - }, -# Scalars (not complex) - { # Common - 'decl':'\t#ctype# #varname# = 0;', - 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, - 'return':{isintent_out:',#varname#'}, - '_check':l_and(isscalar, l_not(iscomplex)) - }, { - 'need': {hasinitvalue:'math.h'}, - '_check': l_and(isscalar, l_not(iscomplex)), - #'_depend':'' - }, { # Not hidden - 'decl':'\tPyObject *#varname#_capi = Py_None;', - 'argformat':{isrequired:'O'}, - 'keyformat':{isoptional:'O'}, - 'args_capi':{isrequired:',&#varname#_capi'}, - 'keys_capi':{isoptional:',&#varname#_capi'}, - 'pyobjfrom':{isintent_inout:"""\ -\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\tif (f2py_success) {"""}, - 'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, - '_check':l_and(isscalar, l_not(iscomplex), isintent_nothide) - }, { - 'frompyobj':[ -# hasinitvalue... -# if pyobj is None: -# varname = init -# else -# from_pyobj(varname) -# -# isoptional and noinitvalue... -# if pyobj is not None: -# from_pyobj(varname) -# else: -# varname is uninitialized -# -# ... 
-# from_pyobj(varname) -# - {hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else', - '_depend':''}, - {l_and(isoptional, l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)', - '_depend':''}, - {l_not(islogical):'''\ -\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); -\tif (f2py_success) {'''}, - {islogical:'''\ -\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); -\t\tf2py_success = 1; -\tif (f2py_success) {'''}, - ], - 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/', - 'need':{l_not(islogical):'#ctype#_from_pyobj'}, - '_check':l_and(isscalar, l_not(iscomplex), isintent_nothide), - '_depend':'' -# },{ # Hidden -# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide) - }, { # Hidden - 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'}, - 'need':typedef_need_dict, - '_check':l_and(isscalar, l_not(iscomplex), isintent_hide), - '_depend':'' - }, { # Common - 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - '_check':l_and(isscalar, l_not(iscomplex)), - '_depend':'' - }, -# Complex scalars - { # Common - 'decl':'\t#ctype# #varname#;', - 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, - 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, - 'return':{isintent_out:',#varname#_capi'}, - '_check':iscomplex - }, { # Not hidden - 'decl':'\tPyObject *#varname#_capi = Py_None;', - 'argformat':{isrequired:'O'}, - 'keyformat':{isoptional:'O'}, - 'args_capi':{isrequired:',&#varname#_capi'}, - 'keys_capi':{isoptional:',&#varname#_capi'}, - 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, - 'pyobjfrom':{isintent_inout:"""\ -\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); -\t\tif (f2py_success) {"""}, - 'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - '_check':l_and(iscomplex, isintent_nothide) - }, { - 'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, - {l_and(isoptional, l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'}, -# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");' - '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' - '\n\tif (f2py_success) {'], - 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/', - 'need':['#ctype#_from_pyobj'], - '_check':l_and(iscomplex, isintent_nothide), - '_depend':'' - }, { # Hidden - 'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'}, - '_check':l_and(iscomplex, isintent_hide) - }, { - 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check':l_and(iscomplex, isintent_hide), - '_depend':'' - }, { # Common - 'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, - 'need':['pyobj_from_#ctype#1'], - '_check':iscomplex - }, { - 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, - '_check':iscomplex, - '_depend':'' - }, -# String - { # Common - 'decl':['\t#ctype# #varname# = NULL;', - '\tint slen(#varname#);', - '\tPyObject *#varname#_capi = Py_None;'], - 'callfortran':'#varname#,', - 'callfortranappend':'slen(#varname#),', - 
-        'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
-#        'freemem':'\tSTRINGFREE(#varname#);',
-        'return':{isintent_out:',#varname#'},
-        'need':['len..'],#'STRINGFREE'],
-        '_check':isstring
-    }, { # Common
-        'frompyobj':"""\
-\tslen(#varname#) = #length#;
-\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
-\tif (f2py_success) {""",
-        'cleanupfrompyobj':"""\
-\t\tSTRINGFREE(#varname#);
-\t} /*if (f2py_success) of #varname#*/""",
-        'need':['#ctype#_from_pyobj', 'len..', 'STRINGFREE'],
-        '_check':isstring,
-        '_depend':''
-    }, { # Not hidden
-        'argformat':{isrequired:'O'},
-        'keyformat':{isoptional:'O'},
-        'args_capi':{isrequired:',&#varname#_capi'},
-        'keys_capi':{isoptional:',&#varname#_capi'},
-        'pyobjfrom':{isintent_inout:'''\
-\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
-\tif (f2py_success) {'''},
-        'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
-        'need':{isintent_inout:'try_pyarr_from_#ctype#'},
-        '_check':l_and(isstring, isintent_nothide)
-    }, { # Hidden
-        '_check':l_and(isstring, isintent_hide)
-    }, {
-        'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
-        '_check':isstring,
-        '_depend':''
-    },
-# Array
-    { # Common
-        'decl':['\t#ctype# *#varname# = NULL;',
-                '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
-                '\tconst int #varname#_Rank = #rank#;',
-                '\tPyArrayObject *capi_#varname#_tmp = NULL;',
-                '\tint capi_#varname#_intent = 0;',
-                ],
-        'callfortran':'#varname#,',
-        'return':{isintent_out:',capi_#varname#_tmp'},
-        'need':'len..',
-        '_check':isarray
-    }, { # intent(overwrite) array
-        'decl': '\tint capi_overwrite_#varname# = 1;',
-        'kwlistxa': '"overwrite_#varname#",',
-        'xaformat': 'i',
-        'keys_xa': ',&capi_overwrite_#varname#',
-        'docsignxa': 'overwrite_#varname#=1,',
-        'docsignxashort': 'overwrite_#varname#,',
-        'docstropt': 'overwrite_#varname# : input int, optional\\n    Default: 1',
-        '_check': l_and(isarray, isintent_overwrite),
-    }, {
-        'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
-        '_check': l_and(isarray, isintent_overwrite),
-        '_depend': '',
-    },
-    { # intent(copy) array
-        'decl': '\tint capi_overwrite_#varname# = 0;',
-        'kwlistxa': '"overwrite_#varname#",',
-        'xaformat': 'i',
-        'keys_xa': ',&capi_overwrite_#varname#',
-        'docsignxa': 'overwrite_#varname#=0,',
-        'docsignxashort': 'overwrite_#varname#,',
-        'docstropt': 'overwrite_#varname# : input int, optional\\n    Default: 0',
-        '_check': l_and(isarray, isintent_copy),
-    }, {
-        'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
-        '_check': l_and(isarray, isintent_copy),
-        '_depend': '',
-    }, {
-        'need':[{hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}],
-        '_check':isarray,
-        '_depend':''
-    }, { # Not hidden
-        'decl':'\tPyObject *#varname#_capi = Py_None;',
-        'argformat':{isrequired:'O'},
-        'keyformat':{isoptional:'O'},
-        'args_capi':{isrequired:',&#varname#_capi'},
-        'keys_capi':{isoptional:',&#varname#_capi'},
-#        'pyobjfrom':{isintent_inout:"""\
-#    /* Partly because of the following hack, intent(inout) is depreciated,
-#       Use intent(in,out) instead.
- -# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\ -# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) { -# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) { -# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base) -# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi); -# \t\t} else -# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi); -# \t} -# */ -# """}, -# 'need':{isintent_inout:'copy_ND_array'}, - '_check':l_and(isarray, isintent_nothide) - }, { - 'frompyobj':['\t#setdims#;', - '\tcapi_#varname#_intent |= #intent#;', - {isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, - {isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, - """\ -\tif (capi_#varname#_tmp == NULL) { -\t\tif (!PyErr_Occurred()) -\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" ); -\t} else { -\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data); -""", -{hasinitvalue:[ - {isintent_nothide:'\tif (#varname#_capi == Py_None) {'}, - {isintent_hide:'\t{'}, - {iscomplexarray:'\t\t#ctype# capi_c;'}, - """\ -\t\tint *_i,capi_i=0; -\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); -\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) { -\t\t\twhile ((_i = nextforcomb())) -\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */ -\t\t} else { -\t\t\tif (!PyErr_Occurred()) -\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); -\t\t\tf2py_success = 0; -\t\t} -\t} -\tif (f2py_success) {"""]}, - ], - 'cleanupfrompyobj':[ # note that this list will be reversed - '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', - {l_not(l_or(isintent_out, isintent_hide)):"""\ -\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { -\t\tPy_XDECREF(capi_#varname#_tmp); }"""}, - {l_and(isintent_hide, l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""}, - {hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'}, - ], - '_check':isarray, - '_depend':'' - }, -# { # Hidden -# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'}, -# '_check':l_and(isarray,isintent_hide) -# }, -# Scalararray - { # Common - '_check':l_and(isarray, l_not(iscomplexarray)) - }, { # Not hidden - '_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide) - }, -# Integer*1 array - {'need':'#ctype#', - '_check':isint1array, - '_depend':'' - }, -# Integer*-1 array - {'need':'#ctype#', - '_check':isunsigned_chararray, - '_depend':'' - }, -# Integer*-2 array - {'need':'#ctype#', - '_check':isunsigned_shortarray, - '_depend':'' - }, -# Integer*-8 array - {'need':'#ctype#', - '_check':isunsigned_long_longarray, - '_depend':'' - }, -# Complexarray - {'need':'#ctype#', - '_check':iscomplexarray, - '_depend':'' - }, -# Stringarray - { - 'callfortranappend':{isarrayofstrings:'flen(#varname#),'}, - 'need':'string', - '_check':isstringarray - } - ] - -################# Rules for checking ############### - -check_rules=[ - { - 'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, - 'need':'len..' 
- }, { - 'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/', - 'need':'CHECKSCALAR', - '_check':l_and(isscalar, l_not(iscomplex)), - '_break':'' - }, { - 'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', - 'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/', - 'need':'CHECKSTRING', - '_check':isstring, - '_break':'' - }, { - 'need':'CHECKARRAY', - 'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/', - '_check':isarray, - '_break':'' - }, { - 'need': 'CHECKGENERIC', - 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/', - } -] - -########## Applying the rules. No need to modify what follows ############# - -#################### Build C/API module ####################### - -def buildmodule(m, um): - """ - Return - """ - global f2py_version, options - outmess('\tBuilding module "%s"...\n'%(m['name'])) - ret = {} - mod_rules=defmod_rules[:] - vrd=modsign2map(m) - rd=dictappend({'f2py_version':f2py_version}, vrd) - funcwrappers = [] - funcwrappers2 = [] # F90 codes - for n in m['interfaced']: - nb=None - for bi in m['body']: - if not bi['block']=='interface': - errmess('buildmodule: Expected interface block. Skipping.\n') - continue - for b in bi['body']: - if b['name']==n: nb=b;break - - if not nb: - errmess('buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n'%(n)) - continue - nb_list = [nb] - if 'entry' in nb: - for k, a in nb['entry'].items(): - nb1 = copy.deepcopy(nb) - del nb1['entry'] - nb1['name'] = k - nb1['args'] = a - nb_list.append(nb1) - for nb in nb_list: - api, wrap=buildapi(nb) - if wrap: - if ismoduleroutine(nb): - funcwrappers2.append(wrap) - else: - funcwrappers.append(wrap) - ar=applyrules(api, vrd) - rd=dictappend(rd, ar) - - # Construct COMMON block support - cr, wrap = common_rules.buildhooks(m) - if wrap: - funcwrappers.append(wrap) - ar=applyrules(cr, vrd) - rd=dictappend(rd, ar) - - # Construct F90 module support - mr, wrap = f90mod_rules.buildhooks(m) - if wrap: - funcwrappers2.append(wrap) - ar=applyrules(mr, vrd) - rd=dictappend(rd, ar) - - for u in um: - ar=use_rules.buildusevars(u, m['use'][u['name']]) - rd=dictappend(rd, ar) - - needs=cfuncs.get_needs() - code={} - for n in needs.keys(): - code[n]=[] - for k in needs[n]: - c='' - if k in cfuncs.includes0: - c=cfuncs.includes0[k] - elif k in cfuncs.includes: - c=cfuncs.includes[k] - elif k in cfuncs.userincludes: - c=cfuncs.userincludes[k] - elif k in cfuncs.typedefs: - c=cfuncs.typedefs[k] - elif k in cfuncs.typedefs_generated: - c=cfuncs.typedefs_generated[k] - elif k in cfuncs.cppmacros: - c=cfuncs.cppmacros[k] - elif k in cfuncs.cfuncs: - c=cfuncs.cfuncs[k] - elif k in cfuncs.callbacks: - c=cfuncs.callbacks[k] - elif k in cfuncs.f90modhooks: - c=cfuncs.f90modhooks[k] - elif k in cfuncs.commonhooks: - c=cfuncs.commonhooks[k] - else: - errmess('buildmodule: unknown need %s.\n'%(repr(k)));continue - code[n].append(c) - mod_rules.append(code) - for r in mod_rules: - if ('_check' in r and r['_check'](m)) or ('_check' not in r): - ar=applyrules(r, vrd, m) - rd=dictappend(rd, ar) - ar=applyrules(module_rules, rd) - - fn = os.path.join(options['buildpath'], vrd['coutput']) - ret['csrc'] = fn - f=open(fn, 'w') - f.write(ar['modulebody'].replace('\t', 2*' ')) - f.close() - outmess('\tWrote C/API module
"%s" to file "%s"\n'%(m['name'], fn)) - - if options['dorestdoc']: - fn = os.path.join(options['buildpath'], vrd['modulename']+'module.rest') - f=open(fn, 'w') - f.write('.. -*- rest -*-\n') - f.write('\n'.join(ar['restdoc'])) - f.close() - outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'], vrd['modulename'])) - if options['dolatexdoc']: - fn = os.path.join(options['buildpath'], vrd['modulename']+'module.tex') - ret['ltx'] = fn - f=open(fn, 'w') - f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version)) - if 'shortlatex' not in options: - f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') - f.write('\n'.join(ar['latexdoc'])) - if 'shortlatex' not in options: - f.write('\\end{document}') - f.close() - outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'], vrd['modulename'])) - if funcwrappers: - wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) - ret['fsrc'] = wn - f=open(wn, 'w') - f.write('C -*- fortran -*-\n') - f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) - f.write('C It contains Fortran 77 wrappers to fortran functions.\n') - lines = [] - for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'): - if l and l[0]==' ': - while len(l)>=66: - lines.append(l[:66]+'\n &') - l = l[66:] - lines.append(l+'\n') - else: lines.append(l+'\n') - lines = ''.join(lines).replace('\n &\n', '\n') - f.write(lines) - f.close() - outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn)) - if funcwrappers2: - wn = os.path.join(options['buildpath'], '%s-f2pywrappers2.f90'%(vrd['modulename'])) - ret['fsrc'] = wn - f=open(wn, 'w') - f.write('! -*- f90 -*-\n') - f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) - f.write('! 
It contains Fortran 90 wrappers to fortran functions.\n') - lines = [] - for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'): - if len(l)>72 and l[0]==' ': - lines.append(l[:72]+'&\n &') - l = l[72:] - while len(l)>66: - lines.append(l[:66]+'&\n &') - l = l[66:] - lines.append(l+'\n') - else: lines.append(l+'\n') - lines = ''.join(lines).replace('\n &\n', '\n') - f.write(lines) - f.close() - outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn)) - return ret - -################## Build C/API function ############# - -stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'} - -def buildapi(rout): - rout, wrap = func2subr.assubr(rout) - args, depargs=getargs2(rout) - capi_maps.depargs=depargs - var=rout['vars'] - auxvars = [a for a in var.keys() if isintent_aux(var[a])] - - if ismoduleroutine(rout): - outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'], rout['name'])) - else: - outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name'])) - # Routine - vrd=routsign2map(rout) - rd=dictappend({}, vrd) - for r in rout_rules: - if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar=applyrules(r, vrd, rout) - rd=dictappend(rd, ar) - - # Args - nth, nthk=0, 0 - savevrd={} - for a in args: - vrd=sign2map(a, var[a]) - if isintent_aux(var[a]): - _rules = aux_rules - else: - _rules = arg_rules - if not isintent_hide(var[a]): - if not isoptional(var[a]): - nth=nth+1 - vrd['nth']=repr(nth)+stnd[nth%10]+' argument' - else: - nthk=nthk+1 - vrd['nth']=repr(nthk)+stnd[nthk%10]+' keyword' - else: vrd['nth']='hidden' - savevrd[a]=vrd - for r in _rules: - if '_depend' in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r, vrd, var[a]) - rd=dictappend(rd, ar) - if '_break' in r: - break - for a in depargs: - if isintent_aux(var[a]): - _rules = aux_rules - else: - _rules = arg_rules - vrd=savevrd[a] - for r in _rules: - if '_depend' not in r: - continue - if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r, vrd, var[a]) - rd=dictappend(rd, ar) - if '_break' in r: - break - if 'check' in var[a]: - for c in var[a]['check']: - vrd['check']=c - ar=applyrules(check_rules, vrd, var[a]) - rd=dictappend(rd, ar) - if isinstance(rd['cleanupfrompyobj'], list): - rd['cleanupfrompyobj'].reverse() - if isinstance(rd['closepyobjfrom'], list): - rd['closepyobjfrom'].reverse() - rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#', - {'docsign':rd['docsign'], - 'docsignopt':rd['docsignopt'], - 'docsignxa':rd['docsignxa']})) - optargs=stripcomma(replace('#docsignopt##docsignxa#', - {'docsignxa':rd['docsignxashort'], - 'docsignopt':rd['docsignoptshort']} - )) - if optargs=='': - rd['docsignatureshort']=stripcomma(replace('#docsign#', {'docsign':rd['docsign']})) - else: - rd['docsignatureshort']=replace('#docsign#[#docsignopt#]', - {'docsign': rd['docsign'], - 'docsignopt': optargs, - }) - rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_', '\\_') - rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',', ', ') - cfs=stripcomma(replace('#callfortran##callfortranappend#', {'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) - if len(rd['callfortranappend'])>1: - rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#', {'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) - else: - rd['callcompaqfortran']=cfs - rd['callfortran']=cfs - if 
isinstance(rd['docreturn'], list): - rd['docreturn']=stripcomma(replace('#docreturn#', {'docreturn':rd['docreturn']}))+' = ' - rd['docstrsigns']=[] - rd['latexdocstrsigns']=[] - for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: - if k in rd and isinstance(rd[k], list): - rd['docstrsigns']=rd['docstrsigns']+rd[k] - k='latex'+k - if k in rd and isinstance(rd[k], list): - rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ - ['\\begin{description}']+rd[k][1:]+\ - ['\\end{description}'] - - # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720 - if rd['keyformat'] or rd['xaformat']: - argformat = rd['argformat'] - if isinstance(argformat, list): - argformat.append('|') - else: - assert isinstance(argformat, str), repr((argformat, type(argformat))) - rd['argformat'] += '|' - - ar=applyrules(routine_rules, rd) - if ismoduleroutine(rout): - outmess('\t\t\t %s\n'%(ar['docshort'])) - else: - outmess('\t\t %s\n'%(ar['docshort'])) - return ar, wrap - - -#################### EOF rules.py ####################### diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/setup.py deleted file mode 100644 index 2f1fd6a015076..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/setup.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -""" -setup.py for installing F2PY - -Usage: - python setup.py install - -Copyright 2001-2005 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Revision: 1.32 $ -$Date: 2005/01/30 17:22:14 $ -Pearu Peterson - -""" -from __future__ import division, print_function - -__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $" - -import os -import sys -from distutils.dep_util import newer -from numpy.distutils import log -from numpy.distutils.core import setup -from numpy.distutils.misc_util import Configuration - -from __version__ import version - -def configuration(parent_package='',top_path=None): - config = Configuration('f2py', parent_package, top_path) - - config.add_data_dir('docs') - config.add_data_dir('tests') - - config.add_data_files('src/fortranobject.c', - 'src/fortranobject.h', - 'f2py.1' - ) - - config.make_svn_version_py() - - def generate_f2py_py(build_dir): - f2py_exe = 'f2py'+os.path.basename(sys.executable)[6:] - if f2py_exe[-4:]=='.exe': - f2py_exe = f2py_exe[:-4] + '.py' - if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py': - f2py_exe = f2py_exe + '.py' - target = os.path.join(build_dir, f2py_exe) - if newer(__file__, target): - log.info('Creating %s', target) - f = open(target, 'w') - f.write('''\ -#!%s -# See http://cens.ioc.ee/projects/f2py2e/ -import os, sys -for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]: - try: - i=sys.argv.index("--"+mode) - del sys.argv[i] - break - except ValueError: pass -os.environ["NO_SCIPY_IMPORT"]="f2py" -if mode=="g3-numpy": - sys.stderr.write("G3 f2py support is not implemented, yet.\\n") - sys.exit(1) -elif mode=="2e-numeric": - from f2py2e import main -elif mode=="2e-numarray": - sys.argv.append("-DNUMARRAY") - from f2py2e import main -elif mode=="2e-numpy": - from numpy.f2py import main -else: - sys.stderr.write("Unknown mode: " + repr(mode) + "\\n") - sys.exit(1) -main() -'''%(sys.executable)) - f.close() - return target - - config.add_scripts(generate_f2py_py) - - log.info('F2PY Version %s', 
config.get_version()) - - return config - -if __name__ == "__main__": - - config = configuration(top_path='') - version = config.get_version() - print('F2PY Version', version) - config = config.todict() - - if sys.version[:3]>='2.3': - config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ - "/F2PY-2-latest.tar.gz" - config['classifiers'] = [ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: NumPy License', - 'Natural Language :: English', - 'Operating System :: OS Independent', - 'Programming Language :: C', - 'Programming Language :: Fortran', - 'Programming Language :: Python', - 'Topic :: Scientific/Engineering', - 'Topic :: Software Development :: Code Generators', - ] - setup(version=version, - description = "F2PY - Fortran to Python Interface Generation", - author = "Pearu Peterson", - author_email = "pearu@cens.ioc.ee", - maintainer = "Pearu Peterson", - maintainer_email = "pearu@cens.ioc.ee", - license = "BSD", - platforms = "Unix, Windows (mingw|cygwin), Mac OSX", - long_description = """\ -The Fortran to Python Interface Generator, or F2PY for short, is a -command line tool (f2py) for generating Python C/API modules for -wrapping Fortran 77/90/95 subroutines, accessing common blocks from -Python, and calling Python functions from Fortran (call-backs). -Interfacing subroutines/data from Fortran 90/95 modules is supported.""", - url = "http://cens.ioc.ee/projects/f2py2e/", - keywords = ['Fortran', 'f2py'], - **config) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/src/fortranobject.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/src/fortranobject.h deleted file mode 100644 index 689f78c923b06..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/src/fortranobject.h +++ /dev/null @@ -1,162 +0,0 @@ -#ifndef Py_FORTRANOBJECT_H -#define Py_FORTRANOBJECT_H -#ifdef __cplusplus -extern "C" { -#endif - -#include "Python.h" - -#ifdef FORTRANOBJECT_C -#define NO_IMPORT_ARRAY -#endif -#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API -#include "numpy/arrayobject.h" - -/* - * Python 3 support macros - */ -#if PY_VERSION_HEX >= 0x03000000 -#define PyString_Check PyBytes_Check -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_FromString PyBytes_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString - -#define PyInt_Check PyLong_Check -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsLong PyLong_AsLong - -#define PyNumber_Int PyNumber_Long - -#else - -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#endif - - -#ifdef F2PY_REPORT_ATEXIT -#include <sys/time.h> - extern void f2py_start_clock(void); - extern void f2py_stop_clock(void); - extern void f2py_start_call_clock(void); - extern void f2py_stop_call_clock(void); - extern void f2py_cb_start_clock(void); - extern void f2py_cb_stop_clock(void); - extern void f2py_cb_start_call_clock(void); - extern void f2py_cb_stop_call_clock(void); - extern void f2py_report_on_exit(int,void*); -#endif - -#ifdef DMALLOC -#include "dmalloc.h" -#endif - -/* Fortran object interface */ - -/* -123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 - -PyFortranObject represents various Fortran objects: -Fortran (module) routines, COMMON blocks, module data.
- -Author: Pearu Peterson -*/ - -#define F2PY_MAX_DIMS 40 - -typedef void (*f2py_set_data_func)(char*,npy_intp*); -typedef void (*f2py_void_func)(void); -typedef void (*f2py_init_func)(int*,npy_intp*,f2py_set_data_func,int*); - - /*typedef void* (*f2py_c_func)(void*,...);*/ - -typedef void *(*f2pycfunc)(void); - -typedef struct { - char *name; /* attribute (array||routine) name */ - int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, - || rank=-1 for Fortran routine */ - struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ - int type; /* PyArray_ || not used */ - char *data; /* pointer to array || Fortran routine */ - f2py_init_func func; /* initialization function for - allocatable arrays: - func(&rank,dims,set_ptr_func,name,len(name)) - || C/API wrapper for Fortran routine */ - char *doc; /* documentation string; only recommended - for routines. */ -} FortranDataDef; - -typedef struct { - PyObject_HEAD - int len; /* Number of attributes */ - FortranDataDef *defs; /* An array of FortranDataDef's */ - PyObject *dict; /* Fortran object attribute dictionary */ -} PyFortranObject; - -#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) -#define PyFortran_Check1(op) (0==strcmp(Py_TYPE(op)->tp_name,"fortran")) - - extern PyTypeObject PyFortran_Type; - extern int F2PyDict_SetItemString(PyObject* dict, char *name, PyObject *obj); - extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); - extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); - -#if PY_VERSION_HEX >= 0x03000000 - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); -void * F2PyCapsule_AsVoidPtr(PyObject *obj); -int F2PyCapsule_Check(PyObject *ptr); - -#else - -PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)); -void * F2PyCapsule_AsVoidPtr(PyObject *ptr); -int F2PyCapsule_Check(PyObject *ptr); - -#endif - -#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS) -#define F2PY_INTENT_IN 1 -#define F2PY_INTENT_INOUT 2 -#define F2PY_INTENT_OUT 4 -#define F2PY_INTENT_HIDE 8 -#define F2PY_INTENT_CACHE 16 -#define F2PY_INTENT_COPY 32 -#define F2PY_INTENT_C 64 -#define F2PY_OPTIONAL 128 -#define F2PY_INTENT_INPLACE 256 -#define F2PY_INTENT_ALIGNED4 512 -#define F2PY_INTENT_ALIGNED8 1024 -#define F2PY_INTENT_ALIGNED16 2048 - -#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) -#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) -#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) -#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) - -#define F2PY_GET_ALIGNMENT(intent) \ - (F2PY_ALIGN4(intent) ? 4 : \ - (F2PY_ALIGN8(intent) ? 8 : \ - (F2PY_ALIGN16(intent) ? 
16 : 1) )) -#define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) - - extern PyArrayObject* array_from_pyobj(const int type_num, - npy_intp *dims, - const int rank, - const int intent, - PyObject *obj); - extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); - -#ifdef DEBUG_COPY_ND_ARRAY - extern void dump_attrs(const PyArrayObject* arr); -#endif - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_FORTRANOBJECT_H */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap deleted file mode 100644 index 2665f89b52d2f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap +++ /dev/null @@ -1 +0,0 @@ -dict(real=dict(rk="double")) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_free.f90 deleted file mode 100644 index b301710f5dda0..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_free.f90 +++ /dev/null @@ -1,34 +0,0 @@ - -subroutine sum(x, res) - implicit none - real, intent(in) :: x(:) - real, intent(out) :: res - - integer :: i - - !print *, "sum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end subroutine sum - -function fsum(x) result (res) - implicit none - real, intent(in) :: x(:) - real :: res - - integer :: i - - !print *, "fsum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end function fsum diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 deleted file mode 100644 index cbe6317ed8f39..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 +++ /dev/null @@ -1,41 +0,0 @@ - -module mod - -contains - -subroutine sum(x, res) - implicit none - real, intent(in) :: x(:) - real, intent(out) :: res - - integer :: i - - !print *, "sum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end subroutine sum - -function fsum(x) result (res) - implicit none - real, intent(in) :: x(:) - real :: res - - integer :: i - - !print *, "fsum: size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - -end function fsum - - -end module mod diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_use.f90 deleted file mode 100644 index 337465ac54044..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/foo_use.f90 +++ /dev/null @@ -1,19 +0,0 @@ -subroutine sum_with_use(x, res) - use precision - - implicit none - - real(kind=rk), intent(in) :: x(:) - real(kind=rk), intent(out) :: res - - integer :: i - - !print *, "size(x) = ", size(x) - - res = 0.0 - - do i = 1, size(x) - res = res + x(i) - enddo - - end subroutine diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/precision.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/precision.f90 deleted file mode 100644 index ed6c70cbbe7da..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/assumed_shape/precision.f90 +++ /dev/null @@ 
-1,4 +0,0 @@ -module precision - integer, parameter :: rk = selected_real_kind(8) - integer, parameter :: ik = selected_real_kind(4) -end module diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/kind/foo.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/kind/foo.f90 deleted file mode 100644 index d3d15cfb20a15..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/kind/foo.f90 +++ /dev/null @@ -1,20 +0,0 @@ - - -subroutine selectedrealkind(p, r, res) - implicit none - - integer, intent(in) :: p, r - !f2py integer :: r=0 - integer, intent(out) :: res - res = selected_real_kind(p, r) - -end subroutine - -subroutine selectedintkind(p, res) - implicit none - - integer, intent(in) :: p - integer, intent(out) :: res - res = selected_int_kind(p) - -end subroutine diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo.f b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo.f deleted file mode 100644 index c34742578f855..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo.f +++ /dev/null @@ -1,5 +0,0 @@ - subroutine bar11(a) -cf2py intent(out) a - integer a - a = 11 - end diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_fixed.f90 deleted file mode 100644 index 7543a6acb7375..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_fixed.f90 +++ /dev/null @@ -1,8 +0,0 @@ - module foo_fixed - contains - subroutine bar12(a) -!f2py intent(out) a - integer a - a = 12 - end subroutine bar12 - end module foo_fixed diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_free.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_free.f90 deleted file mode 100644 index c1b641f13ec29..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/mixed/foo_free.f90 +++ /dev/null @@ -1,8 +0,0 @@ -module foo_free -contains - subroutine bar13(a) - !f2py intent(out) a - integer a - a = 13 - end subroutine bar13 -end module foo_free diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/size/foo.f90 b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/size/foo.f90 deleted file mode 100644 index 5b66f8c430d79..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/src/size/foo.f90 +++ /dev/null @@ -1,44 +0,0 @@ - -subroutine foo(a, n, m, b) - implicit none - - real, intent(in) :: a(n, m) - integer, intent(in) :: n, m - real, intent(out) :: b(size(a, 1)) - - integer :: i - - do i = 1, size(b) - b(i) = sum(a(i,:)) - enddo -end subroutine - -subroutine trans(x,y) - implicit none - real, intent(in), dimension(:,:) :: x - real, intent(out), dimension( size(x,2), size(x,1) ) :: y - integer :: N, M, i, j - N = size(x,1) - M = size(x,2) - DO i=1,N - do j=1,M - y(j,i) = x(i,j) - END DO - END DO -end subroutine trans - -subroutine flatten(x,y) - implicit none - real, intent(in), dimension(:,:) :: x - real, intent(out), dimension( size(x) ) :: y - integer :: N, M, i, j, k - N = size(x,1) - M = size(x,2) - k = 1 - DO i=1,N - do j=1,M - y(k) = x(i,j) - k = k + 1 - END DO - END DO -end subroutine flatten diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_array_from_pyobj.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_array_from_pyobj.py deleted file mode 100644 index c51fa39363e4f..0000000000000 --- 
a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_array_from_pyobj.py +++ /dev/null @@ -1,559 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import unittest -import os -import sys -import copy -import platform - -import nose - -from numpy.testing import * -from numpy import (array, alltrue, ndarray, asarray, can_cast, zeros, dtype, - intp, clongdouble) -from numpy.core.multiarray import typeinfo - -import util - -wrap = None -def setup(): - """ - Build the required testing extension module - - """ - global wrap - - # Check compiler availability first - if not util.has_c_compiler(): - raise nose.SkipTest("No C compiler available") - - if wrap is None: - config_code = """ - config.add_extension('test_array_from_pyobj_ext', - sources=['wrapmodule.c', 'fortranobject.c'], - define_macros=[]) - """ - d = os.path.dirname(__file__) - src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), - os.path.join(d, '..', 'src', 'fortranobject.c'), - os.path.join(d, '..', 'src', 'fortranobject.h')] - wrap = util.build_module_distutils(src, config_code, - 'test_array_from_pyobj_ext') - -def flags_info(arr): - flags = wrap.array_attrs(arr)[6] - return flags2names(flags) - -def flags2names(flags): - info = [] - for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', - 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', - 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', - 'CARRAY', 'FARRAY' - ]: - if abs(flags) & getattr(wrap, flagname, 0): - info.append(flagname) - return info - -class Intent(object): - def __init__(self,intent_list=[]): - self.intent_list = intent_list[:] - flags = 0 - for i in intent_list: - if i=='optional': - flags |= wrap.F2PY_OPTIONAL - else: - flags |= getattr(wrap, 'F2PY_INTENT_'+i.upper()) - self.flags = flags - def __getattr__(self, name): - name = name.lower() - if name=='in_': name='in' - return self.__class__(self.intent_list+[name]) - def __str__(self): - return 'intent(%s)' % (','.join(self.intent_list)) - def __repr__(self): - return 'Intent(%r)' % (self.intent_list) - def is_intent(self,*names): - for name in names: - if name not in self.intent_list: - return False - return True - def is_intent_exact(self,*names): - return len(self.intent_list)==len(names) and self.is_intent(*names) - -intent = Intent() - -_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', - 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', - 'FLOAT', 'DOUBLE', 'CFLOAT'] - -_cast_dict = {'BOOL':['BOOL']} -_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] -_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] -_cast_dict['BYTE'] = ['BYTE'] -_cast_dict['UBYTE'] = ['UBYTE'] -_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] -_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] -_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] -_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] - -_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] -_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] - -_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] -_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] - -_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] -_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] - -_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] - -# 32 bit system malloc typically does not provide the alignment required by -# 16 byte long double types this means the inout intent cannot be satisfied and -# several tests fail as the alignment flag 
can be randomly true or false -# when numpy gains an aligned allocator the tests could be enabled again -if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and - sys.platform != 'win32'): - _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) - _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ - ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] - _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ - ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] - _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] - -class Type(object): - _type_cache = {} - - def __new__(cls, name): - if isinstance(name, dtype): - dtype0 = name - name = None - for n, i in typeinfo.items(): - if isinstance(i, tuple) and dtype0.type is i[-1]: - name = n - break - obj = cls._type_cache.get(name.upper(), None) - if obj is not None: - return obj - obj = object.__new__(cls) - obj._init(name) - cls._type_cache[name.upper()] = obj - return obj - - def _init(self, name): - self.NAME = name.upper() - self.type_num = getattr(wrap, 'NPY_'+self.NAME) - assert_equal(self.type_num, typeinfo[self.NAME][1]) - self.dtype = typeinfo[self.NAME][-1] - self.elsize = typeinfo[self.NAME][2] / 8 - self.dtypechar = typeinfo[self.NAME][0] - - def cast_types(self): - return [self.__class__(_m) for _m in _cast_dict[self.NAME]] - - def all_types(self): - return [self.__class__(_m) for _m in _type_names] - - def smaller_types(self): - bits = typeinfo[self.NAME][3] - types = [] - for name in _type_names: - if typeinfo[name][3] < bits: - types.append(Type(name)) - return types - - def larger_types(self): - bits = typeinfo[self.NAME][3] - types = [] - for name in _type_names: - if typeinfo[name][3] > bits: - types.append(Type(name)) - return types - -class Array(object): - def __init__(self, typ, dims, intent, obj): - self.type = typ - self.dims = dims - self.intent = intent - self.obj_copy = copy.deepcopy(obj) - self.obj = obj - - # arr.dtypechar may be different from typ.dtypechar - self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) - - assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) - - self.arr_attr = wrap.array_attrs(self.arr) - - if len(dims)>1: - if self.intent.is_intent('c'): - assert_(intent.flags & wrap.F2PY_INTENT_C) - assert_(not self.arr.flags['FORTRAN'], repr((self.arr.flags, getattr(obj, 'flags', None)))) - assert_(self.arr.flags['CONTIGUOUS']) - assert_(not self.arr_attr[6] & wrap.FORTRAN) - else: - assert_(not intent.flags & wrap.F2PY_INTENT_C) - assert_(self.arr.flags['FORTRAN']) - assert_(not self.arr.flags['CONTIGUOUS']) - assert_(self.arr_attr[6] & wrap.FORTRAN) - - if obj is None: - self.pyarr = None - self.pyarr_attr = None - return - - if intent.is_intent('cache'): - assert_(isinstance(obj, ndarray), repr(type(obj))) - self.pyarr = array(obj).reshape(*dims).copy() - else: - self.pyarr = array(array(obj, - dtype = typ.dtypechar).reshape(*dims), - order=self.intent.is_intent('c') and 'C' or 'F') - assert_(self.pyarr.dtype == typ, \ - repr((self.pyarr.dtype, typ))) - assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) - self.pyarr_attr = wrap.array_attrs(self.pyarr) - - if len(dims)>1: - if self.intent.is_intent('c'): - assert_(not self.pyarr.flags['FORTRAN']) - assert_(self.pyarr.flags['CONTIGUOUS']) - assert_(not self.pyarr_attr[6] & wrap.FORTRAN) - else: - assert_(self.pyarr.flags['FORTRAN']) - assert_(not self.pyarr.flags['CONTIGUOUS']) - assert_(self.pyarr_attr[6] & wrap.FORTRAN) - - - assert_(self.arr_attr[1]==self.pyarr_attr[1]) # nd - assert_(self.arr_attr[2]==self.pyarr_attr[2]) # dimensions - if self.arr_attr[1]<=1: - assert_(self.arr_attr[3]==self.pyarr_attr[3],\ - repr((self.arr_attr[3], self.pyarr_attr[3], - self.arr.tobytes(),
self.pyarr.tobytes()))) # strides - assert_(self.arr_attr[5][-2:]==self.pyarr_attr[5][-2:],\ - repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr - assert_(self.arr_attr[6]==self.pyarr_attr[6],\ - repr((self.arr_attr[6], self.pyarr_attr[6], flags2names(0*self.arr_attr[6]-self.pyarr_attr[6]), flags2names(self.arr_attr[6]), intent))) # flags - - if intent.is_intent('cache'): - assert_(self.arr_attr[5][3]>=self.type.elsize,\ - repr((self.arr_attr[5][3], self.type.elsize))) - else: - assert_(self.arr_attr[5][3]==self.type.elsize,\ - repr((self.arr_attr[5][3], self.type.elsize))) - assert_(self.arr_equal(self.pyarr, self.arr)) - - if isinstance(self.obj, ndarray): - if typ.elsize==Type(obj.dtype).elsize: - if not intent.is_intent('copy') and self.arr_attr[1]<=1: - assert_(self.has_shared_memory()) - - def arr_equal(self, arr1, arr2): - if arr1.shape != arr2.shape: - return False - s = arr1==arr2 - return alltrue(s.flatten()) - - def __str__(self): - return str(self.arr) - - def has_shared_memory(self): - """Check that created array shares data with input array. - """ - if self.obj is self.arr: - return True - if not isinstance(self.obj, ndarray): - return False - obj_attr = wrap.array_attrs(self.obj) - return obj_attr[0]==self.arr_attr[0] - -################################################## - -class test_intent(unittest.TestCase): - def test_in_out(self): - assert_equal(str(intent.in_.out), 'intent(in,out)') - assert_(intent.in_.c.is_intent('c')) - assert_(not intent.in_.c.is_intent_exact('c')) - assert_(intent.in_.c.is_intent_exact('c', 'in')) - assert_(intent.in_.c.is_intent_exact('in', 'c')) - assert_(not intent.in_.is_intent('c')) - -class _test_shared_memory: - num2seq = [1, 2] - num23seq = [[1, 2, 3], [4, 5, 6]] - def test_in_from_2seq(self): - a = self.array([2], intent.in_, self.num2seq) - assert_(not a.has_shared_memory()) - - def test_in_from_2casttype(self): - for t in self.type.cast_types(): - obj = array(self.num2seq, dtype=t.dtype) - a = self.array([len(self.num2seq)], intent.in_, obj) - if t.elsize==self.type.elsize: - assert_(a.has_shared_memory(), repr((self.type.dtype, t.dtype))) - else: - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_inout_2seq(self): - obj = array(self.num2seq, dtype=self.type.dtype) - a = self.array([len(self.num2seq)], intent.inout, obj) - assert_(a.has_shared_memory()) - - try: - a = self.array([2], intent.in_.inout, self.num2seq) - except TypeError as msg: - if not str(msg).startswith('failed to initialize intent(inout|inplace|cache) array'): - raise - else: - raise SystemError('intent(inout) should have failed on sequence') - - def test_f_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype, order='F') - shape = (len(self.num23seq), len(self.num23seq[0])) - a = self.array(shape, intent.in_.inout, obj) - assert_(a.has_shared_memory()) - - obj = array(self.num23seq, dtype=self.type.dtype, order='C') - shape = (len(self.num23seq), len(self.num23seq[0])) - try: - a = self.array(shape, intent.in_.inout, obj) - except ValueError as msg: - if not str(msg).startswith('failed to initialize intent(inout) array'): - raise - else: - raise SystemError('intent(inout) should have failed on improper array') - - def test_c_inout_23seq(self): - obj = array(self.num23seq, dtype=self.type.dtype) - shape = (len(self.num23seq), len(self.num23seq[0])) - a = self.array(shape, intent.in_.c.inout, obj) - assert_(a.has_shared_memory()) - - def test_in_copy_from_2casttype(self): - for t in self.type.cast_types(): - obj = array(self.num2seq, 
dtype=t.dtype) - a = self.array([len(self.num2seq)], intent.in_.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_c_in_from_23seq(self): - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, self.num23seq) - assert_(not a.has_shared_memory()) - - def test_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_f_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_, obj) - if t.elsize==self.type.elsize: - assert_(a.has_shared_memory(), repr(t.dtype)) - else: - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_c_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.c, obj) - if t.elsize==self.type.elsize: - assert_(a.has_shared_memory(), repr(t.dtype)) - else: - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_f_copy_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype, order='F') - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_c_copy_in_from_23casttype(self): - for t in self.type.cast_types(): - obj = array(self.num23seq, dtype=t.dtype) - a = self.array([len(self.num23seq), len(self.num23seq[0])], - intent.in_.c.copy, obj) - assert_(not a.has_shared_memory(), repr(t.dtype)) - - def test_in_cache_from_2casttype(self): - for t in self.type.all_types(): - if t.elsize != self.type.elsize: - continue - obj = array(self.num2seq, dtype=t.dtype) - shape = (len(self.num2seq),) - a = self.array(shape, intent.in_.c.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - a = self.array(shape, intent.in_.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - obj = array(self.num2seq, dtype=t.dtype, order='F') - a = self.array(shape, intent.in_.c.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - a = self.array(shape, intent.in_.cache, obj) - assert_(a.has_shared_memory(), repr(t.dtype)) - - try: - a = self.array(shape, intent.in_.cache, obj[::-1]) - except ValueError as msg: - if not str(msg).startswith('failed to initialize intent(cache) array'): - raise - else: - raise SystemError('intent(cache) should have failed on multisegmented array') - def test_in_cache_from_2casttype_failure(self): - for t in self.type.all_types(): - if t.elsize >= self.type.elsize: - continue - obj = array(self.num2seq, dtype=t.dtype) - shape = (len(self.num2seq),) - try: - a = self.array(shape, intent.in_.cache, obj) - except ValueError as msg: - if not str(msg).startswith('failed to initialize intent(cache) array'): - raise - else: - raise SystemError('intent(cache) should have failed on smaller array') - - def test_cache_hidden(self): - shape = (2,) - a = self.array(shape, intent.cache.hide, None) - assert_(a.arr.shape==shape) - - shape = (2, 3) - a = self.array(shape, intent.cache.hide, None) - assert_(a.arr.shape==shape) - - shape = (-1, 3) - try: - a = self.array(shape, intent.cache.hide, None) - except ValueError as msg: - if not str(msg).startswith('failed to create intent(cache|hide)|optional array'): - raise - 
else: - raise SystemError('intent(cache) should have failed on undefined dimensions') - - def test_hidden(self): - shape = (2,) - a = self.array(shape, intent.hide, None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - - shape = (2, 3) - a = self.array(shape, intent.hide, None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - - shape = (2, 3) - a = self.array(shape, intent.c.hide, None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - - shape = (-1, 3) - try: - a = self.array(shape, intent.hide, None) - except ValueError as msg: - if not str(msg).startswith('failed to create intent(cache|hide)|optional array'): - raise - else: - raise SystemError('intent(hide) should have failed on undefined dimensions') - - def test_optional_none(self): - shape = (2,) - a = self.array(shape, intent.optional, None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - - shape = (2, 3) - a = self.array(shape, intent.optional, None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - - shape = (2, 3) - a = self.array(shape, intent.c.optional, None) - assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - - def test_optional_from_2seq(self): - obj = self.num2seq - shape = (len(obj),) - a = self.array(shape, intent.optional, obj) - assert_(a.arr.shape==shape) - assert_(not a.has_shared_memory()) - - def test_optional_from_23seq(self): - obj = self.num23seq - shape = (len(obj), len(obj[0])) - a = self.array(shape, intent.optional, obj) - assert_(a.arr.shape==shape) - assert_(not a.has_shared_memory()) - - a = self.array(shape, intent.optional.c, obj) - assert_(a.arr.shape==shape) - assert_(not a.has_shared_memory()) - - def test_inplace(self): - obj = array(self.num23seq, dtype=self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) - shape = obj.shape - a = self.array(shape, intent.inplace, obj) - assert_(obj[1][2]==a.arr[1][2], repr((obj, a.arr))) - a.arr[1][2]=54 - assert_(obj[1][2]==a.arr[1][2]==array(54, dtype=self.type.dtype), repr((obj, a.arr))) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! - assert_(not obj.flags['CONTIGUOUS']) - - def test_inplace_from_casttype(self): - for t in self.type.cast_types(): - if t is self.type: - continue - obj = array(self.num23seq, dtype=t.dtype) - assert_(obj.dtype.type==t.dtype) - assert_(obj.dtype.type is not self.type.dtype) - assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) - shape = obj.shape - a = self.array(shape, intent.inplace, obj) - assert_(obj[1][2]==a.arr[1][2], repr((obj, a.arr))) - a.arr[1][2]=54 - assert_(obj[1][2]==a.arr[1][2]==array(54, dtype=self.type.dtype), repr((obj, a.arr))) - assert_(a.arr is obj) - assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! - assert_(not obj.flags['CONTIGUOUS']) - assert_(obj.dtype.type is self.type.dtype) # obj type is changed inplace! 
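
The deleted test suite above drives array_from_pyobj through every supported f2py intent; a compound intent such as intent(in,c) is simply the bitwise OR of the F2PY_INTENT_* / F2PY_OPTIONAL constants from fortranobject.h earlier in this patch, which is exactly what the tests' Intent helper computes. A minimal sketch of that bit arithmetic, assuming only the constant values shown in the header (the F2PY_BITS table and intent_flags helper below are illustrative names, not part of the deleted code):

    # Bit values copied from the F2PY_INTENT_* / F2PY_OPTIONAL #defines
    # in fortranobject.h above.
    F2PY_BITS = {
        'in': 1, 'inout': 2, 'out': 4, 'hide': 8,
        'cache': 16, 'copy': 32, 'c': 64, 'optional': 128,
        'inplace': 256,
    }

    def intent_flags(*names):
        """Combine intent names into the single flags word that the tests
        pass to array_from_pyobj, e.g. intent_flags('in', 'c')."""
        flags = 0
        for name in names:
            flags |= F2PY_BITS[name]  # compound intents are OR-ed bits
        return flags

    assert intent_flags('in', 'c') == 65  # F2PY_INTENT_IN (1) | F2PY_INTENT_C (64)

So the intent.in_.c object used throughout these tests carries the flags word 65, which array_from_pyobj decodes back into its individual requirements (C contiguity, copy semantics, and so on).
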
- - -for t in _type_names: - exec('''\ -class test_%s_gen(unittest.TestCase, - _test_shared_memory - ): - def setUp(self): - self.type = Type(%r) - array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj) -''' % (t, t, t)) - -if __name__ == "__main__": - setup() - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_assumed_shape.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_assumed_shape.py deleted file mode 100644 index d6beaee63dfd5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_assumed_shape.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import math - -from numpy.testing import * -from numpy import array - -import util - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestAssumedShapeSumExample(util.F2PyTest): - sources = [_path('src', 'assumed_shape', 'foo_free.f90'), - _path('src', 'assumed_shape', 'foo_use.f90'), - _path('src', 'assumed_shape', 'precision.f90'), - _path('src', 'assumed_shape', 'foo_mod.f90'), - ] - - @dec.slow - def test_all(self): - r = self.module.fsum([1, 2]) - assert_(r==3, repr(r)) - r = self.module.sum([1, 2]) - assert_(r==3, repr(r)) - r = self.module.sum_with_use([1, 2]) - assert_(r==3, repr(r)) - - r = self.module.mod.sum([1, 2]) - assert_(r==3, repr(r)) - r = self.module.mod.fsum([1, 2]) - assert_(r==3, repr(r)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_callback.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_callback.py deleted file mode 100644 index 16464140f14cd..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_callback.py +++ /dev/null @@ -1,132 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy import array -import math -import util -import textwrap - -class TestF77Callback(util.F2PyTest): - code = """ - subroutine t(fun,a) - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine func(a) -cf2py intent(in,out) a - integer a - a = a + 11 - end - - subroutine func0(a) -cf2py intent(out) a - integer a - a = 11 - end - - subroutine t2(a) -cf2py intent(callback) fun - integer a -cf2py intent(out) a - external fun - call fun(a) - end - - subroutine string_callback(callback, a) - external callback - double precision callback - double precision a - character*1 r -cf2py intent(out) a - r = 'r' - a = callback(r) - end - - """ - - @dec.slow - def test_all(self): - for name in "t,t2".split(","): - self.check_function(name) - - @dec.slow - def test_docstring(self): - expected = """ - a = t(fun,[fun_extra_args]) - - Wrapper for ``t``. 
- - Parameters - ---------- - fun : call-back function - - Other Parameters - ---------------- - fun_extra_args : input tuple, optional - Default: () - - Returns - ------- - a : int - - Notes - ----- - Call-back functions:: - - def fun(): return a - Return objects: - a : int - """ - assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip()) - - def check_function(self, name): - t = getattr(self.module, name) - r = t(lambda : 4) - assert_( r==4, repr(r)) - r = t(lambda a:5, fun_extra_args=(6,)) - assert_( r==5, repr(r)) - r = t(lambda a:a, fun_extra_args=(6,)) - assert_( r==6, repr(r)) - r = t(lambda a:5+a, fun_extra_args=(7,)) - assert_( r==12, repr(r)) - r = t(lambda a:math.degrees(a), fun_extra_args=(math.pi,)) - assert_( r==180, repr(r)) - r = t(math.degrees, fun_extra_args=(math.pi,)) - assert_( r==180, repr(r)) - - r = t(self.module.func, fun_extra_args=(6,)) - assert_( r==17, repr(r)) - r = t(self.module.func0) - assert_( r==11, repr(r)) - r = t(self.module.func0._cpointer) - assert_( r==11, repr(r)) - class A(object): - def __call__(self): - return 7 - def mth(self): - return 9 - a = A() - r = t(a) - assert_( r==7, repr(r)) - r = t(a.mth) - assert_( r==9, repr(r)) - - def test_string_callback(self): - - def callback(code): - if code == 'r': - return 0 - else: - return 1 - - f = getattr(self.module, 'string_callback') - r = f(callback) - assert_(r == 0, repr(r)) - - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_kind.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_kind.py deleted file mode 100644 index f96fbffdb51be..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_kind.py +++ /dev/null @@ -1,36 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import math - -from numpy.testing import * -from numpy import array - -import util - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -from numpy.f2py.crackfortran import _selected_int_kind_func as selected_int_kind -from numpy.f2py.crackfortran import _selected_real_kind_func as selected_real_kind - -class TestKind(util.F2PyTest): - sources = [_path('src', 'kind', 'foo.f90'), - ] - - @dec.slow - def test_all(self): - selectedrealkind = self.module.selectedrealkind - selectedintkind = self.module.selectedintkind - - for i in range(40): - assert_(selectedintkind(i) in [selected_int_kind(i), -1],\ - 'selectedintkind(%s): expected %r but got %r' % (i, selected_int_kind(i), selectedintkind(i))) - - for i in range(20): - assert_(selectedrealkind(i) in [selected_real_kind(i), -1],\ - 'selectedrealkind(%s): expected %r but got %r' % (i, selected_real_kind(i), selectedrealkind(i))) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_mixed.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_mixed.py deleted file mode 100644 index c4cb4889bcb3a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_mixed.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import math - -from numpy.testing import * -from numpy import array - -import util -import textwrap - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestMixed(util.F2PyTest): - sources = [_path('src', 'mixed', 'foo.f'), - _path('src', 'mixed', 'foo_fixed.f90'), - _path('src', 'mixed', 
'foo_free.f90')] - - @dec.slow - def test_all(self): - assert_( self.module.bar11() == 11) - assert_( self.module.foo_fixed.bar12() == 12) - assert_( self.module.foo_free.bar13() == 13) - - @dec.slow - def test_docstring(self): - expected = """ - a = bar11() - - Wrapper for ``bar11``. - - Returns - ------- - a : int - """ - assert_equal(self.module.bar11.__doc__, textwrap.dedent(expected).lstrip()) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_character.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_character.py deleted file mode 100644 index 0865d54b3eb01..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_character.py +++ /dev/null @@ -1,142 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy import array -from numpy.compat import asbytes -import util - -class TestReturnCharacter(util.F2PyTest): - def check_function(self, t): - tname = t.__doc__.split()[0] - if tname in ['t0', 't1', 's0', 's1']: - assert_( t(23)==asbytes('2')) - r = t('ab');assert_( r==asbytes('a'), repr(r)) - r = t(array('ab'));assert_( r==asbytes('a'), repr(r)) - r = t(array(77, 'u1'));assert_( r==asbytes('M'), repr(r)) - #assert_(_raises(ValueError, t, array([77,87]))) - #assert_(_raises(ValueError, t, array(77))) - elif tname in ['ts', 'ss']: - assert_( t(23)==asbytes('23 '), repr(t(23))) - assert_( t('123456789abcdef')==asbytes('123456789a')) - elif tname in ['t5', 's5']: - assert_( t(23)==asbytes('23 '), repr(t(23))) - assert_( t('ab')==asbytes('ab '), repr(t('ab'))) - assert_( t('123456789abcdef')==asbytes('12345')) - else: - raise NotImplementedError - -class TestF77ReturnCharacter(TestReturnCharacter): - code = """ - function t0(value) - character value - character t0 - t0 = value - end - function t1(value) - character*1 value - character*1 t1 - t1 = value - end - function t5(value) - character*5 value - character*5 t5 - t5 = value - end - function ts(value) - character*(*) value - character*(*) ts - ts = value - end - - subroutine s0(t0,value) - character value - character t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - character*1 value - character*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s5(t5,value) - character*5 value - character*5 t5 -cf2py intent(out) t5 - t5 = value - end - subroutine ss(ts,value) - character*(*) value - character*10 ts -cf2py intent(out) ts - ts = value - end - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t5,s0,s1,s5,ss".split(","): - self.check_function(getattr(self.module, name)) - -class TestF90ReturnCharacter(TestReturnCharacter): - suffix = ".f90" - code = """ -module f90_return_char - contains - function t0(value) - character :: value - character :: t0 - t0 = value - end function t0 - function t1(value) - character(len=1) :: value - character(len=1) :: t1 - t1 = value - end function t1 - function t5(value) - character(len=5) :: value - character(len=5) :: t5 - t5 = value - end function t5 - function ts(value) - character(len=*) :: value - character(len=10) :: ts - ts = value - end function ts - - subroutine s0(t0,value) - character :: value - character :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - character(len=1) :: value - character(len=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s5(t5,value) - character(len=5) :: value - 
character(len=5) :: t5 -!f2py intent(out) t5 - t5 = value - end subroutine s5 - subroutine ss(ts,value) - character(len=*) :: value - character(len=10) :: ts -!f2py intent(out) ts - ts = value - end subroutine ss -end module f90_return_char - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t5,ts,s0,s1,s5,ss".split(","): - self.check_function(getattr(self.module.f90_return_char, name)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_complex.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_complex.py deleted file mode 100644 index d144cecf16575..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_complex.py +++ /dev/null @@ -1,169 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy import array -from numpy.compat import long -import util - -class TestReturnComplex(util.F2PyTest): - def check_function(self, t): - tname = t.__doc__.split()[0] - if tname in ['t0', 't8', 's0', 's8']: - err = 1e-5 - else: - err = 0.0 - assert_( abs(t(234j)-234.0j)<=err) - assert_( abs(t(234.6)-234.6)<=err) - assert_( abs(t(long(234))-234.0)<=err) - assert_( abs(t(234.6+3j)-(234.6+3j))<=err) - #assert_( abs(t('234')-234.)<=err) - #assert_( abs(t('234.6')-234.6)<=err) - assert_( abs(t(-234)+234.)<=err) - assert_( abs(t([234])-234.)<=err) - assert_( abs(t((234,))-234.)<=err) - assert_( abs(t(array(234))-234.)<=err) - assert_( abs(t(array(23+4j, 'F'))-(23+4j))<=err) - assert_( abs(t(array([234]))-234.)<=err) - assert_( abs(t(array([[234]]))-234.)<=err) - assert_( abs(t(array([234], 'b'))+22.)<=err) - assert_( abs(t(array([234], 'h'))-234.)<=err) - assert_( abs(t(array([234], 'i'))-234.)<=err) - assert_( abs(t(array([234], 'l'))-234.)<=err) - assert_( abs(t(array([234], 'q'))-234.)<=err) - assert_( abs(t(array([234], 'f'))-234.)<=err) - assert_( abs(t(array([234], 'd'))-234.)<=err) - assert_( abs(t(array([234+3j], 'F'))-(234+3j))<=err) - assert_( abs(t(array([234], 'D'))-234.)<=err) - - #assert_raises(TypeError, t, array([234], 'a1')) - assert_raises(TypeError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(TypeError, t, t) - assert_raises(TypeError, t, {}) - - try: - r = t(10**400) - assert_( repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r)) - except OverflowError: - pass - - -class TestF77ReturnComplex(TestReturnComplex): - code = """ - function t0(value) - complex value - complex t0 - t0 = value - end - function t8(value) - complex*8 value - complex*8 t8 - t8 = value - end - function t16(value) - complex*16 value - complex*16 t16 - t16 = value - end - function td(value) - double complex value - double complex td - td = value - end - - subroutine s0(t0,value) - complex value - complex t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s8(t8,value) - complex*8 value - complex*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine s16(t16,value) - complex*16 value - complex*16 t16 -cf2py intent(out) t16 - t16 = value - end - subroutine sd(td,value) - double complex value - double complex td -cf2py intent(out) td - td = value - end - """ - - @dec.slow - def test_all(self): - for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnComplex(TestReturnComplex): - suffix = ".f90" - code = """ -module f90_return_complex - contains - function t0(value) - complex :: value - 
complex :: t0 - t0 = value - end function t0 - function t8(value) - complex(kind=4) :: value - complex(kind=4) :: t8 - t8 = value - end function t8 - function t16(value) - complex(kind=8) :: value - complex(kind=8) :: t16 - t16 = value - end function t16 - function td(value) - double complex :: value - double complex :: td - td = value - end function td - - subroutine s0(t0,value) - complex :: value - complex :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s8(t8,value) - complex(kind=4) :: value - complex(kind=4) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine s16(t16,value) - complex(kind=8) :: value - complex(kind=8) :: t16 -!f2py intent(out) t16 - t16 = value - end subroutine s16 - subroutine sd(td,value) - double complex :: value - double complex :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_complex - """ - - @dec.slow - def test_all(self): - for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","): - self.check_function(getattr(self.module.f90_return_complex, name)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_integer.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_integer.py deleted file mode 100644 index 056466208f6cb..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_integer.py +++ /dev/null @@ -1,178 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy import array -from numpy.compat import long -import util - -class TestReturnInteger(util.F2PyTest): - def check_function(self, t): - assert_( t(123)==123, repr(t(123))) - assert_( t(123.6)==123) - assert_( t(long(123))==123) - assert_( t('123')==123) - assert_( t(-123)==-123) - assert_( t([123])==123) - assert_( t((123,))==123) - assert_( t(array(123))==123) - assert_( t(array([123]))==123) - assert_( t(array([[123]]))==123) - assert_( t(array([123], 'b'))==123) - assert_( t(array([123], 'h'))==123) - assert_( t(array([123], 'i'))==123) - assert_( t(array([123], 'l'))==123) - assert_( t(array([123], 'B'))==123) - assert_( t(array([123], 'f'))==123) - assert_( t(array([123], 'd'))==123) - - #assert_raises(ValueError, t, array([123],'S3')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - if t.__doc__.split()[0] in ['t8', 's8']: - assert_raises(OverflowError, t, 100000000000000000000000) - assert_raises(OverflowError, t, 10000000011111111111111.23) - -class TestF77ReturnInteger(TestReturnInteger): - code = """ - function t0(value) - integer value - integer t0 - t0 = value - end - function t1(value) - integer*1 value - integer*1 t1 - t1 = value - end - function t2(value) - integer*2 value - integer*2 t2 - t2 = value - end - function t4(value) - integer*4 value - integer*4 t4 - t4 = value - end - function t8(value) - integer*8 value - integer*8 t8 - t8 = value - end - - subroutine s0(t0,value) - integer value - integer t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - integer*1 value - integer*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - integer*2 value - integer*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - integer*4 value - integer*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - integer*8 value - integer*8 t8 
-cf2py intent(out) t8 - t8 = value - end - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): - self.check_function(getattr(self.module, name)) - - -class TestF90ReturnInteger(TestReturnInteger): - suffix = ".f90" - code = """ -module f90_return_integer - contains - function t0(value) - integer :: value - integer :: t0 - t0 = value - end function t0 - function t1(value) - integer(kind=1) :: value - integer(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - integer(kind=2) :: value - integer(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - integer(kind=4) :: value - integer(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - integer(kind=8) :: value - integer(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - integer :: value - integer :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - integer(kind=1) :: value - integer(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - integer(kind=2) :: value - integer(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - integer(kind=4) :: value - integer(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - integer(kind=8) :: value - integer(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_integer - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): - self.check_function(getattr(self.module.f90_return_integer, name)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_logical.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_logical.py deleted file mode 100644 index 82f86b67f1ea4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_logical.py +++ /dev/null @@ -1,187 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy import array -from numpy.compat import long -import util - -class TestReturnLogical(util.F2PyTest): - def check_function(self, t): - assert_( t(True)==1, repr(t(True))) - assert_( t(False)==0, repr(t(False))) - assert_( t(0)==0) - assert_( t(None)==0) - assert_( t(0.0)==0) - assert_( t(0j)==0) - assert_( t(1j)==1) - assert_( t(234)==1) - assert_( t(234.6)==1) - assert_( t(long(234))==1) - assert_( t(234.6+3j)==1) - assert_( t('234')==1) - assert_( t('aaa')==1) - assert_( t('')==0) - assert_( t([])==0) - assert_( t(())==0) - assert_( t({})==0) - assert_( t(t)==1) - assert_( t(-234)==1) - assert_( t(10**100)==1) - assert_( t([234])==1) - assert_( t((234,))==1) - assert_( t(array(234))==1) - assert_( t(array([234]))==1) - assert_( t(array([[234]]))==1) - assert_( t(array([234], 'b'))==1) - assert_( t(array([234], 'h'))==1) - assert_( t(array([234], 'i'))==1) - assert_( t(array([234], 'l'))==1) - assert_( t(array([234], 'f'))==1) - assert_( t(array([234], 'd'))==1) - assert_( t(array([234+3j], 'F'))==1) - assert_( t(array([234], 'D'))==1) - assert_( t(array(0))==0) - assert_( t(array([0]))==0) - assert_( t(array([[0]]))==0) - assert_( t(array([0j]))==0) - assert_( t(array([1]))==1) - assert_raises(ValueError, t, array([0, 0])) - - -class TestF77ReturnLogical(TestReturnLogical): - code = """ - function t0(value) - logical value - logical t0 - t0 = value - end - function 
t1(value) - logical*1 value - logical*1 t1 - t1 = value - end - function t2(value) - logical*2 value - logical*2 t2 - t2 = value - end - function t4(value) - logical*4 value - logical*4 t4 - t4 = value - end -c function t8(value) -c logical*8 value -c logical*8 t8 -c t8 = value -c end - - subroutine s0(t0,value) - logical value - logical t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s1(t1,value) - logical*1 value - logical*1 t1 -cf2py intent(out) t1 - t1 = value - end - subroutine s2(t2,value) - logical*2 value - logical*2 t2 -cf2py intent(out) t2 - t2 = value - end - subroutine s4(t4,value) - logical*4 value - logical*4 t4 -cf2py intent(out) t4 - t4 = value - end -c subroutine s8(t8,value) -c logical*8 value -c logical*8 t8 -cf2py intent(out) t8 -c t8 = value -c end - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t2,t4,s0,s1,s2,s4".split(","): - self.check_function(getattr(self.module, name)) - -class TestF90ReturnLogical(TestReturnLogical): - suffix = ".f90" - code = """ -module f90_return_logical - contains - function t0(value) - logical :: value - logical :: t0 - t0 = value - end function t0 - function t1(value) - logical(kind=1) :: value - logical(kind=1) :: t1 - t1 = value - end function t1 - function t2(value) - logical(kind=2) :: value - logical(kind=2) :: t2 - t2 = value - end function t2 - function t4(value) - logical(kind=4) :: value - logical(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - logical(kind=8) :: value - logical(kind=8) :: t8 - t8 = value - end function t8 - - subroutine s0(t0,value) - logical :: value - logical :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s1(t1,value) - logical(kind=1) :: value - logical(kind=1) :: t1 -!f2py intent(out) t1 - t1 = value - end subroutine s1 - subroutine s2(t2,value) - logical(kind=2) :: value - logical(kind=2) :: t2 -!f2py intent(out) t2 - t2 = value - end subroutine s2 - subroutine s4(t4,value) - logical(kind=4) :: value - logical(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - logical(kind=8) :: value - logical(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 -end module f90_return_logical - """ - - @dec.slow - def test_all(self): - for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","): - self.check_function(getattr(self.module.f90_return_logical, name)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_real.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_real.py deleted file mode 100644 index f9a09f6207242..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_return_real.py +++ /dev/null @@ -1,203 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy import array -from numpy.compat import long -import math -import util - -class TestReturnReal(util.F2PyTest): - def check_function(self, t): - if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: - err = 1e-5 - else: - err = 0.0 - assert_( abs(t(234)-234.0)<=err) - assert_( abs(t(234.6)-234.6)<=err) - assert_( abs(t(long(234))-234.0)<=err) - assert_( abs(t('234')-234)<=err) - assert_( abs(t('234.6')-234.6)<=err) - assert_( abs(t(-234)+234)<=err) - assert_( abs(t([234])-234)<=err) - assert_( abs(t((234,))-234.)<=err) - assert_( abs(t(array(234))-234.)<=err) - assert_( abs(t(array([234]))-234.)<=err) - assert_( abs(t(array([[234]]))-234.)<=err) - 
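# Note: dtype 'b' is int8, and 234 does not fit in a signed byte --
# np.array([234]).astype(np.int8) wraps around to 234 - 256 == -22, which is
# why the following assertion adds 22 rather than subtracting 234.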
assert_( abs(t(array([234], 'b'))+22)<=err) - assert_( abs(t(array([234], 'h'))-234.)<=err) - assert_( abs(t(array([234], 'i'))-234.)<=err) - assert_( abs(t(array([234], 'l'))-234.)<=err) - assert_( abs(t(array([234], 'B'))-234.)<=err) - assert_( abs(t(array([234], 'f'))-234.)<=err) - assert_( abs(t(array([234], 'd'))-234.)<=err) - if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: - assert_( t(1e200)==t(1e300)) # inf - - #assert_raises(ValueError, t, array([234], 'S1')) - assert_raises(ValueError, t, 'abc') - - assert_raises(IndexError, t, []) - assert_raises(IndexError, t, ()) - - assert_raises(Exception, t, t) - assert_raises(Exception, t, {}) - - try: - r = t(10**400) - assert_( repr(r) in ['inf', 'Infinity'], repr(r)) - except OverflowError: - pass - -class TestCReturnReal(TestReturnReal): - suffix = ".pyf" - module_name = "c_ext_return_real" - code = """ -python module c_ext_return_real -usercode \'\'\' -float t4(float value) { return value; } -void s4(float *t4, float value) { *t4 = value; } -double t8(double value) { return value; } -void s8(double *t8, double value) { *t8 = value; } -\'\'\' -interface - function t4(value) - real*4 intent(c) :: t4,value - end - function t8(value) - real*8 intent(c) :: t8,value - end - subroutine s4(t4,value) - intent(c) s4 - real*4 intent(out) :: t4 - real*4 intent(c) :: value - end - subroutine s8(t8,value) - intent(c) s8 - real*8 intent(out) :: t8 - real*8 intent(c) :: value - end -end interface -end python module c_ext_return_real - """ - - @dec.slow - def test_all(self): - for name in "t4,t8,s4,s8".split(","): - self.check_function(getattr(self.module, name)) - -class TestF77ReturnReal(TestReturnReal): - code = """ - function t0(value) - real value - real t0 - t0 = value - end - function t4(value) - real*4 value - real*4 t4 - t4 = value - end - function t8(value) - real*8 value - real*8 t8 - t8 = value - end - function td(value) - double precision value - double precision td - td = value - end - - subroutine s0(t0,value) - real value - real t0 -cf2py intent(out) t0 - t0 = value - end - subroutine s4(t4,value) - real*4 value - real*4 t4 -cf2py intent(out) t4 - t4 = value - end - subroutine s8(t8,value) - real*8 value - real*8 t8 -cf2py intent(out) t8 - t8 = value - end - subroutine sd(td,value) - double precision value - double precision td -cf2py intent(out) td - td = value - end - """ - - @dec.slow - def test_all(self): - for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","): - self.check_function(getattr(self.module, name)) - -class TestF90ReturnReal(TestReturnReal): - suffix = ".f90" - code = """ -module f90_return_real - contains - function t0(value) - real :: value - real :: t0 - t0 = value - end function t0 - function t4(value) - real(kind=4) :: value - real(kind=4) :: t4 - t4 = value - end function t4 - function t8(value) - real(kind=8) :: value - real(kind=8) :: t8 - t8 = value - end function t8 - function td(value) - double precision :: value - double precision :: td - td = value - end function td - - subroutine s0(t0,value) - real :: value - real :: t0 -!f2py intent(out) t0 - t0 = value - end subroutine s0 - subroutine s4(t4,value) - real(kind=4) :: value - real(kind=4) :: t4 -!f2py intent(out) t4 - t4 = value - end subroutine s4 - subroutine s8(t8,value) - real(kind=8) :: value - real(kind=8) :: t8 -!f2py intent(out) t8 - t8 = value - end subroutine s8 - subroutine sd(td,value) - double precision :: value - double precision :: td -!f2py intent(out) td - td = value - end subroutine sd -end module f90_return_real - """ - - @dec.slow - def 
test_all(self): - for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","): - self.check_function(getattr(self.module.f90_return_real, name)) - - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_size.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_size.py deleted file mode 100644 index e4f21b519ca4e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/test_size.py +++ /dev/null @@ -1,47 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import math - -from numpy.testing import * -from numpy import array - -import util - -def _path(*a): - return os.path.join(*((os.path.dirname(__file__),) + a)) - -class TestSizeSumExample(util.F2PyTest): - sources = [_path('src', 'size', 'foo.f90'), - ] - - @dec.slow - def test_all(self): - r = self.module.foo([[1, 2]]) - assert_equal(r, [3], repr(r)) - - r = self.module.foo([[1, 2], [3, 4]]) - assert_equal(r, [3, 7], repr(r)) - - r = self.module.foo([[1, 2], [3, 4], [5, 6]]) - assert_equal(r, [3, 7, 11], repr(r)) - - @dec.slow - def test_transpose(self): - r = self.module.trans([[1, 2]]) - assert_equal(r, [[1], [2]], repr(r)) - - r = self.module.trans([[1, 2, 3], [4, 5, 6]]) - assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) - - @dec.slow - def test_flatten(self): - r = self.module.flatten([[1, 2]]) - assert_equal(r, [1, 2], repr(r)) - - r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) - assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/util.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/util.py deleted file mode 100644 index 56aff2b666fa3..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/tests/util.py +++ /dev/null @@ -1,353 +0,0 @@ -""" -Utility functions for - -- building and importing modules on test time, using a temporary location -- detecting if compilers are present - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import subprocess -import tempfile -import shutil -import atexit -import textwrap -import re -import random - -import nose - -from numpy.compat import asbytes, asstr -import numpy.f2py - -try: - from hashlib import md5 -except ImportError: - from md5 import new as md5 - -# -# Maintaining a temporary module directory -# - -_module_dir = None - -def _cleanup(): - global _module_dir - if _module_dir is not None: - try: - sys.path.remove(_module_dir) - except ValueError: - pass - try: - shutil.rmtree(_module_dir) - except (IOError, OSError): - pass - _module_dir = None - -def get_module_dir(): - global _module_dir - if _module_dir is None: - _module_dir = tempfile.mkdtemp() - atexit.register(_cleanup) - if _module_dir not in sys.path: - sys.path.insert(0, _module_dir) - return _module_dir - -def get_temp_module_name(): - # Assume single-threaded, and the module dir usable only by this thread - d = get_module_dir() - for j in range(5403, 9999999): - name = "_test_ext_module_%d" % j - fn = os.path.join(d, name) - if name not in sys.modules and not os.path.isfile(fn+'.py'): - return name - raise RuntimeError("Failed to create a temporary module name") - -def _memoize(func): - memo = {} - def wrapper(*a, **kw): - key = repr((a, kw)) - if key not in memo: - try: - memo[key] = func(*a, **kw) - except Exception as e: - memo[key] = e - raise - ret = memo[key] - if isinstance(ret, Exception): - raise ret - 
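# A cached exception is re-raised on every later call with the same
# arguments, so a failed build is reported again but never retried
# within the same process.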
return ret - wrapper.__name__ = func.__name__ - return wrapper - -# -# Building modules -# - -@_memoize -def build_module(source_files, options=[], skip=[], only=[], module_name=None): - """ - Compile and import a f2py module, built from the given files. - - """ - - code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; " - "f2py2e.main()" % repr(sys.path)) - - d = get_module_dir() - - # Copy files - dst_sources = [] - for fn in source_files: - if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) - dst = os.path.join(d, os.path.basename(fn)) - shutil.copyfile(fn, dst) - dst_sources.append(dst) - - fn = os.path.join(os.path.dirname(fn), '.f2py_f2cmap') - if os.path.isfile(fn): - dst = os.path.join(d, os.path.basename(fn)) - if not os.path.isfile(dst): - shutil.copyfile(fn, dst) - - # Prepare options - if module_name is None: - module_name = get_temp_module_name() - f2py_opts = ['-c', '-m', module_name] + options + dst_sources - if skip: - f2py_opts += ['skip:'] + skip - if only: - f2py_opts += ['only:'] + only - - # Build - cwd = os.getcwd() - try: - os.chdir(d) - cmd = [sys.executable, '-c', code] + f2py_opts - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - if p.returncode != 0: - raise RuntimeError("Running f2py failed: %s\n%s" - % (cmd[4:], asstr(out))) - finally: - os.chdir(cwd) - - # Partial cleanup - for fn in dst_sources: - os.unlink(fn) - - # Import - __import__(module_name) - return sys.modules[module_name] - -@_memoize -def build_code(source_code, options=[], skip=[], only=[], suffix=None, - module_name=None): - """ - Compile and import Fortran code using f2py. - - """ - if suffix is None: - suffix = '.f' - - fd, tmp_fn = tempfile.mkstemp(suffix=suffix) - os.write(fd, asbytes(source_code)) - os.close(fd) - - try: - return build_module([tmp_fn], options=options, skip=skip, only=only, - module_name=module_name) - finally: - os.unlink(tmp_fn) - -# -# Check if compilers are available at all... -# - -_compiler_status = None -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - - # XXX: this is really ugly. But I don't know how to invoke Distutils - # in a safer way... 
- code = """ -import os -import sys -sys.path = %(syspath)s - -def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - -from numpy.distutils.core import setup -setup(configuration=configuration) - -config_cmd = config.get_config_cmd() -have_c = config_cmd.try_compile('void foo() {}') -print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) -sys.exit(99) -""" - code = code % dict(syspath=repr(sys.path)) - - fd, script = tempfile.mkstemp(suffix='.py') - os.write(fd, asbytes(code)) - os.close(fd) - - try: - cmd = [sys.executable, script, 'config'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - m = re.search(asbytes(r'COMPILERS:(\d+),(\d+),(\d+)'), out) - if m: - _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), - bool(int(m.group(3)))) - finally: - os.unlink(script) - - # Finished - return _compiler_status - -def has_c_compiler(): - return _get_compiler_status()[0] - -def has_f77_compiler(): - return _get_compiler_status()[1] - -def has_f90_compiler(): - return _get_compiler_status()[2] - -# -# Building with distutils -# - -@_memoize -def build_module_distutils(source_files, config_code, module_name, **kw): - """ - Build a module via distutils and import it. - - """ - from numpy.distutils.misc_util import Configuration - from numpy.distutils.core import setup - - d = get_module_dir() - - # Copy files - dst_sources = [] - for fn in source_files: - if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) - dst = os.path.join(d, os.path.basename(fn)) - shutil.copyfile(fn, dst) - dst_sources.append(dst) - - # Build script - config_code = textwrap.dedent(config_code).replace("\n", "\n ") - - code = """\ -import os -import sys -sys.path = %(syspath)s - -def configuration(parent_name='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - %(config_code)s - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) -""" % dict(config_code=config_code, syspath = repr(sys.path)) - - script = os.path.join(d, get_temp_module_name() + '.py') - dst_sources.append(script) - f = open(script, 'wb') - f.write(asbytes(code)) - f.close() - - # Build - cwd = os.getcwd() - try: - os.chdir(d) - cmd = [sys.executable, script, 'build_ext', '-i'] - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = p.communicate() - if p.returncode != 0: - raise RuntimeError("Running distutils build failed: %s\n%s" - % (cmd[4:], asstr(out))) - finally: - os.chdir(cwd) - - # Partial cleanup - for fn in dst_sources: - os.unlink(fn) - - # Import - __import__(module_name) - return sys.modules[module_name] - -# -# Unittest convenience -# - -class F2PyTest(object): - code = None - sources = None - options = [] - skip = [] - only = [] - suffix = '.f' - module = None - module_name = None - - def setUp(self): - if self.module is not None: - return - - # Check compiler availability first - if not has_c_compiler(): - raise nose.SkipTest("No C compiler available") - - codes = [] - if self.sources: - codes.extend(self.sources) - if self.code is not None: - codes.append(self.suffix) - - needs_f77 = False - needs_f90 = False - for fn in codes: - if fn.endswith('.f'): - needs_f77 = True - elif fn.endswith('.f90'): - 
needs_f90 = True - if needs_f77 and not has_f77_compiler(): - raise nose.SkipTest("No Fortran 77 compiler available") - if needs_f90 and not has_f90_compiler(): - raise nose.SkipTest("No Fortran 90 compiler available") - - # Build the module - if self.code is not None: - self.module = build_code(self.code, options=self.options, - skip=self.skip, only=self.only, - suffix=self.suffix, - module_name=self.module_name) - - if self.sources is not None: - self.module = build_module(self.sources, options=self.options, - skip=self.skip, only=self.only, - module_name=self.module_name) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/use_rules.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/use_rules.py deleted file mode 100644 index 6fd72bd774fcf..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/f2py/use_rules.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python -""" - -Build 'use others module data' mechanism for f2py2e. - -Unfinished. - -Copyright 2000 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -$Date: 2000/09/10 12:35:43 $ -Pearu Peterson - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "$Revision: 1.3 $"[10:-1] - -f2py_version='See `f2py -v`' - -import pprint -import sys -errmess=sys.stderr.write -outmess=sys.stdout.write -show=pprint.pprint - -from .auxfuncs import * -############## - -usemodule_rules={ - 'body':""" -#begintitle# -static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ -\t #name# = get_#name#()\\n\\ -Arguments:\\n\\ -#docstr#\"; -extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); -static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { -/*#decl#*/ -\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; -printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); -\treturn Py_BuildValue(\"\"); -capi_fail: -\treturn NULL; -} -""", - 'method':'\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', - 'need':['F_MODFUNC'] - } - -################ - -def buildusevars(m, r): - ret={} - outmess('\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n'%(m['name'])) - varsmap={} - revmap={} - if 'map' in r: - for k in r['map'].keys(): - if r['map'][k] in revmap: - outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n'%(r['map'][k], k, revmap[r['map'][k]])) - else: - revmap[r['map'][k]]=k - if 'only' in r and r['only']: - for v in r['map'].keys(): - if r['map'][v] in m['vars']: - - if revmap[r['map'][v]]==v: - varsmap[v]=r['map'][v] - else: - outmess('\t\t\tIgnoring map "%s=>%s". See above.\n'%(v, r['map'][v])) - else: - outmess('\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n'%(v, r['map'][v])) - else: - for v in m['vars'].keys(): - if v in revmap: - varsmap[v]=revmap[v] - else: - varsmap[v]=v - for v in varsmap.keys(): - ret=dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) - return ret -def buildusevar(name, realname, vars, usemodulename): - outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n'%(name, realname)) - ret={} - vrd={'name':name, - 'realname':realname, - 'REALNAME':realname.upper(), - 'usemodulename':usemodulename, - 'USEMODULENAME':usemodulename.upper(), - 'texname':name.replace('_', '\\_'), - 'begintitle':gentitle('%s=>%s'%(name, realname)), - 'endtitle':gentitle('end of %s=>%s'%(name, realname)), - 'apiname':'#modulename#_use_%s_from_%s'%(realname, usemodulename) - } - nummap={0:'Ro',1:'Ri',2:'Rii',3:'Riii',4:'Riv',5:'Rv',6:'Rvi',7:'Rvii',8:'Rviii',9:'Rix'} - vrd['texnamename']=name - for i in nummap.keys(): - vrd['texnamename']=vrd['texnamename'].replace(repr(i), nummap[i]) - if hasnote(vars[realname]): vrd['note']=vars[realname]['note'] - rd=dictappend({}, vrd) - var=vars[realname] - - print(name, realname, vars[realname]) - ret=applyrules(usemodule_rules, rd) - return ret diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/__init__.py deleted file mode 100644 index 96809a94f847f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import division, absolute_import, print_function - -# To get sub-modules -from .info import __doc__ - -from .fftpack import * -from .helper import * - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack.py deleted file mode 100644 index 706fcdd2f0749..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack.py +++ /dev/null @@ -1,1169 +0,0 @@ -""" -Discrete Fourier Transforms - -Routines in this module: - -fft(a, n=None, axis=-1) -ifft(a, n=None, axis=-1) -rfft(a, n=None, axis=-1) -irfft(a, n=None, axis=-1) -hfft(a, n=None, axis=-1) -ihfft(a, n=None, axis=-1) -fftn(a, s=None, axes=None) -ifftn(a, s=None, axes=None) -rfftn(a, s=None, axes=None) -irfftn(a, s=None, axes=None) -fft2(a, s=None, axes=(-2,-1)) -ifft2(a, s=None, axes=(-2, -1)) -rfft2(a, s=None, axes=(-2,-1)) -irfft2(a, s=None, axes=(-2, -1)) - -i = inverse transform -r = transform of purely real data -h = Hermite transform -n = n-dimensional transform -2 = 2-dimensional transform -(Note: 2D routines are just nD routines with different default -behavior.) - -The underlying code for these functions is an f2c-translated and modified -version of the FFTPACK routines. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', - 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] - -from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \ - take -from . import fftpack_lite as fftpack - -_fft_cache = {} -_real_fft_cache = {} - -def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, - work_function=fftpack.cfftf, fft_cache = _fft_cache ): - a = asarray(a) - - if n is None: - n = a.shape[axis] - - if n < 1: - raise ValueError("Invalid number of FFT data points (%d) specified." % n) - - try: - # Thread-safety note: We rely on list.pop() here to atomically - # retrieve-and-remove a wsave from the cache. 
This ensures that no - # other thread can get the same wsave while we're using it. - wsave = fft_cache.setdefault(n, []).pop() - except (IndexError): - wsave = init_function(n) - - if a.shape[axis] != n: - s = list(a.shape) - if s[axis] > n: - index = [slice(None)]*len(s) - index[axis] = slice(0, n) - a = a[index] - else: - index = [slice(None)]*len(s) - index[axis] = slice(0, s[axis]) - s[axis] = n - z = zeros(s, a.dtype.char) - z[index] = a - a = z - - if axis != -1: - a = swapaxes(a, axis, -1) - r = work_function(a, wsave) - if axis != -1: - r = swapaxes(r, axis, -1) - - # As soon as we put wsave back into the cache, another thread could pick it - # up and start using it, so we must not do this until after we're - # completely done using it ourselves. - fft_cache[n].append(wsave) - - return r - - -def fft(a, n=None, axis=-1): - """ - Compute the one-dimensional discrete Fourier Transform. - - This function computes the one-dimensional *n*-point discrete Fourier - Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. - - Parameters - ---------- - a : array_like - Input array, can be complex. - n : int, optional - Length of the transformed axis of the output. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - axis : int, optional - Axis over which to compute the FFT. If not given, the last axis is - used. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - - Raises - ------ - IndexError - if `axes` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : for definition of the DFT and conventions used. - ifft : The inverse of `fft`. - fft2 : The two-dimensional FFT. - fftn : The *n*-dimensional FFT. - rfftn : The *n*-dimensional FFT of real input. - fftfreq : Frequency bins for given FFT parameters. - - Notes - ----- - FFT (Fast Fourier Transform) refers to a way the discrete Fourier - Transform (DFT) can be calculated efficiently, by using symmetries in the - calculated terms. The symmetry is highest when `n` is a power of 2, and - the transform is therefore most efficient for these sizes. - - The DFT is defined, with the conventions used in this implementation, in - the documentation for the `numpy.fft` module. - - References - ---------- - .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - - Examples - -------- - >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) - array([ -3.44505240e-16 +1.14383329e-17j, - 8.00000000e+00 -5.71092652e-15j, - 2.33482938e-16 +1.22460635e-16j, - 1.64863782e-15 +1.77635684e-15j, - 9.95839695e-17 +2.33482938e-16j, - 0.00000000e+00 +1.66837030e-15j, - 1.14383329e-17 +1.22460635e-16j, - -1.64863782e-15 +1.77635684e-15j]) - - >>> import matplotlib.pyplot as plt - >>> t = np.arange(256) - >>> sp = np.fft.fft(np.sin(t)) - >>> freq = np.fft.fftfreq(t.shape[-1]) - >>> plt.plot(freq, sp.real, freq, sp.imag) - [, ] - >>> plt.show() - - In this example, real input has an FFT which is Hermitian, i.e., symmetric - in the real part and anti-symmetric in the imaginary part, as described in - the `numpy.fft` documentation. 
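A quick sketch of the Hermitian symmetry just described, assuming only that NumPy is importable as ``np``: for real input, each positive-frequency term is the complex conjugate of its negative-frequency twin.

import numpy as np

x = np.random.rand(8)                  # purely real input
X = np.fft.fft(x)
# X[k] == conj(X[n-k]) for real x, so the reversed, conjugated tail matches.
print(np.allclose(X[1:], np.conj(X[1:][::-1])))   # True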
- - """ - - return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache) - - -def ifft(a, n=None, axis=-1): - """ - Compute the one-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the one-dimensional *n*-point - discrete Fourier transform computed by `fft`. In other words, - ``ifft(fft(a)) == a`` to within numerical accuracy. - For a general description of the algorithm and definitions, - see `numpy.fft`. - - The input should be ordered in the same way as is returned by `fft`, - i.e., ``a[0]`` should contain the zero frequency term, - ``a[1:n/2+1]`` should contain the positive-frequency terms, and - ``a[n/2+1:]`` should contain the negative-frequency terms, in order of - decreasingly negative frequency. See `numpy.fft` for details. - - Parameters - ---------- - a : array_like - Input array, can be complex. - n : int, optional - Length of the transformed axis of the output. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - See notes about padding issues. - axis : int, optional - Axis over which to compute the inverse DFT. If not given, the last - axis is used. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - - Raises - ------ - IndexError - If `axes` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : An introduction, with definitions and general explanations. - fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse - ifft2 : The two-dimensional inverse FFT. - ifftn : The n-dimensional inverse FFT. - - Notes - ----- - If the input parameter `n` is larger than the size of the input, the input - is padded by appending zeros at the end. Even though this is the common - approach, it might lead to surprising results. If a different padding is - desired, it must be performed before calling `ifft`. - - Examples - -------- - >>> np.fft.ifft([0, 4, 0, 0]) - array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) - - Create and plot a band-limited signal with random phases: - - >>> import matplotlib.pyplot as plt - >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) - >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) - >>> s = np.fft.ifft(n) - >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') - [, ] - >>> plt.legend(('real', 'imaginary')) - - >>> plt.show() - - """ - - a = asarray(a).astype(complex) - if n is None: - n = shape(a)[axis] - return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n - - -def rfft(a, n=None, axis=-1): - """ - Compute the one-dimensional discrete Fourier Transform for real input. - - This function computes the one-dimensional *n*-point discrete Fourier - Transform (DFT) of a real-valued array by means of an efficient algorithm - called the Fast Fourier Transform (FFT). - - Parameters - ---------- - a : array_like - Input array - n : int, optional - Number of points along transformation axis in the input to use. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - axis : int, optional - Axis over which to compute the FFT. If not given, the last axis is - used. 
- - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - If `n` is even, the length of the transformed axis is ``(n/2)+1``. - If `n` is odd, the length is ``(n+1)/2``. - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : For definition of the DFT and conventions used. - irfft : The inverse of `rfft`. - fft : The one-dimensional FFT of general (complex) input. - fftn : The *n*-dimensional FFT. - rfftn : The *n*-dimensional FFT of real input. - - Notes - ----- - When the DFT is computed for purely real input, the output is - Hermitian-symmetric, i.e. the negative frequency terms are just the complex - conjugates of the corresponding positive-frequency terms, and the - negative-frequency terms are therefore redundant. This function does not - compute the negative frequency terms, and the length of the transformed - axis of the output is therefore ``n//2 + 1``. - - When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains - the zero-frequency term 0*fs, which is real due to Hermitian symmetry. - - If `n` is even, ``A[-1]`` contains the term representing both positive - and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely - real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains - the largest positive frequency (fs/2*(n-1)/n), and is complex in the - general case. - - If the input `a` contains an imaginary part, it is silently discarded. - - Examples - -------- - >>> np.fft.fft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) - >>> np.fft.rfft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j]) - - Notice how the final element of the `fft` output is the complex conjugate - of the second element, for real input. For `rfft`, this symmetry is - exploited to compute only the non-negative frequency terms. - - """ - - a = asarray(a).astype(float) - return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache) - - -def irfft(a, n=None, axis=-1): - """ - Compute the inverse of the n-point DFT for real input. - - This function computes the inverse of the one-dimensional *n*-point - discrete Fourier Transform of real input computed by `rfft`. - In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical - accuracy. (See Notes below for why ``len(a)`` is necessary here.) - - The input is expected to be in the form returned by `rfft`, i.e. the - real zero-frequency term followed by the complex positive frequency terms - in order of increasing frequency. Since the discrete Fourier Transform of - real input is Hermitian-symmetric, the negative frequency terms are taken - to be the complex conjugates of the corresponding positive frequency terms. - - Parameters - ---------- - a : array_like - The input array. - n : int, optional - Length of the transformed axis of the output. - For `n` output points, ``n//2+1`` input points are necessary. If the - input is longer than this, it is cropped. If it is shorter than this, - it is padded with zeros. If `n` is not given, it is determined from - the length of the input along the axis specified by `axis`. - axis : int, optional - Axis over which to compute the inverse FFT. If not given, the last - axis is used. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. 
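A sketch of the ``n//2 + 1`` output length of ``rfft`` and its agreement with the non-negative-frequency half of the full ``fft`` (assuming NumPy as ``np``):

import numpy as np

x = np.random.rand(8)
R = np.fft.rfft(x)
print(R.shape)                             # (5,): n//2 + 1 terms for n == 8
print(np.allclose(R, np.fft.fft(x)[:5]))   # True: the redundant half is dropped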
- The length of the transformed axis is `n`, or, if `n` is not given, - ``2*(m-1)`` where ``m`` is the length of the transformed axis of the - input. To get an odd number of output points, `n` must be specified. - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See Also - -------- - numpy.fft : For definition of the DFT and conventions used. - rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. - fft : The one-dimensional FFT. - irfft2 : The inverse of the two-dimensional FFT of real input. - irfftn : The inverse of the *n*-dimensional FFT of real input. - - Notes - ----- - Returns the real valued `n`-point inverse discrete Fourier transform - of `a`, where `a` contains the non-negative frequency terms of a - Hermitian-symmetric sequence. `n` is the length of the result, not the - input. - - If you specify an `n` such that `a` must be zero-padded or truncated, the - extra/removed values will be added/removed at high frequencies. One can - thus resample a series to `m` points via Fourier interpolation by: - ``a_resamp = irfft(rfft(a), m)``. - - Examples - -------- - >>> np.fft.ifft([1, -1j, -1, 1j]) - array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) - >>> np.fft.irfft([1, -1j, -1]) - array([ 0., 1., 0., 0.]) - - Notice how the last term in the input to the ordinary `ifft` is the - complex conjugate of the second term, and the output has zero imaginary - part everywhere. When calling `irfft`, the negative frequencies are not - specified, and the output array is purely real. - - """ - - a = asarray(a).astype(complex) - if n is None: - n = (shape(a)[axis] - 1) * 2 - return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb, - _real_fft_cache) / n - - -def hfft(a, n=None, axis=-1): - """ - Compute the FFT of a signal which has Hermitian symmetry (real spectrum). - - Parameters - ---------- - a : array_like - The input array. - n : int, optional - Length of the transformed axis of the output. - For `n` output points, ``n//2+1`` input points are necessary. If the - input is longer than this, it is cropped. If it is shorter than this, - it is padded with zeros. If `n` is not given, it is determined from - the length of the input along the axis specified by `axis`. - axis : int, optional - Axis over which to compute the FFT. If not given, the last - axis is used. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - The length of the transformed axis is `n`, or, if `n` is not given, - ``2*(m-1)`` where ``m`` is the length of the transformed axis of the - input. To get an odd number of output points, `n` must be specified. - - Raises - ------ - IndexError - If `axis` is larger than the last axis of `a`. - - See also - -------- - rfft : Compute the one-dimensional FFT for real input. - ihfft : The inverse of `hfft`. - - Notes - ----- - `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the - opposite case: here the signal has Hermitian symmetry in the time domain - and is real in the frequency domain. So here it's `hfft` for which - you must supply the length of the result if it is to be odd: - ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. 
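The ``hfft``/``ihfft`` pairing just described, sketched with the docstring's own numbers (NumPy assumed as ``np``):

import numpy as np

half = np.array([1., 2., 3., 4.])     # non-negative half of a real spectrum
full = np.fft.hfft(half)              # length 2*(4 - 1) == 6, purely real
back = np.fft.ihfft(full)             # default n == 6 gives 6//2 + 1 == 4 terms
print(np.allclose(back.real, half))   # True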
- - Examples - -------- - >>> signal = np.array([1, 2, 3, 4, 3, 2]) - >>> np.fft.fft(signal) - array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) - >>> np.fft.hfft(signal[:4]) # Input first half of signal - array([ 15., -4., 0., -1., 0., -4.]) - >>> np.fft.hfft(signal, 6) # Input entire signal and truncate - array([ 15., -4., 0., -1., 0., -4.]) - - - >>> signal = np.array([[1, 1.j], [-1.j, 2]]) - >>> np.conj(signal.T) - signal # check Hermitian symmetry - array([[ 0.-0.j, 0.+0.j], - [ 0.+0.j, 0.-0.j]]) - >>> freq_spectrum = np.fft.hfft(signal) - >>> freq_spectrum - array([[ 1., 1.], - [ 2., -2.]]) - - """ - - a = asarray(a).astype(complex) - if n is None: - n = (shape(a)[axis] - 1) * 2 - return irfft(conjugate(a), n, axis) * n - - -def ihfft(a, n=None, axis=-1): - """ - Compute the inverse FFT of a signal which has Hermitian symmetry. - - Parameters - ---------- - a : array_like - Input array. - n : int, optional - Length of the inverse FFT. - Number of points along transformation axis in the input to use. - If `n` is smaller than the length of the input, the input is cropped. - If it is larger, the input is padded with zeros. If `n` is not given, - the length of the input along the axis specified by `axis` is used. - axis : int, optional - Axis over which to compute the inverse FFT. If not given, the last - axis is used. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axis - indicated by `axis`, or the last one if `axis` is not specified. - If `n` is even, the length of the transformed axis is ``(n/2)+1``. - If `n` is odd, the length is ``(n+1)/2``. - - See also - -------- - hfft, irfft - - Notes - ----- - `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the - opposite case: here the signal has Hermitian symmetry in the time domain - and is real in the frequency domain. So here it's `hfft` for which - you must supply the length of the result if it is to be odd: - ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. - - Examples - -------- - >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) - >>> np.fft.ifft(spectrum) - array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j]) - >>> np.fft.ihfft(spectrum) - array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) - - """ - - a = asarray(a).astype(float) - if n is None: - n = shape(a)[axis] - return conjugate(rfft(a, n, axis))/n - - -def _cook_nd_args(a, s=None, axes=None, invreal=0): - if s is None: - shapeless = 1 - if axes is None: - s = list(a.shape) - else: - s = take(a.shape, axes) - else: - shapeless = 0 - s = list(s) - if axes is None: - axes = list(range(-len(s), 0)) - if len(s) != len(axes): - raise ValueError("Shape and axes have different lengths.") - if invreal and shapeless: - s[-1] = (a.shape[axes[-1]] - 1) * 2 - return s, axes - - -def _raw_fftnd(a, s=None, axes=None, function=fft): - a = asarray(a) - s, axes = _cook_nd_args(a, s, axes) - itl = list(range(len(axes))) - itl.reverse() - for ii in itl: - a = function(a, n=s[ii], axis=axes[ii]) - return a - - -def fftn(a, s=None, axes=None): - """ - Compute the N-dimensional discrete Fourier Transform. - - This function computes the *N*-dimensional discrete Fourier Transform over - any number of axes in an *M*-dimensional array by means of the Fast Fourier - Transform (FFT). - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). 
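Because ``_raw_fftnd`` above implements the n-dimensional transform as repeated one-dimensional transforms over the requested axes (last axis first), ``fftn`` agrees with nesting ``fft`` calls by hand; a small sketch assuming NumPy as ``np``:

import numpy as np

a = np.random.rand(3, 4)
# The FFT is separable, so transforming each axis in turn is equivalent.
nested = np.fft.fft(np.fft.fft(a, axis=-1), axis=0)
print(np.allclose(np.fft.fftn(a), nested))   # True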
- This corresponds to `n` for `fft(x, n)`. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the transform over that axis is - performed multiple times. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` and `a`, - as explained in the parameters section above. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT. - fft : The one-dimensional FFT, with definitions and conventions used. - rfftn : The *n*-dimensional FFT of real input. - fft2 : The two-dimensional FFT. - fftshift : Shifts zero-frequency terms to centre of array - - Notes - ----- - The output, analogously to `fft`, contains the term for zero frequency in - the low-order corner of all axes, the positive frequency terms in the - first half of all axes, the term for the Nyquist frequency in the middle - of all axes and the negative frequency terms in the second half of all - axes, in order of decreasingly negative frequency. - - See `numpy.fft` for details, definitions and conventions used. - - Examples - -------- - >>> a = np.mgrid[:3, :3, :3][0] - >>> np.fft.fftn(a, axes=(1, 2)) - array([[[ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[ 9.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[ 18.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) - array([[[ 2.+0.j, 2.+0.j, 2.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[-2.+0.j, -2.+0.j, -2.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) - - >>> import matplotlib.pyplot as plt - >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, - ... 2 * np.pi * np.arange(200) / 34) - >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape) - >>> FS = np.fft.fftn(S) - >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2)) - - >>> plt.show() - - """ - - return _raw_fftnd(a, s, axes, fft) - -def ifftn(a, s=None, axes=None): - """ - Compute the N-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the N-dimensional discrete - Fourier Transform over any number of axes in an M-dimensional array by - means of the Fast Fourier Transform (FFT). In other words, - ``ifftn(fftn(a)) == a`` to within numerical accuracy. - For a description of the definitions and conventions used, see `numpy.fft`. - - The input, analogously to `ifft`, should be ordered in the same way as is - returned by `fftn`, i.e. it should have the term for zero frequency - in all axes in the low-order corner, the positive frequency terms in the - first half of all axes, the term for the Nyquist frequency in the middle - of all axes and the negative frequency terms in the second half of all - axes, in order of decreasingly negative frequency. 
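The n-dimensional round trip ``ifftn(fftn(a)) == a`` stated above, as a sketch (NumPy assumed as ``np``):

import numpy as np

a = np.random.rand(4, 5) + 1j * np.random.rand(4, 5)
print(np.allclose(np.fft.ifftn(np.fft.fftn(a)), a))   # True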
- - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - This corresponds to ``n`` for ``ifft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. See notes for issue on `ifft` zero padding. - axes : sequence of ints, optional - Axes over which to compute the IFFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` means that the inverse transform over that - axis is performed multiple times. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` or `a`, - as explained in the parameters section above. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. - ifft : The one-dimensional inverse FFT. - ifft2 : The two-dimensional inverse FFT. - ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning - of array. - - Notes - ----- - See `numpy.fft` for definitions and conventions used. - - Zero-padding, analogously with `ifft`, is performed by appending zeros to - the input along the specified dimension. Although this is the common - approach, it might lead to surprising results. If another form of zero - padding is desired, it must be performed before `ifftn` is called. - - Examples - -------- - >>> a = np.eye(4) - >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) - array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) - - - Create and plot an image with band-limited frequency content: - - >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) - >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) - >>> im = np.fft.ifftn(n).real - >>> plt.imshow(im) - - >>> plt.show() - - """ - - return _raw_fftnd(a, s, axes, ifft) - - -def fft2(a, s=None, axes=(-2, -1)): - """ - Compute the 2-dimensional discrete Fourier Transform - - This function computes the *n*-dimensional discrete Fourier Transform - over any axes in an *M*-dimensional array by means of the - Fast Fourier Transform (FFT). By default, the transform is computed over - the last two axes of the input array, i.e., a 2-dimensional FFT. - - Parameters - ---------- - a : array_like - Input array, can be complex - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). - This corresponds to `n` for `fft(x, n)`. - Along each axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last two - axes are used. 
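The zero-padding caveat repeated in these docstrings can be made concrete: ``_raw_fft`` appends zeros at the end of the transformed axis, so enlarging ``n`` is the same as explicitly appending zeros to the spectrum. A sketch assuming NumPy as ``np``:

import numpy as np

x = np.array([0., 4., 0., 0.])               # spectrum with one active bin
padded = np.concatenate([x, np.zeros(4)])    # zeros appended at the high end
print(np.allclose(np.fft.ifft(x, 8), np.fft.ifft(padded)))   # True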
A repeated index in `axes` means the transform over - that axis is performed multiple times. A one-element sequence means - that a one-dimensional FFT is performed. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or the last two axes if `axes` is not given. - - Raises - ------ - ValueError - If `s` and `axes` have different length, or `axes` not given and - ``len(s) != 2``. - IndexError - If an element of `axes` is larger than than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - ifft2 : The inverse two-dimensional FFT. - fft : The one-dimensional FFT. - fftn : The *n*-dimensional FFT. - fftshift : Shifts zero-frequency terms to the center of the array. - For two-dimensional input, swaps first and third quadrants, and second - and fourth quadrants. - - Notes - ----- - `fft2` is just `fftn` with a different default for `axes`. - - The output, analogously to `fft`, contains the term for zero frequency in - the low-order corner of the transformed axes, the positive frequency terms - in the first half of these axes, the term for the Nyquist frequency in the - middle of the axes and the negative frequency terms in the second half of - the axes, in order of decreasingly negative frequency. - - See `fftn` for details and a plotting example, and `numpy.fft` for - definitions and conventions used. - - - Examples - -------- - >>> a = np.mgrid[:5, :5][0] - >>> np.fft.fft2(a) - array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ], - [-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ], - [-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ], - [-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ], - [-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ]]) - - """ - - return _raw_fftnd(a, s, axes, fft) - - -def ifft2(a, s=None, axes=(-2, -1)): - """ - Compute the 2-dimensional inverse discrete Fourier Transform. - - This function computes the inverse of the 2-dimensional discrete Fourier - Transform over any number of axes in an M-dimensional array by means of - the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` - to within numerical accuracy. By default, the inverse transform is - computed over the last two axes of the input array. - - The input, analogously to `ifft`, should be ordered in the same way as is - returned by `fft2`, i.e. it should have the term for zero frequency - in the low-order corner of the two axes, the positive frequency terms in - the first half of these axes, the term for the Nyquist frequency in the - middle of the axes and the negative frequency terms in the second half of - both axes, in order of decreasingly negative frequency. - - Parameters - ---------- - a : array_like - Input array, can be complex. - s : sequence of ints, optional - Shape (length of each axis) of the output (``s[0]`` refers to axis 0, - ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. - Along each axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - if `s` is not given, the shape of the input along the axes specified - by `axes` is used. See notes for issue on `ifft` zero padding. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last two - axes are used. 
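That ``fft2`` is just ``fftn`` with a different default for ``axes``, as noted above, is easy to confirm (NumPy assumed as ``np``):

import numpy as np

a = np.random.rand(2, 3, 4)
print(np.allclose(np.fft.fft2(a), np.fft.fftn(a, axes=(-2, -1))))   # True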
A repeated index in `axes` means the transform over - that axis is performed multiple times. A one-element sequence means - that a one-dimensional FFT is performed. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or the last two axes if `axes` is not given. - - Raises - ------ - ValueError - If `s` and `axes` have different length, or `axes` not given and - ``len(s) != 2``. - IndexError - If an element of `axes` is larger than the number of axes of `a`. - - See Also - -------- - numpy.fft : Overall view of discrete Fourier transforms, with definitions - and conventions used. - fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse. - ifftn : The inverse of the *n*-dimensional FFT. - fft : The one-dimensional FFT. - ifft : The one-dimensional inverse FFT. - - Notes - ----- - `ifft2` is just `ifftn` with a different default for `axes`. - - See `ifftn` for details and a plotting example, and `numpy.fft` for - definitions and conventions used. - - Zero-padding, analogously with `ifft`, is performed by appending zeros to - the input along the specified dimension. Although this is the common - approach, it might lead to surprising results. If another form of zero - padding is desired, it must be performed before `ifft2` is called. - - Examples - -------- - >>> a = 4 * np.eye(4) - >>> np.fft.ifft2(a) - array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], - [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) - - """ - - return _raw_fftnd(a, s, axes, ifft) - - -def rfftn(a, s=None, axes=None): - """ - Compute the N-dimensional discrete Fourier Transform for real input. - - This function computes the N-dimensional discrete Fourier Transform over - any number of axes in an M-dimensional real array by means of the Fast - Fourier Transform (FFT). By default, all axes are transformed, with the - real transform performed over the last axis, while the remaining - transforms are complex. - - Parameters - ---------- - a : array_like - Input array, taken to be real. - s : sequence of ints, optional - Shape (length along each transformed axis) to use from the input. - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). - The final element of `s` corresponds to `n` for ``rfft(x, n)``, while - for the remaining axes, it corresponds to `n` for ``fft(x, n)``. - Along any axis, if the given shape is smaller than that of the input, - the input is cropped. If it is larger, the input is padded with zeros. - If `s` is not given, the shape of the input along the axes specified - by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the FFT. If not given, the last ``len(s)`` - axes are used, or all axes if `s` is also not specified. - - Returns - ------- - out : complex ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` and `a`, - as explained in the parameters section above. - The length of the last axis transformed will be ``s[-1]//2+1``, - while the remaining transformed axes will have lengths according to - `s`, or unchanged from the input. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than the number of axes of `a`. - - See Also - -------- - irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT - of real input.
- - fft : The one-dimensional FFT, with definitions and conventions used. - rfft : The one-dimensional FFT of real input. - fftn : The n-dimensional FFT. - rfft2 : The two-dimensional FFT of real input. - - Notes - ----- - The transform for real input is performed over the last transformation - axis, as by `rfft`, then the transform over the remaining axes is - performed as by `fftn`. The order of the output is as for `rfft` for the - final transformation axis, and as for `fftn` for the remaining - transformation axes. - - See `fft` for details, definitions and conventions used. - - Examples - -------- - >>> a = np.ones((2, 2, 2)) - >>> np.fft.rfftn(a) - array([[[ 8.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]], - [[ 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]]]) - - >>> np.fft.rfftn(a, axes=(2, 0)) - array([[[ 4.+0.j, 0.+0.j], - [ 4.+0.j, 0.+0.j]], - [[ 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]]]) - - """ - - a = asarray(a).astype(float) - s, axes = _cook_nd_args(a, s, axes) - a = rfft(a, s[-1], axes[-1]) - for ii in range(len(axes)-1): - a = fft(a, s[ii], axes[ii]) - return a - -def rfft2(a, s=None, axes=(-2, -1)): - """ - Compute the 2-dimensional FFT of a real array. - - Parameters - ---------- - a : array - Input array, taken to be real. - s : sequence of ints, optional - Shape of the FFT. - axes : sequence of ints, optional - Axes over which to compute the FFT. - - Returns - ------- - out : ndarray - The result of the real 2-D FFT. - - See Also - -------- - rfftn : Compute the N-dimensional discrete Fourier Transform for real - input. - - Notes - ----- - This is really just `rfftn` with different default behavior. - For more details see `rfftn`. - - """ - - return rfftn(a, s, axes) - -def irfftn(a, s=None, axes=None): - """ - Compute the inverse of the N-dimensional FFT of real input. - - This function computes the inverse of the N-dimensional discrete - Fourier Transform for real input over any number of axes in an - M-dimensional array by means of the Fast Fourier Transform (FFT). In - other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical - accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, - and for the same reason.) - - The input should be ordered in the same way as is returned by `rfftn`, - i.e. as for `irfft` for the final transformation axis, and as for `ifftn` - along all the other axes. - - Parameters - ---------- - a : array_like - Input array. - s : sequence of ints, optional - Shape (length of each transformed axis) of the output - (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the - number of input points used along this axis, except for the last axis, - where ``s[-1]//2+1`` points of the input are used. - Along any axis, if the shape indicated by `s` is smaller than that of - the input, the input is cropped. If it is larger, the input is padded - with zeros. If `s` is not given, the shape of the input along the - axes specified by `axes` is used. - axes : sequence of ints, optional - Axes over which to compute the inverse FFT. If not given, the last - `len(s)` axes are used, or all axes if `s` is also not specified. - Repeated indices in `axes` mean that the inverse transform over that - axis is performed multiple times. - - Returns - ------- - out : ndarray - The truncated or zero-padded input, transformed along the axes - indicated by `axes`, or by a combination of `s` and `a`, - as explained in the parameters section above.
- - The length of each transformed axis is as given by the corresponding - element of `s`, or the length of the input in every axis except for the - last one if `s` is not given. In the final transformed axis the length - of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the - length of the final transformed axis of the input. To get an odd - number of output points in the final axis, `s` must be specified. - - Raises - ------ - ValueError - If `s` and `axes` have different length. - IndexError - If an element of `axes` is larger than the number of axes of `a`. - - See Also - -------- - rfftn : The forward n-dimensional FFT of real input, - of which `irfftn` is the inverse. - fft : The one-dimensional FFT, with definitions and conventions used. - irfft : The inverse of the one-dimensional FFT of real input. - irfft2 : The inverse of the two-dimensional FFT of real input. - - Notes - ----- - See `fft` for definitions and conventions used. - - See `rfft` for definitions and conventions used for real input. - - Examples - -------- - >>> a = np.zeros((3, 2, 2)) - >>> a[0, 0, 0] = 3 * 2 * 2 - >>> np.fft.irfftn(a) - array([[[ 1., 1.], - [ 1., 1.]], - [[ 1., 1.], - [ 1., 1.]], - [[ 1., 1.], - [ 1., 1.]]]) - - """ - - a = asarray(a).astype(complex) - s, axes = _cook_nd_args(a, s, axes, invreal=1) - for ii in range(len(axes)-1): - a = ifft(a, s[ii], axes[ii]) - a = irfft(a, s[-1], axes[-1]) - return a - -def irfft2(a, s=None, axes=(-2, -1)): - """ - Compute the 2-dimensional inverse FFT of a real array. - - Parameters - ---------- - a : array_like - The input array. - s : sequence of ints, optional - Shape of the inverse FFT. - axes : sequence of ints, optional - The axes over which to compute the inverse FFT. - Default is the last two axes. - - Returns - ------- - out : ndarray - The result of the inverse real 2-D FFT. - - See Also - -------- - irfftn : Compute the inverse of the N-dimensional FFT of real input. - - Notes - ----- - This is really `irfftn` with different defaults. - For more details see `irfftn`. - - """ - - return irfftn(a, s, axes) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack_lite.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack_lite.py deleted file mode 100644 index 267b7dba4ed1d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/fftpack_lite.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'fftpack_lite.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/helper.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/helper.py deleted file mode 100644 index 160120e585bd2..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/helper.py +++ /dev/null @@ -1,224 +0,0 @@ -""" -Discrete Fourier Transforms - helper.py - -""" -from __future__ import division, absolute_import, print_function - -from numpy.compat import integer_types -from numpy.core import ( - asarray, concatenate, arange, take, integer, empty - ) - -# Created by Pearu Peterson, September 2002 - -__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] - -integer_types = integer_types + (integer,) - - -def fftshift(x, axes=None): - """ - Shift the zero-frequency component to the center of the spectrum. - - This function swaps half-spaces for all axes listed (defaults to all).
- Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. - - Parameters - ---------- - x : array_like - Input array. - axes : int or shape tuple, optional - Axes over which to shift. Default is None, which shifts all axes. - - Returns - ------- - y : ndarray - The shifted array. - - See Also - -------- - ifftshift : The inverse of `fftshift`. - - Examples - -------- - >>> freqs = np.fft.fftfreq(10, 0.1) - >>> freqs - array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.]) - >>> np.fft.fftshift(freqs) - array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) - - Shift the zero-frequency component only along the second axis: - - >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) - >>> freqs - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - >>> np.fft.fftshift(freqs, axes=(1,)) - array([[ 2., 0., 1.], - [-4., 3., 4.], - [-1., -3., -2.]]) - - """ - tmp = asarray(x) - ndim = len(tmp.shape) - if axes is None: - axes = list(range(ndim)) - elif isinstance(axes, integer_types): - axes = (axes,) - y = tmp - for k in axes: - n = tmp.shape[k] - p2 = (n+1)//2 - mylist = concatenate((arange(p2, n), arange(p2))) - y = take(y, mylist, k) - return y - - -def ifftshift(x, axes=None): - """ - The inverse of `fftshift`. Although identical for even-length `x`, the - functions differ by one sample for odd-length `x`. - - Parameters - ---------- - x : array_like - Input array. - axes : int or shape tuple, optional - Axes over which to calculate. Defaults to None, which shifts all axes. - - Returns - ------- - y : ndarray - The shifted array. - - See Also - -------- - fftshift : Shift zero-frequency component to the center of the spectrum. - - Examples - -------- - >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) - >>> freqs - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - >>> np.fft.ifftshift(np.fft.fftshift(freqs)) - array([[ 0., 1., 2.], - [ 3., 4., -4.], - [-3., -2., -1.]]) - - """ - tmp = asarray(x) - ndim = len(tmp.shape) - if axes is None: - axes = list(range(ndim)) - elif isinstance(axes, integer_types): - axes = (axes,) - y = tmp - for k in axes: - n = tmp.shape[k] - p2 = n-(n+1)//2 - mylist = concatenate((arange(p2, n), arange(p2))) - y = take(y, mylist, k) - return y - - -def fftfreq(n, d=1.0): - """ - Return the Discrete Fourier Transform sample frequencies. - - The returned float array `f` contains the frequency bin centers in cycles - per unit of the sample spacing (with zero at the start). For instance, if - the sample spacing is in seconds, then the frequency unit is cycles/second. - - Given a window length `n` and a sample spacing `d`:: - - f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even - f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd - - Parameters - ---------- - n : int - Window length. - d : scalar, optional - Sample spacing (inverse of the sampling rate). Defaults to 1. - - Returns - ------- - f : ndarray - Array of length `n` containing the sample frequencies. - - Examples - -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) - >>> fourier = np.fft.fft(signal) - >>> n = signal.size - >>> timestep = 0.1 - >>> freq = np.fft.fftfreq(n, d=timestep) - >>> freq - array([ 0. , 1.25, 2.5 , 3.75, -5. 
, -3.75, -2.5 , -1.25]) - - """ - if not isinstance(n, integer_types): - raise ValueError("n should be an integer") - val = 1.0 / (n * d) - results = empty(n, int) - N = (n-1)//2 + 1 - p1 = arange(0, N, dtype=int) - results[:N] = p1 - p2 = arange(-(n//2), 0, dtype=int) - results[N:] = p2 - return results * val - #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d) - - -def rfftfreq(n, d=1.0): - """ - Return the Discrete Fourier Transform sample frequencies - (for usage with rfft, irfft). - - The returned float array `f` contains the frequency bin centers in cycles - per unit of the sample spacing (with zero at the start). For instance, if - the sample spacing is in seconds, then the frequency unit is cycles/second. - - Given a window length `n` and a sample spacing `d`:: - - f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even - f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd - - Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) - the Nyquist frequency component is considered to be positive. - - Parameters - ---------- - n : int - Window length. - d : scalar, optional - Sample spacing (inverse of the sampling rate). Defaults to 1. - - Returns - ------- - f : ndarray - Array of length ``n//2 + 1`` containing the sample frequencies. - - Examples - -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) - >>> fourier = np.fft.rfft(signal) - >>> n = signal.size - >>> sample_rate = 100 - >>> freq = np.fft.fftfreq(n, d=1./sample_rate) - >>> freq - array([ 0., 10., 20., 30., 40., -50., -40., -30., -20., -10.]) - >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) - >>> freq - array([ 0., 10., 20., 30., 40., 50.]) - - """ - if not isinstance(n, integer_types): - raise ValueError("n should be an integer") - val = 1.0/(n*d) - N = n//2 + 1 - results = arange(0, N, dtype=int) - return results * val diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/info.py deleted file mode 100644 index 916d452f20914..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/info.py +++ /dev/null @@ -1,179 +0,0 @@ -""" -Discrete Fourier Transform (:mod:`numpy.fft`) -============================================= - -.. currentmodule:: numpy.fft - -Standard FFTs -------------- - -.. autosummary:: - :toctree: generated/ - - fft Discrete Fourier transform. - ifft Inverse discrete Fourier transform. - fft2 Discrete Fourier transform in two dimensions. - ifft2 Inverse discrete Fourier transform in two dimensions. - fftn Discrete Fourier transform in N-dimensions. - ifftn Inverse discrete Fourier transform in N dimensions. - -Real FFTs ---------- - -.. autosummary:: - :toctree: generated/ - - rfft Real discrete Fourier transform. - irfft Inverse real discrete Fourier transform. - rfft2 Real discrete Fourier transform in two dimensions. - irfft2 Inverse real discrete Fourier transform in two dimensions. - rfftn Real discrete Fourier transform in N dimensions. - irfftn Inverse real discrete Fourier transform in N dimensions. - -Hermitian FFTs --------------- - -.. autosummary:: - :toctree: generated/ - - hfft Hermitian discrete Fourier transform. - ihfft Inverse Hermitian discrete Fourier transform. - -Helper routines ---------------- - -.. autosummary:: - :toctree: generated/ - - fftfreq Discrete Fourier Transform sample frequencies. - rfftfreq DFT sample frequencies (for usage with rfft, irfft). - fftshift Shift zero-frequency component to center of spectrum. - ifftshift Inverse of fftshift. 
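A minimal sketch of how these helper routines relate to one another, assuming only that numpy is importable as np (this example is not part of the original module):

import numpy as np

freqs = np.fft.fftfreq(8, d=0.5)        # bin frequencies for 8 samples spaced 0.5 apart
centered = np.fft.fftshift(freqs)       # zero-frequency bin moved to the middle
assert np.array_equal(np.fft.ifftshift(centered), freqs)  # ifftshift undoes fftshift
rpos = np.fft.rfftfreq(8, d=0.5)        # only the non-negative bins, matching rfft output
assert np.array_equal(rpos, np.abs(freqs[:5]))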
- - -Background information ----------------------- - -Fourier analysis is fundamentally a method for expressing a function as a -sum of periodic components, and for recovering the function from those -components. When both the function and its Fourier transform are -replaced with discretized counterparts, it is called the discrete Fourier -transform (DFT). The DFT has become a mainstay of numerical computing in -part because of a very fast algorithm for computing it, called the Fast -Fourier Transform (FFT), which was known to Gauss (1805) and was brought -to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ -provide an accessible introduction to Fourier analysis and its -applications. - -Because the discrete Fourier transform separates its input into -components that contribute at discrete frequencies, it has a great number -of applications in digital signal processing, e.g., for filtering, and in -this context the discretized input to the transform is customarily -referred to as a *signal*, which exists in the *time domain*. The output -is called a *spectrum* or *transform* and exists in the *frequency -domain*. - -Implementation details ----------------------- - -There are many ways to define the DFT, varying in the sign of the -exponent, normalization, etc. In this implementation, the DFT is defined -as - -.. math:: - A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} - \\qquad k = 0,\\ldots,n-1. - -The DFT is in general defined for complex inputs and outputs, and a -single-frequency component at linear frequency :math:`f` is -represented by a complex exponential -:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` -is the sampling interval. - -The values in the result follow so-called "standard" order: If ``A = -fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the mean of -the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` -contains the positive-frequency terms, and ``A[n/2+1:]`` contains the -negative-frequency terms, in order of decreasingly negative frequency. -For an even number of input points, ``A[n/2]`` represents both positive and -negative Nyquist frequency, and is also purely real for real input. For -an odd number of input points, ``A[(n-1)/2]`` contains the largest positive -frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. -The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies -of corresponding elements in the output. The routine -``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the -zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes -that shift. - -When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` -is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. -The phase spectrum is obtained by ``np.angle(A)``. - -The inverse DFT is defined as - -.. math:: - a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} - \\qquad m = 0,\\ldots,n-1. - -It differs from the forward transform by the sign of the exponential -argument and the normalization by :math:`1/n`. 
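As a sanity check on the two definitions above, the sums can be evaluated term by term and compared against the library routines (a sketch assuming numpy is available as np; not part of the original file):

import numpy as np

n = 8
a = np.random.random(n) + 1j * np.random.random(n)
m = np.arange(n)
# A_k = sum_m a_m * exp(-2*pi*i*m*k/n), evaluated directly from the definition
A = np.array([(a * np.exp(-2j * np.pi * m * k / n)).sum() for k in range(n)])
assert np.allclose(A, np.fft.fft(a))    # matches the forward DFT
assert np.allclose(np.fft.ifft(A), a)   # the 1/n-normalized inverse recovers a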
- - Real and Hermitian transforms - ----------------------------- - - When the input is purely real, its transform is Hermitian, i.e., the - component at frequency :math:`f_k` is the complex conjugate of the - component at frequency :math:`-f_k`, which means that for real - inputs there is no information in the negative frequency components that - is not already available from the positive frequency components. - The family of `rfft` functions is - designed to operate on real inputs, and exploits this symmetry by - computing only the positive frequency components, up to and including the - Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex - output points. The inverses of this family assume the same symmetry of - their input, and for an output of ``n`` points use ``n/2+1`` input points. - - Correspondingly, when the spectrum is purely real, the signal is - Hermitian. The `hfft` family of functions exploits this symmetry by - using ``n/2+1`` complex points in the input (time) domain for ``n`` real - points in the frequency domain. - - In higher dimensions, FFTs are used, e.g., for image analysis and - filtering. The computational efficiency of the FFT means that it can - also be a faster way to compute large convolutions, using the property - that a convolution in the time domain is equivalent to a point-by-point - multiplication in the frequency domain. - - Higher dimensions - ----------------- - - In two dimensions, the DFT is defined as - - .. math:: - A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} - a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} - \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, - - which extends in the obvious way to higher dimensions, and the inverses - in higher dimensions also extend in the same way. - - References - ---------- - - .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the - machine calculation of complex Fourier series," *Math. Comput.* - 19: 297-301. - - .. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P., - 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. - 12-13. Cambridge Univ. Press, Cambridge, UK. - - Examples - -------- - - For examples, see the various functions.
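The Hermitian symmetry described in this section can be verified directly (a sketch assuming numpy is available as np; not part of the original file):

import numpy as np

x = np.random.random(8)                  # purely real input, n = 8
A = np.fft.fft(x)
assert np.allclose(A[1:], A[1:][::-1].conj())   # A[k] == conj(A[n-k]) for real x
R = np.fft.rfft(x)                       # stores only the n/2 + 1 non-negative bins
assert R.shape == (5,) and np.allclose(R, A[:5])
assert np.allclose(np.fft.irfft(R), x)   # irfft reconstructs the real signal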
- -""" -from __future__ import division, absolute_import, print_function - -depends = ['core'] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/setup.py deleted file mode 100644 index 79f681e5549f7..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import division, print_function - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('fft', parent_package, top_path) - - config.add_data_dir('tests') - - # Configure fftpack_lite - config.add_extension('fftpack_lite', - sources=['fftpack_litemodule.c', 'fftpack.c'] - ) - - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_fftpack.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_fftpack.py deleted file mode 100644 index 45b5ac784ee9a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_fftpack.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal -from numpy.testing import assert_array_equal -import threading -import sys -if sys.version_info[0] >= 3: - import queue -else: - import Queue as queue - - -def fft1(x): - L = len(x) - phase = -2j*np.pi*(np.arange(L)/float(L)) - phase = np.arange(L).reshape(-1, 1) * phase - return np.sum(x*np.exp(phase), axis=1) - - -class TestFFTShift(TestCase): - - def test_fft_n(self): - self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0) - - -class TestFFT1D(TestCase): - - def test_basic(self): - rand = np.random.random - x = rand(30) + 1j*rand(30) - assert_array_almost_equal(fft1(x), np.fft.fft(x)) - - -class TestFFTThreadSafe(TestCase): - threads = 16 - input_shape = (800, 200) - - def _test_mtsame(self, func, *args): - def worker(args, q): - q.put(func(*args)) - - q = queue.Queue() - expected = func(*args) - - # Spin off a bunch of threads to call the same function simultaneously - t = [threading.Thread(target=worker, args=(args, q)) - for i in range(self.threads)] - [x.start() for x in t] - - [x.join() for x in t] - # Make sure all threads returned the correct value - for i in range(self.threads): - assert_array_equal(q.get(timeout=5), expected, - 'Function returned wrong value in multithreaded context') - - def test_fft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.fft, a) - - def test_ifft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.ifft, a) - - def test_rfft(self): - a = np.ones(self.input_shape) - self._test_mtsame(np.fft.rfft, a) - - def test_irfft(self): - a = np.ones(self.input_shape) * 1+0j - self._test_mtsame(np.fft.irfft, a) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_helper.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_helper.py deleted file mode 100644 index 7eaa99fdb9881..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/fft/tests/test_helper.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -"""Test functions for fftpack.helper module - -Copied from fftpack.helper by Pearu Peterson, October 2005 - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np 
-from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal -from numpy import fft -from numpy import pi - - -class TestFFTShift(TestCase): - - def test_definition(self): - x = [0, 1, 2, 3, 4, -4, -3, -2, -1] - y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] - assert_array_almost_equal(fft.fftshift(x), y) - assert_array_almost_equal(fft.ifftshift(y), x) - x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] - y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] - assert_array_almost_equal(fft.fftshift(x), y) - assert_array_almost_equal(fft.ifftshift(y), x) - - def test_inverse(self): - for n in [1, 4, 9, 100, 211]: - x = np.random.random((n,)) - assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) - - def test_axes_keyword(self): - freqs = [[ 0, 1, 2], [ 3, 4, -4], [-3, -2, -1]] - shifted = [[-1, -3, -2], [ 2, 0, 1], [-4, 3, 4]] - assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) - assert_array_almost_equal(fft.fftshift(freqs, axes=0), - fft.fftshift(freqs, axes=(0,))) - assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) - assert_array_almost_equal(fft.ifftshift(shifted, axes=0), - fft.ifftshift(shifted, axes=(0,))) - - -class TestFFTFreq(TestCase): - - def test_definition(self): - x = [0, 1, 2, 3, 4, -4, -3, -2, -1] - assert_array_almost_equal(9*fft.fftfreq(9), x) - assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) - x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] - assert_array_almost_equal(10*fft.fftfreq(10), x) - assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) - - -class TestRFFTFreq(TestCase): - - def test_definition(self): - x = [0, 1, 2, 3, 4] - assert_array_almost_equal(9*fft.rfftfreq(9), x) - assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) - x = [0, 1, 2, 3, 4, 5] - assert_array_almost_equal(10*fft.rfftfreq(10), x) - assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) - - -class TestIRFFTN(TestCase): - - def test_not_last_axis_success(self): - ar, ai = np.random.random((2, 16, 8, 32)) - a = ar + 1j*ai - - axes = (-2,) - - # Should not raise error - fft.irfftn(a, axes=axes) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/__init__.py deleted file mode 100644 index 8c420b0c33012..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import math - -from .info import __doc__ -from numpy.version import version as __version__ - -from .type_check import * -from .index_tricks import * -from .function_base import * -from .nanfunctions import * -from .shape_base import * -from .stride_tricks import * -from .twodim_base import * -from .ufunclike import * - -from . 
import scimath as emath -from .polynomial import * -#import convertcode -from .utils import * -from .arraysetops import * -from .npyio import * -from .financial import * -from .arrayterator import * -from .arraypad import * -from ._version import * - -__all__ = ['emath', 'math'] -__all__ += type_check.__all__ -__all__ += index_tricks.__all__ -__all__ += function_base.__all__ -__all__ += shape_base.__all__ -__all__ += stride_tricks.__all__ -__all__ += twodim_base.__all__ -__all__ += ufunclike.__all__ -__all__ += arraypad.__all__ -__all__ += polynomial.__all__ -__all__ += utils.__all__ -__all__ += arraysetops.__all__ -__all__ += npyio.__all__ -__all__ += financial.__all__ -__all__ += nanfunctions.__all__ - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_compiled_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_compiled_base.py deleted file mode 100644 index c560a751011e8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_compiled_base.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, '_compiled_base.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_datasource.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_datasource.py deleted file mode 100644 index 338c8b3311b0c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_datasource.py +++ /dev/null @@ -1,666 +0,0 @@ -"""A file interface for handling local and remote data files. - -The goal of datasource is to abstract some of the file system operations -when dealing with data files so the researcher doesn't have to know all the -low-level details. Through datasource, a researcher can obtain and use a -file with one function call, regardless of the location of the file. - -DataSource is meant to augment standard python libraries, not replace them. -It should work seamlessly with standard file IO operations and the os -module. - -DataSource files can originate locally or remotely: - -- local files : '/home/guido/src/local/data.txt' -- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' - -DataSource files can also be compressed or uncompressed. Currently only -gzip and bz2 are supported. - -Example:: - - >>> # Create a DataSource, use os.curdir (default) for local storage. - >>> ds = datasource.DataSource() - >>> - >>> # Open a remote file. - >>> # DataSource downloads the file, stores it locally in: - >>> # './www.google.com/index.html' - >>> # opens the file and returns a file object. - >>> fp = ds.open('http://www.google.com/index.html') - >>> - >>> # Use the file as you normally would - >>> fp.read() - >>> fp.close() - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import shutil - -_open = open - - -# Using a class instead of a module-level dictionary -# to reduce the initial 'import numpy' overhead by -# deferring the import of bz2 and gzip until needed - -# TODO: .zip support, .tar support? -class _FileOpeners(object): - """ - Container for different methods to open (un-)compressed files. - - `_FileOpeners` contains a dictionary that holds one method for each - supported file format.
Attribute lookup is implemented in such a way - that an instance of `_FileOpeners` itself can be indexed with the keys - of that dictionary. Currently uncompressed files as well as files - compressed with ``gzip`` or ``bz2`` compression are supported. - - Notes - ----- - `_file_openers`, an instance of `_FileOpeners`, is made available for - use in the `_datasource` module. - - Examples - -------- - >>> np.lib._datasource._file_openers.keys() - [None, '.bz2', '.gz'] - >>> np.lib._datasource._file_openers['.gz'] is gzip.open - True - - """ - - def __init__(self): - self._loaded = False - self._file_openers = {None: open} - - def _load(self): - if self._loaded: - return - try: - import bz2 - self._file_openers[".bz2"] = bz2.BZ2File - except ImportError: - pass - try: - import gzip - self._file_openers[".gz"] = gzip.open - except ImportError: - pass - self._loaded = True - - def keys(self): - """ - Return the keys of currently supported file openers. - - Parameters - ---------- - None - - Returns - ------- - keys : list - The keys are None for uncompressed files and the file extension - strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression - methods. - - """ - self._load() - return list(self._file_openers.keys()) - - def __getitem__(self, key): - self._load() - return self._file_openers[key] - -_file_openers = _FileOpeners() - -def open(path, mode='r', destpath=os.curdir): - """ - Open `path` with `mode` and return the file object. - - If ``path`` is an URL, it will be downloaded, stored in the - `DataSource` `destpath` directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. - mode : str, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to - append. Available modes depend on the type of object specified by - path. Default is 'r'. - destpath : str, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - - Returns - ------- - out : file object - The opened file. - - Notes - ----- - This is a convenience function that instantiates a `DataSource` and - returns the file object from ``DataSource.open(path)``. - - """ - - ds = DataSource(destpath) - return ds.open(path, mode) - - -class DataSource (object): - """ - DataSource(destpath='.') - - A generic data source file (file, http, ftp, ...). - - DataSources can be local files or remote files/URLs. The files may - also be compressed or uncompressed. DataSource hides some of the - low-level details of downloading the file, allowing you to simply pass - in a valid file path (or URL) and obtain a file object. - - Parameters - ---------- - destpath : str or None, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - - Notes - ----- - URLs require a scheme string (``http://``) to be used, without it they - will fail:: - - >>> repos = DataSource() - >>> repos.exists('www.google.com/index.html') - False - >>> repos.exists('http://www.google.com/index.html') - True - - Temporary directories are deleted when the DataSource is deleted. 
- - Examples - -------- - :: - - >>> ds = DataSource('/home/guido') - >>> urlname = 'http://www.google.com/index.html' - >>> gfile = ds.open('http://www.google.com/index.html') # remote file - >>> ds.abspath(urlname) - '/home/guido/www.google.com/site/index.html' - - >>> ds = DataSource(None) # use with temporary file - >>> ds.open('/home/guido/foobar.txt') - - >>> ds.abspath('/home/guido/foobar.txt') - '/tmp/tmpy4pgsP/home/guido/foobar.txt' - - """ - - def __init__(self, destpath=os.curdir): - """Create a DataSource with a local path at destpath.""" - if destpath: - self._destpath = os.path.abspath(destpath) - self._istmpdest = False - else: - import tempfile # deferring import to improve startup time - self._destpath = tempfile.mkdtemp() - self._istmpdest = True - - def __del__(self): - # Remove temp directories - if self._istmpdest: - shutil.rmtree(self._destpath) - - def _iszip(self, filename): - """Test if the filename is a zip file by looking at the file extension. - - """ - fname, ext = os.path.splitext(filename) - return ext in _file_openers.keys() - - def _iswritemode(self, mode): - """Test if the given mode will open a file for writing.""" - - # Currently only used to test the bz2 files. - _writemodes = ("w", "+") - for c in mode: - if c in _writemodes: - return True - return False - - def _splitzipext(self, filename): - """Split zip extension from filename and return filename. - - *Returns*: - base, zip_ext : {tuple} - - """ - - if self._iszip(filename): - return os.path.splitext(filename) - else: - return filename, None - - def _possible_names(self, filename): - """Return a tuple containing compressed filename variations.""" - names = [filename] - if not self._iszip(filename): - for zipext in _file_openers.keys(): - if zipext: - names.append(filename+zipext) - return names - - def _isurl(self, path): - """Test if path is a net location. Tests the scheme and netloc.""" - - # We do this here to reduce the 'import numpy' initial import time. - if sys.version_info[0] >= 3: - from urllib.parse import urlparse - else: - from urlparse import urlparse - - # BUG : URLs require a scheme string ('http://') to be used. - # www.google.com will fail. - # Should we prepend the scheme for those that don't have it and - # test that also? Similar to the way we append .gz and test for - # for compressed versions of files. - - scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) - return bool(scheme and netloc) - - def _cache(self, path): - """Cache the file specified by path. - - Creates a copy of the file in the datasource cache. - - """ - # We import these here because importing urllib2 is slow and - # a significant fraction of numpy's total import time. - if sys.version_info[0] >= 3: - from urllib.request import urlopen - from urllib.error import URLError - else: - from urllib2 import urlopen - from urllib2 import URLError - - upath = self.abspath(path) - - # ensure directory exists - if not os.path.exists(os.path.dirname(upath)): - os.makedirs(os.path.dirname(upath)) - - # TODO: Doesn't handle compressed files! - if self._isurl(path): - try: - openedurl = urlopen(path) - f = _open(upath, 'wb') - try: - shutil.copyfileobj(openedurl, f) - finally: - f.close() - openedurl.close() - except URLError: - raise URLError("URL not found: %s" % path) - else: - shutil.copyfile(path, upath) - return upath - - def _findfile(self, path): - """Searches for ``path`` and returns full path if found. - - If path is an URL, _findfile will cache a local copy and return the - path to the cached file. 
If path is a local file, _findfile will - return a path to that local file. - - The search will include possible compressed versions of the file - and return the first occurrence found. - - """ - - # Build list of possible local file paths - if not self._isurl(path): - # Valid local paths - filelist = self._possible_names(path) - # Paths in self._destpath - filelist += self._possible_names(self.abspath(path)) - else: - # Cached URLs in self._destpath - filelist = self._possible_names(self.abspath(path)) - # Remote URLs - filelist = filelist + self._possible_names(path) - - for name in filelist: - if self.exists(name): - if self._isurl(name): - name = self._cache(name) - return name - return None - - def abspath(self, path): - """ - Return absolute path of file in the DataSource directory. - - If `path` is an URL, then `abspath` will return either the location - the file exists locally or the location it would exist when opened - using the `open` method. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. - - Returns - ------- - out : str - Complete path, including the `DataSource` destination directory. - - Notes - ----- - The functionality is based on `os.path.abspath`. - - """ - # We do this here to reduce the 'import numpy' initial import time. - if sys.version_info[0] >= 3: - from urllib.parse import urlparse - else: - from urlparse import urlparse - - # TODO: This should be more robust. Handles case where path includes - # the destpath, but not other sub-paths. Failing case: - # path = /home/guido/datafile.txt - # destpath = /home/alex/ - # upath = self.abspath(path) - # upath == '/home/alex/home/guido/datafile.txt' - - # handle case where path includes self._destpath - splitpath = path.split(self._destpath, 2) - if len(splitpath) > 1: - path = splitpath[1] - scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) - netloc = self._sanitize_relative_path(netloc) - upath = self._sanitize_relative_path(upath) - return os.path.join(self._destpath, netloc, upath) - - def _sanitize_relative_path(self, path): - """Return a sanitised relative path for which - os.path.abspath(os.path.join(base, path)).startswith(base) - """ - last = None - path = os.path.normpath(path) - while path != last: - last = path - # Note: os.path.join treats '/' as os.sep on Windows - path = path.lstrip(os.sep).lstrip('/') - path = path.lstrip(os.pardir).lstrip('..') - drive, path = os.path.splitdrive(path) # for Windows - return path - - def exists(self, path): - """ - Test if path exists. - - Test if `path` exists as (and in this order): - - - a local file. - - a remote URL that has been downloaded and stored locally in the - `DataSource` directory. - - a remote URL that has not been downloaded, but is valid and - accessible. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. - - Returns - ------- - out : bool - True if `path` exists. - - Notes - ----- - When `path` is an URL, `exists` will return True if it's either - stored locally in the `DataSource` directory, or is a valid remote - URL. `DataSource` does not discriminate between the two, the file - is accessible if it exists in either location. - - """ - # We import this here because importing urllib2 is slow and - # a significant fraction of numpy's total import time.
- if sys.version_info[0] >= 3: - from urllib.request import urlopen - from urllib.error import URLError - else: - from urllib2 import urlopen - from urllib2 import URLError - - # Test local path - if os.path.exists(path): - return True - - # Test cached url - upath = self.abspath(path) - if os.path.exists(upath): - return True - - # Test remote url - if self._isurl(path): - try: - netfile = urlopen(path) - netfile.close() - del(netfile) - return True - except URLError: - return False - return False - - def open(self, path, mode='r'): - """ - Open and return file-like object. - - If `path` is an URL, it will be downloaded, stored in the - `DataSource` directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. - mode : {'r', 'w', 'a'}, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, - 'a' to append. Available modes depend on the type of object - specified by `path`. Default is 'r'. - - Returns - ------- - out : file object - File object. - - """ - - # TODO: There is no support for opening a file for writing which - # doesn't exist yet (creating a file). Should there be? - - # TODO: Add a ``subdir`` parameter for specifying the subdirectory - # used to store URLs in self._destpath. - - if self._isurl(path) and self._iswritemode(mode): - raise ValueError("URLs are not writeable") - - # NOTE: _findfile will fail on a new file opened for writing. - found = self._findfile(path) - if found: - _fname, ext = self._splitzipext(found) - if ext == 'bz2': - mode.replace("+", "") - return _file_openers[ext](found, mode=mode) - else: - raise IOError("%s not found." % path) - - -class Repository (DataSource): - """ - Repository(baseurl, destpath='.') - - A data repository where multiple DataSource's share a base - URL/directory. - - `Repository` extends `DataSource` by prepending a base URL (or - directory) to all the files it handles. Use `Repository` when you will - be working with multiple files from one base URL. Initialize - `Repository` with the base URL, then refer to each file by its filename - only. - - Parameters - ---------- - baseurl : str - Path to the local directory or remote location that contains the - data files. - destpath : str or None, optional - Path to the directory where the source file gets downloaded to for - use. If `destpath` is None, a temporary directory will be created. - The default path is the current directory. - - Examples - -------- - To analyze all files in the repository, do something like this - (note: this is not self-contained code):: - - >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') - >>> for filename in filelist: - ... fp = repos.open(filename) - ... fp.analyze() - ... fp.close() - - Similarly you could use a URL for a repository:: - - >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') - - """ - - def __init__(self, baseurl, destpath=os.curdir): - """Create a Repository with a shared url or directory of baseurl.""" - DataSource.__init__(self, destpath=destpath) - self._baseurl = baseurl - - def __del__(self): - DataSource.__del__(self) - - def _fullpath(self, path): - """Return complete path for path. 
Prepends baseurl if necessary.""" - splitpath = path.split(self._baseurl, 2) - if len(splitpath) == 1: - result = os.path.join(self._baseurl, path) - else: - result = path # path contains baseurl already - return result - - def _findfile(self, path): - """Extend DataSource method to prepend baseurl to ``path``.""" - return DataSource._findfile(self, self._fullpath(path)) - - def abspath(self, path): - """ - Return absolute path of file in the Repository directory. - - If `path` is an URL, then `abspath` will return either the location - the file exists locally or the location it would exist when opened - using the `open` method. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. This may, but does not - have to, include the `baseurl` with which the `Repository` was - initialized. - - Returns - ------- - out : str - Complete path, including the `DataSource` destination directory. - - """ - return DataSource.abspath(self, self._fullpath(path)) - - def exists(self, path): - """ - Test if path exists prepending Repository base URL to path. - - Test if `path` exists as (and in this order): - - - a local file. - - a remote URL that has been downloaded and stored locally in the - `DataSource` directory. - - a remote URL that has not been downloaded, but is valid and - accessible. - - Parameters - ---------- - path : str - Can be a local file or a remote URL. This may, but does not - have to, include the `baseurl` with which the `Repository` was - initialized. - - Returns - ------- - out : bool - True if `path` exists. - - Notes - ----- - When `path` is an URL, `exists` will return True if it's either - stored locally in the `DataSource` directory, or is a valid remote - URL. `DataSource` does not discriminate between the two, the file - is accessible if it exists in either location. - - """ - return DataSource.exists(self, self._fullpath(path)) - - def open(self, path, mode='r'): - """ - Open and return file-like object prepending Repository base URL. - - If `path` is an URL, it will be downloaded, stored in the - DataSource directory and opened from there. - - Parameters - ---------- - path : str - Local file path or URL to open. This may, but does not have to, - include the `baseurl` with which the `Repository` was - initialized. - mode : {'r', 'w', 'a'}, optional - Mode to open `path`. Mode 'r' for reading, 'w' for writing, - 'a' to append. Available modes depend on the type of object - specified by `path`. Default is 'r'. - - Returns - ------- - out : file object - File object. - - """ - return DataSource.open(self, self._fullpath(path), mode) - - def listdir(self): - """ - List files in the source Repository. - - Returns - ------- - files : list of str - List of file names (not containing a directory part). - - Notes - ----- - Does not currently work for remote repositories. - - """ - if self._isurl(self._baseurl): - raise NotImplementedError( - "Directory listing of URLs, not supported yet.") - else: - return os.listdir(self._baseurl) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_iotools.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_iotools.py deleted file mode 100644 index 9108b2e4ce169..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_iotools.py +++ /dev/null @@ -1,891 +0,0 @@ -"""A collection of functions designed to help I/O with ascii files. 
- -""" -from __future__ import division, absolute_import, print_function - -__docformat__ = "restructuredtext en" - -import sys -import numpy as np -import numpy.core.numeric as nx -from numpy.compat import asbytes, bytes, asbytes_nested, basestring - -if sys.version_info[0] >= 3: - from builtins import bool, int, float, complex, object, str - unicode = str -else: - from __builtin__ import bool, int, float, complex, object, unicode, str - - -if sys.version_info[0] >= 3: - def _bytes_to_complex(s): - return complex(s.decode('ascii')) - - def _bytes_to_name(s): - return s.decode('ascii') -else: - _bytes_to_complex = complex - _bytes_to_name = str - -def _is_string_like(obj): - """ - Check whether obj behaves like a string. - """ - try: - obj + '' - except (TypeError, ValueError): - return False - return True - -def _is_bytes_like(obj): - """ - Check whether obj behaves like a bytes object. - """ - try: - obj + asbytes('') - except (TypeError, ValueError): - return False - return True - - -def _to_filehandle(fname, flag='r', return_opened=False): - """ - Returns the filehandle corresponding to a string or a file. - If the string ends in '.gz', the file is automatically unzipped. - - Parameters - ---------- - fname : string, filehandle - Name of the file whose filehandle must be returned. - flag : string, optional - Flag indicating the status of the file ('r' for read, 'w' for write). - return_opened : boolean, optional - Whether to return the opening status of the file. - """ - if _is_string_like(fname): - if fname.endswith('.gz'): - import gzip - fhd = gzip.open(fname, flag) - elif fname.endswith('.bz2'): - import bz2 - fhd = bz2.BZ2File(fname) - else: - fhd = file(fname, flag) - opened = True - elif hasattr(fname, 'seek'): - fhd = fname - opened = False - else: - raise ValueError('fname must be a string or file handle') - if return_opened: - return fhd, opened - return fhd - - -def has_nested_fields(ndtype): - """ - Returns whether one or several fields of a dtype are nested. - - Parameters - ---------- - ndtype : dtype - Data-type of a structured array. - - Raises - ------ - AttributeError - If `ndtype` does not have a `names` attribute. - - Examples - -------- - >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) - >>> np.lib._iotools.has_nested_fields(dt) - False - - """ - for name in ndtype.names or (): - if ndtype[name].names: - return True - return False - - -def flatten_dtype(ndtype, flatten_base=False): - """ - Unpack a structured data-type by collapsing nested fields and/or fields - with a shape. - - Note that the field names are lost. - - Parameters - ---------- - ndtype : dtype - The datatype to collapse - flatten_base : {False, True}, optional - Whether to transform a field with a shape into several fields or not. - - Examples - -------- - >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ... 
('block', int, (2, 3))]) - >>> np.lib._iotools.flatten_dtype(dt) - [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')] - >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) - [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'), - dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'), - dtype('int32')] - - """ - names = ndtype.names - if names is None: - if flatten_base: - return [ndtype.base] * int(np.prod(ndtype.shape)) - return [ndtype.base] - else: - types = [] - for field in names: - info = ndtype.fields[field] - flat_dt = flatten_dtype(info[0], flatten_base) - types.extend(flat_dt) - return types - - -class LineSplitter(object): - """ - Object to split a string at a given delimiter or at given places. - - Parameters - ---------- - delimiter : str, int, or sequence of ints, optional - If a string, character used to delimit consecutive fields. - If an integer or a sequence of integers, width(s) of each field. - comment : str, optional - Character used to mark the beginning of a comment. Default is '#'. - autostrip : bool, optional - Whether to strip each individual field. Default is True. - - """ - - def autostrip(self, method): - """ - Wrapper to strip each member of the output of `method`. - - Parameters - ---------- - method : function - Function that takes a single argument and returns a sequence of - strings. - - Returns - ------- - wrapped : function - The result of wrapping `method`. `wrapped` takes a single input - argument and returns a list of strings that are stripped of - white-space. - - """ - return lambda input: [_.strip() for _ in method(input)] - # - - def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True): - self.comments = comments - # Delimiter is a character - if isinstance(delimiter, unicode): - delimiter = delimiter.encode('ascii') - if (delimiter is None) or _is_bytes_like(delimiter): - delimiter = delimiter or None - _handyman = self._delimited_splitter - # Delimiter is a list of field widths - elif hasattr(delimiter, '__iter__'): - _handyman = self._variablewidth_splitter - idx = np.cumsum([0] + list(delimiter)) - delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] - # Delimiter is a single integer - elif int(delimiter): - (_handyman, delimiter) = ( - self._fixedwidth_splitter, int(delimiter)) - else: - (_handyman, delimiter) = (self._delimited_splitter, None) - self.delimiter = delimiter - if autostrip: - self._handyman = self.autostrip(_handyman) - else: - self._handyman = _handyman - # - - def _delimited_splitter(self, line): - if self.comments is not None: - line = line.split(self.comments)[0] - line = line.strip(asbytes(" \r\n")) - if not line: - return [] - return line.split(self.delimiter) - # - - def _fixedwidth_splitter(self, line): - if self.comments is not None: - line = line.split(self.comments)[0] - line = line.strip(asbytes("\r\n")) - if not line: - return [] - fixed = self.delimiter - slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] - return [line[s] for s in slices] - # - - def _variablewidth_splitter(self, line): - if self.comments is not None: - line = line.split(self.comments)[0] - if not line: - return [] - slices = self.delimiter - return [line[s] for s in slices] - # - - def __call__(self, line): - return self._handyman(line) - - -class NameValidator(object): - """ - Object to validate a list of strings to use as field names. - - The strings are stripped of any non alphanumeric character, and spaces - are replaced by '_'. 
During instantiation, the user can define a list - of names to exclude, as well as a list of invalid characters. Names in - the exclusion list are appended a '_' character. - - Once an instance has been created, it can be called with a list of - names, and a list of valid names will be created. The `__call__` - method accepts an optional keyword "default" that sets the default name - in case of ambiguity. By default this is 'f', so that names will - default to `f0`, `f1`, etc. - - Parameters - ---------- - excludelist : sequence, optional - A list of names to exclude. This list is appended to the default - list ['return', 'file', 'print']. Excluded names are appended an - underscore: for example, `file` becomes `file_` if supplied. - deletechars : str, optional - A string combining invalid characters that must be deleted from the - names. - casesensitive : {True, False, 'upper', 'lower'}, optional - * If True, field names are case-sensitive. - * If False or 'upper', field names are converted to upper case. - * If 'lower', field names are converted to lower case. - - The default value is True. - replace_space : '_', optional - Character(s) used in replacement of white spaces. - - Notes - ----- - Calling an instance of `NameValidator` is the same as calling its - method `validate`. - - Examples - -------- - >>> validator = np.lib._iotools.NameValidator() - >>> validator(['file', 'field2', 'with space', 'CaSe']) - ['file_', 'field2', 'with_space', 'CaSe'] - - >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], - deletechars='q', - case_sensitive='False') - >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) - ['excl_', 'field2', 'no_', 'with_space', 'case'] - - """ - # - defaultexcludelist = ['return', 'file', 'print'] - defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") - # - - def __init__(self, excludelist=None, deletechars=None, - case_sensitive=None, replace_space='_'): - # Process the exclusion list .. - if excludelist is None: - excludelist = [] - excludelist.extend(self.defaultexcludelist) - self.excludelist = excludelist - # Process the list of characters to delete - if deletechars is None: - delete = self.defaultdeletechars - else: - delete = set(deletechars) - delete.add('"') - self.deletechars = delete - # Process the case option ..... - if (case_sensitive is None) or (case_sensitive is True): - self.case_converter = lambda x: x - elif (case_sensitive is False) or ('u' in case_sensitive): - self.case_converter = lambda x: x.upper() - elif 'l' in case_sensitive: - self.case_converter = lambda x: x.lower() - else: - self.case_converter = lambda x: x - # - self.replace_space = replace_space - - def validate(self, names, defaultfmt="f%i", nbfields=None): - """ - Validate a list of strings as field names for a structured array. - - Parameters - ---------- - names : sequence of str - Strings to be validated. - defaultfmt : str, optional - Default format string, used if validating a given string - reduces its length to zero. - nboutput : integer, optional - Final number of validated names, used to expand or shrink the - initial list of names. - - Returns - ------- - validatednames : list of str - The list of validated field names. - - Notes - ----- - A `NameValidator` instance can be called directly, which is the - same as calling `validate`. For examples, see `NameValidator`. - - """ - # Initial checks .............. 
- if (names is None): - if (nbfields is None): - return None - names = [] - if isinstance(names, basestring): - names = [names, ] - if nbfields is not None: - nbnames = len(names) - if (nbnames < nbfields): - names = list(names) + [''] * (nbfields - nbnames) - elif (nbnames > nbfields): - names = names[:nbfields] - # Set some shortcuts ........... - deletechars = self.deletechars - excludelist = self.excludelist - case_converter = self.case_converter - replace_space = self.replace_space - # Initializes some variables ... - validatednames = [] - seen = dict() - nbempty = 0 - # - for item in names: - item = case_converter(item).strip() - if replace_space: - item = item.replace(' ', replace_space) - item = ''.join([c for c in item if c not in deletechars]) - if item == '': - item = defaultfmt % nbempty - while item in names: - nbempty += 1 - item = defaultfmt % nbempty - nbempty += 1 - elif item in excludelist: - item += '_' - cnt = seen.get(item, 0) - if cnt > 0: - validatednames.append(item + '_%d' % cnt) - else: - validatednames.append(item) - seen[item] = cnt + 1 - return tuple(validatednames) - # - - def __call__(self, names, defaultfmt="f%i", nbfields=None): - return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) - - -def str2bool(value): - """ - Tries to transform a string supposed to represent a boolean to a boolean. - - Parameters - ---------- - value : str - The string that is transformed to a boolean. - - Returns - ------- - boolval : bool - The boolean representation of `value`. - - Raises - ------ - ValueError - If the string is not 'True' or 'False' (case independent) - - Examples - -------- - >>> np.lib._iotools.str2bool('TRUE') - True - >>> np.lib._iotools.str2bool('false') - False - - """ - value = value.upper() - if value == asbytes('TRUE'): - return True - elif value == asbytes('FALSE'): - return False - else: - raise ValueError("Invalid boolean") - - -class ConverterError(Exception): - """ - Exception raised when an error occurs in a converter for string values. - - """ - pass - -class ConverterLockError(ConverterError): - """ - Exception raised when an attempt is made to upgrade a locked converter. - - """ - pass - -class ConversionWarning(UserWarning): - """ - Warning issued when a string converter has a problem. - - Notes - ----- - In `genfromtxt` a `ConversionWarning` is issued if raising exceptions - is explicitly suppressed with the "invalid_raise" keyword. - - """ - pass - - -class StringConverter(object): - """ - Factory class for function transforming a string into another object - (int, float). - - After initialization, an instance can be called to transform a string - into another object. If the string is recognized as representing a - missing value, a default value is returned. - - Attributes - ---------- - func : function - Function used for the conversion. - default : any - Default value to return when the input corresponds to a missing - value. - type : type - Type of the output. - _status : int - Integer representing the order of the conversion. - _mapper : sequence of tuples - Sequence of tuples (dtype, function, default value) to evaluate in - order. - _locked : bool - Holds `locked` parameter. - - Parameters - ---------- - dtype_or_func : {None, dtype, function}, optional - If a `dtype`, specifies the input data type, used to define a basic - function and a default value for missing data. For example, when - `dtype` is float, the `func` attribute is set to `float` and the - default value to `np.nan`. 
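To make the dtype-driven defaults concrete, here is a small sketch against the converter as defined here (numpy 1.9 passes bytes around, hence asbytes):

    from numpy.compat import asbytes
    from numpy.lib._iotools import StringConverter

    # A float dtype selects float conversion; the explicit default is used
    # whenever the input is one of the missing-value markers.
    conv = StringConverter(float, default=-999.0, missing_values=asbytes('N/A'))
    conv(asbytes('1.5'))    # -> 1.5
    conv(asbytes('N/A'))    # -> -999.0

    # With no dtype, the converter starts at the narrowest _mapper entry
    # (bool) and upgrade() climbs bool -> int -> float until one succeeds.
    conv = StringConverter()
    conv.upgrade(asbytes('2.5'))
    conv(asbytes('2.5'))    # -> 2.5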
If a function, this function is used to - convert a string to another object. In this case, it is recommended - to give an associated default value as input. - default : any, optional - Value to return by default, that is, when the string to be - converted is flagged as missing. If not given, `StringConverter` - tries to supply a reasonable default value. - missing_values : sequence of str, optional - Sequence of strings indicating a missing value. - locked : bool, optional - Whether the StringConverter should be locked to prevent automatic - upgrade or not. Default is False. - - """ - # - _mapper = [(nx.bool_, str2bool, False), - (nx.integer, int, -1), - (nx.floating, float, nx.nan), - (complex, _bytes_to_complex, nx.nan + 0j), - (nx.string_, bytes, asbytes('???'))] - (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper) - # - - @classmethod - def _getdtype(cls, val): - """Returns the dtype of the input variable.""" - return np.array(val).dtype - # - - @classmethod - def _getsubdtype(cls, val): - """Returns the type of the dtype of the input variable.""" - return np.array(val).dtype.type - # - # This is a bit annoying. We want to return the "general" type in most - # cases (ie. "string" rather than "S10"), but we want to return the - # specific type for datetime64 (ie. "datetime64[us]" rather than - # "datetime64"). - - @classmethod - def _dtypeortype(cls, dtype): - """Returns dtype for datetime64 and type of dtype otherwise.""" - if dtype.type == np.datetime64: - return dtype - return dtype.type - # - - @classmethod - def upgrade_mapper(cls, func, default=None): - """ - Upgrade the mapper of a StringConverter by adding a new function and - its corresponding default. - - The input function (or sequence of functions) and its associated - default value (if any) is inserted in penultimate position of the - mapper. The corresponding type is estimated from the dtype of the - default value. - - Parameters - ---------- - func : var - Function, or sequence of functions - - Examples - -------- - >>> import dateutil.parser - >>> import datetime - >>> dateparser = dateutil.parser.parse - >>> defaultdate = datetime.date(2000, 1, 1) - >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) - """ - # Func is a single function - if hasattr(func, '__call__'): - cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) - return - elif hasattr(func, '__iter__'): - if isinstance(func[0], (tuple, list)): - for _ in func: - cls._mapper.insert(-1, _) - return - if default is None: - default = [None] * len(func) - else: - default = list(default) - default.extend([None] * (len(func) - len(default))) - for (fct, dft) in zip(func, default): - cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) - # - - def __init__(self, dtype_or_func=None, default=None, missing_values=None, - locked=False): - # Convert unicode (for Py3) - if isinstance(missing_values, unicode): - missing_values = asbytes(missing_values) - elif isinstance(missing_values, (list, tuple)): - missing_values = asbytes_nested(missing_values) - # Defines a lock for upgrade - self._locked = bool(locked) - # No input dtype: minimal initialization - if dtype_or_func is None: - self.func = str2bool - self._status = 0 - self.default = default or False - dtype = np.dtype('bool') - else: - # Is the input a np.dtype ?
- try: - self.func = None - dtype = np.dtype(dtype_or_func) - except TypeError: - # dtype_or_func must be a function, then - if not hasattr(dtype_or_func, '__call__'): - errmsg = ("The input argument `dtype` is neither a" - " function nor a dtype (got '%s' instead)") - raise TypeError(errmsg % type(dtype_or_func)) - # Set the function - self.func = dtype_or_func - # If we don't have a default, try to guess it or set it to - # None - if default is None: - try: - default = self.func(asbytes('0')) - except ValueError: - default = None - dtype = self._getdtype(default) - # Set the status according to the dtype - _status = -1 - for (i, (deftype, func, default_def)) in enumerate(self._mapper): - if np.issubdtype(dtype.type, deftype): - _status = i - if default is None: - self.default = default_def - else: - self.default = default - break - if _status == -1: - # We never found a match in the _mapper... - _status = 0 - self.default = default - self._status = _status - # If the input was a dtype, set the function to the last we saw - if self.func is None: - self.func = func - # If the status is 1 (int), change the function to - # something more robust. - if self.func == self._mapper[1][1]: - if issubclass(dtype.type, np.uint64): - self.func = np.uint64 - elif issubclass(dtype.type, np.int64): - self.func = np.int64 - else: - self.func = lambda x: int(float(x)) - # Store the list of strings corresponding to missing values. - if missing_values is None: - self.missing_values = set([asbytes('')]) - else: - if isinstance(missing_values, bytes): - missing_values = missing_values.split(asbytes(",")) - self.missing_values = set(list(missing_values) + [asbytes('')]) - # - self._callingfunction = self._strict_call - self.type = self._dtypeortype(dtype) - self._checked = False - self._initial_default = default - # - - def _loose_call(self, value): - try: - return self.func(value) - except ValueError: - return self.default - # - - def _strict_call(self, value): - try: - return self.func(value) - except ValueError: - if value.strip() in self.missing_values: - if not self._status: - self._checked = False - return self.default - raise ValueError("Cannot convert string '%s'" % value) - # - - def __call__(self, value): - return self._callingfunction(value) - # - - def upgrade(self, value): - """ - Find the best converter for a given string, and return the result. - - The supplied string `value` is converted by testing different - converters in order. First the `func` method of the - `StringConverter` instance is tried, if this fails other available - converters are tried. The order in which these other converters - are tried is determined by the `_status` attribute of the instance. - - Parameters - ---------- - value : str - The string to convert. - - Returns - ------- - out : any - The result of converting `value` with the appropriate converter. - - """ - self._checked = True - try: - self._strict_call(value) - except ValueError: - # Raise an exception if we locked the converter... 
- if self._locked: - errmsg = "Converter is locked and cannot be upgraded" - raise ConverterLockError(errmsg) - _statusmax = len(self._mapper) - # Complains if we try to upgrade by the maximum - _status = self._status - if _status == _statusmax: - errmsg = "Could not find a valid conversion function" - raise ConverterError(errmsg) - elif _status < _statusmax - 1: - _status += 1 - (self.type, self.func, default) = self._mapper[_status] - self._status = _status - if self._initial_default is not None: - self.default = self._initial_default - else: - self.default = default - self.upgrade(value) - - def iterupgrade(self, value): - self._checked = True - if not hasattr(value, '__iter__'): - value = (value,) - _strict_call = self._strict_call - try: - for _m in value: - _strict_call(_m) - except ValueError: - # Raise an exception if we locked the converter... - if self._locked: - errmsg = "Converter is locked and cannot be upgraded" - raise ConverterLockError(errmsg) - _statusmax = len(self._mapper) - # Complains if we try to upgrade by the maximum - _status = self._status - if _status == _statusmax: - raise ConverterError( - "Could not find a valid conversion function" - ) - elif _status < _statusmax - 1: - _status += 1 - (self.type, self.func, default) = self._mapper[_status] - if self._initial_default is not None: - self.default = self._initial_default - else: - self.default = default - self._status = _status - self.iterupgrade(value) - - def update(self, func, default=None, testing_value=None, - missing_values=asbytes(''), locked=False): - """ - Set StringConverter attributes directly. - - Parameters - ---------- - func : function - Conversion function. - default : any, optional - Value to return by default, that is, when the string to be - converted is flagged as missing. If not given, - `StringConverter` tries to supply a reasonable default value. - testing_value : str, optional - A string representing a standard input value of the converter. - This string is used to help defining a reasonable default - value. - missing_values : sequence of str, optional - Sequence of strings indicating a missing value. - locked : bool, optional - Whether the StringConverter should be locked to prevent - automatic upgrade or not. Default is False. - - Notes - ----- - `update` takes the same parameters as the constructor of - `StringConverter`, except that `func` does not accept a `dtype` - whereas `dtype_or_func` in the constructor does. - - """ - self.func = func - self._locked = locked - # Don't reset the default to None if we can avoid it - if default is not None: - self.default = default - self.type = self._dtypeortype(self._getdtype(default)) - else: - try: - tester = func(testing_value or asbytes('1')) - except (TypeError, ValueError): - tester = None - self.type = self._dtypeortype(self._getdtype(tester)) - # Add the missing values to the existing set - if missing_values is not None: - if _is_bytes_like(missing_values): - self.missing_values.add(missing_values) - elif hasattr(missing_values, '__iter__'): - for val in missing_values: - self.missing_values.add(val) - else: - self.missing_values = [] - - -def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): - """ - Convenience function to create a `np.dtype` object. - - The function processes the input `dtype` and matches it with the given - names. - - Parameters - ---------- - ndtype : var - Definition of the dtype. Can be any string or dictionary recognized - by the `np.dtype` function, or a sequence of types. 
- names : str or sequence, optional - Sequence of strings to use as field names for a structured dtype. - For convenience, `names` can be a string of a comma-separated list - of names. - defaultfmt : str, optional - Format string used to define missing names, such as ``"f%i"`` - (default) or ``"fields_%02i"``. - validationargs : optional - A series of optional arguments used to initialize a - `NameValidator`. - - Examples - -------- - >>> np.lib._iotools.easy_dtype(float) - dtype('float64') - >>> np.lib._iotools.easy_dtype("i4, f8") - dtype([('f0', '<i4'), ('f1', '<f8')]) - >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i") - dtype([('field_000', '<i4'), ('field_001', '<f8')]) - - >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c") - dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')]) - >>> np.lib._iotools.easy_dtype(float, names="a,b,c") - dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')]) - - """ - try: - ndtype = np.dtype(ndtype) - except TypeError: - validate = NameValidator(**validationargs) - nbfields = len(ndtype) - if names is None: - names = [''] * len(ndtype) - elif isinstance(names, basestring): - names = names.split(",") - names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt) - ndtype = np.dtype(dict(formats=ndtype, names=names)) - else: - nbtypes = len(ndtype) - # Explicit names - if names is not None: - validate = NameValidator(**validationargs) - if isinstance(names, basestring): - names = names.split(",") - # Simple dtype: repeat to match the nb of names - if nbtypes == 0: - formats = tuple([ndtype.type] * len(names)) - names = validate(names, defaultfmt=defaultfmt) - ndtype = np.dtype(list(zip(names, formats))) - # Structured dtype: just validate the names as needed - else: - ndtype.names = validate(names, nbfields=nbtypes, - defaultfmt=defaultfmt) - # No implicit names - elif (nbtypes > 0): - validate = NameValidator(**validationargs) - # Default initial names : should we change the format ? - if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and - (defaultfmt != "f%i")): - ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt) - # Explicit initial names : just validate - else: - ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt) - return ndtype diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_version.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_version.py deleted file mode 100644 index 54b9c1dc78125..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/_version.py +++ /dev/null @@ -1,156 +0,0 @@ -"""Utility to compare (Numpy) version strings. - -The NumpyVersion class allows properly comparing numpy version strings. -The LooseVersion and StrictVersion classes that distutils provides don't -work; they don't recognize anything like alpha/beta/rc/dev versions. - -""" -from __future__ import division, absolute_import, print_function - -import re - -from numpy.compat import basestring - - -__all__ = ['NumpyVersion'] - - -class NumpyVersion(): - """Parse and compare numpy version strings. - - Numpy has the following versioning scheme (numbers given are examples; they - can be > 9 in principle): - - - Released version: '1.8.0', '1.8.1', etc. - - Alpha: '1.8.0a1', '1.8.0a2', etc. - - Beta: '1.8.0b1', '1.8.0b2', etc. - - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. - - Development versions (no git hash available): '1.8.0.dev-Unknown' - - Comparing needs to be done against a valid version string or other - `NumpyVersion` instance. Note that all development versions of the same - (pre-)release compare equal. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - vstring : str - Numpy version string (``np.__version__``). - - Examples - -------- - >>> from numpy.lib import NumpyVersion - >>> if NumpyVersion(np.__version__) < '1.7.0': - ...
print('skip') - skip - - >>> NumpyVersion('1.7') # raises ValueError, add ".0" - - """ - - def __init__(self, vstring): - self.vstring = vstring - ver_main = re.match(r'\d[.]\d+[.]\d+', vstring) - if not ver_main: - raise ValueError("Not a valid numpy version string") - - self.version = ver_main.group() - self.major, self.minor, self.bugfix = [int(x) for x in - self.version.split('.')] - if len(vstring) == ver_main.end(): - self.pre_release = 'final' - else: - alpha = re.match(r'a\d', vstring[ver_main.end():]) - beta = re.match(r'b\d', vstring[ver_main.end():]) - rc = re.match(r'rc\d', vstring[ver_main.end():]) - pre_rel = [m for m in [alpha, beta, rc] if m is not None] - if pre_rel: - self.pre_release = pre_rel[0].group() - else: - self.pre_release = '' - - self.is_devversion = bool(re.search(r'.dev', vstring)) - - def _compare_version(self, other): - """Compare major.minor.bugfix""" - if self.major == other.major: - if self.minor == other.minor: - if self.bugfix == other.bugfix: - vercmp = 0 - elif self.bugfix > other.bugfix: - vercmp = 1 - else: - vercmp = -1 - elif self.minor > other.minor: - vercmp = 1 - else: - vercmp = -1 - elif self.major > other.major: - vercmp = 1 - else: - vercmp = -1 - - return vercmp - - def _compare_pre_release(self, other): - """Compare alpha/beta/rc/final.""" - if self.pre_release == other.pre_release: - vercmp = 0 - elif self.pre_release == 'final': - vercmp = 1 - elif other.pre_release == 'final': - vercmp = -1 - elif self.pre_release > other.pre_release: - vercmp = 1 - else: - vercmp = -1 - - return vercmp - - def _compare(self, other): - if not isinstance(other, (basestring, NumpyVersion)): - raise ValueError("Invalid object to compare with NumpyVersion.") - - if isinstance(other, basestring): - other = NumpyVersion(other) - - vercmp = self._compare_version(other) - if vercmp == 0: - # Same x.y.z version, check for alpha/beta/rc - vercmp = self._compare_pre_release(other) - if vercmp == 0: - # Same version and same pre-release, check if dev version - if self.is_devversion is other.is_devversion: - vercmp = 0 - elif self.is_devversion: - vercmp = -1 - else: - vercmp = 1 - - return vercmp - - def __lt__(self, other): - return self._compare(other) < 0 - - def __le__(self, other): - return self._compare(other) <= 0 - - def __eq__(self, other): - return self._compare(other) == 0 - - def __ne__(self, other): - return self._compare(other) != 0 - - def __gt__(self, other): - return self._compare(other) > 0 - - def __ge__(self, other): - return self._compare(other) >= 0 - - def __repr__(self): - return "NumpyVersion(%s)" % self.vstring diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraypad.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraypad.py deleted file mode 100644 index bbfdce794e770..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraypad.py +++ /dev/null @@ -1,1475 +0,0 @@ -""" -The arraypad module contains a group of functions to pad values onto the edges -of an n-dimensional array. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.compat import long - - -__all__ = ['pad'] - - -############################################################################### -# Private utility functions. - - -def _arange_ndarray(arr, shape, axis, reverse=False): - """ - Create an ndarray of `shape` with increments along specified `axis` - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - shape : tuple of ints - Shape of desired array.
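The comparison chain above sorts a < b < rc < final within one x.y.z version, with a .dev build ranking just below the release it targets. A few examples of the resulting semantics:

    import numpy as np
    from numpy.lib import NumpyVersion

    NumpyVersion('1.8.0a1') < '1.8.0b2'           # True: alphas sort before betas
    NumpyVersion('1.8.0rc1') < '1.8.0'            # True: rc pre-dates the release
    NumpyVersion('1.8.0.dev-f1234af') < '1.8.0'   # True: dev pre-dates the final
    NumpyVersion(np.__version__) >= '1.7.0'       # the typical feature-gate test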
Should be equivalent to `arr.shape` except - `shape[axis]` which may have any positive value. - axis : int - Axis to increment along. - reverse : bool - If False, increment in a positive fashion from 1 to `shape[axis]`, - inclusive. If True, the bounds are the same but the order reversed. - - Returns - ------- - padarr : ndarray - Output array sized to pad `arr` along `axis`, with linear range from - 1 to `shape[axis]` along specified `axis`. - - Notes - ----- - The range is deliberately 1-indexed for this specific use case. Think of - this algorithm as broadcasting `np.arange` to a single `axis` of an - arbitrarily shaped ndarray. - - """ - initshape = tuple(1 if i != axis else shape[axis] - for (i, x) in enumerate(arr.shape)) - if not reverse: - padarr = np.arange(1, shape[axis] + 1) - else: - padarr = np.arange(shape[axis], 0, -1) - padarr = padarr.reshape(initshape) - for i, dim in enumerate(shape): - if padarr.shape[i] != dim: - padarr = padarr.repeat(dim, axis=i) - return padarr - - -def _round_ifneeded(arr, dtype): - """ - Rounds arr inplace if destination dtype is integer. - - Parameters - ---------- - arr : ndarray - Input array. - dtype : dtype - The dtype of the destination array. - - """ - if np.issubdtype(dtype, np.integer): - arr.round(out=arr) - - -def _prepend_const(arr, pad_amt, val, axis=-1): - """ - Prepend constant `val` along `axis` of `arr`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - val : scalar - Constant value to use. For best results should be of type `arr.dtype`; - if not, it will be cast to `arr.dtype`. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` constant `val` prepended along `axis`. - - """ - if pad_amt == 0: - return arr - padshape = tuple(x if i != axis else pad_amt - for (i, x) in enumerate(arr.shape)) - if val == 0: - return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr), - axis=axis) - else: - return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype), - arr), axis=axis) - - -def _append_const(arr, pad_amt, val, axis=-1): - """ - Append constant `val` along `axis` of `arr`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - val : scalar - Constant value to use. For best results should be of type `arr.dtype`; - if not, it will be cast to `arr.dtype`. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` constant `val` appended along `axis`. - - """ - if pad_amt == 0: - return arr - padshape = tuple(x if i != axis else pad_amt - for (i, x) in enumerate(arr.shape)) - if val == 0: - return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)), - axis=axis) - else: - return np.concatenate( - (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis) - - -def _prepend_edge(arr, pad_amt, axis=-1): - """ - Prepend `pad_amt` to `arr` along `axis` by extending edge values. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, extended by `pad_amt` edge values prepended along `axis`.
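These edge helpers are what the public `mode='edge'` dispatches to; for a 1-D array the effect is simply:

    import numpy as np

    a = np.array([1, 2, 3])
    np.lib.pad(a, (2, 3), 'edge')   # repeat each boundary value
    # array([1, 1, 1, 2, 3, 3, 3, 3])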
- - """ - if pad_amt == 0: - return arr - - edge_slice = tuple(slice(None) if i != axis else 0 - for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - edge_arr = arr[edge_slice].reshape(pad_singleton) - return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr), - axis=axis) - - -def _append_edge(arr, pad_amt, axis=-1): - """ - Append `pad_amt` to `arr` along `axis` by extending edge values. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, extended by `pad_amt` edge values prepended along - `axis`. - - """ - if pad_amt == 0: - return arr - - edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1 - for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - edge_arr = arr[edge_slice].reshape(pad_singleton) - return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)), - axis=axis) - - -def _prepend_ramp(arr, pad_amt, end, axis=-1): - """ - Prepend linear ramp along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - end : scalar - Constal value to use. For best results should be of type `arr.dtype`; - if not `arr.dtype` will be cast to `arr.dtype`. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values prepended along `axis`. The - prepended region ramps linearly from the edge value to `end`. - - """ - if pad_amt == 0: - return arr - - # Generate shape for final concatenated array - padshape = tuple(x if i != axis else pad_amt - for (i, x) in enumerate(arr.shape)) - - # Generate an n-dimensional array incrementing along `axis` - ramp_arr = _arange_ndarray(arr, padshape, axis, - reverse=True).astype(np.float64) - - # Appropriate slicing to extract n-dimensional edge along `axis` - edge_slice = tuple(slice(None) if i != axis else 0 - for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract edge, reshape to original rank, and extend along `axis` - edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) - - # Linear ramp - slope = (end - edge_pad) / float(pad_amt) - ramp_arr = ramp_arr * slope - ramp_arr += edge_pad - _round_ifneeded(ramp_arr, arr.dtype) - - # Ramp values will most likely be float, cast them to the same type as arr - return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis) - - -def _append_ramp(arr, pad_amt, end, axis=-1): - """ - Append linear ramp along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - end : scalar - Constal value to use. For best results should be of type `arr.dtype`; - if not `arr.dtype` will be cast to `arr.dtype`. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region ramps linearly from the edge value to `end`. 
- - """ - if pad_amt == 0: - return arr - - # Generate shape for final concatenated array - padshape = tuple(x if i != axis else pad_amt - for (i, x) in enumerate(arr.shape)) - - # Generate an n-dimensional array incrementing along `axis` - ramp_arr = _arange_ndarray(arr, padshape, axis, - reverse=False).astype(np.float64) - - # Slice a chunk from the edge to calculate stats on - edge_slice = tuple(slice(None) if i != axis else -1 - for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract edge, reshape to original rank, and extend along `axis` - edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis) - - # Linear ramp - slope = (end - edge_pad) / float(pad_amt) - ramp_arr = ramp_arr * slope - ramp_arr += edge_pad - _round_ifneeded(ramp_arr, arr.dtype) - - # Ramp values will most likely be float, cast them to the same type as arr - return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis) - - -def _prepend_max(arr, pad_amt, num, axis=-1): - """ - Prepend `pad_amt` maximum values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - num : int - Depth into `arr` along `axis` to calculate maximum. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - prepended region is the maximum of the first `num` values along - `axis`. - - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _prepend_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - max_slice = tuple(slice(None) if i != axis else slice(num) - for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate max, reshape to add singleton dimension back - max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) - - # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` - return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr), - axis=axis) - - -def _append_max(arr, pad_amt, num, axis=-1): - """ - Pad one `axis` of `arr` with the maximum of the last `num` elements. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - num : int - Depth into `arr` along `axis` to calculate maximum. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region is the maximum of the final `num` values along `axis`. 
- - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _append_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - end = arr.shape[axis] - 1 - if num is not None: - max_slice = tuple( - slice(None) if i != axis else slice(end, end - num, -1) - for (i, x) in enumerate(arr.shape)) - else: - max_slice = tuple(slice(None) for x in arr.shape) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate max, reshape to add singleton dimension back - max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton) - - # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` - return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)), - axis=axis) - - -def _prepend_mean(arr, pad_amt, num, axis=-1): - """ - Prepend `pad_amt` mean values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - num : int - Depth into `arr` along `axis` to calculate mean. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values prepended along `axis`. The - prepended region is the mean of the first `num` values along `axis`. - - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _prepend_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - mean_slice = tuple(slice(None) if i != axis else slice(num) - for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate mean, reshape to add singleton dimension back - mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton) - _round_ifneeded(mean_chunk, arr.dtype) - - # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` - return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype), - arr), axis=axis) - - -def _append_mean(arr, pad_amt, num, axis=-1): - """ - Append `pad_amt` mean values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - num : int - Depth into `arr` along `axis` to calculate mean. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region is the maximum of the final `num` values along `axis`. 
- - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _append_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - end = arr.shape[axis] - 1 - if num is not None: - mean_slice = tuple( - slice(None) if i != axis else slice(end, end - num, -1) - for (i, x) in enumerate(arr.shape)) - else: - mean_slice = tuple(slice(None) for x in arr.shape) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate mean, reshape to add singleton dimension back - mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton) - _round_ifneeded(mean_chunk, arr.dtype) - - # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` - return np.concatenate( - (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) - - -def _prepend_med(arr, pad_amt, num, axis=-1): - """ - Prepend `pad_amt` median values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - num : int - Depth into `arr` along `axis` to calculate median. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values prepended along `axis`. The - prepended region is the median of the first `num` values along `axis`. - - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _prepend_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - med_slice = tuple(slice(None) if i != axis else slice(num) - for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate median, reshape to add singleton dimension back - med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) - _round_ifneeded(med_chunk, arr.dtype) - - # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` - return np.concatenate( - (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis) - - -def _append_med(arr, pad_amt, num, axis=-1): - """ - Append `pad_amt` median values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - num : int - Depth into `arr` along `axis` to calculate median. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region is the median of the final `num` values along `axis`. 
- - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _append_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - end = arr.shape[axis] - 1 - if num is not None: - med_slice = tuple( - slice(None) if i != axis else slice(end, end - num, -1) - for (i, x) in enumerate(arr.shape)) - else: - med_slice = tuple(slice(None) for x in arr.shape) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate median, reshape to add singleton dimension back - med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton) - _round_ifneeded(med_chunk, arr.dtype) - - # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` - return np.concatenate( - (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis) - - -def _prepend_min(arr, pad_amt, num, axis=-1): - """ - Prepend `pad_amt` minimum values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - num : int - Depth into `arr` along `axis` to calculate minimum. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values prepended along `axis`. The - prepended region is the minimum of the first `num` values along - `axis`. - - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _prepend_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - min_slice = tuple(slice(None) if i != axis else slice(num) - for (i, x) in enumerate(arr.shape)) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate min, reshape to add singleton dimension back - min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) - - # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` - return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr), - axis=axis) - - -def _append_min(arr, pad_amt, num, axis=-1): - """ - Append `pad_amt` median values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - num : int - Depth into `arr` along `axis` to calculate minimum. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region is the minimum of the final `num` values along `axis`. 
- - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _append_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - end = arr.shape[axis] - 1 - if num is not None: - min_slice = tuple( - slice(None) if i != axis else slice(end, end - num, -1) - for (i, x) in enumerate(arr.shape)) - else: - min_slice = tuple(slice(None) for x in arr.shape) - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - - # Extract slice, calculate min, reshape to add singleton dimension back - min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton) - - # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` - return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)), - axis=axis) - - -def _pad_ref(arr, pad_amt, method, axis=-1): - """ - Pad `axis` of `arr` by reflection. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : tuple of ints, length 2 - Padding to (prepend, append) along `axis`. - method : str - Controls method of reflection; options are 'even' or 'odd'. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` - values appended along `axis`. Both regions are padded with reflected - values from the original array. - - Notes - ----- - This algorithm does not pad with repetition, i.e. the edges are not - repeated in the reflection. For that behavior, use `method='symmetric'`. - - The modes 'reflect', 'symmetric', and 'wrap' must be padded with a - single function, lest the indexing tricks in non-integer multiples of the - original shape would violate repetition in the final iteration. 
- - """ - # Implicit booleanness to test for zero (or None) in any scalar type - if pad_amt[0] == 0 and pad_amt[1] == 0: - return arr - - ########################################################################## - # Prepended region - - # Slice off a reverse indexed chunk from near edge to pad `arr` before - ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1) - for (i, x) in enumerate(arr.shape)) - - ref_chunk1 = arr[ref_slice] - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - if pad_amt[0] == 1: - ref_chunk1 = ref_chunk1.reshape(pad_singleton) - - # Memory/computationally more expensive, only do this if `method='odd'` - if 'odd' in method and pad_amt[0] > 0: - edge_slice1 = tuple(slice(None) if i != axis else 0 - for (i, x) in enumerate(arr.shape)) - edge_chunk = arr[edge_slice1].reshape(pad_singleton) - ref_chunk1 = 2 * edge_chunk - ref_chunk1 - del edge_chunk - - ########################################################################## - # Appended region - - # Slice off a reverse indexed chunk from far edge to pad `arr` after - start = arr.shape[axis] - pad_amt[1] - 1 - end = arr.shape[axis] - 1 - ref_slice = tuple(slice(None) if i != axis else slice(start, end) - for (i, x) in enumerate(arr.shape)) - rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1) - for (i, x) in enumerate(arr.shape)) - ref_chunk2 = arr[ref_slice][rev_idx] - - if pad_amt[1] == 1: - ref_chunk2 = ref_chunk2.reshape(pad_singleton) - - if 'odd' in method: - edge_slice2 = tuple(slice(None) if i != axis else -1 - for (i, x) in enumerate(arr.shape)) - edge_chunk = arr[edge_slice2].reshape(pad_singleton) - ref_chunk2 = 2 * edge_chunk - ref_chunk2 - del edge_chunk - - # Concatenate `arr` with both chunks, extending along `axis` - return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis) - - -def _pad_sym(arr, pad_amt, method, axis=-1): - """ - Pad `axis` of `arr` by symmetry. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : tuple of ints, length 2 - Padding to (prepend, append) along `axis`. - method : str - Controls method of symmetry; options are 'even' or 'odd'. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` - values appended along `axis`. Both regions are padded with symmetric - values from the original array. - - Notes - ----- - This algorithm DOES pad with repetition, i.e. the edges are repeated. - For a method that does not repeat edges, use `method='reflect'`. - - The modes 'reflect', 'symmetric', and 'wrap' must be padded with a - single function, lest the indexing tricks in non-integer multiples of the - original shape would violate repetition in the final iteration. 
- - """ - # Implicit booleanness to test for zero (or None) in any scalar type - if pad_amt[0] == 0 and pad_amt[1] == 0: - return arr - - ########################################################################## - # Prepended region - - # Slice off a reverse indexed chunk from near edge to pad `arr` before - sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0]) - for (i, x) in enumerate(arr.shape)) - rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1) - for (i, x) in enumerate(arr.shape)) - sym_chunk1 = arr[sym_slice][rev_idx] - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - if pad_amt[0] == 1: - sym_chunk1 = sym_chunk1.reshape(pad_singleton) - - # Memory/computationally more expensive, only do this if `method='odd'` - if 'odd' in method and pad_amt[0] > 0: - edge_slice1 = tuple(slice(None) if i != axis else 0 - for (i, x) in enumerate(arr.shape)) - edge_chunk = arr[edge_slice1].reshape(pad_singleton) - sym_chunk1 = 2 * edge_chunk - sym_chunk1 - del edge_chunk - - ########################################################################## - # Appended region - - # Slice off a reverse indexed chunk from far edge to pad `arr` after - start = arr.shape[axis] - pad_amt[1] - end = arr.shape[axis] - sym_slice = tuple(slice(None) if i != axis else slice(start, end) - for (i, x) in enumerate(arr.shape)) - sym_chunk2 = arr[sym_slice][rev_idx] - - if pad_amt[1] == 1: - sym_chunk2 = sym_chunk2.reshape(pad_singleton) - - if 'odd' in method: - edge_slice2 = tuple(slice(None) if i != axis else -1 - for (i, x) in enumerate(arr.shape)) - edge_chunk = arr[edge_slice2].reshape(pad_singleton) - sym_chunk2 = 2 * edge_chunk - sym_chunk2 - del edge_chunk - - # Concatenate `arr` with both chunks, extending along `axis` - return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis) - - -def _pad_wrap(arr, pad_amt, axis=-1): - """ - Pad `axis` of `arr` via wrapping. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : tuple of ints, length 2 - Padding to (prepend, append) along `axis`. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` - values appended along `axis`. Both regions are padded wrapped values - from the opposite end of `axis`. - - Notes - ----- - This method of padding is also known as 'tile' or 'tiling'. - - The modes 'reflect', 'symmetric', and 'wrap' must be padded with a - single function, lest the indexing tricks in non-integer multiples of the - original shape would violate repetition in the final iteration. 
- - """ - # Implicit booleanness to test for zero (or None) in any scalar type - if pad_amt[0] == 0 and pad_amt[1] == 0: - return arr - - ########################################################################## - # Prepended region - - # Slice off a reverse indexed chunk from near edge to pad `arr` before - start = arr.shape[axis] - pad_amt[0] - end = arr.shape[axis] - wrap_slice = tuple(slice(None) if i != axis else slice(start, end) - for (i, x) in enumerate(arr.shape)) - wrap_chunk1 = arr[wrap_slice] - - # Shape to restore singleton dimension after slicing - pad_singleton = tuple(x if i != axis else 1 - for (i, x) in enumerate(arr.shape)) - if pad_amt[0] == 1: - wrap_chunk1 = wrap_chunk1.reshape(pad_singleton) - - ########################################################################## - # Appended region - - # Slice off a reverse indexed chunk from far edge to pad `arr` after - wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1]) - for (i, x) in enumerate(arr.shape)) - wrap_chunk2 = arr[wrap_slice] - - if pad_amt[1] == 1: - wrap_chunk2 = wrap_chunk2.reshape(pad_singleton) - - # Concatenate `arr` with both chunks, extending along `axis` - return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis) - - -def _normalize_shape(narray, shape): - """ - Private function which does some checks and normalizes the possibly - much simpler representations of 'pad_width', 'stat_length', - 'constant_values', 'end_values'. - - Parameters - ---------- - narray : ndarray - Input ndarray - shape : {sequence, int}, optional - The width of padding (pad_width) or the number of elements on the - edge of the narray used for statistics (stat_length). - ((before_1, after_1), ... (before_N, after_N)) unique number of - elements for each axis where `N` is rank of `narray`. - ((before, after),) yields same before and after constants for each - axis. - (constant,) or int is a shortcut for before = after = constant for - all axes. - - Returns - ------- - _normalize_shape : tuple of tuples - int => ((int, int), (int, int), ...) - [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...) - ((int1, int2), (int3, int4), ...) => no change - [[int1, int2], ] => ((int1, int2), (int1, int2), ...) - ((int1, int2), ) => ((int1, int2), (int1, int2), ...) - [[int , ], ] => ((int, int), (int, int), ...) - ((int , ), ) => ((int, int), (int, int), ...) - - """ - normshp = None - shapelen = len(np.shape(narray)) - if (isinstance(shape, int)) or shape is None: - normshp = ((shape, shape), ) * shapelen - elif (isinstance(shape, (tuple, list)) - and isinstance(shape[0], (tuple, list)) - and len(shape) == shapelen): - normshp = shape - for i in normshp: - if len(i) != 2: - fmt = "Unable to create correctly shaped tuple from %s" - raise ValueError(fmt % (normshp,)) - elif (isinstance(shape, (tuple, list)) - and isinstance(shape[0], (int, float, long)) - and len(shape) == 1): - normshp = ((shape[0], shape[0]), ) * shapelen - elif (isinstance(shape, (tuple, list)) - and isinstance(shape[0], (int, float, long)) - and len(shape) == 2): - normshp = (shape, ) * shapelen - if normshp is None: - fmt = "Unable to create correctly shaped tuple from %s" - raise ValueError(fmt % (shape,)) - return normshp - - -def _validate_lengths(narray, number_elements): - """ - Private function which does some checks and reformats pad_width and - stat_length using _normalize_shape. 
- - Parameters - ---------- - narray : ndarray - Input ndarray - number_elements : {sequence, int}, optional - The width of padding (pad_width) or the number of elements on the edge - of the narray used for statistics (stat_length). - ((before_1, after_1), ... (before_N, after_N)) unique number of - elements for each axis. - ((before, after),) yields same before and after constants for each - axis. - (constant,) or int is a shortcut for before = after = constant for all - axes. - - Returns - ------- - _validate_lengths : tuple of tuples - int => ((int, int), (int, int), ...) - [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...) - ((int1, int2), (int3, int4), ...) => no change - [[int1, int2], ] => ((int1, int2), (int1, int2), ...) - ((int1, int2), ) => ((int1, int2), (int1, int2), ...) - [[int , ], ] => ((int, int), (int, int), ...) - ((int , ), ) => ((int, int), (int, int), ...) - - """ - normshp = _normalize_shape(narray, number_elements) - for i in normshp: - chk = [1 if x is None else x for x in i] - chk = [1 if x >= 0 else -1 for x in chk] - if (chk[0] < 0) or (chk[1] < 0): - fmt = "%s cannot contain negative values." - raise ValueError(fmt % (number_elements,)) - return normshp - - -############################################################################### -# Public functions - - -def pad(array, pad_width, mode=None, **kwargs): - """ - Pads an array. - - Parameters - ---------- - array : array_like of rank N - Input array - pad_width : {sequence, int} - Number of values padded to the edges of each axis. - ((before_1, after_1), ... (before_N, after_N)) unique pad widths - for each axis. - ((before, after),) yields same before and after pad for each axis. - (pad,) or int is a shortcut for before = after = pad width for all - axes. - mode : {str, function} - One of the following string values or a user supplied function. - - 'constant' - Pads with a constant value. - 'edge' - Pads with the edge values of array. - 'linear_ramp' - Pads with the linear ramp between end_value and the - array edge value. - 'maximum' - Pads with the maximum value of all or part of the - vector along each axis. - 'mean' - Pads with the mean value of all or part of the - vector along each axis. - 'median' - Pads with the median value of all or part of the - vector along each axis. - 'minimum' - Pads with the minimum value of all or part of the - vector along each axis. - 'reflect' - Pads with the reflection of the vector mirrored on - the first and last values of the vector along each - axis. - 'symmetric' - Pads with the reflection of the vector mirrored - along the edge of the array. - 'wrap' - Pads with the wrap of the vector along the axis. - The first values are used to pad the end and the - end values are used to pad the beginning. - - Padding function, see Notes. - stat_length : {sequence, int}, optional - Used in 'maximum', 'mean', 'median', and 'minimum'. Number of - values at edge of each axis used to calculate the statistic value. - - ((before_1, after_1), ... (before_N, after_N)) unique statistic - lengths for each axis. - - ((before, after),) yields same before and after statistic lengths - for each axis. - - (stat_length,) or int is a shortcut for before = after = statistic - length for all axes. - - Default is ``None``, to use the entire axis. - constant_values : {sequence, int}, optional - Used in 'constant'. The values to set the padded values for each - axis. - - ((before_1, after_1), ... (before_N, after_N)) unique pad constants - for each axis. 
- - ((before, after),) yields same before and after constants for each - axis. - - (constant,) or int is a shortcut for before = after = constant for - all axes. - - Default is 0. - end_values : {sequence, int}, optional - Used in 'linear_ramp'. The values used for the ending value of the - linear_ramp and that will form the edge of the padded array. - - ((before_1, after_1), ... (before_N, after_N)) unique end values - for each axis. - - ((before, after),) yields same before and after end values for each - axis. - - (constant,) or int is a shortcut for before = after = end value for - all axes. - - Default is 0. - reflect_type : str {'even', 'odd'}, optional - Used in 'reflect' and 'symmetric'. The 'even' style is the - default with an unaltered reflection around the edge value. For - the 'odd' style, the extended part of the array is created by - subtracting the reflected values from two times the edge value. - - Returns - ------- - pad : ndarray - Padded array of rank equal to `array` with shape increased - according to `pad_width`. - - Notes - ----- - .. versionadded:: 1.7.0 - - For an array with rank greater than 1, some of the padding of later - axes is calculated from padding of previous axes. This is easiest to - think about with a rank 2 array where the corners of the padded array - are calculated by using padded values from the first axis. - - The padding function, if used, should return a rank 1 array equal in - length to the vector argument with padded values replaced. It has the - following signature:: - - padding_func(vector, iaxis_pad_width, iaxis, **kwargs) - - where - - vector : ndarray - A rank 1 array already padded with zeros. Padded values are - vector[:pad_tuple[0]] and vector[-pad_tuple[1]:]. - iaxis_pad_width : tuple - A 2-tuple of ints, iaxis_pad_width[0] represents the number of - values padded at the beginning of vector where - iaxis_pad_width[1] represents the number of values padded at - the end of vector. - iaxis : int - The axis currently being calculated. - kwargs : misc - Any keyword arguments the function requires. - - Examples - -------- - >>> a = [1, 2, 3, 4, 5] - >>> np.lib.pad(a, (2,3), 'constant', constant_values=(4,6)) - array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6]) - - >>> np.lib.pad(a, (2,3), 'edge') - array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5]) - - >>> np.lib.pad(a, (2,3), 'linear_ramp', end_values=(5,-4)) - array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) - - >>> np.lib.pad(a, (2,), 'maximum') - array([5, 5, 1, 2, 3, 4, 5, 5, 5]) - - >>> np.lib.pad(a, (2,), 'mean') - array([3, 3, 1, 2, 3, 4, 5, 3, 3]) - - >>> np.lib.pad(a, (2,), 'median') - array([3, 3, 1, 2, 3, 4, 5, 3, 3]) - - >>> a = [[1,2], [3,4]] - >>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum') - array([[1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1], - [3, 3, 3, 4, 3, 3, 3], - [1, 1, 1, 2, 1, 1, 1], - [1, 1, 1, 2, 1, 1, 1]]) - - >>> a = [1, 2, 3, 4, 5] - >>> np.lib.pad(a, (2,3), 'reflect') - array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) - - >>> np.lib.pad(a, (2,3), 'reflect', reflect_type='odd') - array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) - - >>> np.lib.pad(a, (2,3), 'symmetric') - array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) - - >>> np.lib.pad(a, (2,3), 'symmetric', reflect_type='odd') - array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) - - >>> np.lib.pad(a, (2,3), 'wrap') - array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) - - >>> def padwithtens(vector, pad_width, iaxis, kwargs): - ... vector[:pad_width[0]] = 10 - ... vector[-pad_width[1]:] = 10 - ...
return vector - - >>> a = np.arange(6) - >>> a = a.reshape((2,3)) - - >>> np.lib.pad(a, 2, padwithtens) - array([[10, 10, 10, 10, 10, 10, 10], - [10, 10, 10, 10, 10, 10, 10], - [10, 10, 0, 1, 2, 10, 10], - [10, 10, 3, 4, 5, 10, 10], - [10, 10, 10, 10, 10, 10, 10], - [10, 10, 10, 10, 10, 10, 10]]) - """ - - narray = np.array(array) - pad_width = _validate_lengths(narray, pad_width) - - allowedkwargs = { - 'constant': ['constant_values'], - 'edge': [], - 'linear_ramp': ['end_values'], - 'maximum': ['stat_length'], - 'mean': ['stat_length'], - 'median': ['stat_length'], - 'minimum': ['stat_length'], - 'reflect': ['reflect_type'], - 'symmetric': ['reflect_type'], - 'wrap': [], - } - - kwdefaults = { - 'stat_length': None, - 'constant_values': 0, - 'end_values': 0, - 'reflect_type': 'even', - } - - if isinstance(mode, str): - # Make sure have allowed kwargs appropriate for mode - for key in kwargs: - if key not in allowedkwargs[mode]: - raise ValueError('%s keyword not in allowed keywords %s' % - (key, allowedkwargs[mode])) - - # Set kwarg defaults - for kw in allowedkwargs[mode]: - kwargs.setdefault(kw, kwdefaults[kw]) - - # Need to only normalize particular keywords. - for i in kwargs: - if i == 'stat_length': - kwargs[i] = _validate_lengths(narray, kwargs[i]) - if i in ['end_values', 'constant_values']: - kwargs[i] = _normalize_shape(narray, kwargs[i]) - elif mode is None: - raise ValueError('Keyword "mode" must be a function or one of %s.' % - (list(allowedkwargs.keys()),)) - else: - # Drop back to old, slower np.apply_along_axis mode for user-supplied - # vector function - function = mode - - # Create a new padded array - rank = list(range(len(narray.shape))) - total_dim_increase = [np.sum(pad_width[i]) for i in rank] - offset_slices = [slice(pad_width[i][0], - pad_width[i][0] + narray.shape[i]) - for i in rank] - new_shape = np.array(narray.shape) + total_dim_increase - newmat = np.zeros(new_shape, narray.dtype) - - # Insert the original array into the padded array - newmat[offset_slices] = narray - - # This is the core of pad ... - for iaxis in rank: - np.apply_along_axis(function, - iaxis, - newmat, - pad_width[iaxis], - iaxis, - kwargs) - return newmat - - # If we get here, use new padding method - newmat = narray.copy() - - # API preserved, but completely new algorithm which pads by building the - # entire block to pad before/after `arr` with in one step, for each axis. 
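# Illustrative aside (not part of the deleted file): the "build the whole
# block, then concatenate" idea described above, reduced to 'constant' mode
# on a single axis. The helper name `_blocked_constant_pad` is hypothetical.
import numpy as np

def _blocked_constant_pad(arr, pad_before, pad_after, value, axis):
    # Size the before/after blocks by resizing only the padded axis, then
    # join block + array + block in a single concatenate call.
    shape_before = list(arr.shape)
    shape_before[axis] = pad_before
    shape_after = list(arr.shape)
    shape_after[axis] = pad_after
    return np.concatenate((np.full(shape_before, value, dtype=arr.dtype),
                           arr,
                           np.full(shape_after, value, dtype=arr.dtype)),
                          axis=axis)

# _blocked_constant_pad(np.arange(3), 2, 1, 9, axis=0)
# -> array([9, 9, 0, 1, 2, 9])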
- if mode == 'constant': - for axis, ((pad_before, pad_after), (before_val, after_val)) \ - in enumerate(zip(pad_width, kwargs['constant_values'])): - newmat = _prepend_const(newmat, pad_before, before_val, axis) - newmat = _append_const(newmat, pad_after, after_val, axis) - - elif mode == 'edge': - for axis, (pad_before, pad_after) in enumerate(pad_width): - newmat = _prepend_edge(newmat, pad_before, axis) - newmat = _append_edge(newmat, pad_after, axis) - - elif mode == 'linear_ramp': - for axis, ((pad_before, pad_after), (before_val, after_val)) \ - in enumerate(zip(pad_width, kwargs['end_values'])): - newmat = _prepend_ramp(newmat, pad_before, before_val, axis) - newmat = _append_ramp(newmat, pad_after, after_val, axis) - - elif mode == 'maximum': - for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ - in enumerate(zip(pad_width, kwargs['stat_length'])): - newmat = _prepend_max(newmat, pad_before, chunk_before, axis) - newmat = _append_max(newmat, pad_after, chunk_after, axis) - - elif mode == 'mean': - for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ - in enumerate(zip(pad_width, kwargs['stat_length'])): - newmat = _prepend_mean(newmat, pad_before, chunk_before, axis) - newmat = _append_mean(newmat, pad_after, chunk_after, axis) - - elif mode == 'median': - for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ - in enumerate(zip(pad_width, kwargs['stat_length'])): - newmat = _prepend_med(newmat, pad_before, chunk_before, axis) - newmat = _append_med(newmat, pad_after, chunk_after, axis) - - elif mode == 'minimum': - for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ - in enumerate(zip(pad_width, kwargs['stat_length'])): - newmat = _prepend_min(newmat, pad_before, chunk_before, axis) - newmat = _append_min(newmat, pad_after, chunk_after, axis) - - elif mode == 'reflect': - for axis, (pad_before, pad_after) in enumerate(pad_width): - # Recursive padding along any axis where `pad_amt` is too large - # for indexing tricks. We can only safely pad the original axis - # length, to keep the period of the reflections consistent. - if ((pad_before > 0) or - (pad_after > 0)) and newmat.shape[axis] == 1: - # Extending singleton dimension for 'reflect' is legacy - # behavior; it really should raise an error. - newmat = _prepend_edge(newmat, pad_before, axis) - newmat = _append_edge(newmat, pad_after, axis) - continue - - method = kwargs['reflect_type'] - safe_pad = newmat.shape[axis] - 1 - while ((pad_before > safe_pad) or (pad_after > safe_pad)): - offset = 0 - pad_iter_b = min(safe_pad, - safe_pad * (pad_before // safe_pad)) - pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) - newmat = _pad_ref(newmat, (pad_iter_b, - pad_iter_a), method, axis) - pad_before -= pad_iter_b - pad_after -= pad_iter_a - if pad_iter_b > 0: - offset += 1 - if pad_iter_a > 0: - offset += 1 - safe_pad += pad_iter_b + pad_iter_a - newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis) - - elif mode == 'symmetric': - for axis, (pad_before, pad_after) in enumerate(pad_width): - # Recursive padding along any axis where `pad_amt` is too large - # for indexing tricks. We can only safely pad the original axis - # length, to keep the period of the reflections consistent. 
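# Worked example of the chunked recursion above (an added annotation with
# illustrative numbers): for 'symmetric' padding of an axis of length 3 with
# pad_before = 8, safe_pad starts at 3, so the loop below first pads one full
# period (pad_iter_b = min(3, 3 * (8 // 3)) = 3); the axis then holds 6
# usable values and safe_pad grows to 6, the loop exits because the remaining
# request (5) no longer exceeds safe_pad, and the trailing _pad_sym call
# finishes the job.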
- method = kwargs['reflect_type'] - safe_pad = newmat.shape[axis] - while ((pad_before > safe_pad) or - (pad_after > safe_pad)): - pad_iter_b = min(safe_pad, - safe_pad * (pad_before // safe_pad)) - pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) - newmat = _pad_sym(newmat, (pad_iter_b, - pad_iter_a), method, axis) - pad_before -= pad_iter_b - pad_after -= pad_iter_a - safe_pad += pad_iter_b + pad_iter_a - newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis) - - elif mode == 'wrap': - for axis, (pad_before, pad_after) in enumerate(pad_width): - # Recursive padding along any axis where `pad_amt` is too large - # for indexing tricks. We can only safely pad the original axis - # length, to keep the period of the reflections consistent. - safe_pad = newmat.shape[axis] - while ((pad_before > safe_pad) or - (pad_after > safe_pad)): - pad_iter_b = min(safe_pad, - safe_pad * (pad_before // safe_pad)) - pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) - newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis) - - pad_before -= pad_iter_b - pad_after -= pad_iter_a - safe_pad += pad_iter_b + pad_iter_a - newmat = _pad_wrap(newmat, (pad_before, pad_after), axis) - - return newmat diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraysetops.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraysetops.py deleted file mode 100644 index 2d98c35d2c9d8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arraysetops.py +++ /dev/null @@ -1,463 +0,0 @@ -""" -Set operations for 1D numeric arrays based on sorting. - -:Contains: - ediff1d, - unique, - intersect1d, - setxor1d, - in1d, - union1d, - setdiff1d - -:Notes: - -For floating point arrays, inaccurate results may appear due to usual round-off -and floating point comparison issues. - -Speed could be gained in some operations by an implementation of -sort(), that can provide directly the permutation vectors, avoiding -thus calls to argsort(). - -To do: Optionally return indices analogously to unique for all functions. - -:Author: Robert Cimrman - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np - - -__all__ = [ - 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique', - 'in1d' - ] - - -def ediff1d(ary, to_end=None, to_begin=None): - """ - The differences between consecutive elements of an array. - - Parameters - ---------- - ary : array_like - If necessary, will be flattened before the differences are taken. - to_end : array_like, optional - Number(s) to append at the end of the returned differences. - to_begin : array_like, optional - Number(s) to prepend at the beginning of the returned differences. - - Returns - ------- - ediff1d : ndarray - The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. - - See Also - -------- - diff, gradient - - Notes - ----- - When applied to masked arrays, this function drops the mask information - if the `to_begin` and/or `to_end` parameters are used. - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 0]) - >>> np.ediff1d(x) - array([ 1, 2, 3, -7]) - - >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) - array([-99, 1, 2, 3, -7, 88, 99]) - - The returned array is always 1D. 
- - >>> y = [[1, 2, 4], [1, 6, 24]] - >>> np.ediff1d(y) - array([ 1, 2, -3, 5, 18]) - - """ - ary = np.asanyarray(ary).flat - ed = ary[1:] - ary[:-1] - arrays = [ed] - if to_begin is not None: - arrays.insert(0, to_begin) - if to_end is not None: - arrays.append(to_end) - - if len(arrays) != 1: - # We'll save ourselves a copy of a potentially large array in - # the common case where neither to_begin or to_end was given. - ed = np.hstack(arrays) - - return ed - -def unique(ar, return_index=False, return_inverse=False, return_counts=False): - """ - Find the unique elements of an array. - - Returns the sorted unique elements of an array. There are two optional - outputs in addition to the unique elements: the indices of the input array - that give the unique values, and the indices of the unique array that - reconstruct the input array. - - Parameters - ---------- - ar : array_like - Input array. This will be flattened if it is not already 1-D. - return_index : bool, optional - If True, also return the indices of `ar` that result in the unique - array. - return_inverse : bool, optional - If True, also return the indices of the unique array that can be used - to reconstruct `ar`. - return_counts : bool, optional - .. versionadded:: 1.9.0 - If True, also return the number of times each unique value comes up - in `ar`. - - Returns - ------- - unique : ndarray - The sorted unique values. - unique_indices : ndarray, optional - The indices of the first occurrences of the unique values in the - (flattened) original array. Only provided if `return_index` is True. - unique_inverse : ndarray, optional - The indices to reconstruct the (flattened) original array from the - unique array. Only provided if `return_inverse` is True. - unique_counts : ndarray, optional - .. versionadded:: 1.9.0 - The number of times each of the unique values comes up in the - original array. Only provided if `return_counts` is True. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. 
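    As a brief added illustration (not part of the original docstring),
    the new `return_counts` flag pairs each unique value with its
    multiplicity:

    >>> np.unique([1, 1, 2, 2, 2], return_counts=True)
    (array([1, 2]), array([2, 3]))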
- - Examples - -------- - >>> np.unique([1, 1, 2, 2, 3, 3]) - array([1, 2, 3]) - >>> a = np.array([[1, 1], [2, 3]]) - >>> np.unique(a) - array([1, 2, 3]) - - Return the indices of the original array that give the unique values: - - >>> a = np.array(['a', 'b', 'b', 'c', 'a']) - >>> u, indices = np.unique(a, return_index=True) - >>> u - array(['a', 'b', 'c'], - dtype='|S1') - >>> indices - array([0, 1, 3]) - >>> a[indices] - array(['a', 'b', 'c'], - dtype='|S1') - - Reconstruct the input array from the unique values: - - >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) - >>> u, indices = np.unique(a, return_inverse=True) - >>> u - array([1, 2, 3, 4, 6]) - >>> indices - array([0, 1, 4, 3, 1, 2, 1]) - >>> u[indices] - array([1, 2, 6, 4, 2, 3, 2]) - - """ - ar = np.asanyarray(ar).flatten() - - optional_indices = return_index or return_inverse - optional_returns = optional_indices or return_counts - - if ar.size == 0: - if not optional_returns: - ret = ar - else: - ret = (ar,) - if return_index: - ret += (np.empty(0, np.bool),) - if return_inverse: - ret += (np.empty(0, np.bool),) - if return_counts: - ret += (np.empty(0, np.intp),) - return ret - - if optional_indices: - perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') - aux = ar[perm] - else: - ar.sort() - aux = ar - flag = np.concatenate(([True], aux[1:] != aux[:-1])) - - if not optional_returns: - ret = aux[flag] - else: - ret = (aux[flag],) - if return_index: - ret += (perm[flag],) - if return_inverse: - iflag = np.cumsum(flag) - 1 - iperm = perm.argsort() - ret += (np.take(iflag, iperm),) - if return_counts: - idx = np.concatenate(np.nonzero(flag) + ([ar.size],)) - ret += (np.diff(idx),) - return ret - -def intersect1d(ar1, ar2, assume_unique=False): - """ - Find the intersection of two arrays. - - Return the sorted, unique values that are in both of the input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - intersect1d : ndarray - Sorted 1D array of common and unique elements. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) - array([1, 3]) - - """ - if not assume_unique: - # Might be faster than unique( intersect1d( ar1, ar2 ) )? - ar1 = unique(ar1) - ar2 = unique(ar2) - aux = np.concatenate((ar1, ar2)) - aux.sort() - return aux[:-1][aux[1:] == aux[:-1]] - -def setxor1d(ar1, ar2, assume_unique=False): - """ - Find the set exclusive-or of two arrays. - - Return the sorted, unique values that are in only one (not both) of the - input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - setxor1d : ndarray - Sorted 1D array of unique values that are in only one of the input - arrays. 
- - Examples - -------- - >>> a = np.array([1, 2, 3, 2, 4]) - >>> b = np.array([2, 3, 5, 7, 5]) - >>> np.setxor1d(a,b) - array([1, 4, 5, 7]) - - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - - aux = np.concatenate((ar1, ar2)) - if aux.size == 0: - return aux - - aux.sort() -# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 - flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) -# flag2 = ediff1d( flag ) == 0 - flag2 = flag[1:] == flag[:-1] - return aux[flag2] - -def in1d(ar1, ar2, assume_unique=False, invert=False): - """ - Test whether each element of a 1-D array is also present in a second array. - - Returns a boolean array the same length as `ar1` that is True - where an element of `ar1` is in `ar2` and False otherwise. - - Parameters - ---------- - ar1 : (M,) array_like - Input array. - ar2 : array_like - The values against which to test each value of `ar1`. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - invert : bool, optional - If True, the values in the returned array are inverted (that is, - False where an element of `ar1` is in `ar2` and True otherwise). - Default is False. ``np.in1d(a, b, invert=True)`` is equivalent - to (but is faster than) ``np.invert(in1d(a, b))``. - - .. versionadded:: 1.8.0 - - Returns - ------- - in1d : (M,) ndarray, bool - The values `ar1[in1d]` are in `ar2`. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Notes - ----- - `in1d` can be considered as an element-wise function version of the - python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly - equivalent to ``np.array([item in b for item in a])``. - - .. versionadded:: 1.4.0 - - Examples - -------- - >>> test = np.array([0, 1, 2, 5, 0]) - >>> states = [0, 2] - >>> mask = np.in1d(test, states) - >>> mask - array([ True, False, True, False, True], dtype=bool) - >>> test[mask] - array([0, 2, 0]) - >>> mask = np.in1d(test, states, invert=True) - >>> mask - array([False, True, False, True, False], dtype=bool) - >>> test[mask] - array([1, 5]) - """ - # Ravel both arrays, behavior for the first array could be different - ar1 = np.asarray(ar1).ravel() - ar2 = np.asarray(ar2).ravel() - - # This code is significantly faster when the condition is satisfied. - if len(ar2) < 10 * len(ar1) ** 0.145: - if invert: - mask = np.ones(len(ar1), dtype=np.bool) - for a in ar2: - mask &= (ar1 != a) - else: - mask = np.zeros(len(ar1), dtype=np.bool) - for a in ar2: - mask |= (ar1 == a) - return mask - - # Otherwise use sorting - if not assume_unique: - ar1, rev_idx = np.unique(ar1, return_inverse=True) - ar2 = np.unique(ar2) - - ar = np.concatenate((ar1, ar2)) - # We need this to be a stable sort, so always use 'mergesort' - # here. The values from the first array should always come before - # the values from the second array. - order = ar.argsort(kind='mergesort') - sar = ar[order] - if invert: - bool_ar = (sar[1:] != sar[:-1]) - else: - bool_ar = (sar[1:] == sar[:-1]) - flag = np.concatenate((bool_ar, [invert])) - indx = order.argsort(kind='mergesort')[:len(ar1)] - - if assume_unique: - return flag[indx] - else: - return flag[indx][rev_idx] - -def union1d(ar1, ar2): - """ - Find the union of two arrays. - - Return the unique, sorted array of values that are in either of the two - input arrays. - - Parameters - ---------- - ar1, ar2 : array_like - Input arrays. 
They are flattened if they are not already 1D. - - Returns - ------- - union1d : ndarray - Unique, sorted union of the input arrays. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> np.union1d([-1, 0, 1], [-2, 0, 2]) - array([-2, -1, 0, 1, 2]) - - """ - return unique(np.concatenate((ar1, ar2))) - -def setdiff1d(ar1, ar2, assume_unique=False): - """ - Find the set difference of two arrays. - - Return the sorted, unique values in `ar1` that are not in `ar2`. - - Parameters - ---------- - ar1 : array_like - Input array. - ar2 : array_like - Input comparison array. - assume_unique : bool - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - - Returns - ------- - setdiff1d : ndarray - Sorted 1D array of values in `ar1` that are not in `ar2`. - - See Also - -------- - numpy.lib.arraysetops : Module with a number of other functions for - performing set operations on arrays. - - Examples - -------- - >>> a = np.array([1, 2, 3, 2, 4, 1]) - >>> b = np.array([3, 4, 5, 6]) - >>> np.setdiff1d(a, b) - array([1, 2]) - - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - aux = in1d(ar1, ar2, assume_unique=True) - if aux.size == 0: - return aux - else: - return np.asarray(ar1)[aux == 0] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arrayterator.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arrayterator.py deleted file mode 100644 index d9839feeb89bd..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/arrayterator.py +++ /dev/null @@ -1,226 +0,0 @@ -""" -A buffered iterator for big arrays. - -This module solves the problem of iterating over a big file-based array -without having to read it into memory. The `Arrayterator` class wraps -an array object, and when iterated it will return sub-arrays with at most -a user-specified number of elements. - -""" -from __future__ import division, absolute_import, print_function - -from operator import mul -from functools import reduce - -from numpy.compat import long - -__all__ = ['Arrayterator'] - - -class Arrayterator(object): - """ - Buffered iterator for big arrays. - - `Arrayterator` creates a buffered iterator for reading big arrays in small - contiguous blocks. The class is useful for objects stored in the - file system. It allows iteration over the object *without* reading - everything in memory; instead, small blocks are read and iterated over. - - `Arrayterator` can be used with any object that supports multidimensional - slices. This includes NumPy arrays, but also variables from - Scientific.IO.NetCDF or pynetcdf for example. - - Parameters - ---------- - var : array_like - The object to iterate over. - buf_size : int, optional - The buffer size. If `buf_size` is supplied, the maximum amount of - data that will be read into memory is `buf_size` elements. - Default is None, which will read as many elements as possible - into memory. - - Attributes - ---------- - var - buf_size - start - stop - step - shape - flat - - See Also - -------- - ndenumerate : Multidimensional array iterator. - flatiter : Flat array iterator. - memmap : Create a memory-map to an array stored in a binary file on disk. - - Notes - ----- - The algorithm works by first finding a "running dimension", along which - the blocks will be extracted. Given an array of dimensions - ``(d1, d2, ..., dn)``, e.g.
if `buf_size` is smaller than ``d1``, the - first dimension will be used. If, on the other hand, - ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. - Blocks are extracted along this dimension, and when the last block is - returned the process continues from the next dimension, until all - elements have been read. - - Examples - -------- - >>> import numpy as np - >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2) - >>> a_itor.shape - (3, 4, 5, 6) - - Now we can iterate over ``a_itor``, and it will return arrays of size - two. Since `buf_size` was smaller than any dimension, the first - dimension will be iterated over first: - - >>> for subarr in a_itor: - ... if not subarr.all(): - ... print subarr, subarr.shape - ... - [[[[0 1]]]] (1, 1, 1, 2) - - """ - - def __init__(self, var, buf_size=None): - self.var = var - self.buf_size = buf_size - - self.start = [0 for dim in var.shape] - self.stop = [dim for dim in var.shape] - self.step = [1 for dim in var.shape] - - def __getattr__(self, attr): - return getattr(self.var, attr) - - def __getitem__(self, index): - """ - Return a new arrayterator. - - """ - # Fix index, handling ellipsis and incomplete slices. - if not isinstance(index, tuple): - index = (index,) - fixed = [] - length, dims = len(index), len(self.shape) - for slice_ in index: - if slice_ is Ellipsis: - fixed.extend([slice(None)] * (dims-length+1)) - length = len(fixed) - elif isinstance(slice_, (int, long)): - fixed.append(slice(slice_, slice_+1, 1)) - else: - fixed.append(slice_) - index = tuple(fixed) - if len(index) < dims: - index += (slice(None),) * (dims-len(index)) - - # Return a new arrayterator object. - out = self.__class__(self.var, self.buf_size) - for i, (start, stop, step, slice_) in enumerate( - zip(self.start, self.stop, self.step, index)): - out.start[i] = start + (slice_.start or 0) - out.step[i] = step * (slice_.step or 1) - out.stop[i] = start + (slice_.stop or stop-start) - out.stop[i] = min(stop, out.stop[i]) - return out - - def __array__(self): - """ - Return corresponding data. - - """ - slice_ = tuple(slice(*t) for t in zip( - self.start, self.stop, self.step)) - return self.var[slice_] - - @property - def flat(self): - """ - A 1-D flat iterator for Arrayterator objects. - - This iterator returns elements of the array to be iterated over in - `Arrayterator` one by one. It is similar to `flatiter`. - - See Also - -------- - `Arrayterator` - flatiter - - Examples - -------- - >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2) - - >>> for subarr in a_itor.flat: - ... if not subarr: - ... print subarr, type(subarr) - ... - 0 - - """ - for block in self: - for value in block.flat: - yield value - - @property - def shape(self): - """ - The shape of the array to be iterated over. - - For an example, see `Arrayterator`. 
- - """ - return tuple(((stop-start-1)//step+1) for start, stop, step in - zip(self.start, self.stop, self.step)) - - def __iter__(self): - # Skip arrays with degenerate dimensions - if [dim for dim in self.shape if dim <= 0]: - raise StopIteration - - start = self.start[:] - stop = self.stop[:] - step = self.step[:] - ndims = len(self.var.shape) - - while True: - count = self.buf_size or reduce(mul, self.shape) - - # iterate over each dimension, looking for the - # running dimension (ie, the dimension along which - # the blocks will be built from) - rundim = 0 - for i in range(ndims-1, -1, -1): - # if count is zero we ran out of elements to read - # along higher dimensions, so we read only a single position - if count == 0: - stop[i] = start[i]+1 - elif count <= self.shape[i]: - # limit along this dimension - stop[i] = start[i] + count*step[i] - rundim = i - else: - # read everything along this dimension - stop[i] = self.stop[i] - stop[i] = min(self.stop[i], stop[i]) - count = count//self.shape[i] - - # yield a block - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) - yield self.var[slice_] - - # Update start position, taking care of overflow to - # other dimensions - start[rundim] = stop[rundim] # start where we stopped - for i in range(ndims-1, 0, -1): - if start[i] >= self.stop[i]: - start[i] = self.start[i] - start[i-1] += self.step[i-1] - if start[0] >= self.stop[0]: - raise StopIteration diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/financial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/financial.py deleted file mode 100644 index 5b96e5b8e979d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/financial.py +++ /dev/null @@ -1,737 +0,0 @@ -"""Some simple financial calculations - -patterned after spreadsheet computations. - -There is some complexity in each function -so that the functions behave like ufuncs with -broadcasting and being able to be called with scalars -or arrays (or other sequences). - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np - -__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate', - 'irr', 'npv', 'mirr'] - -_when_to_num = {'end':0, 'begin':1, - 'e':0, 'b':1, - 0:0, 1:1, - 'beginning':1, - 'start':1, - 'finish':0} - -def _convert_when(when): - #Test to see if when has already been converted to ndarray - #This will happen if one function calls another, for example ppmt - if isinstance(when, np.ndarray): - return when - try: - return _when_to_num[when] - except (KeyError, TypeError): - return [_when_to_num[x] for x in when] - - -def fv(rate, nper, pmt, pv, when='end'): - """ - Compute the future value. - - Given: - * a present value, `pv` - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * a (fixed) payment, `pmt`, paid either - * at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the value at the end of the `nper` periods - - Parameters - ---------- - rate : scalar or array_like of shape(M, ) - Rate of interest as decimal (not per cent) per period - nper : scalar or array_like of shape(M, ) - Number of compounding periods - pmt : scalar or array_like of shape(M, ) - Payment - pv : scalar or array_like of shape(M, ) - Present value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)). - Defaults to {'end', 0}. - - Returns - ------- - out : ndarray - Future values. If all input is scalar, returns a scalar float. 
If - any input is array_like, returns future values for each input element. - If multiple inputs are array_like, they all must have the same shape. - - Notes - ----- - The future value is computed by solving the equation:: - - fv + - pv*(1+rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 - - or, when ``rate == 0``:: - - fv + pv + pmt * nper == 0 - - References - ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - Examples - -------- - What is the future value after 10 years of saving $100 now, with - an additional monthly savings of $100. Assume the interest rate is - 5% (annually) compounded monthly? - - >>> np.fv(0.05/12, 10*12, -100, -100) - 15692.928894335748 - - By convention, the negative sign represents cash flow out (i.e. money not - available today). Thus, saving $100 a month at 5% annual interest leads - to $15,692.93 available to spend in 10 years. - - If any input is array_like, returns an array of equal shape. Let's - compare different interest rates from the example above. - - >>> a = np.array((0.05, 0.06, 0.07))/12 - >>> np.fv(a, 10*12, -100, -100) - array([ 15692.92889434, 16569.87435405, 17509.44688102]) - - """ - when = _convert_when(when) - (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when]) - temp = (1+rate)**nper - miter = np.broadcast(rate, nper, pmt, pv, when) - zer = np.zeros(miter.shape) - fact = np.where(rate == zer, nper + zer, - (1 + rate*when)*(temp - 1)/rate + zer) - return -(pv*temp + pmt*fact) - -def pmt(rate, nper, pv, fv=0, when='end'): - """ - Compute the payment against loan principal plus interest. - - Given: - * a present value, `pv` (e.g., an amount borrowed) - * a future value, `fv` (e.g., 0) - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * and (optional) specification of whether payment is made - at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the (fixed) periodic payment. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - nper : array_like - Number of compounding periods - pv : array_like - Present value - fv : array_like (optional) - Future value (default = 0) - when : {{'begin', 1}, {'end', 0}}, {string, int} - When payments are due ('begin' (1) or 'end' (0)) - - Returns - ------- - out : ndarray - Payment against loan plus interest. If all input is scalar, returns a - scalar float. If any input is array_like, returns payment for each - input element. If multiple inputs are array_like, they all must have - the same shape. - - Notes - ----- - The payment is computed by solving the equation:: - - fv + - pv*(1 + rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0 - - or, when ``rate == 0``:: - - fv + pv + pmt * nper == 0 - - for ``pmt``. - - Note that computing a monthly mortgage payment is only - one use for this function. For example, pmt returns the - periodic deposit one must make to achieve a specified - future balance given an initial deposit, a fixed, - periodically compounded interest rate, and the total - number of periods. 
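    As an added illustration of that use (a figure computed from the
    formula above and rounded for display): the monthly deposit needed to
    accumulate $10,000 over 5 years at 5% annual interest, compounded
    monthly, is

    >>> round(np.pmt(0.05/12, 5*12, 0, 10000), 2)
    -147.05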
- - References - ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php - ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt - - Examples - -------- - What is the monthly payment needed to pay off a $200,000 loan in 15 - years at an annual interest rate of 7.5%? - - >>> np.pmt(0.075/12, 12*15, 200000) - -1854.0247200054619 - - In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained - today, a monthly payment of $1,854.02 would be required. Note that this - example illustrates usage of `fv` having a default value of 0. - - """ - when = _convert_when(when) - (rate, nper, pv, fv, when) = map(np.asarray, [rate, nper, pv, fv, when]) - temp = (1+rate)**nper - miter = np.broadcast(rate, nper, pv, fv, when) - zer = np.zeros(miter.shape) - fact = np.where(rate == zer, nper + zer, - (1 + rate*when)*(temp - 1)/rate + zer) - return -(fv + pv*temp) / fact - -def nper(rate, pmt, pv, fv=0, when='end'): - """ - Compute the number of periodic payments. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - pmt : array_like - Payment - pv : array_like - Present value - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - - Notes - ----- - The number of periods ``nper`` is computed by solving the equation:: - - fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0 - - but if ``rate = 0`` then:: - - fv + pv + pmt*nper = 0 - - Examples - -------- - If you only had $150/month to pay towards the loan, how long would it take - to pay-off a loan of $8,000 at 7% annual interest? - - >>> print round(np.nper(0.07/12, -150, 8000), 5) - 64.07335 - - So, over 64 months would be required to pay off the loan. - - The same analysis could be done with several different interest rates - and/or payments and/or total amounts to produce an entire table. - - >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12, - ... -150 : -99 : 50 , - ... 8000 : 9001 : 1000])) - array([[[ 64.07334877, 74.06368256], - [ 108.07548412, 127.99022654]], - [[ 66.12443902, 76.87897353], - [ 114.70165583, 137.90124779]]]) - - """ - when = _convert_when(when) - (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when]) - - use_zero_rate = False - with np.errstate(divide="raise"): - try: - z = pmt*(1.0+rate*when)/rate - except FloatingPointError: - use_zero_rate = True - - if use_zero_rate: - return (-fv + pv) / (pmt + 0.0) - else: - A = -(fv + pv)/(pmt+0.0) - B = np.log((-fv+z) / (pv+z))/np.log(1.0+rate) - miter = np.broadcast(rate, pmt, pv, fv, when) - zer = np.zeros(miter.shape) - return np.where(rate == zer, A + zer, B + zer) + 0.0 - -def ipmt(rate, per, nper, pv, fv=0.0, when='end'): - """ - Compute the interest portion of a payment. - - Parameters - ---------- - rate : scalar or array_like of shape(M, ) - Rate of interest as decimal (not per cent) per period - per : scalar or array_like of shape(M, ) - Interest paid against the loan changes during the life of the loan. - The `per` is the payment period to calculate the interest amount.
- nper : scalar or array_like of shape(M, ) - Number of compounding periods - pv : scalar or array_like of shape(M, ) - Present value - fv : scalar or array_like of shape(M, ), optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)). - Defaults to {'end', 0}. - - Returns - ------- - out : ndarray - Interest portion of payment. If all input is scalar, returns a scalar - float. If any input is array_like, returns interest payment for each - input element. If multiple inputs are array_like, they all must have - the same shape. - - See Also - -------- - ppmt, pmt, pv - - Notes - ----- - The total payment is made up of payment against principal plus interest. - - ``pmt = ppmt + ipmt`` - - Examples - -------- - What is the amortization schedule for a 1 year loan of $2500 at - 8.24% interest per year compounded monthly? - - >>> principal = 2500.00 - - The 'per' variable represents the periods of the loan. Remember that - financial equations start the period count at 1! - - >>> per = np.arange(1*12) + 1 - >>> ipmt = np.ipmt(0.0824/12, per, 1*12, principal) - >>> ppmt = np.ppmt(0.0824/12, per, 1*12, principal) - - Each element of the sum of the 'ipmt' and 'ppmt' arrays should equal - 'pmt'. - - >>> pmt = np.pmt(0.0824/12, 1*12, principal) - >>> np.allclose(ipmt + ppmt, pmt) - True - - >>> fmt = '{0:2d} {1:8.2f} {2:8.2f} {3:8.2f}' - >>> for payment in per: - ... index = payment - 1 - ... principal = principal + ppmt[index] - ... print fmt.format(payment, ppmt[index], ipmt[index], principal) - 1 -200.58 -17.17 2299.42 - 2 -201.96 -15.79 2097.46 - 3 -203.35 -14.40 1894.11 - 4 -204.74 -13.01 1689.37 - 5 -206.15 -11.60 1483.22 - 6 -207.56 -10.18 1275.66 - 7 -208.99 -8.76 1066.67 - 8 -210.42 -7.32 856.25 - 9 -211.87 -5.88 644.38 - 10 -213.32 -4.42 431.05 - 11 -214.79 -2.96 216.26 - 12 -216.26 -1.49 -0.00 - - >>> interestpd = np.sum(ipmt) - >>> np.round(interestpd, 2) - -112.98 - - """ - when = _convert_when(when) - rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper, - pv, fv, when) - total_pmt = pmt(rate, nper, pv, fv, when) - ipmt = _rbl(rate, per, total_pmt, pv, when)*rate - try: - ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt) - ipmt = np.where(np.logical_and(when == 1, per == 1), 0.0, ipmt) - except IndexError: - pass - return ipmt - -def _rbl(rate, per, pmt, pv, when): - """ - This function is here to simply have a different name for the 'fv' - function to not interfere with the 'fv' keyword argument within the 'ipmt' - function. It is the 'remaining balance on loan' which might be useful as - its own function, but is easily calculated with the 'fv' function. - """ - return fv(rate, (per - 1), pmt, pv, when) - -def ppmt(rate, per, nper, pv, fv=0.0, when='end'): - """ - Compute the payment against loan principal. - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - per : array_like, int - Amount paid against the loan changes. The `per` is the period of - interest. - nper : array_like - Number of compounding periods - pv : array_like - Present value - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int} - When payments are due ('begin' (1) or 'end' (0)) - - See Also - -------- - pmt, pv, ipmt - - """ - total = pmt(rate, nper, pv, fv, when) - return total - ipmt(rate, per, nper, pv, fv, when) - -def pv(rate, nper, pmt, fv=0.0, when='end'): - """ - Compute the present value.
- - Given: - * a future value, `fv` - * an interest `rate` compounded once per period, of which - there are - * `nper` total - * a (fixed) payment, `pmt`, paid either - * at the beginning (`when` = {'begin', 1}) or the end - (`when` = {'end', 0}) of each period - - Return: - the value now - - Parameters - ---------- - rate : array_like - Rate of interest (per period) - nper : array_like - Number of compounding periods - pmt : array_like - Payment - fv : array_like, optional - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - - Returns - ------- - out : ndarray, float - Present value of a series of payments or investments. - - Notes - ----- - The present value is computed by solving the equation:: - - fv + - pv*(1 + rate)**nper + - pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0 - - or, when ``rate = 0``:: - - fv + pv + pmt * nper = 0 - - for `pv`, which is then returned. - - References - ---------- - .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). - Open Document Format for Office Applications (OpenDocument)v1.2, - Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version, - Pre-Draft 12. Organization for the Advancement of Structured Information - Standards (OASIS). Billerica, MA, USA. [ODT Document]. - Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - Examples - -------- - What is the present value (e.g., the initial investment) - of an investment that needs to total $15692.93 - after 10 years of saving $100 every month? Assume the - interest rate is 5% (annually) compounded monthly. - - >>> np.pv(0.05/12, 10*12, -100, 15692.93) - -100.00067131625819 - - By convention, the negative sign represents cash flow out - (i.e., money not available today). Thus, to end up with - $15,692.93 in 10 years saving $100 a month at 5% annual - interest, one's initial deposit should also be $100. - - If any input is array_like, ``pv`` returns an array of equal shape. - Let's compare different interest rates in the example above: - - >>> a = np.array((0.05, 0.04, 0.03))/12 - >>> np.pv(a, 10*12, -100, 15692.93) - array([ -100.00067132, -649.26771385, -1273.78633713]) - - So, to end up with the same $15692.93 under the same $100 per month - "savings plan," for annual interest rates of 4% and 3%, one would - need initial investments of $649.27 and $1273.79, respectively. - - """ - when = _convert_when(when) - (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when]) - temp = (1+rate)**nper - miter = np.broadcast(rate, nper, pmt, fv, when) - zer = np.zeros(miter.shape) - fact = np.where(rate == zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer) - return -(fv + pmt*fact)/temp - -# Computed with Sage -# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - -# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + -# p*((r + 1)^n - 1)*w/r) - -def _g_div_gp(r, n, p, x, y, w): - t1 = (r+1)**n - t2 = (r+1)**(n-1) - return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) / - (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + - p*(t1 - 1)*w/r)) - -# Use Newton's iteration until the change is less than 1e-6 -# for all values or a maximum of 100 iterations is reached. -# Newton's rule is -# r_{n+1} = r_{n} - g(r_n)/g'(r_n) -# where -# g(r) is the formula -# g'(r) is the derivative with respect to r. 
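# Added annotation: with the cash-flow example np.rate(12, -100, 1000, 0)
# and the default guess of 0.10, the first Newton update below is
#     r1 = 0.10 - _g_div_gp(0.10, 12, -100, 1000, 0, 0)
# (arguments in the order r, n, p, x, y, w used above), and `rate` keeps
# iterating until successive estimates differ by less than `tol`.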
-def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100): - """ - Compute the rate of interest per period. - - Parameters - ---------- - nper : array_like - Number of compounding periods - pmt : array_like - Payment - pv : array_like - Present value - fv : array_like - Future value - when : {{'begin', 1}, {'end', 0}}, {string, int}, optional - When payments are due ('begin' (1) or 'end' (0)) - guess : float, optional - Starting guess for solving the rate of interest - tol : float, optional - Required tolerance for the solution - maxiter : int, optional - Maximum iterations in finding the solution - - Notes - ----- - The rate of interest is computed by iteratively solving the - (non-linear) equation:: - - fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0 - - for ``rate``. - - References - ---------- - Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document - Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated - Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12. - Organization for the Advancement of Structured Information Standards - (OASIS). Billerica, MA, USA. [ODT Document]. Available: - http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula - OpenDocument-formula-20090508.odt - - """ - when = _convert_when(when) - (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when]) - rn = guess - iter = 0 - close = False - while (iter < maxiter) and not close: - rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when) - diff = abs(rnp1-rn) - close = np.all(diff < tol) - iter += 1 - rn = rnp1 - if not close: - # Return nan's in array of the same shape as rn - return np.nan + rn - else: - return rn - -def irr(values): - """ - Return the Internal Rate of Return (IRR). - - This is the "average" periodically compounded rate of return - that gives a net present value of 0.0; for a more complete explanation, - see Notes below. - - Parameters - ---------- - values : array_like, shape(N,) - Input cash flows per time period. By convention, net "deposits" - are negative and net "withdrawals" are positive. Thus, for - example, at least the first element of `values`, which represents - the initial investment, will typically be negative. - - Returns - ------- - out : float - Internal Rate of Return for periodic input values. - - Notes - ----- - The IRR is perhaps best understood through an example (illustrated - using np.irr in the Examples section below). Suppose one invests 100 - units and then makes the following withdrawals at regular (fixed) - intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100 - unit investment yields 173 units; however, due to the combination of - compounding and the periodic withdrawals, the "average" rate of return - is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution - (for :math:`r`) of the equation: - - .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2} - + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0 - - In general, for `values` :math:`= [v_0, v_1, ... v_M]`, - irr is the solution of the equation: [G]_ - - .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0 - - References - ---------- - .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., - Addison-Wesley, 2003, pg. 348. 
- - Examples - -------- - >>> round(irr([-100, 39, 59, 55, 20]), 5) - 0.28095 - >>> round(irr([-100, 0, 0, 74]), 5) - -0.0955 - >>> round(irr([-100, 100, 0, -7]), 5) - -0.0833 - >>> round(irr([-100, 100, 0, 7]), 5) - 0.06206 - >>> round(irr([-5, 10.5, 1, -8, 1]), 5) - 0.0886 - - (Compare with the Example given for numpy.lib.financial.npv) - - """ - res = np.roots(values[::-1]) - mask = (res.imag == 0) & (res.real > 0) - if res.size == 0: - return np.nan - res = res[mask].real - # NPV(rate) = 0 can have more than one solution so we return - # only the solution closest to zero. - rate = 1.0/res - 1 - rate = rate.item(np.argmin(np.abs(rate))) - return rate - -def npv(rate, values): - """ - Returns the NPV (Net Present Value) of a cash flow series. - - Parameters - ---------- - rate : scalar - The discount rate. - values : array_like, shape(M, ) - The values of the time series of cash flows. The (fixed) time - interval between cash flow "events" must be the same as that for - which `rate` is given (i.e., if `rate` is per year, then precisely - a year is understood to elapse between each cash flow event). By - convention, investments or "deposits" are negative, income or - "withdrawals" are positive; `values` must begin with the initial - investment, thus `values[0]` will typically be negative. - - Returns - ------- - out : float - The NPV of the input cash flow series `values` at the discount - `rate`. - - Notes - ----- - Returns the result of: [G]_ - - .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}} - - References - ---------- - .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed., - Addison-Wesley, 2003, pg. 346. - - Examples - -------- - >>> np.npv(0.281,[-100, 39, 59, 55, 20]) - -0.0084785916384548798 - - (Compare with the Example given for numpy.lib.financial.irr) - - """ - values = np.asarray(values) - return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0) - -def mirr(values, finance_rate, reinvest_rate): - """ - Modified internal rate of return. - - Parameters - ---------- - values : array_like - Cash flows (must contain at least one positive and one negative - value) or nan is returned. The first value is considered a sunk - cost at time zero. - finance_rate : scalar - Interest rate paid on the cash flows - reinvest_rate : scalar - Interest rate received on the cash flows upon reinvestment - - Returns - ------- - out : float - Modified internal rate of return - - """ - values = np.asarray(values, dtype=np.double) - n = values.size - pos = values > 0 - neg = values < 0 - if not (pos.any() and neg.any()): - return np.nan - numer = np.abs(npv(reinvest_rate, values*pos)) - denom = np.abs(npv(finance_rate, values*neg)) - return (numer/denom)**(1.0/(n - 1))*(1 + reinvest_rate) - 1 diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/format.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/format.py deleted file mode 100644 index 98743b6ad48f4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/format.py +++ /dev/null @@ -1,730 +0,0 @@ -""" -Define a simple format for saving numpy arrays to disk with the full -information about them. - -The ``.npy`` format is the standard binary file format in NumPy for -persisting a *single* arbitrary NumPy array on disk. The format stores all -of the shape and dtype information necessary to reconstruct the array -correctly even on another machine with a different architecture. -The format is designed to be as simple as possible while achieving -its limited goals. 
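In practice (an added orientation note, not part of the original module
docstring) the format is usually produced and consumed through ``np.save``
and ``np.load``; a minimal round-trip, assuming a writable working
directory, looks like::

    >>> import numpy as np
    >>> np.save('example.npy', np.arange(3))
    >>> np.load('example.npy')
    array([0, 1, 2])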
- - -The ``.npz`` format is the standard format for persisting *multiple* NumPy -arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` -files, one for each array. - -Capabilities -------------- - -- Can represent all NumPy arrays including nested record arrays and - object arrays. - -- Represents the data in its native binary form. - -- Supports Fortran-contiguous arrays directly. - -- Stores all of the necessary information to reconstruct the array - including shape and dtype on a machine of a different - architecture. Both little-endian and big-endian arrays are - supported, and a file with little-endian numbers will yield - a little-endian array on any machine reading the file. The - types are described in terms of their actual sizes. For example, - if a machine with a 64-bit C "long int" writes out an array with - "long ints", a reading machine with 32-bit C "long ints" will yield - an array with 64-bit integers. - -- Is straightforward to reverse engineer. Datasets often live longer than - the programs that created them. A competent developer should be - able to create a solution in his preferred programming language to - read most ``.npy`` files that he has been given without much - documentation. - -- Allows memory-mapping of the data. See `open_memmap`. - -- Can be read from a filelike stream object instead of an actual file. - -- Stores object arrays, i.e. arrays containing elements that are arbitrary - Python objects. Files with object arrays cannot be memory-mapped, but - they can still be read and written to disk. - -Limitations ------------- - -- Arbitrary subclasses of numpy.ndarray are not completely preserved. - Subclasses will be accepted for writing, but only the array data will - be written out. A regular numpy.ndarray object will be created - upon reading the file. - -.. warning:: - - Due to limitations in the interpretation of structured dtypes, dtypes - with fields with empty names will have the names replaced by 'f0', 'f1', - etc. Such arrays will not round-trip through the format entirely - accurately. The data is intact; only the field names will differ. We are - working on a fix for this. This fix will not require a change in the - file format. The arrays with such structures can still be saved and - restored, and the correct dtype may be restored by using the - ``loadedarray.view(correct_dtype)`` method. - -File extensions ----------------- - -We recommend using the ``.npy`` and ``.npz`` extensions for files saved -in this format. This is by no means a requirement; applications may wish -to use these file formats but use an extension specific to the -application. In the absence of an obvious alternative, however, -we suggest using ``.npy`` and ``.npz``. - -Version numbering ------------------- - -The version numbering of these formats is independent of NumPy version -numbering. If the format is upgraded, the code in `numpy.io` will still -be able to read and write Version 1.0 files. - -Format Version 1.0 ------------------- - -The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. - -The next 1 byte is an unsigned byte: the major version number of the file -format, e.g. ``\\x01``. - -The next 1 byte is an unsigned byte: the minor version number of the file -format, e.g. ``\\x00``. Note: the version of the file format is not tied -to the version of the numpy package. - -The next 2 bytes form a little-endian unsigned short int: the length of -the header data HEADER_LEN. - -The next HEADER_LEN bytes form the header data describing the array's -format.
It is an ASCII string which contains a Python literal expression -of a dictionary. It is terminated by a newline (``\\n``) and padded with -spaces (``\\x20``) to make the total length of -``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment -purposes. - -The dictionary contains three keys: - - "descr" : dtype.descr - An object that can be passed as an argument to the `numpy.dtype` - constructor to create the array's dtype. - "fortran_order" : bool - Whether the array data is Fortran-contiguous or not. Since - Fortran-contiguous arrays are a common form of non-C-contiguity, - we allow them to be written directly to disk for efficiency. - "shape" : tuple of int - The shape of the array. - -For repeatability and readability, the dictionary keys are sorted in -alphabetic order. This is for convenience only. A writer SHOULD implement -this if possible. A reader MUST NOT depend on this. - -Following the header comes the array data. If the dtype contains Python -objects (i.e. ``dtype.hasobject is True``), then the data is a Python -pickle of the array. Otherwise the data is the contiguous (either C- -or Fortran-, depending on ``fortran_order``) bytes of the array. -Consumers can figure out the number of bytes by multiplying the number -of elements given by the shape (noting that ``shape=()`` means there is -1 element) by ``dtype.itemsize``. - -Notes ------ -The ``.npy`` format, including reasons for creating it and a comparison of -alternatives, is described fully in the "npy-format" NEP. - -""" -from __future__ import division, absolute_import, print_function - -import numpy -import sys -import io -import warnings -from numpy.lib.utils import safe_eval -from numpy.compat import asbytes, isfileobj, long, basestring - -if sys.version_info[0] >= 3: - import pickle -else: - import cPickle as pickle - -MAGIC_PREFIX = asbytes('\x93NUMPY') -MAGIC_LEN = len(MAGIC_PREFIX) + 2 -BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes - -# difference between version 1.0 and 2.0 is a 4 byte (I) header length -# instead of 2 bytes (H) allowing storage of large structured arrays - -def _check_version(version): - if version not in [(1, 0), (2, 0), None]: - msg = "we only support format version (1,0) and (2, 0), not %s" - raise ValueError(msg % (version,)) - -def magic(major, minor): - """ Return the magic string for the given file format version. - - Parameters - ---------- - major : int in [0, 255] - minor : int in [0, 255] - - Returns - ------- - magic : str - - Raises - ------ - ValueError if the version cannot be formatted. - """ - if major < 0 or major > 255: - raise ValueError("major version must be 0 <= major < 256") - if minor < 0 or minor > 255: - raise ValueError("minor version must be 0 <= minor < 256") - if sys.version_info[0] < 3: - return MAGIC_PREFIX + chr(major) + chr(minor) - else: - return MAGIC_PREFIX + bytes([major, minor]) - -def read_magic(fp): - """ Read the magic string to get the version of the file format. - - Parameters - ---------- - fp : filelike object - - Returns - ------- - major : int - minor : int - """ - magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") - if magic_str[:-2] != MAGIC_PREFIX: - msg = "the magic string is not correct; expected %r, got %r" - raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) - if sys.version_info[0] < 3: - major, minor = map(ord, magic_str[-2:]) - else: - major, minor = magic_str[-2:] - return major, minor - -def dtype_to_descr(dtype): - """ - Get a serializable descriptor from the dtype. 
- - The .descr attribute of a dtype object cannot be round-tripped through - the dtype() constructor. Simple types, like dtype('float32'), have - a descr which looks like a record array with one field with '' as - a name. The dtype() constructor interprets this as a request to give - a default name. Instead, we construct a descriptor that can be passed to - dtype(). - - Parameters - ---------- - dtype : dtype - The dtype of the array that will be written to disk. - - Returns - ------- - descr : object - An object that can be passed to `numpy.dtype()` in order to - replicate the input dtype. - - """ - if dtype.names is not None: - # This is a record array. The .descr is fine. XXX: parts of the - # record array with an empty name, like padding bytes, still get - # fiddled with. This needs to be fixed in the C implementation of - # dtype(). - return dtype.descr - else: - return dtype.str - -def header_data_from_array_1_0(array): - """ Get the dictionary of header metadata from a numpy.ndarray. - - Parameters - ---------- - array : numpy.ndarray - - Returns - ------- - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - """ - d = {} - d['shape'] = array.shape - if array.flags.c_contiguous: - d['fortran_order'] = False - elif array.flags.f_contiguous: - d['fortran_order'] = True - else: - # Totally non-contiguous data. We will have to make it C-contiguous - # before writing. Note that we need to test for C_CONTIGUOUS first - # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. - d['fortran_order'] = False - - d['descr'] = dtype_to_descr(array.dtype) - return d - -def _write_array_header(fp, d, version=None): - """ Write the header for an array and return the version used - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - version: tuple or None - None means use oldest that works - explicit version will raise a ValueError if the format does not - allow saving this data. Default: None - Returns - ------- - version : tuple of int - the file version which needs to be used to store the data - """ - import struct - header = ["{"] - for key, value in sorted(d.items()): - # Need to use repr here, since we eval these when reading - header.append("'%s': %s, " % (key, repr(value))) - header.append("}") - header = "".join(header) - # Pad the header with spaces and a final newline such that the magic - # string, the header-length short and the header are aligned on a - # 16-byte boundary. Hopefully, some system, possibly memory-mapping, - # can take advantage of our premature optimization. - current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline - topad = 16 - (current_header_len % 16) - header = asbytes(header + ' '*topad + '\n') - - if len(header) >= (256*256) and version == (1, 0): - raise ValueError("header does not fit inside %s bytes required by the" - " 1.0 format" % (256*256)) - if len(header) < (256*256): - header_len_str = struct.pack('<H', len(header)) - version = (1, 0) - else: - header_len_str = struct.pack('<I', len(header)) - version = (2, 0) - - fp.write(magic(*version)) - fp.write(header_len_str) - fp.write(header) - return version - - -def write_array(fp, array, version=None): - _check_version(version) - used_ver = _write_array_header(fp, header_data_from_array_1_0(array), - version) - # this warning can be removed when 1.9 has aged enough - if version != (2, 0) and used_ver == (2, 0): - warnings.warn("Stored array in format 2.0. It can only be " - "read by NumPy >= 1.9", UserWarning) - - # Set buffer size to 16 MiB to hide the Python loop overhead. - buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) - - if array.dtype.hasobject: - # We contain Python objects so we cannot write out the data - # directly. Instead, we will pickle it out with version 2 of the - # pickle protocol.
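# Added note: pickle protocol 2 is readable by Python >= 2.3 and by every
# Python 3 release, so object arrays written this way stay loadable on both
# sides of the 2/3 boundary.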
- pickle.dump(array, fp, protocol=2) - elif array.flags.f_contiguous and not array.flags.c_contiguous: - if isfileobj(fp): - array.T.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='F'): - fp.write(chunk.tobytes('C')) - else: - if isfileobj(fp): - array.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='C'): - fp.write(chunk.tobytes('C')) - - -def read_array(fp): - """ - Read an array from an NPY file. - - Parameters - ---------- - fp : file_like object - If this is not a real file object, then this may take extra memory - and time. - - Returns - ------- - array : ndarray - The array from the data on disk. - - Raises - ------ - ValueError - If the data is invalid. - - """ - version = read_magic(fp) - _check_version(version) - shape, fortran_order, dtype = _read_array_header(fp, version) - if len(shape) == 0: - count = 1 - else: - count = numpy.multiply.reduce(shape) - - # Now read the actual data. - if dtype.hasobject: - # The array contained Python objects. We need to unpickle the data. - array = pickle.load(fp) - else: - if isfileobj(fp): - # We can use the fast fromfile() function. - array = numpy.fromfile(fp, dtype=dtype, count=count) - else: - # This is not a real file. We have to read it the - # memory-intensive way. - # crc32 module fails on reads greater than 2 ** 32 bytes, - # breaking large reads from gzip streams. Chunk reads to - # BUFFER_SIZE bytes to avoid issue and reduce memory overhead - # of the read. In non-chunked case count < max_read_count, so - # only one read is performed. - - max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) - - array = numpy.empty(count, dtype=dtype) - for i in range(0, count, max_read_count): - read_count = min(max_read_count, count - i) - read_size = int(read_count * dtype.itemsize) - data = _read_bytes(fp, read_size, "array data") - array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, - count=read_count) - - if fortran_order: - array.shape = shape[::-1] - array = array.transpose() - else: - array.shape = shape - - return array - - -def open_memmap(filename, mode='r+', dtype=None, shape=None, - fortran_order=False, version=None): - """ - Open a .npy file as a memory-mapped array. - - This may be used to read an existing file or create a new one. - - Parameters - ---------- - filename : str - The name of the file on disk. This may *not* be a file-like - object. - mode : str, optional - The mode in which to open the file; the default is 'r+'. In - addition to the standard file modes, 'c' is also accepted to mean - "copy on write." See `memmap` for the available mode strings. - dtype : data-type, optional - The data type of the array if we are creating a new file in "write" - mode, if not, `dtype` is ignored. The default value is None, which - results in a data-type of `float64`. - shape : tuple of int - The shape of the array if we are creating a new file in "write" - mode, in which case this parameter is required. Otherwise, this - parameter is ignored and is thus optional. - fortran_order : bool, optional - Whether the array should be Fortran-contiguous (True) or - C-contiguous (False, the default) if we are creating a new file in - "write" mode. - version : tuple of int (major, minor) or None - If the mode is a "write" mode, then this is the version of the file - format used to create the file. 
None means use the oldest - supported version that is able to store the data. Default: None - - Returns - ------- - marray : memmap - The memory-mapped array. - - Raises - ------ - ValueError - If the data or the mode is invalid. - IOError - If the file is not found or cannot be opened correctly. - - See Also - -------- - memmap - - """ - if not isinstance(filename, basestring): - raise ValueError("Filename must be a string. Memmap cannot use" - " existing file handles.") - - if 'w' in mode: - # We are creating the file, not reading it. - # Check if we ought to create the file. - _check_version(version) - # Ensure that the given dtype is an authentic dtype object rather - # than just something that can be interpreted as a dtype object. - dtype = numpy.dtype(dtype) - if dtype.hasobject: - msg = "Array can't be memory-mapped: Python objects in dtype." - raise ValueError(msg) - d = dict( - descr=dtype_to_descr(dtype), - fortran_order=fortran_order, - shape=shape, - ) - # If we got here, then it should be safe to create the file. - fp = open(filename, mode+'b') - try: - used_ver = _write_array_header(fp, d, version) - # this warning can be removed when 1.9 has aged enough - if version != (2, 0) and used_ver == (2, 0): - warnings.warn("Stored array in format 2.0. It can only be" - "read by NumPy >= 1.9", UserWarning) - offset = fp.tell() - finally: - fp.close() - else: - # Read the header of the file first. - fp = open(filename, 'rb') - try: - version = read_magic(fp) - _check_version(version) - - shape, fortran_order, dtype = _read_array_header(fp, version) - if dtype.hasobject: - msg = "Array can't be memory-mapped: Python objects in dtype." - raise ValueError(msg) - offset = fp.tell() - finally: - fp.close() - - if fortran_order: - order = 'F' - else: - order = 'C' - - # We need to change a write-only mode to a read-write mode since we've - # already written data to the file. - if mode == 'w+': - mode = 'r+' - - marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, - mode=mode, offset=offset) - - return marray - - -def _read_bytes(fp, size, error_template="ran out of data"): - """ - Read from file-like object until size bytes are read. - Raises ValueError if not EOF is encountered before size bytes are read. - Non-blocking objects only supported if they derive from io objects. - - Required as e.g. ZipExtFile in python 2.6 can return less data than - requested. - """ - data = bytes() - while True: - # io files (default in python3) return None or raise on - # would-block, python2 file will truncate, probably nothing can be - # done about that. 
note that regular files can't be non-blocking - try: - r = fp.read(size - len(data)) - data += r - if len(r) == 0 or len(data) == size: - break - except io.BlockingIOError: - pass - if len(data) != size: - msg = "EOF: reading %s, expected %d bytes got %d" - raise ValueError(msg % (error_template, size, len(data))) - else: - return data diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/function_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/function_base.py deleted file mode 100644 index 47be2f12fc973..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/function_base.py +++ /dev/null @@ -1,3872 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -import sys -import collections -import operator - -import numpy as np -import numpy.core.numeric as _nx -from numpy.core import linspace, atleast_1d, atleast_2d -from numpy.core.numeric import ( - ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, - empty_like, ndarray, around, floor, ceil, take, dot, where, intp, - integer, isscalar - ) -from numpy.core.umath import ( - pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, - mod, exp, log10 - ) -from numpy.core.fromnumeric import ( - ravel, nonzero, sort, partition, mean - ) -from numpy.core.numerictypes import typecodes, number -from numpy.lib.twodim_base import diag -from .utils import deprecate -from ._compiled_base import _insert, add_docstring -from ._compiled_base import digitize, bincount, interp as compiled_interp -from ._compiled_base import add_newdoc_ufunc -from numpy.compat import long - -# Force range to be a generator, for np.delete's usage. -if sys.version_info[0] < 3: - range = xrange - - -__all__ = [ - 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', - 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', - 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', - 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', - 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', - 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', - 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' - ] - - -def iterable(y): - """ - Check whether or not an object can be iterated over. - - Parameters - ---------- - y : object - Input object. - - Returns - ------- - b : {0, 1} - Return 1 if the object has an iterator method or is a sequence, - and 0 otherwise. - - - Examples - -------- - >>> np.iterable([1, 2, 3]) - 1 - >>> np.iterable(2) - 0 - - """ - try: - iter(y) - except: - return 0 - return 1 - - -def histogram(a, bins=10, range=None, normed=False, weights=None, - density=None): - """ - Compute the histogram of a set of data. - - Parameters - ---------- - a : array_like - Input data. The histogram is computed over the flattened array. - bins : int or sequence of scalars, optional - If `bins` is an int, it defines the number of equal-width - bins in the given range (10, by default). If `bins` is a sequence, - it defines the bin edges, including the rightmost edge, allowing - for non-uniform bin widths. - range : (float, float), optional - The lower and upper range of the bins. If not provided, range - is simply ``(a.min(), a.max())``. Values outside the range are - ignored. - normed : bool, optional - This keyword is deprecated in Numpy 1.6 due to confusing/buggy - behavior. It will be removed in Numpy 2.0. Use the density keyword - instead. 
- If False, the result will contain the number of samples - in each bin. If True, the result is the value of the - probability *density* function at the bin, normalized such that - the *integral* over the range is 1. Note that this latter behavior is - known to be buggy with unequal bin widths; use `density` instead. - weights : array_like, optional - An array of weights, of the same shape as `a`. Each value in `a` - only contributes its associated weight towards the bin count - (instead of 1). If `normed` is True, the weights are normalized, - so that the integral of the density over the range remains 1 - density : bool, optional - If False, the result will contain the number of samples - in each bin. If True, the result is the value of the - probability *density* function at the bin, normalized such that - the *integral* over the range is 1. Note that the sum of the - histogram values will not be equal to 1 unless bins of unity - width are chosen; it is not a probability *mass* function. - Overrides the `normed` keyword if given. - - Returns - ------- - hist : array - The values of the histogram. See `normed` and `weights` for a - description of the possible semantics. - bin_edges : array of dtype float - Return the bin edges ``(length(hist)+1)``. - - - See Also - -------- - histogramdd, bincount, searchsorted, digitize - - Notes - ----- - All but the last (righthand-most) bin is half-open. In other words, if - `bins` is:: - - [1, 2, 3, 4] - - then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the - second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* - 4. - - Examples - -------- - >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) - (array([0, 2, 1]), array([0, 1, 2, 3])) - >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) - (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) - >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) - (array([1, 4, 1]), array([0, 1, 2, 3])) - - >>> a = np.arange(5) - >>> hist, bin_edges = np.histogram(a, density=True) - >>> hist - array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) - >>> hist.sum() - 2.4999999999999996 - >>> np.sum(hist*np.diff(bin_edges)) - 1.0 - - """ - - a = asarray(a) - if weights is not None: - weights = asarray(weights) - if np.any(weights.shape != a.shape): - raise ValueError( - 'weights should have the same shape as a.') - weights = weights.ravel() - a = a.ravel() - - if (range is not None): - mn, mx = range - if (mn > mx): - raise AttributeError( - 'max must be larger than min in range parameter.') - - if not iterable(bins): - if np.isscalar(bins) and bins < 1: - raise ValueError( - '`bins` should be a positive integer.') - if range is None: - if a.size == 0: - # handle empty arrays. Can't determine range, so use 0-1. - range = (0, 1) - else: - range = (a.min(), a.max()) - mn, mx = [mi + 0.0 for mi in range] - if mn == mx: - mn -= 0.5 - mx += 0.5 - bins = linspace(mn, mx, bins + 1, endpoint=True) - else: - bins = asarray(bins) - if (np.diff(bins) < 0).any(): - raise AttributeError( - 'bins must increase monotonically.') - - # Histogram is an integer or a float array depending on the weights. 
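    # e.g. an unweighted call gives integer counts, while passing
    # weights=np.ones(len(a), dtype=np.float32) makes the histogram
    # values float32.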
- if weights is None: - ntype = int - else: - ntype = weights.dtype - n = np.zeros(bins.shape, ntype) - - block = 65536 - if weights is None: - for i in arange(0, len(a), block): - sa = sort(a[i:i+block]) - n += np.r_[sa.searchsorted(bins[:-1], 'left'), - sa.searchsorted(bins[-1], 'right')] - else: - zero = array(0, dtype=ntype) - for i in arange(0, len(a), block): - tmp_a = a[i:i+block] - tmp_w = weights[i:i+block] - sorting_index = np.argsort(tmp_a) - sa = tmp_a[sorting_index] - sw = tmp_w[sorting_index] - cw = np.concatenate(([zero, ], sw.cumsum())) - bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), - sa.searchsorted(bins[-1], 'right')] - n += cw[bin_index] - - n = np.diff(n) - - if density is not None: - if density: - db = array(np.diff(bins), float) - return n/db/n.sum(), bins - else: - return n, bins - else: - # deprecated, buggy behavior. Remove for Numpy 2.0 - if normed: - db = array(np.diff(bins), float) - return n/(n*db).sum(), bins - else: - return n, bins - - -def histogramdd(sample, bins=10, range=None, normed=False, weights=None): - """ - Compute the multidimensional histogram of some data. - - Parameters - ---------- - sample : array_like - The data to be histogrammed. It must be an (N,D) array or data - that can be converted to such. The rows of the resulting array - are the coordinates of points in a D dimensional polytope. - bins : sequence or int, optional - The bin specification: - - * A sequence of arrays describing the bin edges along each dimension. - * The number of bins for each dimension (nx, ny, ... =bins) - * The number of bins for all dimensions (nx=ny=...=bins). - - range : sequence, optional - A sequence of lower and upper bin edges to be used if the edges are - not given explicitly in `bins`. Defaults to the minimum and maximum - values along each dimension. - normed : bool, optional - If False, returns the number of samples in each bin. If True, - returns the bin density ``bin_count / sample_count / bin_volume``. - weights : array_like (N,), optional - An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. - Weights are normalized to 1 if normed is True. If normed is False, - the values of the returned histogram are equal to the sum of the - weights belonging to the samples falling into each bin. - - Returns - ------- - H : ndarray - The multidimensional histogram of sample x. See normed and weights - for the different possible semantics. - edges : list - A list of D arrays describing the bin edges for each dimension. - - See Also - -------- - histogram: 1-D histogram - histogram2d: 2-D histogram - - Examples - -------- - >>> r = np.random.randn(100,3) - >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) - >>> H.shape, edges[0].size, edges[1].size, edges[2].size - ((5, 8, 4), 6, 9, 5) - - """ - - try: - # Sample is an ND-array. - N, D = sample.shape - except (AttributeError, ValueError): - # Sample is a sequence of 1D arrays. - sample = atleast_2d(sample).T - N, D = sample.shape - - nbin = empty(D, int) - edges = D*[None] - dedges = D*[None] - if weights is not None: - weights = asarray(weights) - - try: - M = len(bins) - if M != D: - raise AttributeError( - 'The dimension of bins must be equal to the dimension of the ' - ' sample x.') - except TypeError: - # bins is an integer - bins = D*[bins] - - # Select range for each dimension - # Used only if number of bins is given. - if range is None: - # Handle empty input. Range can't be determined in that case, use 0-1. 
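    # (this mirrors the empty-array fallback in histogram() above, which
    # likewise defaults the range to (0, 1))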
- if N == 0: - smin = zeros(D) - smax = ones(D) - else: - smin = atleast_1d(array(sample.min(0), float)) - smax = atleast_1d(array(sample.max(0), float)) - else: - smin = zeros(D) - smax = zeros(D) - for i in arange(D): - smin[i], smax[i] = range[i] - - # Make sure the bins have a finite width. - for i in arange(len(smin)): - if smin[i] == smax[i]: - smin[i] = smin[i] - .5 - smax[i] = smax[i] + .5 - - # avoid rounding issues for comparisons when dealing with inexact types - if np.issubdtype(sample.dtype, np.inexact): - edge_dt = sample.dtype - else: - edge_dt = float - # Create edge arrays - for i in arange(D): - if isscalar(bins[i]): - if bins[i] < 1: - raise ValueError( - "Element at index %s in `bins` should be a positive " - "integer." % i) - nbin[i] = bins[i] + 2 # +2 for outlier bins - edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) - else: - edges[i] = asarray(bins[i], edge_dt) - nbin[i] = len(edges[i]) + 1 # +1 for outlier bins - dedges[i] = diff(edges[i]) - if np.any(np.asarray(dedges[i]) <= 0): - raise ValueError( - "Found bin edge of size <= 0. Did you specify `bins` with" - "non-monotonic sequence?") - - nbin = asarray(nbin) - - # Handle empty input. - if N == 0: - return np.zeros(nbin-2), edges - - # Compute the bin number each sample falls into. - Ncount = {} - for i in arange(D): - Ncount[i] = digitize(sample[:, i], edges[i]) - - # Using digitize, values that fall on an edge are put in the right bin. - # For the rightmost bin, we want values equal to the right edge to be - # counted in the last bin, and not as an outlier. - for i in arange(D): - # Rounding precision - mindiff = dedges[i].min() - if not np.isinf(mindiff): - decimal = int(-log10(mindiff)) + 6 - # Find which points are on the rightmost edge. - not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) - on_edge = (around(sample[:, i], decimal) == - around(edges[i][-1], decimal)) - # Shift these points one bin to the left. - Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 - - # Flattened histogram matrix (1D) - # Reshape is used so that overlarge arrays - # will raise an error. - hist = zeros(nbin, float).reshape(-1) - - # Compute the sample indices in the flattened histogram matrix. - ni = nbin.argsort() - xy = zeros(N, int) - for i in arange(0, D-1): - xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() - xy += Ncount[ni[-1]] - - # Compute the number of repetitions in xy and assign it to the - # flattened histmat. - if len(xy) == 0: - return zeros(nbin-2, int), edges - - flatcount = bincount(xy, weights) - a = arange(len(flatcount)) - hist[a] = flatcount - - # Shape into a proper matrix - hist = hist.reshape(sort(nbin)) - for i in arange(nbin.size): - j = ni.argsort()[i] - hist = hist.swapaxes(i, j) - ni[i], ni[j] = ni[j], ni[i] - - # Remove outliers (indices 0 and -1 for each dimension). - core = D*[slice(1, -1)] - hist = hist[core] - - # Normalize if normed is True - if normed: - s = hist.sum() - for i in arange(D): - shape = ones(D, int) - shape[i] = nbin[i] - 2 - hist = hist / dedges[i].reshape(shape) - hist /= s - - if (hist.shape != nbin - 2).any(): - raise RuntimeError( - "Internal Shape Error") - return hist, edges - - -def average(a, axis=None, weights=None, returned=False): - """ - Compute the weighted average along the specified axis. - - Parameters - ---------- - a : array_like - Array containing data to be averaged. If `a` is not an array, a - conversion is attempted. - axis : int, optional - Axis along which to average `a`. If `None`, averaging is done over - the flattened array. 
- weights : array_like, optional - An array of weights associated with the values in `a`. Each value in - `a` contributes to the average according to its associated weight. - The weights array can either be 1-D (in which case its length must be - the size of `a` along the given axis) or of the same shape as `a`. - If `weights=None`, then all data in `a` are assumed to have a - weight equal to one. - returned : bool, optional - Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) - is returned, otherwise only the average is returned. - If `weights=None`, `sum_of_weights` is equivalent to the number of - elements over which the average is taken. - - - Returns - ------- - average, [sum_of_weights] : {array_type, double} - Return the average along the specified axis. When returned is `True`, - return a tuple with the average as the first element and the sum - of the weights as the second element. The return type is `Float` - if `a` is of integer type, otherwise it is of the same type as `a`. - `sum_of_weights` is of the same type as `average`. - - Raises - ------ - ZeroDivisionError - When all weights along axis are zero. See `numpy.ma.average` for a - version robust to this type of error. - TypeError - When the length of 1D `weights` is not the same as the shape of `a` - along axis. - - See Also - -------- - mean - - ma.average : average for masked arrays -- useful if your data contains - "missing" values - - Examples - -------- - >>> data = range(1,5) - >>> data - [1, 2, 3, 4] - >>> np.average(data) - 2.5 - >>> np.average(range(1,11), weights=range(10,0,-1)) - 4.0 - - >>> data = np.arange(6).reshape((3,2)) - >>> data - array([[0, 1], - [2, 3], - [4, 5]]) - >>> np.average(data, axis=1, weights=[1./4, 3./4]) - array([ 0.75, 2.75, 4.75]) - >>> np.average(data, weights=[1./4, 3./4]) - Traceback (most recent call last): - ... - TypeError: Axis must be specified when shapes of a and weights differ. - - """ - if not isinstance(a, np.matrix): - a = np.asarray(a) - - if weights is None: - avg = a.mean(axis) - scl = avg.dtype.type(a.size/avg.size) - else: - a = a + 0.0 - wgt = np.array(weights, dtype=a.dtype, copy=0) - - # Sanity checks - if a.shape != wgt.shape: - if axis is None: - raise TypeError( - "Axis must be specified when shapes of a and weights " - "differ.") - if wgt.ndim != 1: - raise TypeError( - "1D weights expected when shapes of a and weights differ.") - if wgt.shape[0] != a.shape[axis]: - raise ValueError( - "Length of weights not compatible with specified axis.") - - # setup wgt to broadcast along axis - wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) - - scl = wgt.sum(axis=axis) - if (scl == 0.0).any(): - raise ZeroDivisionError( - "Weights sum to zero, can't be normalized") - - avg = np.multiply(a, wgt).sum(axis)/scl - - if returned: - scl = np.multiply(avg, 0) + scl - return avg, scl - else: - return avg - - -def asarray_chkfinite(a, dtype=None, order=None): - """ - Convert the input to an array, checking for NaNs or Infs. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists and ndarrays. Success requires no NaNs or Infs. - dtype : data-type, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Defaults to 'C'. - - Returns - ------- - out : ndarray - Array interpretation of `a`. 
No copy is performed if the input - is already an ndarray. If `a` is a subclass of ndarray, a base - class ndarray is returned. - - Raises - ------ - ValueError - Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). - - See Also - -------- - asarray : Create and array. - asanyarray : Similar function which passes through subclasses. - ascontiguousarray : Convert input to a contiguous array. - asfarray : Convert input to a floating point ndarray. - asfortranarray : Convert input to an ndarray with column-major - memory order. - fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. - - Examples - -------- - Convert a list into an array. If all elements are finite - ``asarray_chkfinite`` is identical to ``asarray``. - - >>> a = [1, 2] - >>> np.asarray_chkfinite(a, dtype=float) - array([1., 2.]) - - Raises ValueError if array_like contains Nans or Infs. - - >>> a = [1, 2, np.inf] - >>> try: - ... np.asarray_chkfinite(a) - ... except ValueError: - ... print 'ValueError' - ... - ValueError - - """ - a = asarray(a, dtype=dtype, order=order) - if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): - raise ValueError( - "array must not contain infs or NaNs") - return a - - -def piecewise(x, condlist, funclist, *args, **kw): - """ - Evaluate a piecewise-defined function. - - Given a set of conditions and corresponding functions, evaluate each - function on the input data wherever its condition is true. - - Parameters - ---------- - x : ndarray - The input domain. - condlist : list of bool arrays - Each boolean array corresponds to a function in `funclist`. Wherever - `condlist[i]` is True, `funclist[i](x)` is used as the output value. - - Each boolean array in `condlist` selects a piece of `x`, - and should therefore be of the same shape as `x`. - - The length of `condlist` must correspond to that of `funclist`. - If one extra function is given, i.e. if - ``len(funclist) - len(condlist) == 1``, then that extra function - is the default value, used wherever all conditions are false. - funclist : list of callables, f(x,*args,**kw), or scalars - Each function is evaluated over `x` wherever its corresponding - condition is True. It should take an array as input and give an array - or a scalar value as output. If, instead of a callable, - a scalar is provided then a constant function (``lambda x: scalar``) is - assumed. - args : tuple, optional - Any further arguments given to `piecewise` are passed to the functions - upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then - each function is called as ``f(x, 1, 'a')``. - kw : dict, optional - Keyword arguments used in calling `piecewise` are passed to the - functions upon execution, i.e., if called - ``piecewise(..., ..., lambda=1)``, then each function is called as - ``f(x, lambda=1)``. - - Returns - ------- - out : ndarray - The output is the same shape and type as x and is found by - calling the functions in `funclist` on the appropriate portions of `x`, - as defined by the boolean arrays in `condlist`. Portions not covered - by any condition have a default value of 0. - - - See Also - -------- - choose, select, where - - Notes - ----- - This is similar to choose or select, except that functions are - evaluated on elements of `x` that satisfy the corresponding condition from - `condlist`. - - The result is:: - - |-- - |funclist[0](x[condlist[0]]) - out = |funclist[1](x[condlist[1]]) - |... 
- |funclist[n2](x[condlist[n2]]) - |-- - - Examples - -------- - Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. - - >>> x = np.linspace(-2.5, 2.5, 6) - >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) - array([-1., -1., -1., 1., 1., 1.]) - - Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for - ``x >= 0``. - - >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) - array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) - - """ - x = asanyarray(x) - n2 = len(funclist) - if (isscalar(condlist) or not (isinstance(condlist[0], list) or - isinstance(condlist[0], ndarray))): - condlist = [condlist] - condlist = array(condlist, dtype=bool) - n = len(condlist) - # This is a hack to work around problems with NumPy's - # handling of 0-d arrays and boolean indexing with - # numpy.bool_ scalars - zerod = False - if x.ndim == 0: - x = x[None] - zerod = True - if condlist.shape[-1] != 1: - condlist = condlist.T - if n == n2 - 1: # compute the "otherwise" condition. - totlist = np.logical_or.reduce(condlist, axis=0) - condlist = np.vstack([condlist, ~totlist]) - n += 1 - if (n != n2): - raise ValueError( - "function list and condition list must be the same") - - y = zeros(x.shape, x.dtype) - for k in range(n): - item = funclist[k] - if not isinstance(item, collections.Callable): - y[condlist[k]] = item - else: - vals = x[condlist[k]] - if vals.size > 0: - y[condlist[k]] = item(vals, *args, **kw) - if zerod: - y = y.squeeze() - return y - - -def select(condlist, choicelist, default=0): - """ - Return an array drawn from elements in choicelist, depending on conditions. - - Parameters - ---------- - condlist : list of bool ndarrays - The list of conditions which determine from which array in `choicelist` - the output elements are taken. When multiple conditions are satisfied, - the first one encountered in `condlist` is used. - choicelist : list of ndarrays - The list of arrays from which the output elements are taken. It has - to be of the same length as `condlist`. - default : scalar, optional - The element inserted in `output` when all conditions evaluate to False. - - Returns - ------- - output : ndarray - The output at position m is the m-th element of the array in - `choicelist` where the m-th element of the corresponding array in - `condlist` is True. - - See Also - -------- - where : Return elements from one of two arrays depending on condition. - take, choose, compress, diag, diagonal - - Examples - -------- - >>> x = np.arange(10) - >>> condlist = [x<3, x>5] - >>> choicelist = [x, x**2] - >>> np.select(condlist, choicelist) - array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) - - """ - # Check the size of condlist and choicelist are the same, or abort. - if len(condlist) != len(choicelist): - raise ValueError( - 'list of cases must be same length as list of conditions') - - # Now that the dtype is known, handle the deprecated select([], []) case - if len(condlist) == 0: - warnings.warn("select with an empty condition list is not possible" - "and will be deprecated", - DeprecationWarning) - return np.asarray(default)[()] - - choicelist = [np.asarray(choice) for choice in choicelist] - choicelist.append(np.asarray(default)) - - # need to get the result type before broadcasting for correct scalar - # behaviour - dtype = np.result_type(*choicelist) - - # Convert conditions to arrays and broadcast conditions and choices - # as the shape is needed for the result. Doing it seperatly optimizes - # for example when all choices are scalars. 
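    # e.g. for np.select([x > 3], [0], default=-1) every choice is a 0-d
    # array, so broadcasting the choices together is cheap and the result
    # shape can be taken from the conditions alone (handled below).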
- condlist = np.broadcast_arrays(*condlist) - choicelist = np.broadcast_arrays(*choicelist) - - # If cond array is not an ndarray in boolean format or scalar bool, abort. - deprecated_ints = False - for i in range(len(condlist)): - cond = condlist[i] - if cond.dtype.type is not np.bool_: - if np.issubdtype(cond.dtype, np.integer): - # A previous implementation accepted int ndarrays accidentally. - # Supported here deliberately, but deprecated. - condlist[i] = condlist[i].astype(bool) - deprecated_ints = True - else: - raise ValueError( - 'invalid entry in choicelist: should be boolean ndarray') - - if deprecated_ints: - msg = "select condlists containing integer ndarrays is deprecated " \ - "and will be removed in the future. Use `.astype(bool)` to " \ - "convert to bools." - warnings.warn(msg, DeprecationWarning) - - if choicelist[0].ndim == 0: - # This may be common, so avoid the call. - result_shape = condlist[0].shape - else: - result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape - - result = np.full(result_shape, choicelist[-1], dtype) - - # Use np.copyto to burn each choicelist array onto result, using the - # corresponding condlist as a boolean mask. This is done in reverse - # order since the first choice should take precedence. - choicelist = choicelist[-2::-1] - condlist = condlist[::-1] - for choice, cond in zip(choicelist, condlist): - np.copyto(result, choice, where=cond) - - return result - - -def copy(a, order='K'): - """ - Return an array copy of the given object. - - Parameters - ---------- - a : array_like - Input data. - order : {'C', 'F', 'A', 'K'}, optional - Controls the memory layout of the copy. 'C' means C-order, - 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, - 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. (Note that this function and :meth:ndarray.copy are very - similar, but have different default values for their order= - arguments.) - - Returns - ------- - arr : ndarray - Array interpretation of `a`. - - Notes - ----- - This is equivalent to - - >>> np.array(a, copy=True) #doctest: +SKIP - - Examples - -------- - Create an array x, with a reference y and a copy z: - - >>> x = np.array([1, 2, 3]) - >>> y = x - >>> z = np.copy(x) - - Note that, when we modify x, y changes, but not z: - - >>> x[0] = 10 - >>> x[0] == y[0] - True - >>> x[0] == z[0] - False - - """ - return array(a, order=order, copy=True) - -# Basic operations - - -def gradient(f, *varargs): - """ - Return the gradient of an N-dimensional array. - - The gradient is computed using second order accurate central differences - in the interior and second order accurate one-sides (forward or backwards) - differences at the boundaries. The returned gradient hence has the same - shape as the input array. - - Parameters - ---------- - f : array_like - An N-dimensional array containing samples of a scalar function. - `*varargs` : scalars - 0, 1, or N scalars specifying the sample distances in each direction, - that is: `dx`, `dy`, `dz`, ... The default distance is 1. - - Returns - ------- - gradient : ndarray - N arrays of the same shape as `f` giving the derivative of `f` with - respect to each dimension. - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) - >>> np.gradient(x) - array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) - >>> np.gradient(x, 2) - array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) - - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) - [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), - array([[ 1. 
, 2.5, 4. ], - [ 1. , 1. , 1. ]])] - - >>> x = np.array([0,1,2,3,4]) - >>> dx = gradient(x) - >>> y = x**2 - >>> gradient(y,dx) - array([0., 2., 4., 6., 8.]) - """ - f = np.asanyarray(f) - N = len(f.shape) # number of dimensions - n = len(varargs) - if n == 0: - dx = [1.0]*N - elif n == 1: - dx = [varargs[0]]*N - elif n == N: - dx = list(varargs) - else: - raise SyntaxError( - "invalid number of arguments") - - # use central differences on interior and one-sided differences on the - # endpoints. This preserves second order-accuracy over the full domain. - - outvals = [] - - # create slice objects --- initially all are [:, :, ..., :] - slice1 = [slice(None)]*N - slice2 = [slice(None)]*N - slice3 = [slice(None)]*N - slice4 = [slice(None)]*N - - otype = f.dtype.char - if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: - otype = 'd' - - # Difference of datetime64 elements results in timedelta64 - if otype == 'M': - # Need to use the full dtype name because it contains unit information - otype = f.dtype.name.replace('datetime', 'timedelta') - elif otype == 'm': - # Needs to keep the specific units, can't be a general unit - otype = f.dtype - - # Convert datetime64 data into ints. Make dummy variable `y` - # that is a view of ints if the data is datetime64, otherwise - # just set y equal to the the array `f`. - if f.dtype.char in ["M", "m"]: - y = f.view('int64') - else: - y = f - - for axis in range(N): - - if y.shape[axis] < 2: - raise ValueError( - "Shape of array too small to calculate a numerical gradient, " - "at least two elements are required.") - - # Numerical differentiation: 1st order edges, 2nd order interior - if y.shape[axis] == 2: - # Use first order differences for time data - out = np.empty_like(y, dtype=otype) - - slice1[axis] = slice(1, -1) - slice2[axis] = slice(2, None) - slice3[axis] = slice(None, -2) - # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 - out[slice1] = (y[slice2] - y[slice3])/2.0 - - slice1[axis] = 0 - slice2[axis] = 1 - slice3[axis] = 0 - # 1D equivalent -- out[0] = (y[1] - y[0]) - out[slice1] = (y[slice2] - y[slice3]) - - slice1[axis] = -1 - slice2[axis] = -1 - slice3[axis] = -2 - # 1D equivalent -- out[-1] = (y[-1] - y[-2]) - out[slice1] = (y[slice2] - y[slice3]) - - # Numerical differentiation: 2st order edges, 2nd order interior - else: - # Use second order differences where possible - out = np.empty_like(y, dtype=otype) - - slice1[axis] = slice(1, -1) - slice2[axis] = slice(2, None) - slice3[axis] = slice(None, -2) - # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 - out[slice1] = (y[slice2] - y[slice3])/2.0 - - slice1[axis] = 0 - slice2[axis] = 0 - slice3[axis] = 1 - slice4[axis] = 2 - # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0 - out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 - - slice1[axis] = -1 - slice2[axis] = -1 - slice3[axis] = -2 - slice4[axis] = -3 - # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) - out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 - - # divide by step size - outvals.append(out / dx[axis]) - - # reset the slice object in this dimension to ":" - slice1[axis] = slice(None) - slice2[axis] = slice(None) - slice3[axis] = slice(None) - slice4[axis] = slice(None) - - if N == 1: - return outvals[0] - else: - return outvals - - -def diff(a, n=1, axis=-1): - """ - Calculate the n-th order discrete difference along given axis. 
- - The first order difference is given by ``out[n] = a[n+1] - a[n]`` along - the given axis, higher order differences are calculated by using `diff` - recursively. - - Parameters - ---------- - a : array_like - Input array - n : int, optional - The number of times values are differenced. - axis : int, optional - The axis along which the difference is taken, default is the last axis. - - Returns - ------- - diff : ndarray - The `n` order differences. The shape of the output is the same as `a` - except along `axis` where the dimension is smaller by `n`. - - See Also - -------- - gradient, ediff1d, cumsum - - Examples - -------- - >>> x = np.array([1, 2, 4, 7, 0]) - >>> np.diff(x) - array([ 1, 2, 3, -7]) - >>> np.diff(x, n=2) - array([ 1, 1, -10]) - - >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) - >>> np.diff(x) - array([[2, 3, 4], - [5, 1, 2]]) - >>> np.diff(x, axis=0) - array([[-1, 2, 0, -2]]) - - """ - if n == 0: - return a - if n < 0: - raise ValueError( - "order must be non-negative but got " + repr(n)) - a = asanyarray(a) - nd = len(a.shape) - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd - slice1[axis] = slice(1, None) - slice2[axis] = slice(None, -1) - slice1 = tuple(slice1) - slice2 = tuple(slice2) - if n > 1: - return diff(a[slice1]-a[slice2], n-1, axis=axis) - else: - return a[slice1]-a[slice2] - - -def interp(x, xp, fp, left=None, right=None): - """ - One-dimensional linear interpolation. - - Returns the one-dimensional piecewise linear interpolant to a function - with given values at discrete data-points. - - Parameters - ---------- - x : array_like - The x-coordinates of the interpolated values. - - xp : 1-D sequence of floats - The x-coordinates of the data points, must be increasing. - - fp : 1-D sequence of floats - The y-coordinates of the data points, same length as `xp`. - - left : float, optional - Value to return for `x < xp[0]`, default is `fp[0]`. - - right : float, optional - Value to return for `x > xp[-1]`, default is `fp[-1]`. - - Returns - ------- - y : {float, ndarray} - The interpolated values, same shape as `x`. - - Raises - ------ - ValueError - If `xp` and `fp` have different length - - Notes - ----- - Does not check that the x-coordinate sequence `xp` is increasing. - If `xp` is not increasing, the results are nonsense. - A simple check for increasing is:: - - np.all(np.diff(xp) > 0) - - - Examples - -------- - >>> xp = [1, 2, 3] - >>> fp = [3, 2, 0] - >>> np.interp(2.5, xp, fp) - 1.0 - >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) - array([ 3. , 3. , 2.5 , 0.56, 0. ]) - >>> UNDEF = -99.0 - >>> np.interp(3.14, xp, fp, right=UNDEF) - -99.0 - - Plot an interpolant to the sine function: - - >>> x = np.linspace(0, 2*np.pi, 10) - >>> y = np.sin(x) - >>> xvals = np.linspace(0, 2*np.pi, 50) - >>> yinterp = np.interp(xvals, x, y) - >>> import matplotlib.pyplot as plt - >>> plt.plot(x, y, 'o') - [] - >>> plt.plot(xvals, yinterp, '-x') - [] - >>> plt.show() - - """ - if isinstance(x, (float, int, number)): - return compiled_interp([x], xp, fp, left, right).item() - elif isinstance(x, np.ndarray) and x.ndim == 0: - return compiled_interp([x], xp, fp, left, right).item() - else: - return compiled_interp(x, xp, fp, left, right) - - -def angle(z, deg=0): - """ - Return the angle of the complex argument. - - Parameters - ---------- - z : array_like - A complex number or sequence of complex numbers. - deg : bool, optional - Return angle in degrees if True, radians if False (default). 
- - Returns - ------- - angle : {ndarray, scalar} - The counterclockwise angle from the positive real axis on - the complex plane, with dtype as numpy.float64. - - See Also - -------- - arctan2 - absolute - - - - Examples - -------- - >>> np.angle([1.0, 1.0j, 1+1j]) # in radians - array([ 0. , 1.57079633, 0.78539816]) - >>> np.angle(1+1j, deg=True) # in degrees - 45.0 - - """ - if deg: - fact = 180/pi - else: - fact = 1.0 - z = asarray(z) - if (issubclass(z.dtype.type, _nx.complexfloating)): - zimag = z.imag - zreal = z.real - else: - zimag = 0 - zreal = z - return arctan2(zimag, zreal) * fact - - -def unwrap(p, discont=pi, axis=-1): - """ - Unwrap by changing deltas between values to 2*pi complement. - - Unwrap radian phase `p` by changing absolute jumps greater than - `discont` to their 2*pi complement along the given axis. - - Parameters - ---------- - p : array_like - Input array. - discont : float, optional - Maximum discontinuity between values, default is ``pi``. - axis : int, optional - Axis along which unwrap will operate, default is the last axis. - - Returns - ------- - out : ndarray - Output array. - - See Also - -------- - rad2deg, deg2rad - - Notes - ----- - If the discontinuity in `p` is smaller than ``pi``, but larger than - `discont`, no unwrapping is done because taking the 2*pi complement - would only make the discontinuity larger. - - Examples - -------- - >>> phase = np.linspace(0, np.pi, num=5) - >>> phase[3:] += np.pi - >>> phase - array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) - >>> np.unwrap(phase) - array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) - - """ - p = asarray(p) - nd = len(p.shape) - dd = diff(p, axis=axis) - slice1 = [slice(None, None)]*nd # full slices - slice1[axis] = slice(1, None) - ddmod = mod(dd + pi, 2*pi) - pi - _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) - ph_correct = ddmod - dd - _nx.copyto(ph_correct, 0, where=abs(dd) < discont) - up = array(p, copy=True, dtype='d') - up[slice1] = p[slice1] + ph_correct.cumsum(axis) - return up - - -def sort_complex(a): - """ - Sort a complex array using the real part first, then the imaginary part. - - Parameters - ---------- - a : array_like - Input array - - Returns - ------- - out : complex ndarray - Always returns a sorted complex array. - - Examples - -------- - >>> np.sort_complex([5, 3, 6, 2, 1]) - array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) - - >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) - array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) - - """ - b = array(a, copy=True) - b.sort() - if not issubclass(b.dtype.type, _nx.complexfloating): - if b.dtype.char in 'bhBH': - return b.astype('F') - elif b.dtype.char == 'g': - return b.astype('G') - else: - return b.astype('D') - else: - return b - - -def trim_zeros(filt, trim='fb'): - """ - Trim the leading and/or trailing zeros from a 1-D array or sequence. - - Parameters - ---------- - filt : 1-D array or sequence - Input array. - trim : str, optional - A string with 'f' representing trim from front and 'b' to trim from - back. Default is 'fb', trim zeros from both front and back of the - array. - - Returns - ------- - trimmed : 1-D array or sequence - The result of trimming the input. The input data type is preserved. - - Examples - -------- - >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) - >>> np.trim_zeros(a) - array([1, 2, 3, 0, 2, 1]) - - >>> np.trim_zeros(a, 'b') - array([0, 0, 0, 1, 2, 3, 0, 2, 1]) - - The input data type is preserved, list/tuple in means list/tuple out. 
- - >>> np.trim_zeros([0, 1, 2, 0]) - [1, 2] - - """ - first = 0 - trim = trim.upper() - if 'F' in trim: - for i in filt: - if i != 0.: - break - else: - first = first + 1 - last = len(filt) - if 'B' in trim: - for i in filt[::-1]: - if i != 0.: - break - else: - last = last - 1 - return filt[first:last] - - -@deprecate -def unique(x): - """ - This function is deprecated. Use numpy.lib.arraysetops.unique() - instead. - """ - try: - tmp = x.flatten() - if tmp.size == 0: - return tmp - tmp.sort() - idx = concatenate(([True], tmp[1:] != tmp[:-1])) - return tmp[idx] - except AttributeError: - items = sorted(set(x)) - return asarray(items) - - -def extract(condition, arr): - """ - Return the elements of an array that satisfy some condition. - - This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If - `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. - - Parameters - ---------- - condition : array_like - An array whose nonzero or True entries indicate the elements of `arr` - to extract. - arr : array_like - Input array of the same size as `condition`. - - Returns - ------- - extract : ndarray - Rank 1 array of values from `arr` where `condition` is True. - - See Also - -------- - take, put, copyto, compress - - Examples - -------- - >>> arr = np.arange(12).reshape((3, 4)) - >>> arr - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> condition = np.mod(arr, 3)==0 - >>> condition - array([[ True, False, False, True], - [False, False, True, False], - [False, True, False, False]], dtype=bool) - >>> np.extract(condition, arr) - array([0, 3, 6, 9]) - - - If `condition` is boolean: - - >>> arr[condition] - array([0, 3, 6, 9]) - - """ - return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) - - -def place(arr, mask, vals): - """ - Change elements of an array based on conditional and input values. - - Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that - `place` uses the first N elements of `vals`, where N is the number of - True values in `mask`, while `copyto` uses the elements where `mask` - is True. - - Note that `extract` does the exact opposite of `place`. - - Parameters - ---------- - arr : array_like - Array to put data into. - mask : array_like - Boolean mask array. Must have the same size as `a`. - vals : 1-D sequence - Values to put into `a`. Only the first N elements are used, where - N is the number of True values in `mask`. If `vals` is smaller - than N it will be repeated. - - See Also - -------- - copyto, put, take, extract - - Examples - -------- - >>> arr = np.arange(6).reshape(2, 3) - >>> np.place(arr, arr>2, [44, 55]) - >>> arr - array([[ 0, 1, 2], - [44, 55, 44]]) - - """ - return _insert(arr, mask, vals) - - -def disp(mesg, device=None, linefeed=True): - """ - Display a message on a device. - - Parameters - ---------- - mesg : str - Message to display. - device : object - Device to write message. If None, defaults to ``sys.stdout`` which is - very similar to ``print``. `device` needs to have ``write()`` and - ``flush()`` methods. - linefeed : bool, optional - Option whether to print a line feed or not. Defaults to True. - - Raises - ------ - AttributeError - If `device` does not have a ``write()`` or ``flush()`` method. 
- - Examples - -------- - Besides ``sys.stdout``, a file-like object can also be used as it has - both required methods: - - >>> from StringIO import StringIO - >>> buf = StringIO() - >>> np.disp('"Display" in a file', device=buf) - >>> buf.getvalue() - '"Display" in a file\\n' - - """ - if device is None: - device = sys.stdout - if linefeed: - device.write('%s\n' % mesg) - else: - device.write('%s' % mesg) - device.flush() - return - - -class vectorize(object): - """ - vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False) - - Generalized function class. - - Define a vectorized function which takes a nested sequence - of objects or numpy arrays as inputs and returns a - numpy array as output. The vectorized function evaluates `pyfunc` over - successive tuples of the input arrays like the python map function, - except it uses the broadcasting rules of numpy. - - The data type of the output of `vectorized` is determined by calling - the function with the first element of the input. This can be avoided - by specifying the `otypes` argument. - - Parameters - ---------- - pyfunc : callable - A python function or method. - otypes : str or list of dtypes, optional - The output data type. It must be specified as either a string of - typecode characters or a list of data type specifiers. There should - be one data type specifier for each output. - doc : str, optional - The docstring for the function. If `None`, the docstring will be the - ``pyfunc.__doc__``. - excluded : set, optional - Set of strings or integers representing the positional or keyword - arguments for which the function will not be vectorized. These will be - passed directly to `pyfunc` unmodified. - - .. versionadded:: 1.7.0 - - cache : bool, optional - If `True`, then cache the first function call that determines the number - of outputs if `otypes` is not provided. - - .. versionadded:: 1.7.0 - - Returns - ------- - vectorized : callable - Vectorized function. - - Examples - -------- - >>> def myfunc(a, b): - ... "Return a-b if a>b, otherwise return a+b" - ... if a > b: - ... return a - b - ... else: - ... return a + b - - >>> vfunc = np.vectorize(myfunc) - >>> vfunc([1, 2, 3, 4], 2) - array([3, 4, 1, 2]) - - The docstring is taken from the input function to `vectorize` unless it - is specified - - >>> vfunc.__doc__ - 'Return a-b if a>b, otherwise return a+b' - >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') - >>> vfunc.__doc__ - 'Vectorized `myfunc`' - - The output type is determined by evaluating the first element of the input, - unless it is specified - - >>> out = vfunc([1, 2, 3, 4], 2) - >>> type(out[0]) - - >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) - >>> out = vfunc([1, 2, 3, 4], 2) - >>> type(out[0]) - - - The `excluded` argument can be used to prevent vectorizing over certain - arguments. This can be useful for array-like arguments of a fixed length - such as the coefficients for a polynomial as in `polyval`: - - >>> def mypolyval(p, x): - ... _p = list(p) - ... res = _p.pop(0) - ... while _p: - ... res = res*x + _p.pop(0) - ... return res - >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) - >>> vpolyval(p=[1, 2, 3], x=[0, 1]) - array([3, 6]) - - Positional arguments may also be excluded by specifying their position: - - >>> vpolyval.excluded.add(0) - >>> vpolyval([1, 2, 3], x=[0, 1]) - array([3, 6]) - - Notes - ----- - The `vectorize` function is provided primarily for convenience, not for - performance. The implementation is essentially a for loop. 
- - If `otypes` is not specified, then a call to the function with the - first argument will be used to determine the number of outputs. The - results of this call will be cached if `cache` is `True` to prevent - calling the function twice. However, to implement the cache, the - original function must be wrapped which will slow down subsequent - calls, so only do this if your function is expensive. - - The new keyword argument interface and `excluded` argument support - further degrades performance. - - """ - - def __init__(self, pyfunc, otypes='', doc=None, excluded=None, - cache=False): - self.pyfunc = pyfunc - self.cache = cache - self._ufunc = None # Caching to improve default performance - - if doc is None: - self.__doc__ = pyfunc.__doc__ - else: - self.__doc__ = doc - - if isinstance(otypes, str): - self.otypes = otypes - for char in self.otypes: - if char not in typecodes['All']: - raise ValueError( - "Invalid otype specified: %s" % (char,)) - elif iterable(otypes): - self.otypes = ''.join([_nx.dtype(x).char for x in otypes]) - else: - raise ValueError( - "Invalid otype specification") - - # Excluded variable support - if excluded is None: - excluded = set() - self.excluded = set(excluded) - - def __call__(self, *args, **kwargs): - """ - Return arrays with the results of `pyfunc` broadcast (vectorized) over - `args` and `kwargs` not in `excluded`. - """ - excluded = self.excluded - if not kwargs and not excluded: - func = self.pyfunc - vargs = args - else: - # The wrapper accepts only positional arguments: we use `names` and - # `inds` to mutate `the_args` and `kwargs` to pass to the original - # function. - nargs = len(args) - - names = [_n for _n in kwargs if _n not in excluded] - inds = [_i for _i in range(nargs) if _i not in excluded] - the_args = list(args) - - def func(*vargs): - for _n, _i in enumerate(inds): - the_args[_i] = vargs[_n] - kwargs.update(zip(names, vargs[len(inds):])) - return self.pyfunc(*the_args, **kwargs) - - vargs = [args[_i] for _i in inds] - vargs.extend([kwargs[_n] for _n in names]) - - return self._vectorize_call(func=func, args=vargs) - - def _get_ufunc_and_otypes(self, func, args): - """Return (ufunc, otypes).""" - # frompyfunc will fail if args is empty - if not args: - raise ValueError('args can not be empty') - - if self.otypes: - otypes = self.otypes - nout = len(otypes) - - # Note logic here: We only *use* self._ufunc if func is self.pyfunc - # even though we set self._ufunc regardless. - if func is self.pyfunc and self._ufunc is not None: - ufunc = self._ufunc - else: - ufunc = self._ufunc = frompyfunc(func, len(args), nout) - else: - # Get number of outputs and output types by calling the function on - # the first entries of args. We also cache the result to prevent - # the subsequent call when the ufunc is evaluated. - # Assumes that ufunc first evaluates the 0th elements in the input - # arrays (the input values are not checked to ensure this) - inputs = [asarray(_a).flat[0] for _a in args] - outputs = func(*inputs) - - # Performance note: profiling indicates that -- for simple - # functions at least -- this wrapping can almost double the - # execution time. - # Hence we make it optional. 
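            # With cache=True the output computed above is replayed once
            # by _func, so the user's function is not called a second
            # time for the first input elements.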
- if self.cache: - _cache = [outputs] - - def _func(*vargs): - if _cache: - return _cache.pop() - else: - return func(*vargs) - else: - _func = func - - if isinstance(outputs, tuple): - nout = len(outputs) - else: - nout = 1 - outputs = (outputs,) - - otypes = ''.join([asarray(outputs[_k]).dtype.char - for _k in range(nout)]) - - # Performance note: profiling indicates that creating the ufunc is - # not a significant cost compared with wrapping so it seems not - # worth trying to cache this. - ufunc = frompyfunc(_func, len(args), nout) - - return ufunc, otypes - - def _vectorize_call(self, func, args): - """Vectorized call to `func` over positional `args`.""" - if not args: - _res = func() - else: - ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) - - # Convert args to object arrays first - inputs = [array(_a, copy=False, subok=True, dtype=object) - for _a in args] - - outputs = ufunc(*inputs) - - if ufunc.nout == 1: - _res = array(outputs, - copy=False, subok=True, dtype=otypes[0]) - else: - _res = tuple([array(_x, copy=False, subok=True, dtype=_t) - for _x, _t in zip(outputs, otypes)]) - return _res - - -def cov(m, y=None, rowvar=1, bias=0, ddof=None): - """ - Estimate a covariance matrix, given data. - - Covariance indicates the level to which two variables vary together. - If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, - then the covariance matrix element :math:`C_{ij}` is the covariance of - :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance - of :math:`x_i`. - - Parameters - ---------- - m : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `m` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - form as that of `m`. - rowvar : int, optional - If `rowvar` is non-zero (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : int, optional - Default normalization is by ``(N - 1)``, where ``N`` is the number of - observations given (unbiased estimate). If `bias` is 1, then - normalization is by ``N``. These values can be overridden by using - the keyword ``ddof`` in numpy versions >= 1.5. - ddof : int, optional - .. versionadded:: 1.5 - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - Returns - ------- - out : ndarray - The covariance matrix of the variables. - - See Also - -------- - corrcoef : Normalized covariance matrix - - Examples - -------- - Consider two variables, :math:`x_0` and :math:`x_1`, which - correlate perfectly, but in opposite directions: - - >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T - >>> x - array([[0, 1, 2], - [2, 1, 0]]) - - Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance - matrix shows this clearly: - - >>> np.cov(x) - array([[ 1., -1.], - [-1., 1.]]) - - Note that element :math:`C_{0,1}`, which shows the correlation between - :math:`x_0` and :math:`x_1`, is negative. 
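
    With ``bias=1`` the normalization is by ``N`` instead of ``N - 1``;
    as a quick check of the divisor, using the same ``x`` as above:

    >>> np.cov(x, bias=1)
    array([[ 0.66666667, -0.66666667],
           [-0.66666667,  0.66666667]])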
- - Further, note how `x` and `y` are combined: - - >>> x = [-2.1, -1, 4.3] - >>> y = [3, 1.1, 0.12] - >>> X = np.vstack((x,y)) - >>> print np.cov(X) - [[ 11.71 -4.286 ] - [ -4.286 2.14413333]] - >>> print np.cov(x, y) - [[ 11.71 -4.286 ] - [ -4.286 2.14413333]] - >>> print np.cov(x) - 11.71 - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError( - "ddof must be integer") - - # Handles complex arrays too - m = np.asarray(m) - if y is None: - dtype = np.result_type(m, np.float64) - else: - y = np.asarray(y) - dtype = np.result_type(m, y, np.float64) - X = array(m, ndmin=2, dtype=dtype) - - if X.shape[0] == 1: - rowvar = 1 - if rowvar: - N = X.shape[1] - axis = 0 - else: - N = X.shape[0] - axis = 1 - - # check ddof - if ddof is None: - if bias == 0: - ddof = 1 - else: - ddof = 0 - fact = float(N - ddof) - if fact <= 0: - warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) - fact = 0.0 - - if y is not None: - y = array(y, copy=False, ndmin=2, dtype=dtype) - X = concatenate((X, y), axis) - - X -= X.mean(axis=1-axis, keepdims=True) - if not rowvar: - return (dot(X.T, X.conj()) / fact).squeeze() - else: - return (dot(X, X.T.conj()) / fact).squeeze() - - -def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None): - """ - Return correlation coefficients. - - Please refer to the documentation for `cov` for more detail. The - relationship between the correlation coefficient matrix, `P`, and the - covariance matrix, `C`, is - - .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } - - The values of `P` are between -1 and 1, inclusive. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `m` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - shape as `m`. - rowvar : int, optional - If `rowvar` is non-zero (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : int, optional - Default normalization is by ``(N - 1)``, where ``N`` is the number of - observations (unbiased estimate). If `bias` is 1, then - normalization is by ``N``. These values can be overridden by using - the keyword ``ddof`` in numpy versions >= 1.5. - ddof : {None, int}, optional - .. versionadded:: 1.5 - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - Returns - ------- - out : ndarray - The correlation coefficient matrix of the variables. - - See Also - -------- - cov : Covariance matrix - - """ - c = cov(x, y, rowvar, bias, ddof) - try: - d = diag(c) - except ValueError: # scalar covariance - # nan if incorrect value (nan, inf, 0), 1 otherwise - return c / c - return c / sqrt(multiply.outer(d, d)) - - -def blackman(M): - """ - Return the Blackman window. - - The Blackman window is a taper formed by using the first three - terms of a summation of cosines. It was designed to have close to the - minimal leakage possible. It is close to optimal, only slightly worse - than a Kaiser window. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an empty - array is returned. 
- - Returns - ------- - out : ndarray - The window, with the maximum value normalized to one (the value one - appears only if the number of samples is odd). - - See Also - -------- - bartlett, hamming, hanning, kaiser - - Notes - ----- - The Blackman window is defined as - - .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) - - Most references to the Blackman window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. It is known as a - "near optimal" tapering function, almost as good (by some measures) - as the kaiser window. - - References - ---------- - Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, - Dover Publications, New York. - - Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. - Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. - - Examples - -------- - >>> np.blackman(12) - array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, - 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, - 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, - 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) - - - Plot the window and the frequency response: - - >>> from numpy.fft import fft, fftshift - >>> window = np.blackman(51) - >>> plt.plot(window) - [] - >>> plt.title("Blackman window") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("Sample") - - >>> plt.show() - - >>> plt.figure() - - >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> response = 20 * np.log10(mag) - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Blackman window") - - >>> plt.ylabel("Magnitude [dB]") - - >>> plt.xlabel("Normalized frequency [cycles per sample]") - - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) - - -def bartlett(M): - """ - Return the Bartlett window. - - The Bartlett window is very similar to a triangular window, except - that the end points are at zero. It is often used in signal - processing for tapering a signal, without generating too much - ripple in the frequency domain. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : array - The triangular window, with the maximum value normalized to one - (the value one appears only if the number of samples is odd), with - the first and last samples equal to zero. - - See Also - -------- - blackman, hamming, hanning, kaiser - - Notes - ----- - The Bartlett window is defined as - - .. math:: w(n) = \\frac{2}{M-1} \\left( - \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| - \\right) - - Most references to the Bartlett window come from the signal - processing literature, where it is used as one of many windowing - functions for smoothing values. Note that convolution with this - window produces linear interpolation. It is also known as an - apodization (which means"removing the foot", i.e. smoothing - discontinuities at the beginning and end of the sampled signal) or - tapering function. 
The Fourier transform of the Bartlett is the product
-    of two sinc functions.
-    Note the excellent discussion in Kanasewich.
-
-    References
-    ----------
-    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
-           Biometrika 37, 1-16, 1950.
-    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
-           The University of Alberta Press, 1975, pp. 109-110.
-    .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
-           Processing", Prentice-Hall, 1999, pp. 468-471.
-    .. [4] Wikipedia, "Window function",
-           http://en.wikipedia.org/wiki/Window_function
-    .. [5] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
-           "Numerical Recipes", Cambridge University Press, 1986, page 429.
-
-
-    Examples
-    --------
-    >>> np.bartlett(12)
-    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  0.72727273,
-            0.90909091,  0.90909091,  0.72727273,  0.54545455,  0.36363636,
-            0.18181818,  0.        ])
-
-    Plot the window and its frequency response (requires SciPy and matplotlib):
-
-    >>> from numpy.fft import fft, fftshift
-    >>> window = np.bartlett(51)
-    >>> plt.plot(window)
-    [<matplotlib.lines.Line2D object at 0x...>]
-    >>> plt.title("Bartlett window")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.ylabel("Amplitude")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.xlabel("Sample")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.show()
-
-    >>> plt.figure()
-    <matplotlib.figure.Figure object at 0x...>
-    >>> A = fft(window, 2048) / 25.5
-    >>> mag = np.abs(fftshift(A))
-    >>> freq = np.linspace(-0.5, 0.5, len(A))
-    >>> response = 20 * np.log10(mag)
-    >>> response = np.clip(response, -100, 100)
-    >>> plt.plot(freq, response)
-    [<matplotlib.lines.Line2D object at 0x...>]
-    >>> plt.title("Frequency response of Bartlett window")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.ylabel("Magnitude [dB]")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.xlabel("Normalized frequency [cycles per sample]")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.axis('tight')
-    (-0.5, 0.5, -100.0, ...)
-    >>> plt.show()
-
-    """
-    if M < 1:
-        return array([])
-    if M == 1:
-        return ones(1, float)
-    n = arange(0, M)
-    return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
-
-
-def hanning(M):
-    """
-    Return the Hanning window.
-
-    The Hanning window is a taper formed by using a weighted cosine.
-
-    Parameters
-    ----------
-    M : int
-        Number of points in the output window. If zero or less, an
-        empty array is returned.
-
-    Returns
-    -------
-    out : ndarray, shape(M,)
-        The window, with the maximum value normalized to one (the value
-        one appears only if `M` is odd).
-
-    See Also
-    --------
-    bartlett, blackman, hamming, kaiser
-
-    Notes
-    -----
-    The Hanning window is defined as
-
-    .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
-               \\qquad 0 \\leq n \\leq M-1
-
-    The Hanning was named for Julius von Hann, an Austrian meteorologist.
-    It is also known as the Cosine Bell. Some authors prefer that it be
-    called a Hann window, to help avoid confusion with the very similar
-    Hamming window.
-
-    Most references to the Hanning window come from the signal processing
-    literature, where it is used as one of many windowing functions for
-    smoothing values. It is also known as an apodization (which means
-    "removing the foot", i.e. smoothing discontinuities at the beginning
-    and end of the sampled signal) or tapering function.
-
-    References
-    ----------
-    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
-           spectra, Dover Publications, New York.
-    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
-           The University of Alberta Press, 1975, pp. 106-108.
-    .. [3] Wikipedia, "Window function",
-           http://en.wikipedia.org/wiki/Window_function
-    .. [4] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
-           "Numerical Recipes", Cambridge University Press, 1986, page 425.
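-
-    As a quick sanity check (a sketch, not part of the original examples),
-    the window can be reproduced directly from the formula above:
-
-    >>> M = 12
-    >>> n = np.arange(M)
-    >>> np.allclose(np.hanning(M), 0.5 - 0.5*np.cos(2*np.pi*n/(M-1)))
-    True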
- - Examples - -------- - >>> np.hanning(12) - array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, - 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, - 0.07937323, 0. ]) - - Plot the window and its frequency response: - - >>> from numpy.fft import fft, fftshift - >>> window = np.hanning(51) - >>> plt.plot(window) - [] - >>> plt.title("Hann window") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("Sample") - - >>> plt.show() - - >>> plt.figure() - - >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> response = 20 * np.log10(mag) - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of the Hann window") - - >>> plt.ylabel("Magnitude [dB]") - - >>> plt.xlabel("Normalized frequency [cycles per sample]") - - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) - - -def hamming(M): - """ - Return the Hamming window. - - The Hamming window is a taper formed by using a weighted cosine. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - - Returns - ------- - out : ndarray - The window, with the maximum value normalized to one (the value - one appears only if the number of samples is odd). - - See Also - -------- - bartlett, blackman, hanning, kaiser - - Notes - ----- - The Hamming window is defined as - - .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) - \\qquad 0 \\leq n \\leq M-1 - - The Hamming was named for R. W. Hamming, an associate of J. W. Tukey - and is described in Blackman and Tukey. It was recommended for - smoothing the truncated autocovariance function in the time domain. - Most references to the Hamming window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. smoothing discontinuities at the beginning - and end of the sampled signal) or tapering function. - - References - ---------- - .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power - spectra, Dover Publications, New York. - .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The - University of Alberta Press, 1975, pp. 109-110. - .. [3] Wikipedia, "Window function", - http://en.wikipedia.org/wiki/Window_function - .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, - "Numerical Recipes", Cambridge University Press, 1986, page 425. 
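-
-    Unlike the Hann window, the endpoints do not reach zero: from the
-    formula above, ``w(0) = w(M-1) = 0.54 - 0.46 = 0.08``. A small worked
-    example (a sketch; repr spacing may vary):
-
-    >>> np.hamming(5)
-    array([ 0.08,  0.54,  1.  ,  0.54,  0.08])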
- - Examples - -------- - >>> np.hamming(12) - array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, - 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, - 0.15302337, 0.08 ]) - - Plot the window and the frequency response: - - >>> from numpy.fft import fft, fftshift - >>> window = np.hamming(51) - >>> plt.plot(window) - [] - >>> plt.title("Hamming window") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("Sample") - - >>> plt.show() - - >>> plt.figure() - - >>> A = fft(window, 2048) / 25.5 - >>> mag = np.abs(fftshift(A)) - >>> freq = np.linspace(-0.5, 0.5, len(A)) - >>> response = 20 * np.log10(mag) - >>> response = np.clip(response, -100, 100) - >>> plt.plot(freq, response) - [] - >>> plt.title("Frequency response of Hamming window") - - >>> plt.ylabel("Magnitude [dB]") - - >>> plt.xlabel("Normalized frequency [cycles per sample]") - - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) - >>> plt.show() - - """ - if M < 1: - return array([]) - if M == 1: - return ones(1, float) - n = arange(0, M) - return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) - -## Code from cephes for i0 - -_i0A = [ - -4.41534164647933937950E-18, - 3.33079451882223809783E-17, - -2.43127984654795469359E-16, - 1.71539128555513303061E-15, - -1.16853328779934516808E-14, - 7.67618549860493561688E-14, - -4.85644678311192946090E-13, - 2.95505266312963983461E-12, - -1.72682629144155570723E-11, - 9.67580903537323691224E-11, - -5.18979560163526290666E-10, - 2.65982372468238665035E-9, - -1.30002500998624804212E-8, - 6.04699502254191894932E-8, - -2.67079385394061173391E-7, - 1.11738753912010371815E-6, - -4.41673835845875056359E-6, - 1.64484480707288970893E-5, - -5.75419501008210370398E-5, - 1.88502885095841655729E-4, - -5.76375574538582365885E-4, - 1.63947561694133579842E-3, - -4.32430999505057594430E-3, - 1.05464603945949983183E-2, - -2.37374148058994688156E-2, - 4.93052842396707084878E-2, - -9.49010970480476444210E-2, - 1.71620901522208775349E-1, - -3.04682672343198398683E-1, - 6.76795274409476084995E-1 - ] - -_i0B = [ - -7.23318048787475395456E-18, - -4.83050448594418207126E-18, - 4.46562142029675999901E-17, - 3.46122286769746109310E-17, - -2.82762398051658348494E-16, - -3.42548561967721913462E-16, - 1.77256013305652638360E-15, - 3.81168066935262242075E-15, - -9.55484669882830764870E-15, - -4.15056934728722208663E-14, - 1.54008621752140982691E-14, - 3.85277838274214270114E-13, - 7.18012445138366623367E-13, - -1.79417853150680611778E-12, - -1.32158118404477131188E-11, - -3.14991652796324136454E-11, - 1.18891471078464383424E-11, - 4.94060238822496958910E-10, - 3.39623202570838634515E-9, - 2.26666899049817806459E-8, - 2.04891858946906374183E-7, - 2.89137052083475648297E-6, - 6.88975834691682398426E-5, - 3.36911647825569408990E-3, - 8.04490411014108831608E-1 - ] - - -def _chbevl(x, vals): - b0 = vals[0] - b1 = 0.0 - - for i in range(1, len(vals)): - b2 = b1 - b1 = b0 - b0 = x*b1 - b2 + vals[i] - - return 0.5*(b0 - b2) - - -def _i0_1(x): - return exp(x) * _chbevl(x/2.0-2, _i0A) - - -def _i0_2(x): - return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) - - -def i0(x): - """ - Modified Bessel function of the first kind, order 0. - - Usually denoted :math:`I_0`. This function does broadcast, but will *not* - "up-cast" int dtype arguments unless accompanied by at least one float or - complex dtype argument (see Raises below). - - Parameters - ---------- - x : array_like, dtype float or complex - Argument of the Bessel function. 
- - Returns - ------- - out : ndarray, shape = x.shape, dtype = x.dtype - The modified Bessel function evaluated at each of the elements of `x`. - - Raises - ------ - TypeError: array cannot be safely cast to required type - If argument consists exclusively of int dtypes. - - See Also - -------- - scipy.special.iv, scipy.special.ive - - Notes - ----- - We use the algorithm published by Clenshaw [1]_ and referenced by - Abramowitz and Stegun [2]_, for which the function domain is - partitioned into the two intervals [0,8] and (8,inf), and Chebyshev - polynomial expansions are employed in each interval. Relative error on - the domain [0,30] using IEEE arithmetic is documented [3]_ as having a - peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). - - References - ---------- - .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in - *National Physical Laboratory Mathematical Tables*, vol. 5, London: - Her Majesty's Stationery Office, 1962. - .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical - Functions*, 10th printing, New York: Dover, 1964, pp. 379. - http://www.math.sfu.ca/~cbm/aands/page_379.htm - .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html - - Examples - -------- - >>> np.i0([0.]) - array(1.0) - >>> np.i0([0., 1. + 2j]) - array([ 1.00000000+0.j , 0.18785373+0.64616944j]) - - """ - x = atleast_1d(x).copy() - y = empty_like(x) - ind = (x < 0) - x[ind] = -x[ind] - ind = (x <= 8.0) - y[ind] = _i0_1(x[ind]) - ind2 = ~ind - y[ind2] = _i0_2(x[ind2]) - return y.squeeze() - -## End of cephes code for i0 - - -def kaiser(M, beta): - """ - Return the Kaiser window. - - The Kaiser window is a taper formed by using a Bessel function. - - Parameters - ---------- - M : int - Number of points in the output window. If zero or less, an - empty array is returned. - beta : float - Shape parameter for window. - - Returns - ------- - out : array - The window, with the maximum value normalized to one (the value - one appears only if the number of samples is odd). - - See Also - -------- - bartlett, blackman, hamming, hanning - - Notes - ----- - The Kaiser window is defined as - - .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} - \\right)/I_0(\\beta) - - with - - .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, - - where :math:`I_0` is the modified zeroth-order Bessel function. - - The Kaiser was named for Jim Kaiser, who discovered a simple - approximation to the DPSS window based on Bessel functions. The Kaiser - window is a very good approximation to the Digital Prolate Spheroidal - Sequence, or Slepian window, which is the transform which maximizes the - energy in the main lobe of the window relative to total energy. - - The Kaiser can approximate many other windows by varying the beta - parameter. - - ==== ======================= - beta Window shape - ==== ======================= - 0 Rectangular - 5 Similar to a Hamming - 6 Similar to a Hanning - 8.6 Similar to a Blackman - ==== ======================= - - A beta value of 14 is probably a good starting point. Note that as beta - gets large, the window narrows, and so the number of samples needs to be - large enough to sample the increasingly narrow spike, otherwise NaNs will - get returned. - - Most references to the Kaiser window come from the signal processing - literature, where it is used as one of many windowing functions for - smoothing values. It is also known as an apodization (which means - "removing the foot", i.e. 
smoothing discontinuities at the beginning
-    and end of the sampled signal) or tapering function.
-
-    References
-    ----------
-    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
-           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
-           John Wiley and Sons, New York, (1966).
-    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
-           University of Alberta Press, 1975, pp. 177-178.
-    .. [3] Wikipedia, "Window function",
-           http://en.wikipedia.org/wiki/Window_function
-
-    Examples
-    --------
-    >>> np.kaiser(12, 14)
-    array([  7.72686684e-06,   3.46009194e-03,   4.65200189e-02,
-             2.29737120e-01,   5.99885316e-01,   9.45674898e-01,
-             9.45674898e-01,   5.99885316e-01,   2.29737120e-01,
-             4.65200189e-02,   3.46009194e-03,   7.72686684e-06])
-
-
-    Plot the window and the frequency response:
-
-    >>> from numpy.fft import fft, fftshift
-    >>> window = np.kaiser(51, 14)
-    >>> plt.plot(window)
-    [<matplotlib.lines.Line2D object at 0x...>]
-    >>> plt.title("Kaiser window")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.ylabel("Amplitude")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.xlabel("Sample")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.show()
-
-    >>> plt.figure()
-    <matplotlib.figure.Figure object at 0x...>
-    >>> A = fft(window, 2048) / 25.5
-    >>> mag = np.abs(fftshift(A))
-    >>> freq = np.linspace(-0.5, 0.5, len(A))
-    >>> response = 20 * np.log10(mag)
-    >>> response = np.clip(response, -100, 100)
-    >>> plt.plot(freq, response)
-    [<matplotlib.lines.Line2D object at 0x...>]
-    >>> plt.title("Frequency response of Kaiser window")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.ylabel("Magnitude [dB]")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.xlabel("Normalized frequency [cycles per sample]")
-    <matplotlib.text.Text object at 0x...>
-    >>> plt.axis('tight')
-    (-0.5, 0.5, -100.0, ...)
-    >>> plt.show()
-
-    """
-    from numpy.dual import i0
-    if M == 1:
-        return np.array([1.])
-    n = arange(0, M)
-    alpha = (M-1)/2.0
-    return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
-
-
-def sinc(x):
-    """
-    Return the sinc function.
-
-    The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
-
-    Parameters
-    ----------
-    x : ndarray
-        Array (possibly multi-dimensional) of values for which to
-        calculate ``sinc(x)``.
-
-    Returns
-    -------
-    out : ndarray
-        ``sinc(x)``, which has the same shape as the input.
-
-    Notes
-    -----
-    ``sinc(0)`` is the limit value 1.
-
-    The name sinc is short for "sine cardinal" or "sinus cardinalis".
-
-    The sinc function is used in various signal processing applications,
-    including in anti-aliasing, in the construction of a Lanczos resampling
-    filter, and in interpolation.
-
-    For bandlimited interpolation of discrete-time signals, the ideal
-    interpolation kernel is proportional to the sinc function.
-
-    References
-    ----------
-    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
-           Resource. http://mathworld.wolfram.com/SincFunction.html
-    ..
[2] Wikipedia, "Sinc function", - http://en.wikipedia.org/wiki/Sinc_function - - Examples - -------- - >>> x = np.linspace(-4, 4, 41) - >>> np.sinc(x) - array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, - -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, - 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, - 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, - -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, - 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, - 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, - 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, - 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, - -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, - -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, - 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, - -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, - -4.92362781e-02, -3.89804309e-17]) - - >>> plt.plot(x, np.sinc(x)) - [] - >>> plt.title("Sinc Function") - - >>> plt.ylabel("Amplitude") - - >>> plt.xlabel("X") - - >>> plt.show() - - It works in 2-D as well: - - >>> x = np.linspace(-4, 4, 401) - >>> xx = np.outer(x, x) - >>> plt.imshow(np.sinc(xx)) - - - """ - x = np.asanyarray(x) - y = pi * where(x == 0, 1.0e-20, x) - return sin(y)/y - - -def msort(a): - """ - Return a copy of an array sorted along the first axis. - - Parameters - ---------- - a : array_like - Array to be sorted. - - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - - See Also - -------- - sort - - Notes - ----- - ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. - - """ - b = array(a, subok=True, copy=True) - b.sort(0) - return b - - -def _ureduce(a, func, **kwargs): - """ - Internal Function. - Call `func` with `a` as first argument swapping the axes to use extended - axis on functions that don't support it natively. - - Returns result and a.shape with axis dims set to 1. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - func : callable - Reduction function Kapable of receiving an axis argument. - It is is called with `a` as first argument followed by `kwargs`. - kwargs : keyword arguments - additional keyword arguments to pass to `func`. - - Returns - ------- - result : tuple - Result of func(a, **kwargs) and a.shape with axis dims set to 1 - which can be used to reshape the result to the same shape a ufunc with - keepdims=True would produce. - - """ - a = np.asanyarray(a) - axis = kwargs.get('axis', None) - if axis is not None: - keepdim = list(a.shape) - nd = a.ndim - try: - axis = operator.index(axis) - if axis >= nd or axis < -nd: - raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim)) - keepdim[axis] = 1 - except TypeError: - sax = set() - for x in axis: - if x >= nd or x < -nd: - raise IndexError("axis %d out of bounds (%d)" % (x, nd)) - if x in sax: - raise ValueError("duplicate value in axis") - sax.add(x % nd) - keepdim[x] = 1 - keep = sax.symmetric_difference(frozenset(range(nd))) - nkeep = len(keep) - # swap axis that should not be reduced to front - for i, s in enumerate(sorted(keep)): - a = a.swapaxes(i, s) - # merge reduced axis - a = a.reshape(a.shape[:nkeep] + (-1,)) - kwargs['axis'] = -1 - else: - keepdim = [1] * a.ndim - - r = func(a, **kwargs) - return r, keepdim - - -def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): - """ - Compute the median along the specified axis. - - Returns the median of the array elements. 
- - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : int or sequence of int, optional - Axis along which the medians are computed. The default (axis=None) - is to compute the median along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape and buffer length as the expected output, but the - type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array (a) for - calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve the - contents of the input array. Treat the input as undefined, but it - will probably be fully or partially sorted. Default is False. Note - that, if `overwrite_input` is True and the input is not already an - ndarray, an error will be raised. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - .. versionadded:: 1.9.0 - - - Returns - ------- - median : ndarray - A new array holding the result (unless `out` is specified, in which - case that array is returned instead). If the input contains - integers, or floats of smaller precision than 64, then the output - data-type is float64. Otherwise, the output data-type is the same - as that of the input. - - See Also - -------- - mean, percentile - - Notes - ----- - Given a vector V of length N, the median of V is the middle value of - a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is - odd. When N is even, it is the average of the two middle values of - ``V_sorted``. 
- - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.median(a) - 3.5 - >>> np.median(a, axis=0) - array([ 6.5, 4.5, 2.5]) - >>> np.median(a, axis=1) - array([ 7., 2.]) - >>> m = np.median(a, axis=0) - >>> out = np.zeros_like(m) - >>> np.median(a, axis=0, out=m) - array([ 6.5, 4.5, 2.5]) - >>> m - array([ 6.5, 4.5, 2.5]) - >>> b = a.copy() - >>> np.median(b, axis=1, overwrite_input=True) - array([ 7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.median(b, axis=None, overwrite_input=True) - 3.5 - >>> assert not np.all(a==b) - - """ - r, k = _ureduce(a, func=_median, axis=axis, out=out, - overwrite_input=overwrite_input) - if keepdims: - return r.reshape(k) - else: - return r - -def _median(a, axis=None, out=None, overwrite_input=False): - # can't be reasonably be implemented in terms of percentile as we have to - # call mean to not break astropy - a = np.asanyarray(a) - if axis is not None and axis >= a.ndim: - raise IndexError( - "axis %d out of bounds (%d)" % (axis, a.ndim)) - - if overwrite_input: - if axis is None: - part = a.ravel() - sz = part.size - if sz % 2 == 0: - szh = sz // 2 - part.partition((szh - 1, szh)) - else: - part.partition((sz - 1) // 2) - else: - sz = a.shape[axis] - if sz % 2 == 0: - szh = sz // 2 - a.partition((szh - 1, szh), axis=axis) - else: - a.partition((sz - 1) // 2, axis=axis) - part = a - else: - if axis is None: - sz = a.size - else: - sz = a.shape[axis] - if sz % 2 == 0: - part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis) - else: - part = partition(a, (sz - 1) // 2, axis=axis) - if part.shape == (): - # make 0-D arrays work - return part.item() - if axis is None: - axis = 0 - indexer = [slice(None)] * part.ndim - index = part.shape[axis] // 2 - if part.shape[axis] % 2 == 1: - # index with slice to allow mean (below) to work - indexer[axis] = slice(index, index+1) - else: - indexer[axis] = slice(index-1, index+1) - # Use mean in odd and even case to coerce data type - # and check, use out array. - return mean(part[indexer], axis=axis, out=out) - - -def percentile(a, q, axis=None, out=None, - overwrite_input=False, interpolation='linear', keepdims=False): - """ - Compute the qth percentile of the data along the specified axis. - - Returns the qth percentile of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - q : float in range of [0,100] (or sequence of floats) - Percentile to compute which must be between 0 and 100 inclusive. - axis : int or sequence of int, optional - Axis along which the percentiles are computed. The default (None) - is to compute the percentiles along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array `a` for - calculations. The input array will be modified by the call to - percentile. This will save memory when you do not need to preserve - the contents of the input array. In this case you should not make - any assumptions about the content of the passed in array `a` after - this function completes -- treat it as undefined. Default is False. 
- Note that, if the `a` input is not already an array this parameter - will have no effect, `a` will be converted to an array internally - regardless of the value of this parameter. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to use, - when the desired quantile lies between two data points `i` and `j`: - * linear: `i + (j - i) * fraction`, where `fraction` is the - fractional part of the index surrounded by `i` and `j`. - * lower: `i`. - * higher: `j`. - * nearest: `i` or `j` whichever is nearest. - * midpoint: (`i` + `j`) / 2. - - .. versionadded:: 1.9.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - .. versionadded:: 1.9.0 - - Returns - ------- - percentile : scalar or ndarray - If a single percentile `q` is given and axis=None a scalar is - returned. If multiple percentiles `q` are given an array holding - the result is returned. The results are listed in the first axis. - (If `out` is specified, in which case that array is returned - instead). If the input contains integers, or floats of smaller - precision than 64, then the output data-type is float64. Otherwise, - the output data-type is the same as that of the input. - - See Also - -------- - mean, median - - Notes - ----- - Given a vector V of length N, the q-th percentile of V is the q-th ranked - value in a sorted copy of V. The values and distances of the two - nearest neighbors as well as the `interpolation` parameter will - determine the percentile if the normalized ranking does not match q - exactly. This function is the same as the median if ``q=50``, the same - as the minimum if ``q=0`` and the same as the maximum if ``q=100``. - - Examples - -------- - >>> a = np.array([[10, 7, 4], [3, 2, 1]]) - >>> a - array([[10, 7, 4], - [ 3, 2, 1]]) - >>> np.percentile(a, 50) - array([ 3.5]) - >>> np.percentile(a, 50, axis=0) - array([[ 6.5, 4.5, 2.5]]) - >>> np.percentile(a, 50, axis=1) - array([[ 7.], - [ 2.]]) - - >>> m = np.percentile(a, 50, axis=0) - >>> out = np.zeros_like(m) - >>> np.percentile(a, 50, axis=0, out=m) - array([[ 6.5, 4.5, 2.5]]) - >>> m - array([[ 6.5, 4.5, 2.5]]) - - >>> b = a.copy() - >>> np.percentile(b, 50, axis=1, overwrite_input=True) - array([[ 7.], - [ 2.]]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.percentile(b, 50, axis=None, overwrite_input=True) - array([ 3.5]) - - """ - q = array(q, dtype=np.float64, copy=True) - r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, - overwrite_input=overwrite_input, - interpolation=interpolation) - if keepdims: - if q.ndim == 0: - return r.reshape(k) - else: - return r.reshape([len(q)] + k) - else: - return r - - -def _percentile(a, q, axis=None, out=None, - overwrite_input=False, interpolation='linear', keepdims=False): - a = asarray(a) - if q.ndim == 0: - # Do not allow 0-d arrays because following code fails for scalar - zerod = True - q = q[None] - else: - zerod = False - - # avoid expensive reductions, relevant for arrays with < O(1000) elements - if q.size < 10: - for i in range(q.size): - if q[i] < 0. or q[i] > 100.: - raise ValueError("Percentiles must be in the range [0,100]") - q[i] /= 100. - else: - # faster than any() - if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): - raise ValueError("Percentiles must be in the range [0,100]") - q /= 100. 
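-
-    # At this point q has been validated and rescaled from [0, 100] to
-    # [0, 1]; the fractional indices q*(Nx - 1) computed below select
-    # (and, for 'linear' and 'midpoint', interpolate between) the order
-    # statistics of the partitioned array.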
-
-    # prepare a for partitioning
-    if overwrite_input:
-        if axis is None:
-            ap = a.ravel()
-        else:
-            ap = a
-    else:
-        if axis is None:
-            ap = a.flatten()
-        else:
-            ap = a.copy()
-
-    if axis is None:
-        axis = 0
-
-    Nx = ap.shape[axis]
-    indices = q * (Nx - 1)
-
-    # round fractional indices according to interpolation method
-    if interpolation == 'lower':
-        indices = floor(indices).astype(intp)
-    elif interpolation == 'higher':
-        indices = ceil(indices).astype(intp)
-    elif interpolation == 'midpoint':
-        indices = floor(indices) + 0.5
-    elif interpolation == 'nearest':
-        indices = around(indices).astype(intp)
-    elif interpolation == 'linear':
-        pass  # keep index as fraction and interpolate
-    else:
-        raise ValueError(
-            "interpolation can only be 'linear', 'lower', 'higher', "
-            "'midpoint', or 'nearest'")
-
-    if indices.dtype == intp:  # take the points along axis
-        ap.partition(indices, axis=axis)
-        # ensure axis with qth is first
-        ap = np.rollaxis(ap, axis, 0)
-        axis = 0
-
-        if zerod:
-            indices = indices[0]
-        r = take(ap, indices, axis=axis, out=out)
-    else:  # weight the points above and below the indices
-        indices_below = floor(indices).astype(intp)
-        indices_above = indices_below + 1
-        indices_above[indices_above > Nx - 1] = Nx - 1
-
-        weights_above = indices - indices_below
-        weights_below = 1.0 - weights_above
-
-        weights_shape = [1, ] * ap.ndim
-        weights_shape[axis] = len(indices)
-        weights_below.shape = weights_shape
-        weights_above.shape = weights_shape
-
-        ap.partition(concatenate((indices_below, indices_above)), axis=axis)
-        x1 = take(ap, indices_below, axis=axis) * weights_below
-        x2 = take(ap, indices_above, axis=axis) * weights_above
-
-        # ensure axis with qth is first
-        x1 = np.rollaxis(x1, axis, 0)
-        x2 = np.rollaxis(x2, axis, 0)
-
-        if zerod:
-            x1 = x1.squeeze(0)
-            x2 = x2.squeeze(0)
-
-        if out is not None:
-            r = add(x1, x2, out=out)
-        else:
-            r = add(x1, x2)
-
-    return r
-
-
-def trapz(y, x=None, dx=1.0, axis=-1):
-    """
-    Integrate along the given axis using the composite trapezoidal rule.
-
-    Integrate `y` (`x`) along the given axis.
-
-    Parameters
-    ----------
-    y : array_like
-        Input array to integrate.
-    x : array_like, optional
-        If `x` is None, then spacing between all `y` elements is `dx`.
-    dx : scalar, optional
-        If `x` is None, spacing given by `dx` is assumed. Default is 1.
-    axis : int, optional
-        Specify the axis.
-
-    Returns
-    -------
-    trapz : float
-        Definite integral as approximated by the trapezoidal rule.
-
-    See Also
-    --------
-    sum, cumsum
-
-    Notes
-    -----
-    Image [2]_ illustrates the trapezoidal rule: the y-axis locations of the
-    points are taken from the `y` array; by default the x-axis distances
-    between points are 1.0, but they can instead be supplied via the `x`
-    array or the `dx` scalar. The return value equals the combined area
-    under the red lines.
-
-
-    References
-    ----------
-    .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
-
-    ..
[2] Illustration image: - http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png - - Examples - -------- - >>> np.trapz([1,2,3]) - 4.0 - >>> np.trapz([1,2,3], x=[4,6,8]) - 8.0 - >>> np.trapz([1,2,3], dx=2) - 8.0 - >>> a = np.arange(6).reshape(2, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> np.trapz(a, axis=0) - array([ 1.5, 2.5, 3.5]) - >>> np.trapz(a, axis=1) - array([ 2., 8.]) - - """ - y = asanyarray(y) - if x is None: - d = dx - else: - x = asanyarray(x) - if x.ndim == 1: - d = diff(x) - # reshape to correct shape - shape = [1]*y.ndim - shape[axis] = d.shape[0] - d = d.reshape(shape) - else: - d = diff(x, axis=axis) - nd = len(y.shape) - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd - slice1[axis] = slice(1, None) - slice2[axis] = slice(None, -1) - try: - ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) - except ValueError: - # Operations didn't work, cast to ndarray - d = np.asarray(d) - y = np.asarray(y) - ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) - return ret - - -#always succeed -def add_newdoc(place, obj, doc): - """Adds documentation to obj which is in module place. - - If doc is a string add it to obj as a docstring - - If doc is a tuple, then the first element is interpreted as - an attribute of obj and the second as the docstring - (method, docstring) - - If doc is a list, then each element of the list should be a - sequence of length two --> [(method1, docstring1), - (method2, docstring2), ...] - - This routine never raises an error. - - This routine cannot modify read-only docstrings, as appear - in new-style classes or built-in functions. Because this - routine never raises an error the caller must check manually - that the docstrings were changed. - """ - try: - new = getattr(__import__(place, globals(), {}, [obj]), obj) - if isinstance(doc, str): - add_docstring(new, doc.strip()) - elif isinstance(doc, tuple): - add_docstring(getattr(new, doc[0]), doc[1].strip()) - elif isinstance(doc, list): - for val in doc: - add_docstring(getattr(new, val[0]), val[1].strip()) - except: - pass - - -# Based on scitools meshgrid -def meshgrid(*xi, **kwargs): - """ - Return coordinate matrices from coordinate vectors. - - Make N-D coordinate arrays for vectorized evaluations of - N-D scalar/vector fields over N-D grids, given - one-dimensional coordinate arrays x1, x2,..., xn. - - .. versionchanged:: 1.9 - 1-D and 0-D cases are allowed. - - Parameters - ---------- - x1, x2,..., xn : array_like - 1-D arrays representing the coordinates of a grid. - indexing : {'xy', 'ij'}, optional - Cartesian ('xy', default) or matrix ('ij') indexing of output. - See Notes for more details. - - .. versionadded:: 1.7.0 - sparse : bool, optional - If True a sparse grid is returned in order to conserve memory. - Default is False. - - .. versionadded:: 1.7.0 - copy : bool, optional - If False, a view into the original arrays are returned in order to - conserve memory. Default is True. Please note that - ``sparse=False, copy=False`` will likely return non-contiguous - arrays. Furthermore, more than one element of a broadcast array - may refer to a single memory location. If you need to write to the - arrays, make copies first. - - .. 
versionadded:: 1.7.0 - - Returns - ------- - X1, X2,..., XN : ndarray - For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , - return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' - or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' - with the elements of `xi` repeated to fill the matrix along - the first dimension for `x1`, the second for `x2` and so on. - - Notes - ----- - This function supports both indexing conventions through the indexing - keyword argument. Giving the string 'ij' returns a meshgrid with - matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. - In the 2-D case with inputs of length M and N, the outputs are of shape - (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case - with inputs of length M, N and P, outputs are of shape (N, M, P) for - 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is - illustrated by the following code snippet:: - - xv, yv = meshgrid(x, y, sparse=False, indexing='ij') - for i in range(nx): - for j in range(ny): - # treat xv[i,j], yv[i,j] - - xv, yv = meshgrid(x, y, sparse=False, indexing='xy') - for i in range(nx): - for j in range(ny): - # treat xv[j,i], yv[j,i] - - In the 1-D and 0-D case, the indexing and sparse keywords have no effect. - - See Also - -------- - index_tricks.mgrid : Construct a multi-dimensional "meshgrid" - using indexing notation. - index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" - using indexing notation. - - Examples - -------- - >>> nx, ny = (3, 2) - >>> x = np.linspace(0, 1, nx) - >>> y = np.linspace(0, 1, ny) - >>> xv, yv = meshgrid(x, y) - >>> xv - array([[ 0. , 0.5, 1. ], - [ 0. , 0.5, 1. ]]) - >>> yv - array([[ 0., 0., 0.], - [ 1., 1., 1.]]) - >>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays - >>> xv - array([[ 0. , 0.5, 1. ]]) - >>> yv - array([[ 0.], - [ 1.]]) - - `meshgrid` is very useful to evaluate functions on a grid. - - >>> x = np.arange(-5, 5, 0.1) - >>> y = np.arange(-5, 5, 0.1) - >>> xx, yy = meshgrid(x, y, sparse=True) - >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) - >>> h = plt.contourf(x,y,z) - - """ - ndim = len(xi) - - copy_ = kwargs.pop('copy', True) - sparse = kwargs.pop('sparse', False) - indexing = kwargs.pop('indexing', 'xy') - - if kwargs: - raise TypeError("meshgrid() got an unexpected keyword argument '%s'" - % (list(kwargs)[0],)) - - if indexing not in ['xy', 'ij']: - raise ValueError( - "Valid values for `indexing` are 'xy' and 'ij'.") - - s0 = (1,) * ndim - output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::]) - for i, x in enumerate(xi)] - - shape = [x.size for x in output] - - if indexing == 'xy' and ndim > 1: - # switch first and second axis - output[0].shape = (1, -1) + (1,)*(ndim - 2) - output[1].shape = (-1, 1) + (1,)*(ndim - 2) - shape[0], shape[1] = shape[1], shape[0] - - if sparse: - if copy_: - return [x.copy() for x in output] - else: - return output - else: - # Return the full N-D matrix (not only the 1-D vector) - if copy_: - mult_fact = np.ones(shape, dtype=int) - return [x * mult_fact for x in output] - else: - return np.broadcast_arrays(*output) - - -def delete(arr, obj, axis=None): - """ - Return a new array with sub-arrays along an axis deleted. For a one - dimensional array, this returns those entries not returned by - `arr[obj]`. - - Parameters - ---------- - arr : array_like - Input array. - obj : slice, int or array of ints - Indicate which sub-arrays to remove. 
- axis : int, optional - The axis along which to delete the subarray defined by `obj`. - If `axis` is None, `obj` is applied to the flattened array. - - Returns - ------- - out : ndarray - A copy of `arr` with the elements specified by `obj` removed. Note - that `delete` does not occur in-place. If `axis` is None, `out` is - a flattened array. - - See Also - -------- - insert : Insert elements into an array. - append : Append elements at the end of an array. - - Notes - ----- - Often it is preferable to use a boolean mask. For example: - - >>> mask = np.ones(len(arr), dtype=bool) - >>> mask[[0,2,4]] = False - >>> result = arr[mask,...] - - Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further - use of `mask`. - - Examples - -------- - >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) - >>> arr - array([[ 1, 2, 3, 4], - [ 5, 6, 7, 8], - [ 9, 10, 11, 12]]) - >>> np.delete(arr, 1, 0) - array([[ 1, 2, 3, 4], - [ 9, 10, 11, 12]]) - - >>> np.delete(arr, np.s_[::2], 1) - array([[ 2, 4], - [ 6, 8], - [10, 12]]) - >>> np.delete(arr, [1,3,5], None) - array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) - - """ - wrap = None - if type(arr) is not ndarray: - try: - wrap = arr.__array_wrap__ - except AttributeError: - pass - - arr = asarray(arr) - ndim = arr.ndim - if axis is None: - if ndim != 1: - arr = arr.ravel() - ndim = arr.ndim - axis = ndim - 1 - if ndim == 0: - warnings.warn( - "in the future the special handling of scalars will be removed " - "from delete and raise an error", DeprecationWarning) - if wrap: - return wrap(arr) - else: - return arr.copy() - - slobj = [slice(None)]*ndim - N = arr.shape[axis] - newshape = list(arr.shape) - - if isinstance(obj, slice): - start, stop, step = obj.indices(N) - xr = range(start, stop, step) - numtodel = len(xr) - - if numtodel <= 0: - if wrap: - return wrap(arr.copy()) - else: - return arr.copy() - - # Invert if step is negative: - if step < 0: - step = -step - start = xr[-1] - stop = xr[0] + 1 - - newshape[axis] -= numtodel - new = empty(newshape, arr.dtype, arr.flags.fnc) - # copy initial chunk - if start == 0: - pass - else: - slobj[axis] = slice(None, start) - new[slobj] = arr[slobj] - # copy end chunck - if stop == N: - pass - else: - slobj[axis] = slice(stop-numtodel, None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(stop, None) - new[slobj] = arr[slobj2] - # copy middle pieces - if step == 1: - pass - else: # use array indexing. - keep = ones(stop-start, dtype=bool) - keep[:stop-start:step] = False - slobj[axis] = slice(start, stop-numtodel) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(start, stop) - arr = arr[slobj2] - slobj2[axis] = keep - new[slobj] = arr[slobj2] - if wrap: - return wrap(new) - else: - return new - - _obj = obj - obj = np.asarray(obj) - # After removing the special handling of booleans and out of - # bounds values, the conversion to the array can be removed. 
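-    # (What follows: a boolean obj is cast to integer indices with a
-    # FutureWarning; a single scalar index takes an optimized slicing
-    # path; otherwise out-of-bounds and negative entries are filtered
-    # out with warnings before building the boolean keep mask.)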
-    if obj.dtype == bool:
-        warnings.warn(
-            "in the future delete will treat boolean arrays and array-likes "
-            "as boolean index instead of casting it to integer", FutureWarning)
-        obj = obj.astype(intp)
-    if isinstance(_obj, (int, long, integer)):
-        # optimization for a single value
-        obj = obj.item()
-        if (obj < -N or obj >= N):
-            raise IndexError(
-                "index %i is out of bounds for axis %i with "
-                "size %i" % (obj, axis, N))
-        if (obj < 0):
-            obj += N
-        newshape[axis] -= 1
-        new = empty(newshape, arr.dtype, arr.flags.fnc)
-        slobj[axis] = slice(None, obj)
-        new[slobj] = arr[slobj]
-        slobj[axis] = slice(obj, None)
-        slobj2 = [slice(None)]*ndim
-        slobj2[axis] = slice(obj+1, None)
-        new[slobj] = arr[slobj2]
-    else:
-        if obj.size == 0 and not isinstance(_obj, np.ndarray):
-            obj = obj.astype(intp)
-        if not np.can_cast(obj, intp, 'same_kind'):
-            # obj.size = 1 special case always failed and would just
-            # give superfluous warnings.
-            warnings.warn(
-                "using a non-integer array as obj in delete will result in an "
-                "error in the future", DeprecationWarning)
-            obj = obj.astype(intp)
-        keep = ones(N, dtype=bool)
-
-        # Test if there are out of bound indices, this is deprecated
-        inside_bounds = (obj < N) & (obj >= -N)
-        if not inside_bounds.all():
-            warnings.warn(
-                "in the future out of bounds indices will raise an error "
-                "instead of being ignored by `numpy.delete`.",
-                DeprecationWarning)
-            obj = obj[inside_bounds]
-        positive_indices = obj >= 0
-        if not positive_indices.all():
-            warnings.warn(
-                "in the future negative indices will not be ignored by "
-                "`numpy.delete`.", FutureWarning)
-            obj = obj[positive_indices]
-
-        keep[obj, ] = False
-        slobj[axis] = keep
-        new = arr[slobj]
-
-    if wrap:
-        return wrap(new)
-    else:
-        return new
-
-
-def insert(arr, obj, values, axis=None):
-    """
-    Insert values along the given axis before the given indices.
-
-    Parameters
-    ----------
-    arr : array_like
-        Input array.
-    obj : int, slice or sequence of ints
-        Object that defines the index or indices before which `values` is
-        inserted.
-
-        .. versionadded:: 1.8.0
-
-        Support for multiple insertions when `obj` is a single scalar or a
-        sequence with one element (similar to calling insert multiple
-        times).
-    values : array_like
-        Values to insert into `arr`. If the type of `values` is different
-        from that of `arr`, `values` is converted to the type of `arr`.
-        `values` should be shaped so that ``arr[...,obj,...] = values``
-        is legal.
-    axis : int, optional
-        Axis along which to insert `values`. If `axis` is None then `arr`
-        is flattened first.
-
-    Returns
-    -------
-    out : ndarray
-        A copy of `arr` with `values` inserted. Note that `insert`
-        does not occur in-place: a new array is returned. If
-        `axis` is None, `out` is a flattened array.
-
-    See Also
-    --------
-    append : Append elements at the end of an array.
-    concatenate : Join a sequence of arrays together.
-    delete : Delete elements from an array.
-
-    Notes
-    -----
-    Note that for higher dimensional inserts `obj=0` behaves very differently
-    from `obj=[0]`, just like `arr[:,0,:] = values` is different from
-    `arr[:,[0],:] = values`.
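-
-    A short sketch of the multiple-insertion behaviour mentioned above
-    (a scalar `obj` with several values inserts them all at that index):
-
-    >>> np.insert([1, 2, 3], 1, [9, 9])
-    array([1, 9, 9, 2, 3])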
- - Examples - -------- - >>> a = np.array([[1, 1], [2, 2], [3, 3]]) - >>> a - array([[1, 1], - [2, 2], - [3, 3]]) - >>> np.insert(a, 1, 5) - array([1, 5, 1, 2, 2, 3, 3]) - >>> np.insert(a, 1, 5, axis=1) - array([[1, 5, 1], - [2, 5, 2], - [3, 5, 3]]) - - Difference between sequence and scalars: - >>> np.insert(a, [1], [[1],[2],[3]], axis=1) - array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]]) - >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), - ... np.insert(a, [1], [[1],[2],[3]], axis=1)) - True - - >>> b = a.flatten() - >>> b - array([1, 1, 2, 2, 3, 3]) - >>> np.insert(b, [2, 2], [5, 6]) - array([1, 1, 5, 6, 2, 2, 3, 3]) - - >>> np.insert(b, slice(2, 4), [5, 6]) - array([1, 1, 5, 2, 6, 2, 3, 3]) - - >>> np.insert(b, [2, 2], [7.13, False]) # type casting - array([1, 1, 7, 0, 2, 2, 3, 3]) - - >>> x = np.arange(8).reshape(2, 4) - >>> idx = (1, 3) - >>> np.insert(x, idx, 999, axis=1) - array([[ 0, 999, 1, 2, 999, 3], - [ 4, 999, 5, 6, 999, 7]]) - - """ - wrap = None - if type(arr) is not ndarray: - try: - wrap = arr.__array_wrap__ - except AttributeError: - pass - - arr = asarray(arr) - ndim = arr.ndim - if axis is None: - if ndim != 1: - arr = arr.ravel() - ndim = arr.ndim - axis = ndim - 1 - else: - if ndim > 0 and (axis < -ndim or axis >= ndim): - raise IndexError( - "axis %i is out of bounds for an array of " - "dimension %i" % (axis, ndim)) - if (axis < 0): - axis += ndim - if (ndim == 0): - warnings.warn( - "in the future the special handling of scalars will be removed " - "from insert and raise an error", DeprecationWarning) - arr = arr.copy() - arr[...] = values - if wrap: - return wrap(arr) - else: - return arr - slobj = [slice(None)]*ndim - N = arr.shape[axis] - newshape = list(arr.shape) - - if isinstance(obj, slice): - # turn it into a range object - indices = arange(*obj.indices(N), **{'dtype': intp}) - else: - # need to copy obj, because indices will be changed in-place - indices = np.array(obj) - if indices.dtype == bool: - # See also delete - warnings.warn( - "in the future insert will treat boolean arrays and " - "array-likes as a boolean index instead of casting it to " - "integer", FutureWarning) - indices = indices.astype(intp) - # Code after warning period: - #if obj.ndim != 1: - # raise ValueError('boolean array argument obj to insert ' - # 'must be one dimensional') - #indices = np.flatnonzero(obj) - elif indices.ndim > 1: - raise ValueError( - "index array argument obj to insert must be one dimensional " - "or scalar") - if indices.size == 1: - index = indices.item() - if index < -N or index > N: - raise IndexError( - "index %i is out of bounds for axis %i with " - "size %i" % (obj, axis, N)) - if (index < 0): - index += N - - # There are some object array corner cases here, but we cannot avoid - # that: - values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) - if indices.ndim == 0: - # broadcasting is very different here, since a[:,0,:] = ... behaves - # very different from a[:,[0],:] = ...! This changes values so that - # it works likes the second case. 
(here a[:,0:1,:]) - values = np.rollaxis(values, 0, (axis % values.ndim) + 1) - numnew = values.shape[axis] - newshape[axis] += numnew - new = empty(newshape, arr.dtype, arr.flags.fnc) - slobj[axis] = slice(None, index) - new[slobj] = arr[slobj] - slobj[axis] = slice(index, index+numnew) - new[slobj] = values - slobj[axis] = slice(index+numnew, None) - slobj2 = [slice(None)] * ndim - slobj2[axis] = slice(index, None) - new[slobj] = arr[slobj2] - if wrap: - return wrap(new) - return new - elif indices.size == 0 and not isinstance(obj, np.ndarray): - # Can safely cast the empty list to intp - indices = indices.astype(intp) - - if not np.can_cast(indices, intp, 'same_kind'): - warnings.warn( - "using a non-integer array as obj in insert will result in an " - "error in the future", DeprecationWarning) - indices = indices.astype(intp) - - indices[indices < 0] += N - - numnew = len(indices) - order = indices.argsort(kind='mergesort') # stable sort - indices[order] += np.arange(numnew) - - newshape[axis] += numnew - old_mask = ones(newshape[axis], dtype=bool) - old_mask[indices] = False - - new = empty(newshape, arr.dtype, arr.flags.fnc) - slobj2 = [slice(None)]*ndim - slobj[axis] = indices - slobj2[axis] = old_mask - new[slobj] = values - new[slobj2] = arr - - if wrap: - return wrap(new) - return new - - -def append(arr, values, axis=None): - """ - Append values to the end of an array. - - Parameters - ---------- - arr : array_like - Values are appended to a copy of this array. - values : array_like - These values are appended to a copy of `arr`. It must be of the - correct shape (the same shape as `arr`, excluding `axis`). If - `axis` is not specified, `values` can be any shape and will be - flattened before use. - axis : int, optional - The axis along which `values` are appended. If `axis` is not - given, both `arr` and `values` are flattened before use. - - Returns - ------- - append : ndarray - A copy of `arr` with `values` appended to `axis`. Note that - `append` does not occur in-place: a new array is allocated and - filled. If `axis` is None, `out` is a flattened array. - - See Also - -------- - insert : Insert elements into an array. - delete : Delete elements from an array. - - Examples - -------- - >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) - array([1, 2, 3, 4, 5, 6, 7, 8, 9]) - - When `axis` is specified, `values` must have the correct shape. - - >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) - array([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]]) - >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) - Traceback (most recent call last): - ... - ValueError: arrays must have same number of dimensions - - """ - arr = asanyarray(arr) - if axis is None: - if arr.ndim != 1: - arr = arr.ravel() - values = ravel(values) - axis = arr.ndim-1 - return concatenate((arr, values), axis=axis) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/index_tricks.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/index_tricks.py deleted file mode 100644 index 98c6b291b41c2..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/index_tricks.py +++ /dev/null @@ -1,869 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import math - -import numpy.core.numeric as _nx -from numpy.core.numeric import ( - asarray, ScalarType, array, alltrue, cumprod, arange - ) -from numpy.core.numerictypes import find_common_type - -from . 
import function_base -import numpy.matrixlib as matrix -from .function_base import diff -from numpy.lib._compiled_base import ravel_multi_index, unravel_index -from numpy.lib.stride_tricks import as_strided - -makemat = matrix.matrix - - -__all__ = [ - 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', - 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', - 'diag_indices', 'diag_indices_from' - ] - - -def ix_(*args): - """ - Construct an open mesh from multiple sequences. - - This function takes N 1-D sequences and returns N outputs with N - dimensions each, such that the shape is 1 in all but one dimension - and the dimension with the non-unit shape value cycles through all - N dimensions. - - Using `ix_` one can quickly construct index arrays that will index - the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array - ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. - - Parameters - ---------- - args : 1-D sequences - - Returns - ------- - out : tuple of ndarrays - N arrays with N dimensions each, with N the number of input - sequences. Together these arrays form an open mesh. - - See Also - -------- - ogrid, mgrid, meshgrid - - Examples - -------- - >>> a = np.arange(10).reshape(2, 5) - >>> a - array([[0, 1, 2, 3, 4], - [5, 6, 7, 8, 9]]) - >>> ixgrid = np.ix_([0,1], [2,4]) - >>> ixgrid - (array([[0], - [1]]), array([[2, 4]])) - >>> ixgrid[0].shape, ixgrid[1].shape - ((2, 1), (1, 2)) - >>> a[ixgrid] - array([[2, 4], - [7, 9]]) - - """ - out = [] - nd = len(args) - baseshape = [1]*nd - for k in range(nd): - new = _nx.asarray(args[k]) - if (new.ndim != 1): - raise ValueError("Cross index must be 1 dimensional") - if issubclass(new.dtype.type, _nx.bool_): - new = new.nonzero()[0] - baseshape[k] = len(new) - new = new.reshape(tuple(baseshape)) - out.append(new) - baseshape[k] = 1 - return tuple(out) - -class nd_grid(object): - """ - Construct a multi-dimensional "meshgrid". - - ``grid = nd_grid()`` creates an instance which will return a mesh-grid - when indexed. The dimension and number of the output arrays are equal - to the number of indexing dimensions. If the step length is not a - complex number, then the stop is not inclusive. - - However, if the step length is a **complex number** (e.g. 5j), then the - integer part of its magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value **is inclusive**. - - If instantiated with an argument of ``sparse=True``, the mesh-grid is - open (or not fleshed out) so that only one-dimension of each returned - argument is greater than 1. - - Parameters - ---------- - sparse : bool, optional - Whether the grid is sparse or not. Default is False. - - Notes - ----- - Two instances of `nd_grid` are made available in the NumPy namespace, - `mgrid` and `ogrid`:: - - mgrid = nd_grid(sparse=False) - ogrid = nd_grid(sparse=True) - - Users should use these pre-defined instances instead of using `nd_grid` - directly. - - Examples - -------- - >>> mgrid = np.lib.index_tricks.nd_grid() - >>> mgrid[0:5,0:5] - array([[[0, 0, 0, 0, 0], - [1, 1, 1, 1, 1], - [2, 2, 2, 2, 2], - [3, 3, 3, 3, 3], - [4, 4, 4, 4, 4]], - [[0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4]]]) - >>> mgrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. 
]) - - >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True) - >>> ogrid[0:5,0:5] - [array([[0], - [1], - [2], - [3], - [4]]), array([[0, 1, 2, 3, 4]])] - - """ - - def __init__(self, sparse=False): - self.sparse = sparse - - def __getitem__(self, key): - try: - size = [] - typ = int - for k in range(len(key)): - step = key[k].step - start = key[k].start - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - size.append(int(abs(step))) - typ = float - else: - size.append( - int(math.ceil((key[k].stop - start)/(step*1.0)))) - if (isinstance(step, float) or - isinstance(start, float) or - isinstance(key[k].stop, float)): - typ = float - if self.sparse: - nn = [_nx.arange(_x, dtype=_t) - for _x, _t in zip(size, (typ,)*len(size))] - else: - nn = _nx.indices(size, typ) - for k in range(len(size)): - step = key[k].step - start = key[k].start - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - step = int(abs(step)) - if step != 1: - step = (key[k].stop - start)/float(step-1) - nn[k] = (nn[k]*step+start) - if self.sparse: - slobj = [_nx.newaxis]*len(size) - for k in range(len(size)): - slobj[k] = slice(None, None) - nn[k] = nn[k][slobj] - slobj[k] = _nx.newaxis - return nn - except (IndexError, TypeError): - step = key.step - stop = key.stop - start = key.start - if start is None: - start = 0 - if isinstance(step, complex): - step = abs(step) - length = int(step) - if step != 1: - step = (key.stop-start)/float(step-1) - stop = key.stop + step - return _nx.arange(0, length, 1, float)*step + start - else: - return _nx.arange(start, stop, step) - - def __getslice__(self, i, j): - return _nx.arange(i, j) - - def __len__(self): - return 0 - -mgrid = nd_grid(sparse=False) -ogrid = nd_grid(sparse=True) -mgrid.__doc__ = None # set in numpy.add_newdocs -ogrid.__doc__ = None # set in numpy.add_newdocs - -class AxisConcatenator(object): - """ - Translates slice objects to concatenation along an axis. - - For detailed documentation on usage, see `r_`. 
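As a quick, hedged illustration of the translation (doctest outputs assume NumPy 1.9 repr formatting): a slice key expands via `arange` (or `linspace` when the step is complex) and the pieces are then concatenated.

>>> np.r_[0:3, 10, np.array([20, 21])]    # 0:3 expands to arange(0, 3)
array([ 0,  1,  2, 10, 20, 21])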
- - """ - - def _retval(self, res): - if self.matrix: - oldndim = res.ndim - res = makemat(res) - if oldndim == 1 and self.col: - res = res.T - self.axis = self._axis - self.matrix = self._matrix - self.col = 0 - return res - - def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): - self._axis = axis - self._matrix = matrix - self.axis = axis - self.matrix = matrix - self.col = 0 - self.trans1d = trans1d - self.ndmin = ndmin - - def __getitem__(self, key): - trans1d = self.trans1d - ndmin = self.ndmin - if isinstance(key, str): - frame = sys._getframe().f_back - mymat = matrix.bmat(key, frame.f_globals, frame.f_locals) - return mymat - if not isinstance(key, tuple): - key = (key,) - objs = [] - scalars = [] - arraytypes = [] - scalartypes = [] - for k in range(len(key)): - scalar = False - if isinstance(key[k], slice): - step = key[k].step - start = key[k].start - stop = key[k].stop - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - size = int(abs(step)) - newobj = function_base.linspace(start, stop, num=size) - else: - newobj = _nx.arange(start, stop, step) - if ndmin > 1: - newobj = array(newobj, copy=False, ndmin=ndmin) - if trans1d != -1: - newobj = newobj.swapaxes(-1, trans1d) - elif isinstance(key[k], str): - if k != 0: - raise ValueError("special directives must be the " - "first entry.") - key0 = key[0] - if key0 in 'rc': - self.matrix = True - self.col = (key0 == 'c') - continue - if ',' in key0: - vec = key0.split(',') - try: - self.axis, ndmin = \ - [int(x) for x in vec[:2]] - if len(vec) == 3: - trans1d = int(vec[2]) - continue - except: - raise ValueError("unknown special directive") - try: - self.axis = int(key[k]) - continue - except (ValueError, TypeError): - raise ValueError("unknown special directive") - elif type(key[k]) in ScalarType: - newobj = array(key[k], ndmin=ndmin) - scalars.append(k) - scalar = True - scalartypes.append(newobj.dtype) - else: - newobj = key[k] - if ndmin > 1: - tempobj = array(newobj, copy=False, subok=True) - newobj = array(newobj, copy=False, subok=True, - ndmin=ndmin) - if trans1d != -1 and tempobj.ndim < ndmin: - k2 = ndmin-tempobj.ndim - if (trans1d < 0): - trans1d += k2 + 1 - defaxes = list(range(ndmin)) - k1 = trans1d - axes = defaxes[:k1] + defaxes[k2:] + \ - defaxes[k1:k2] - newobj = newobj.transpose(axes) - del tempobj - objs.append(newobj) - if not scalar and isinstance(newobj, _nx.ndarray): - arraytypes.append(newobj.dtype) - - # Esure that scalars won't up-cast unless warranted - final_dtype = find_common_type(arraytypes, scalartypes) - if final_dtype is not None: - for k in scalars: - objs[k] = objs[k].astype(final_dtype) - - res = _nx.concatenate(tuple(objs), axis=self.axis) - return self._retval(res) - - def __getslice__(self, i, j): - res = _nx.arange(i, j) - return self._retval(res) - - def __len__(self): - return 0 - -# separate classes are used here instead of just making r_ = concatentor(0), -# etc. because otherwise we couldn't get the doc string to come out right -# in help(r_) - -class RClass(AxisConcatenator): - """ - Translates slice objects to concatenation along the first axis. - - This is a simple way to build up arrays quickly. There are two use cases. - - 1. If the index expression contains comma separated arrays, then stack - them along their first axis. - 2. If the index expression contains slice notation or scalars then create - a 1-D array with a range indicated by the slice notation. 
- - If slice notation is used, the syntax ``start:stop:step`` is equivalent - to ``np.arange(start, stop, step)`` inside of the brackets. However, if - ``step`` is an imaginary number (i.e. 100j) then its integer portion is - interpreted as a number-of-points desired and the start and stop are - inclusive. In other words ``start:stop:stepj`` is interpreted as - ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. - After expansion of slice notation, all comma separated sequences are - concatenated together. - - Optional character strings placed as the first element of the index - expression can be used to change the output. The strings 'r' or 'c' result - in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) - matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 - (column) matrix is produced. If the result is 2-D then both provide the - same matrix result. - - A string integer specifies which axis to stack multiple comma separated - arrays along. A string of two comma-separated integers allows indication - of the minimum number of dimensions to force each entry into as the - second integer (the axis to concatenate along is still the first integer). - - A string with three comma-separated integers allows specification of the - axis to concatenate along, the minimum number of dimensions to force the - entries to, and which axis should contain the start of the arrays which - are less than the specified number of dimensions. In other words the third - integer allows you to specify where the 1's should be placed in the shape - of the arrays that have their shapes upgraded. By default, they are placed - in the front of the shape tuple. The third argument allows you to specify - where the start of the array should be instead. Thus, a third argument of - '0' would place the 1's at the end of the array shape. Negative integers - specify where in the new shape tuple the last dimension of upgraded arrays - should be placed, so the default is '-1'. - - Parameters - ---------- - Not a function, so takes no parameters - - - Returns - ------- - A concatenated ndarray or matrix. - - See Also - -------- - concatenate : Join a sequence of arrays together. - c_ : Translates slice objects to concatenation along the second axis. - - Examples - -------- - >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) - >>> np.r_[-1:1:6j, [0]*3, 5, 6] - array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) - - String integers specify the axis to concatenate along or the minimum - number of dimensions to force entries into. - - >>> a = np.array([[0, 1, 2], [3, 4, 5]]) - >>> np.r_['-1', a, a] # concatenate along last axis - array([[0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5]]) - >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 - array([[1, 2, 3], - [4, 5, 6]]) - - >>> np.r_['0,2,0', [1,2,3], [4,5,6]] - array([[1], - [2], - [3], - [4], - [5], - [6]]) - >>> np.r_['1,2,0', [1,2,3], [4,5,6]] - array([[1, 4], - [2, 5], - [3, 6]]) - - Using 'r' or 'c' as a first string argument creates a matrix. - - >>> np.r_['r',[1,2,3], [4,5,6]] - matrix([[1, 2, 3, 4, 5, 6]]) - - """ - - def __init__(self): - AxisConcatenator.__init__(self, 0) - -r_ = RClass() - -class CClass(AxisConcatenator): - """ - Translates slice objects to concatenation along the second axis. - - This is short-hand for ``np.r_['-1,2,0', index expression]``, which is - useful because of its common occurrence. 
In particular, arrays will be - stacked along their last axis after being upgraded to at least 2-D with - 1's post-pended to the shape (column vectors made out of 1-D arrays). - - For detailed documentation, see `r_`. - - Examples - -------- - >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] - array([[1, 2, 3, 0, 0, 4, 5, 6]]) - - """ - - def __init__(self): - AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) - -c_ = CClass() - -class ndenumerate(object): - """ - Multidimensional index iterator. - - Return an iterator yielding pairs of array coordinates and values. - - Parameters - ---------- - a : ndarray - Input array. - - See Also - -------- - ndindex, flatiter - - Examples - -------- - >>> a = np.array([[1, 2], [3, 4]]) - >>> for index, x in np.ndenumerate(a): - ... print index, x - (0, 0) 1 - (0, 1) 2 - (1, 0) 3 - (1, 1) 4 - - """ - - def __init__(self, arr): - self.iter = asarray(arr).flat - - def __next__(self): - """ - Standard iterator method, returns the index tuple and array value. - - Returns - ------- - coords : tuple of ints - The indices of the current iteration. - val : scalar - The array element of the current iteration. - - """ - return self.iter.coords, next(self.iter) - - def __iter__(self): - return self - - next = __next__ - - -class ndindex(object): - """ - An N-dimensional iterator object to index arrays. - - Given the shape of an array, an `ndindex` instance iterates over - the N-dimensional index of the array. At each iteration a tuple - of indices is returned, the last dimension is iterated over first. - - Parameters - ---------- - `*args` : ints - The size of each dimension of the array. - - See Also - -------- - ndenumerate, flatiter - - Examples - -------- - >>> for index in np.ndindex(3, 2, 1): - ... print index - (0, 0, 0) - (0, 1, 0) - (1, 0, 0) - (1, 1, 0) - (2, 0, 0) - (2, 1, 0) - - """ - - def __init__(self, *shape): - if len(shape) == 1 and isinstance(shape[0], tuple): - shape = shape[0] - x = as_strided(_nx.zeros(1), shape=shape, - strides=_nx.zeros_like(shape)) - self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], - order='C') - - def __iter__(self): - return self - - def ndincr(self): - """ - Increment the multi-dimensional index by one. - - This method is for backward compatibility only: do not use. - """ - next(self) - - def __next__(self): - """ - Standard iterator method, updates the index and returns the index - tuple. - - Returns - ------- - val : tuple of ints - Returns a tuple containing the indices of the current - iteration. - - """ - next(self._it) - return self._it.multi_index - - next = __next__ - - -# You can do all this with slice() plus a few special objects, -# but there's a lot to remember. This version is simpler because -# it uses the standard array indexing syntax. -# -# Written by Konrad Hinsen -# last revision: 1999-7-23 -# -# Cosmetic changes by T. Oliphant 2001 -# -# - -class IndexExpression(object): - """ - A nicer way to build up index tuples for arrays. - - .. note:: - Use one of the two predefined instances `index_exp` or `s_` - rather than directly using `IndexExpression`. - - For any index combination, including slicing and axis insertion, - ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any - array `a`. However, ``np.index_exp[indices]`` can be used anywhere - in Python code and returns a tuple of slice objects that can be - used in the construction of complex index expressions. - - Parameters - ---------- - maketuple : bool - If True, always returns a tuple. 
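To make the `maketuple` distinction concrete, a small hedged doctest (NumPy 1.9 repr assumed):

>>> a = np.arange(6)
>>> np.index_exp[1:5:2]         # maketuple=True wraps the key in a tuple
(slice(1, 5, 2),)
>>> a[np.index_exp[1:5:2]]      # identical to a[1:5:2]
array([1, 3])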
- - See Also - -------- - index_exp : Predefined instance that always returns a tuple: - `index_exp = IndexExpression(maketuple=True)`. - s_ : Predefined instance without tuple conversion: - `s_ = IndexExpression(maketuple=False)`. - - Notes - ----- - You can do all this with `slice()` plus a few special objects, - but there's a lot to remember and this version is simpler because - it uses the standard array indexing syntax. - - Examples - -------- - >>> np.s_[2::2] - slice(2, None, 2) - >>> np.index_exp[2::2] - (slice(2, None, 2),) - - >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] - array([2, 4]) - - """ - - def __init__(self, maketuple): - self.maketuple = maketuple - - def __getitem__(self, item): - if self.maketuple and not isinstance(item, tuple): - return (item,) - else: - return item - -index_exp = IndexExpression(maketuple=True) -s_ = IndexExpression(maketuple=False) - -# End contribution from Konrad. - - -# The following functions complement those in twodim_base, but are -# applicable to N-dimensions. - -def fill_diagonal(a, val, wrap=False): - """Fill the main diagonal of the given array of any dimensionality. - - For an array `a` with ``a.ndim > 2``, the diagonal is the list of - locations with indices ``a[i, i, ..., i]`` all identical. This function - modifies the input array in-place, it does not return a value. - - Parameters - ---------- - a : array, at least 2-D. - Array whose diagonal is to be filled, it gets modified in-place. - - val : scalar - Value to be written on the diagonal, its type must be compatible with - that of the array a. - - wrap : bool - For tall matrices in NumPy version up to 1.6.2, the - diagonal "wrapped" after N columns. You can have this behavior - with this option. This affect only tall matrices. - - See also - -------- - diag_indices, diag_indices_from - - Notes - ----- - .. versionadded:: 1.4.0 - - This functionality can be obtained via `diag_indices`, but internally - this version uses a much faster implementation that never constructs the - indices and uses simple slicing. - - Examples - -------- - >>> a = np.zeros((3, 3), int) - >>> np.fill_diagonal(a, 5) - >>> a - array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5]]) - - The same function can operate on a 4-D array: - - >>> a = np.zeros((3, 3, 3, 3), int) - >>> np.fill_diagonal(a, 4) - - We only show a few blocks for clarity: - - >>> a[0, 0] - array([[4, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - >>> a[1, 1] - array([[0, 0, 0], - [0, 4, 0], - [0, 0, 0]]) - >>> a[2, 2] - array([[0, 0, 0], - [0, 0, 0], - [0, 0, 4]]) - - # tall matrices no wrap - >>> a = np.zeros((5, 3),int) - >>> fill_diagonal(a, 4) - array([[4, 0, 0], - [0, 4, 0], - [0, 0, 4], - [0, 0, 0], - [0, 0, 0]]) - - # tall matrices wrap - >>> a = np.zeros((5, 3),int) - >>> fill_diagonal(a, 4) - array([[4, 0, 0], - [0, 4, 0], - [0, 0, 4], - [0, 0, 0], - [4, 0, 0]]) - - # wide matrices - >>> a = np.zeros((3, 5),int) - >>> fill_diagonal(a, 4) - array([[4, 0, 0, 0, 0], - [0, 4, 0, 0, 0], - [0, 0, 4, 0, 0]]) - - """ - if a.ndim < 2: - raise ValueError("array must be at least 2-d") - end = None - if a.ndim == 2: - # Explicit, fast formula for the common case. For 2-d arrays, we - # accept rectangular ones. - step = a.shape[1] + 1 - #This is needed to don't have tall matrix have the diagonal wrap. - if not wrap: - end = a.shape[1] * a.shape[1] - else: - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. 
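# A hedged aside on the 2-D fast path above: for shape (r, c) the main
# diagonal sits at flat indices 0, c+1, 2*(c+1), ..., and capping the
# write at end = c*c is what keeps a tall matrix from wrapping, e.g.:
#     a = np.zeros((5, 3), int)
#     a.flat[:3 * 3:3 + 1] = 7    # fills (0, 0), (1, 1), (2, 2) only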
- if not alltrue(diff(a.shape) == 0): - raise ValueError("All dimensions of input must be of equal length") - step = 1 + (cumprod(a.shape[:-1])).sum() - - # Write the value out into the diagonal. - a.flat[:end:step] = val - - -def diag_indices(n, ndim=2): - """ - Return the indices to access the main diagonal of an array. - - This returns a tuple of indices that can be used to access the main - diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape - (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for - ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` - for ``i = [0..n-1]``. - - Parameters - ---------- - n : int - The size, along each dimension, of the arrays for which the returned - indices can be used. - - ndim : int, optional - The number of dimensions. - - See also - -------- - diag_indices_from - - Notes - ----- - .. versionadded:: 1.4.0 - - Examples - -------- - Create a set of indices to access the diagonal of a (4, 4) array: - - >>> di = np.diag_indices(4) - >>> di - (array([0, 1, 2, 3]), array([0, 1, 2, 3])) - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - >>> a[di] = 100 - >>> a - array([[100, 1, 2, 3], - [ 4, 100, 6, 7], - [ 8, 9, 100, 11], - [ 12, 13, 14, 100]]) - - Now, we create indices to manipulate a 3-D array: - - >>> d3 = np.diag_indices(2, 3) - >>> d3 - (array([0, 1]), array([0, 1]), array([0, 1])) - - And use it to set the diagonal of an array of zeros to 1: - - >>> a = np.zeros((2, 2, 2), dtype=np.int) - >>> a[d3] = 1 - >>> a - array([[[1, 0], - [0, 0]], - [[0, 0], - [0, 1]]]) - - """ - idx = arange(n) - return (idx,) * ndim - - -def diag_indices_from(arr): - """ - Return the indices to access the main diagonal of an n-dimensional array. - - See `diag_indices` for full details. - - Parameters - ---------- - arr : array, at least 2-D - - See Also - -------- - diag_indices - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - - if not arr.ndim >= 2: - raise ValueError("input array must be at least 2-d") - # For more than d=2, the strided formula is only valid for arrays with - # all dimensions equal, so we check first. - if not alltrue(diff(arr.shape) == 0): - raise ValueError("All dimensions of input must be of equal length") - - return diag_indices(arr.shape[0], arr.ndim) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/info.py deleted file mode 100644 index 3fbbab7695630..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/info.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -Basic functions used by several sub-packages and -useful to have in the main name-space. 
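A brief hedged illustration of the "scalar result" vs "array result" wording used in the tables below (NumPy 1.9 repr assumed):

>>> np.iscomplexobj(np.array([1+0j]))     # scalar result: asks about dtype
True
>>> np.iscomplex(np.array([1+0j, 2j]))    # array result: per element
array([False,  True], dtype=bool)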
- -Type Handling -------------- -================ =================== -iscomplexobj Test for complex object, scalar result -isrealobj Test for real object, scalar result -iscomplex Test for complex elements, array result -isreal Test for real elements, array result -imag Imaginary part -real Real part -real_if_close Turns complex number with tiny imaginary part to real -isneginf Tests for negative infinity, array result -isposinf Tests for positive infinity, array result -isnan Tests for nans, array result -isinf Tests for infinity, array result -isfinite Tests for finite numbers, array result -isscalar True if argument is a scalar -nan_to_num Replaces NaN's with 0 and infinities with large numbers -cast Dictionary of functions to force cast to each type -common_type Determine the minimum common type code for a group - of arrays -mintypecode Return minimal allowed common typecode. -================ =================== - -Index Tricks ------------- -================ =================== -mgrid Method which allows easy construction of N-d - 'mesh-grids' -``r_`` Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends rows. -index_exp Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. -================ =================== - -Useful Functions ----------------- -================ =================== -select Extension of where to multiple conditions and choices -extract Extract 1d array from flattened array according to mask -insert Insert 1d array of values into Nd array according to mask -linspace Evenly spaced samples in linear space -logspace Evenly spaced samples in logarithmic space -fix Round x to nearest integer towards zero -mod Modulo mod(x,y) = x % y except keeps sign of y -amax Array maximum along axis -amin Array minimum along axis -ptp Array max-min along axis -cumsum Cumulative sum along axis -prod Product of elements along axis -cumprod Cumluative product along axis -diff Discrete differences along axis -angle Returns angle of complex argument -unwrap Unwrap phase along given axis (1-d algorithm) -sort_complex Sort a complex-array (based on real, then imaginary) -trim_zeros Trim the leading and trailing zeros from 1D array. -vectorize A class that wraps a Python function taking scalar - arguments into a generalized function which can handle - arrays of arguments using the broadcast rules of - numerix Python. -================ =================== - -Shape Manipulation ------------------- -================ =================== -squeeze Return a with length-one dimensions removed. -atleast_1d Force arrays to be > 1D -atleast_2d Force arrays to be > 2D -atleast_3d Force arrays to be > 3D -vstack Stack arrays vertically (row on row) -hstack Stack arrays horizontally (column on column) -column_stack Stack 1D arrays as columns into 2D array -dstack Stack arrays depthwise (along third dimension) -split Divide array into a list of sub-arrays -hsplit Split into columns -vsplit Split into rows -dsplit Split along third dimension -================ =================== - -Matrix (2D Array) Manipulations -------------------------------- -================ =================== -fliplr 2D array with columns flipped -flipud 2D array with rows flipped -rot90 Rotate a 2D array a multiple of 90 degrees -eye Return a 2D array with ones down a given diagonal -diag Construct a 2D array from a vector, or return a given - diagonal from a 2D array. 
-mat Construct a Matrix -bmat Build a Matrix from blocks -================ =================== - -Polynomials ------------ -================ =================== -poly1d A one-dimensional polynomial class -poly Return polynomial coefficients from roots -roots Find roots of polynomial given coefficients -polyint Integrate polynomial -polyder Differentiate polynomial -polyadd Add polynomials -polysub Substract polynomials -polymul Multiply polynomials -polydiv Divide polynomials -polyval Evaluate polynomial at given argument -================ =================== - -Import Tricks -------------- -================ =================== -ppimport Postpone module import until trying to use it -ppimport_attr Postpone module import until trying to use its attribute -ppresolve Import postponed module and return it. -================ =================== - -Machine Arithmetics -------------------- -================ =================== -machar_single Single precision floating point arithmetic parameters -machar_double Double precision floating point arithmetic parameters -================ =================== - -Threading Tricks ----------------- -================ =================== -ParallelExec Execute commands in parallel thread. -================ =================== - -1D Array Set Operations ------------------------ -Set operations for 1D numeric arrays based on sort() function. - -================ =================== -ediff1d Array difference (auxiliary function). -unique Unique elements of an array. -intersect1d Intersection of 1D arrays with unique elements. -setxor1d Set exclusive-or of 1D arrays with unique elements. -in1d Test whether elements in a 1D array are also present in - another array. -union1d Union of 1D arrays with unique elements. -setdiff1d Set difference of 1D arrays with unique elements. -================ =================== - -""" -from __future__ import division, absolute_import, print_function - -depends = ['core', 'testing'] -global_symbols = ['*'] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/nanfunctions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/nanfunctions.py deleted file mode 100644 index f5ac35e54e70a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/nanfunctions.py +++ /dev/null @@ -1,1158 +0,0 @@ -""" -Functions that ignore NaN. - -Functions ---------- - -- `nanmin` -- minimum non-NaN value -- `nanmax` -- maximum non-NaN value -- `nanargmin` -- index of minimum non-NaN value -- `nanargmax` -- index of maximum non-NaN value -- `nansum` -- sum of non-NaN values -- `nanmean` -- mean of non-NaN values -- `nanvar` -- variance of non-NaN values -- `nanstd` -- standard deviation of non-NaN values - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -from numpy.lib.function_base import _ureduce as _ureduce - -__all__ = [ - 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', - 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd' - ] - - -def _replace_nan(a, val): - """ - If `a` is of inexact type, make a copy of `a`, replace NaNs with - the `val` value, and return the copy together with a boolean mask - marking the locations where NaNs were present. If `a` is not of - inexact type, do nothing and return `a` together with a mask of None. - - Parameters - ---------- - a : array-like - Input array. - val : float - NaN values are set to val before doing the operation. 
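For orientation, a hedged doctest of the round trip (illustrative only; NumPy 1.9 repr assumed):

>>> arr, mask = _replace_nan(np.array([1.0, np.nan, 3.0]), 0)
>>> arr
array([ 1.,  0.,  3.])
>>> mask
array([False,  True, False], dtype=bool)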
- - Returns - ------- - y : ndarray - If `a` is of inexact type, return a copy of `a` with the NaNs - replaced by the fill value, otherwise return `a`. - mask: {bool, None} - If `a` is of inexact type, return a boolean mask marking locations of - NaNs, otherwise return None. - - """ - is_new = not isinstance(a, np.ndarray) - if is_new: - a = np.array(a) - if not issubclass(a.dtype.type, np.inexact): - return a, None - if not is_new: - # need copy - a = np.array(a, subok=True) - - mask = np.isnan(a) - np.copyto(a, val, where=mask) - return a, mask - - -def _copyto(a, val, mask): - """ - Replace values in `a` with NaN where `mask` is True. This differs from - copyto in that it will deal with the case where `a` is a numpy scalar. - - Parameters - ---------- - a : ndarray or numpy scalar - Array or numpy scalar some of whose values are to be replaced - by val. - val : numpy scalar - Value used as a replacement. - mask : ndarray, scalar - Boolean array. Where True the corresponding element of `a` is - replaced by `val`. Broadcasts. - - Returns - ------- - res : ndarray, scalar - Array with elements replaced or scalar `val`. - - """ - if isinstance(a, np.ndarray): - np.copyto(a, val, where=mask, casting='unsafe') - else: - a = a.dtype.type(val) - return a - - -def _divide_by_count(a, b, out=None): - """ - Compute a/b ignoring invalid results. If `a` is an array the division - is done in place. If `a` is a scalar, then its type is preserved in the - output. If out is None, then a is used instead so that the - division is in place. Note that this is only called with `a` an inexact - type. - - Parameters - ---------- - a : {ndarray, numpy scalar} - Numerator. Expected to be of inexact type but not checked. - b : {ndarray, numpy scalar} - Denominator. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. - - Returns - ------- - ret : {ndarray, numpy scalar} - The return value is a/b. If `a` was an ndarray the division is done - in place. If `a` is a numpy scalar, the division preserves its type. - - """ - with np.errstate(invalid='ignore'): - if isinstance(a, np.ndarray): - if out is None: - return np.divide(a, b, out=a, casting='unsafe') - else: - return np.divide(a, b, out=out, casting='unsafe') - else: - if out is None: - return a.dtype.type(a / b) - else: - # This is questionable, but currently a numpy scalar can - # be output to a zero dimensional array. - return np.divide(a, b, out=out, casting='unsafe') - - -def nanmin(a, axis=None, out=None, keepdims=False): - """ - Return minimum of an array or minimum along an axis, ignoring any NaNs. - When all-NaN slices are encountered a ``RuntimeWarning`` is raised and - NaN is returned for that slice. - - Parameters - ---------- - a : array_like - Array containing numbers whose minimum is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the minimum is computed. The default is to compute - the minimum of the flattened array. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. - - .. versionadded:: 1.8.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one.
With this option, the result - will broadcast correctly against the original `a`. - - .. versionadded:: 1.8.0 - - Returns - ------- - nanmin : ndarray - An array with the same shape as `a`, with the specified axis - removed. If `a` is a 0-d array, or if axis is None, an ndarray - scalar is returned. The same dtype as `a` is returned. - - See Also - -------- - nanmax : - The maximum value of an array along a given axis, ignoring any NaNs. - amin : - The minimum value of an array along a given axis, propagating any NaNs. - fmin : - Element-wise minimum of two arrays, ignoring any NaNs. - minimum : - Element-wise minimum of two arrays, propagating any NaNs. - isnan : - Shows which elements are Not a Number (NaN). - isfinite: - Shows which elements are neither NaN nor infinity. - - amax, fmax, maximum - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Positive infinity is treated as a very large number and negative - infinity is treated as a very small (i.e. negative) number. - - If the input has a integer type the function is equivalent to np.min. - - Examples - -------- - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanmin(a) - 1.0 - >>> np.nanmin(a, axis=0) - array([ 1., 2.]) - >>> np.nanmin(a, axis=1) - array([ 1., 3.]) - - When positive infinity and negative infinity are present: - - >>> np.nanmin([1, 2, np.nan, np.inf]) - 1.0 - >>> np.nanmin([1, 2, np.nan, np.NINF]) - -inf - - """ - if not isinstance(a, np.ndarray) or type(a) is np.ndarray: - # Fast, but not safe for subclasses of ndarray - res = np.fmin.reduce(a, axis=axis, out=out, keepdims=keepdims) - if np.isnan(res).any(): - warnings.warn("All-NaN axis encountered", RuntimeWarning) - else: - # Slow, but safe for subclasses of ndarray - a, mask = _replace_nan(a, +np.inf) - res = np.amin(a, axis=axis, out=out, keepdims=keepdims) - if mask is None: - return res - - # Check for all-NaN axis - mask = np.all(mask, axis=axis, keepdims=keepdims) - if np.any(mask): - res = _copyto(res, np.nan, mask) - warnings.warn("All-NaN axis encountered", RuntimeWarning) - return res - - -def nanmax(a, axis=None, out=None, keepdims=False): - """ - Return the maximum of an array or maximum along an axis, ignoring any - NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is - raised and NaN is returned for that slice. - - Parameters - ---------- - a : array_like - Array containing numbers whose maximum is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the maximum is computed. The default is to compute - the maximum of the flattened array. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. - - .. versionadded:: 1.8.0 - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. With this option, the result - will broadcast correctly against the original `a`. - - .. versionadded:: 1.8.0 - - Returns - ------- - nanmax : ndarray - An array with the same shape as `a`, with the specified axis removed. - If `a` is a 0-d array, or if axis is None, an ndarray scalar is - returned. The same dtype as `a` is returned. 
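The fast paths above rely on the pairwise NaN semantics of the fmin/fmax ufuncs; a brief hedged illustration:

>>> np.fmax.reduce([1.0, np.nan, 3.0])    # NaNs lose pairwise
3.0
>>> np.fmax.reduce([np.nan, np.nan])      # all-NaN: NaN survives
nan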
- - See Also - -------- - nanmin : - The minimum value of an array along a given axis, ignoring any NaNs. - amax : - The maximum value of an array along a given axis, propagating any NaNs. - fmax : - Element-wise maximum of two arrays, ignoring any NaNs. - maximum : - Element-wise maximum of two arrays, propagating any NaNs. - isnan : - Shows which elements are Not a Number (NaN). - isfinite: - Shows which elements are neither NaN nor infinity. - - amin, fmin, minimum - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Positive infinity is treated as a very large number and negative - infinity is treated as a very small (i.e. negative) number. - - If the input has a integer type the function is equivalent to np.max. - - Examples - -------- - >>> a = np.array([[1, 2], [3, np.nan]]) - >>> np.nanmax(a) - 3.0 - >>> np.nanmax(a, axis=0) - array([ 3., 2.]) - >>> np.nanmax(a, axis=1) - array([ 2., 3.]) - - When positive infinity and negative infinity are present: - - >>> np.nanmax([1, 2, np.nan, np.NINF]) - 2.0 - >>> np.nanmax([1, 2, np.nan, np.inf]) - inf - - """ - if not isinstance(a, np.ndarray) or type(a) is np.ndarray: - # Fast, but not safe for subclasses of ndarray - res = np.fmax.reduce(a, axis=axis, out=out, keepdims=keepdims) - if np.isnan(res).any(): - warnings.warn("All-NaN slice encountered", RuntimeWarning) - else: - # Slow, but safe for subclasses of ndarray - a, mask = _replace_nan(a, -np.inf) - res = np.amax(a, axis=axis, out=out, keepdims=keepdims) - if mask is None: - return res - - # Check for all-NaN axis - mask = np.all(mask, axis=axis, keepdims=keepdims) - if np.any(mask): - res = _copyto(res, np.nan, mask) - warnings.warn("All-NaN axis encountered", RuntimeWarning) - return res - - -def nanargmin(a, axis=None): - """ - Return the indices of the minimum values in the specified axis ignoring - NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results - cannot be trusted if a slice contains only NaNs and Infs. - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - - Returns - ------- - index_array : ndarray - An array of indices or a single index value. - - See Also - -------- - argmin, nanargmax - - Examples - -------- - >>> a = np.array([[np.nan, 4], [2, 3]]) - >>> np.argmin(a) - 0 - >>> np.nanargmin(a) - 2 - >>> np.nanargmin(a, axis=0) - array([1, 1]) - >>> np.nanargmin(a, axis=1) - array([1, 0]) - - """ - a, mask = _replace_nan(a, np.inf) - res = np.argmin(a, axis=axis) - if mask is not None: - mask = np.all(mask, axis=axis) - if np.any(mask): - raise ValueError("All-NaN slice encountered") - return res - - -def nanargmax(a, axis=None): - """ - Return the indices of the maximum values in the specified axis ignoring - NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the - results cannot be trusted if a slice contains only NaNs and -Infs. - - - Parameters - ---------- - a : array_like - Input data. - axis : int, optional - Axis along which to operate. By default flattened input is used. - - Returns - ------- - index_array : ndarray - An array of indices or a single index value. 
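The underlying idea, sketched with public APIs (hedged; -inf can never win an argmax, mirroring the +inf trick in nanargmin):

>>> a = np.array([[np.nan, 4.0], [2.0, 3.0]])
>>> np.argmax(np.where(np.isnan(a), -np.inf, a))
1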
- - See Also - -------- - argmax, nanargmin - - Examples - -------- - >>> a = np.array([[np.nan, 4], [2, 3]]) - >>> np.argmax(a) - 0 - >>> np.nanargmax(a) - 1 - >>> np.nanargmax(a, axis=0) - array([1, 0]) - >>> np.nanargmax(a, axis=1) - array([1, 1]) - - """ - a, mask = _replace_nan(a, -np.inf) - res = np.argmax(a, axis=axis) - if mask is not None: - mask = np.all(mask, axis=axis) - if np.any(mask): - raise ValueError("All-NaN slice encountered") - return res - - -def nansum(a, axis=None, dtype=None, out=None, keepdims=0): - """ - Return the sum of array elements over a given axis treating Not a - Numbers (NaNs) as zero. - - In Numpy versions <= 1.8 Nan is returned for slices that are all-NaN or - empty. In later versions zero is returned. - - Parameters - ---------- - a : array_like - Array containing numbers whose sum is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the sum is computed. The default is to compute the - sum of the flattened array. - dtype : data-type, optional - The type of the returned array and of the accumulator in which the - elements are summed. By default, the dtype of `a` is used. An - exception is when `a` has an integer type with less precision than - the platform (u)intp. In that case, the default will be either - (u)int32 or (u)int64 depending on whether the platform is 32 or 64 - bits. For inexact inputs, dtype must be inexact. - - .. versionadded:: 1.8.0 - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``. If provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. The casting of NaN to integer can yield - unexpected results. - - .. versionadded:: 1.8.0 - keepdims : bool, optional - If True, the axes which are reduced are left in the result as - dimensions with size one. With this option, the result will - broadcast correctly against the original `arr`. - - .. versionadded:: 1.8.0 - - Returns - ------- - y : ndarray or numpy scalar - - See Also - -------- - numpy.sum : Sum across array propagating NaNs. - isnan : Show which elements are NaN. - isfinite: Show which elements are not NaN or +/-inf. - - Notes - ----- - If both positive and negative infinity are present, the sum will be Not - A Number (NaN). - - Numpy integer arithmetic is modular. If the size of a sum exceeds the - size of an integer accumulator, its value will wrap around and the - result will be incorrect. Specifying ``dtype=double`` can alleviate - that problem. - - Examples - -------- - >>> np.nansum(1) - 1 - >>> np.nansum([1]) - 1 - >>> np.nansum([1, np.nan]) - 1.0 - >>> a = np.array([[1, 1], [1, np.nan]]) - >>> np.nansum(a) - 3.0 - >>> np.nansum(a, axis=0) - array([ 2., 1.]) - >>> np.nansum([1, np.nan, np.inf]) - inf - >>> np.nansum([1, np.nan, np.NINF]) - -inf - >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present - nan - - """ - a, mask = _replace_nan(a, 0) - return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - -def nanmean(a, axis=None, dtype=None, out=None, keepdims=False): - """ - Compute the arithmetic mean along the specified axis, ignoring NaNs. - - Returns the average of the array elements. The average is taken over - the flattened array by default, otherwise over the specified axis. - `float64` intermediate and return values are used for integer inputs. - - For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. - - .. 
versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the means are computed. The default is to compute - the mean of the flattened array. - dtype : data-type, optional - Type to use in computing the mean. For integer inputs, the default - is `float64`; for inexact inputs, it is the same as the input - dtype. - out : ndarray, optional - Alternate output array in which to place the result. The default - is ``None``; if provided, it must have the same shape as the - expected output, but the type will be cast if necessary. See - `doc.ufuncs` for details. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. With this option, the result - will broadcast correctly against the original `arr`. - - Returns - ------- - m : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. Nan is - returned for slices that contain only NaNs. - - See Also - -------- - average : Weighted average - mean : Arithmetic mean taken while not ignoring NaNs - var, nanvar - - Notes - ----- - The arithmetic mean is the sum of the non-NaN elements along the axis - divided by the number of non-NaN elements. - - Note that for floating-point input, the mean is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for `float32`. Specifying a - higher-precision accumulator using the `dtype` keyword can alleviate - this issue. - - Examples - -------- - >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.nanmean(a) - 2.6666666666666665 - >>> np.nanmean(a, axis=0) - array([ 2., 4.]) - >>> np.nanmean(a, axis=1) - array([ 1., 3.5]) - - """ - arr, mask = _replace_nan(a, 0) - if mask is None: - return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - - if dtype is not None: - dtype = np.dtype(dtype) - if dtype is not None and not issubclass(dtype.type, np.inexact): - raise TypeError("If a is inexact, then dtype must be inexact") - if out is not None and not issubclass(out.dtype.type, np.inexact): - raise TypeError("If a is inexact, then out must be inexact") - - # The warning context speeds things up. - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims) - tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) - avg = _divide_by_count(tot, cnt, out=out) - - isbad = (cnt == 0) - if isbad.any(): - warnings.warn("Mean of empty slice", RuntimeWarning) - # NaN is the only possible bad value, so no further - # action is needed to handle bad results. - return avg - - -def _nanmedian1d(arr1d, overwrite_input=False): - """ - Private function for rank 1 arrays. Compute the median ignoring NaNs. 
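A hedged walk-through of the compaction step implemented below: non-NaN values from the tail are moved into the NaN slots, then the median is taken over the leading non-NaN block.

>>> x = np.array([np.nan, 2.0, np.nan, 4.0, 6.0])
>>> c = np.isnan(x); s = np.where(c)[0]      # NaNs at positions 0 and 2
>>> enonan = x[-s.size:][~c[-s.size:]]       # non-NaNs among the last 2
>>> x[s[:enonan.size]] = enonan
>>> np.median(x[:-s.size])
4.0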
- See nanmedian for parameter usage - """ - c = np.isnan(arr1d) - s = np.where(c)[0] - if s.size == arr1d.size: - warnings.warn("All-NaN slice encountered", RuntimeWarning) - return np.nan - elif s.size == 0: - return np.median(arr1d, overwrite_input=overwrite_input) - else: - if overwrite_input: - x = arr1d - else: - x = arr1d.copy() - # select non-nans at end of array - enonan = arr1d[-s.size:][~c[-s.size:]] - # fill nans in beginning of array with non-nans of end - x[s[:enonan.size]] = enonan - # slice nans away - return np.median(x[:-s.size], overwrite_input=True) - - -def _nanmedian(a, axis=None, out=None, overwrite_input=False): - """ - Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanmedian for parameter usage - - """ - if axis is None or a.ndim == 1: - part = a.ravel() - if out is None: - return _nanmedian1d(part, overwrite_input) - else: - out[...] = _nanmedian1d(part, overwrite_input) - return out - else: - # for small medians use sort + indexing which is still faster than - # apply_along_axis - if a.shape[axis] < 400: - return _nanmedian_small(a, axis, out, overwrite_input) - result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) - if out is not None: - out[...] = result - return result - -def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): - """ - sort + indexing median, faster for small medians along multiple dimensions - due to the high overhead of apply_along_axis - see nanmedian for parameter usage - """ - a = np.ma.masked_array(a, np.isnan(a)) - m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) - for i in range(np.count_nonzero(m.mask.ravel())): - warnings.warn("All-NaN slice encountered", RuntimeWarning) - if out is not None: - out[...] = m.filled(np.nan) - return out - return m.filled(np.nan) - -def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False): - """ - Compute the median along the specified axis, while ignoring NaNs. - - Returns the median of the array elements. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - axis : int, optional - Axis along which the medians are computed. The default (axis=None) - is to compute the median along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape and buffer length as the expected output, but the - type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array (a) for - calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. Note that, if `overwrite_input` is True and the input - is not already an ndarray, an error will be raised. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - - - Returns - ------- - median : ndarray - A new array holding the result. If the input contains integers, or - floats of smaller precision than 64, then the output data-type is - float64. 
Otherwise, the output data-type is the same as that of the - input. - - See Also - -------- - mean, median, percentile - - Notes - ----- - Given a vector V of length N, the median of V is the middle value of - a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is - odd. When N is even, it is the average of the two middle values of - ``V_sorted``. - - Examples - -------- - >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) - >>> a[0, 1] = np.nan - >>> a - array([[ 10., nan, 4.], - [ 3., 2., 1.]]) - >>> np.median(a) - nan - >>> np.nanmedian(a) - 3.0 - >>> np.nanmedian(a, axis=0) - array([ 6.5, 2., 2.5]) - >>> np.median(a, axis=1) - array([ 7., 2.]) - >>> b = a.copy() - >>> np.nanmedian(b, axis=1, overwrite_input=True) - array([ 7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.nanmedian(b, axis=None, overwrite_input=True) - 3.0 - >>> assert not np.all(a==b) - - """ - a = np.asanyarray(a) - # apply_along_axis in _nanmedian doesn't handle empty arrays well, - # so deal them upfront - if a.size == 0: - return np.nanmean(a, axis, out=out, keepdims=keepdims) - - r, k = _ureduce(a, func=_nanmedian, axis=axis, out=out, - overwrite_input=overwrite_input) - if keepdims: - return r.reshape(k) - else: - return r - - -def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=False): - """ - Compute the qth percentile of the data along the specified axis, while - ignoring nan values. - - Returns the qth percentile of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. - q : float in range of [0,100] (or sequence of floats) - Percentile to compute which must be between 0 and 100 inclusive. - axis : int or sequence of int, optional - Axis along which the percentiles are computed. The default (None) - is to compute the percentiles along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output, - but the type (of the output) will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array `a` for - calculations. The input array will be modified by the call to - percentile. This will save memory when you do not need to preserve - the contents of the input array. In this case you should not make - any assumptions about the content of the passed in array `a` after - this function completes -- treat it as undefined. Default is False. - Note that, if the `a` input is not already an array this parameter - will have no effect, `a` will be converted to an array internally - regardless of the value of this parameter. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to use, - when the desired quantile lies between two data points `i` and `j`: - * linear: `i + (j - i) * fraction`, where `fraction` is the - fractional part of the index surrounded by `i` and `j`. - * lower: `i`. - * higher: `j`. - * nearest: `i` or `j` whichever is nearest. - * midpoint: (`i` + `j`) / 2. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. 
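To ground the interpolation options listed above, a small hedged doctest (the kwarg is the 1.9-era spelling):

>>> v = np.array([1.0, 2.0, 3.0, 10.0])
>>> np.percentile(v, 50, interpolation='linear')   # 2 + 0.5 * (3 - 2)
2.5
>>> np.percentile(v, 50, interpolation='lower')
2.0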
- - - Returns - ------- - nanpercentile : scalar or ndarray - If a single percentile `q` is given and axis=None a scalar is - returned. If multiple percentiles `q` are given an array holding - the result is returned. The results are listed in the first axis. - (If `out` is specified, in which case that array is returned - instead). If the input contains integers, or floats of smaller - precision than 64, then the output data-type is float64. Otherwise, - the output data-type is the same as that of the input. - - See Also - -------- - nanmean, nanmedian, percentile, median, mean - - Notes - ----- - Given a vector V of length N, the q-th percentile of V is the q-th ranked - value in a sorted copy of V. The values and distances of the two - nearest neighbors as well as the `interpolation` parameter will - determine the percentile if the normalized ranking does not match q - exactly. This function is the same as the median if ``q=50``, the same - as the minimum if ``q=0``and the same as the maximum if ``q=100``. - - Examples - -------- - >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) - >>> a[0][1] = np.nan - >>> a - array([[ 10., nan, 4.], - [ 3., 2., 1.]]) - >>> np.percentile(a, 50) - nan - >>> np.nanpercentile(a, 50) - 3.5 - >>> np.nanpercentile(a, 50, axis=0) - array([[ 6.5, 4.5, 2.5]]) - >>> np.nanpercentile(a, 50, axis=1) - array([[ 7.], - [ 2.]]) - >>> m = np.nanpercentile(a, 50, axis=0) - >>> out = np.zeros_like(m) - >>> np.nanpercentile(a, 50, axis=0, out=m) - array([[ 6.5, 4.5, 2.5]]) - >>> m - array([[ 6.5, 4.5, 2.5]]) - >>> b = a.copy() - >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) - array([[ 7.], - [ 2.]]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.nanpercentile(b, 50, axis=None, overwrite_input=True) - array([ 3.5]) - - """ - - a = np.asanyarray(a) - q = np.asanyarray(q) - # apply_along_axis in _nanpercentile doesn't handle empty arrays well, - # so deal them upfront - if a.size == 0: - return np.nanmean(a, axis, out=out, keepdims=keepdims) - - r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out, - overwrite_input=overwrite_input, - interpolation=interpolation) - if keepdims: - if q.ndim == 0: - return r.reshape(k) - else: - return r.reshape([len(q)] + k) - else: - return r - - -def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=False): - """ - Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanpercentile for parameter usage - - """ - if axis is None: - part = a.ravel() - result = _nanpercentile1d(part, q, overwrite_input, interpolation) - else: - result = np.apply_along_axis(_nanpercentile1d, axis, a, q, - overwrite_input, interpolation) - - if out is not None: - out[...] = result - return result - - -def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'): - """ - Private function for rank 1 arrays. Compute percentile ignoring NaNs. 
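For context, the wrapper above dispatches through apply_along_axis, which maps a 1-D routine over every lane of the chosen axis; a tiny hedged illustration:

>>> a = np.array([[10.0, np.nan, 4.0], [3.0, 2.0, 1.0]])
>>> np.apply_along_axis(np.nanmax, 1, a)    # one 1-D reduction per row
array([ 10.,   3.])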
- See nanpercentile for parameter usage - - """ - c = np.isnan(arr1d) - s = np.where(c)[0] - if s.size == arr1d.size: - warnings.warn("All-NaN slice encountered", RuntimeWarning) - return np.nan - elif s.size == 0: - return np.percentile(arr1d, q, overwrite_input=overwrite_input, - interpolation=interpolation) - else: - if overwrite_input: - x = arr1d - else: - x = arr1d.copy() - # select non-nans at end of array - enonan = arr1d[-s.size:][~c[-s.size:]] - # fill nans in beginning of array with non-nans of end - x[s[:enonan.size]] = enonan - # slice nans away - return np.percentile(x[:-s.size], q, overwrite_input=True, - interpolation=interpolation) - - -def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): - """ - Compute the variance along the specified axis, while ignoring NaNs. - - Returns the variance of the array elements, a measure of the spread of - a distribution. The variance is computed for the flattened array by - default, otherwise over the specified axis. - - For all-NaN slices or slices with zero degrees of freedom, NaN is - returned and a `RuntimeWarning` is raised. - - .. versionadded:: 1.8.0 - - Parameters - ---------- - a : array_like - Array containing numbers whose variance is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the variance is computed. The default is to compute - the variance of the flattened array. - dtype : data-type, optional - Type to use in computing the variance. For arrays of integer type - the default is `float32`; for arrays of float types it is the same as - the array type. - out : ndarray, optional - Alternate output array in which to place the result. It must have - the same shape as the expected output, but the type is cast if - necessary. - ddof : int, optional - "Delta Degrees of Freedom": the divisor used in the calculation is - ``N - ddof``, where ``N`` represents the number of non-NaN - elements. By default `ddof` is zero. - keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - Returns - ------- - variance : ndarray, see dtype parameter above - If `out` is None, return a new array containing the variance, - otherwise return a reference to the output array. If ddof is >= the - number of non-NaN elements in a slice or the slice contains only - NaNs, then the result for that slice is NaN. - - See Also - -------- - std : Standard deviation - mean : Average - var : Variance while not ignoring NaNs - nanstd, nanmean - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - The variance is the average of the squared deviations from the mean, - i.e., ``var = mean(abs(x - x.mean())**2)``. - - The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. - If, however, `ddof` is specified, the divisor ``N - ddof`` is used - instead. In standard statistical practice, ``ddof=1`` provides an - unbiased estimator of the variance of a hypothetical infinite - population. ``ddof=0`` provides a maximum likelihood estimate of the - variance for normally distributed variables. - - Note that for complex numbers, the absolute value is taken before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the variance is computed using the same - precision the input has. 
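A compact hedged check of the ddof divisor described above:

>>> x = np.array([1.0, np.nan, 3.0])
>>> np.nanvar(x)            # divisor N = 2 (the non-NaN count)
1.0
>>> np.nanvar(x, ddof=1)    # divisor N - ddof = 1
2.0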
-
-
-def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
-    """
-    Compute the variance along the specified axis, while ignoring NaNs.
-
-    Returns the variance of the array elements, a measure of the spread of
-    a distribution. The variance is computed for the flattened array by
-    default, otherwise over the specified axis.
-
-    For all-NaN slices or slices with zero degrees of freedom, NaN is
-    returned and a `RuntimeWarning` is raised.
-
-    .. versionadded:: 1.8.0
-
-    Parameters
-    ----------
-    a : array_like
-        Array containing numbers whose variance is desired. If `a` is not an
-        array, a conversion is attempted.
-    axis : int, optional
-        Axis along which the variance is computed. The default is to compute
-        the variance of the flattened array.
-    dtype : data-type, optional
-        Type to use in computing the variance. For arrays of integer type
-        the default is `float64`; for arrays of float types it is the same as
-        the array type.
-    out : ndarray, optional
-        Alternate output array in which to place the result. It must have
-        the same shape as the expected output, but the type is cast if
-        necessary.
-    ddof : int, optional
-        "Delta Degrees of Freedom": the divisor used in the calculation is
-        ``N - ddof``, where ``N`` represents the number of non-NaN
-        elements. By default `ddof` is zero.
-    keepdims : bool, optional
-        If this is set to True, the axes which are reduced are left
-        in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `arr`.
-
-    Returns
-    -------
-    variance : ndarray, see dtype parameter above
-        If `out` is None, return a new array containing the variance,
-        otherwise return a reference to the output array. If ddof is >= the
-        number of non-NaN elements in a slice or the slice contains only
-        NaNs, then the result for that slice is NaN.
-
-    See Also
-    --------
-    std : Standard deviation
-    mean : Average
-    var : Variance while not ignoring NaNs
-    nanstd, nanmean
-    numpy.doc.ufuncs : Section "Output arguments"
-
-    Notes
-    -----
-    The variance is the average of the squared deviations from the mean,
-    i.e., ``var = mean(abs(x - x.mean())**2)``.
-
-    The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
-    If, however, `ddof` is specified, the divisor ``N - ddof`` is used
-    instead. In standard statistical practice, ``ddof=1`` provides an
-    unbiased estimator of the variance of a hypothetical infinite
-    population. ``ddof=0`` provides a maximum likelihood estimate of the
-    variance for normally distributed variables.
-
-    Note that for complex numbers, the absolute value is taken before
-    squaring, so that the result is always real and nonnegative.
-
-    For floating-point input, the variance is computed using the same
-    precision the input has. Depending on the input data, this can cause
-    the results to be inaccurate, especially for `float32` (see example
-    below). Specifying a higher-accuracy accumulator using the ``dtype``
-    keyword can alleviate this issue.
-
-    Examples
-    --------
-    >>> a = np.array([[1, np.nan], [3, 4]])
-    >>> np.nanvar(a)
-    1.5555555555555554
-    >>> np.nanvar(a, axis=0)
-    array([ 1.,  0.])
-    >>> np.nanvar(a, axis=1)
-    array([ 0.,  0.25])
-
-    """
-    arr, mask = _replace_nan(a, 0)
-    if mask is None:
-        return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
-                      keepdims=keepdims)
-
-    if dtype is not None:
-        dtype = np.dtype(dtype)
-    if dtype is not None and not issubclass(dtype.type, np.inexact):
-        raise TypeError("If a is inexact, then dtype must be inexact")
-    if out is not None and not issubclass(out.dtype.type, np.inexact):
-        raise TypeError("If a is inexact, then out must be inexact")
-
-    with warnings.catch_warnings():
-        warnings.simplefilter('ignore')
-
-        # Compute mean
-        cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=True)
-        avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=True)
-        avg = _divide_by_count(avg, cnt)
-
-        # Compute squared deviation from mean.
-        arr -= avg
-        arr = _copyto(arr, 0, mask)
-        if issubclass(arr.dtype.type, np.complexfloating):
-            sqr = np.multiply(arr, arr.conj(), out=arr).real
-        else:
-            sqr = np.multiply(arr, arr, out=arr)
-
-        # Compute variance.
-        var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
-        if var.ndim < cnt.ndim:
-            # Subclasses of ndarray may ignore keepdims, so check here.
-            cnt = cnt.squeeze(axis)
-        dof = cnt - ddof
-        var = _divide_by_count(var, dof)
-
-    isbad = (dof <= 0)
-    if np.any(isbad):
-        warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning)
-        # NaN, inf, or negative numbers are all possible bad
-        # values, so explicitly replace them with NaN.
-        var = _copyto(var, np.nan, isbad)
-    return var
-
-
-def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
-    """
-    Compute the standard deviation along the specified axis, while
-    ignoring NaNs.
-
-    Returns the standard deviation, a measure of the spread of a
-    distribution, of the non-NaN array elements. The standard deviation is
-    computed for the flattened array by default, otherwise over the
-    specified axis.
-
-    For all-NaN slices or slices with zero degrees of freedom, NaN is
-    returned and a `RuntimeWarning` is raised.
-
-    .. versionadded:: 1.8.0
-
-    Parameters
-    ----------
-    a : array_like
-        Calculate the standard deviation of the non-NaN values.
-    axis : int, optional
-        Axis along which the standard deviation is computed. The default is
-        to compute the standard deviation of the flattened array.
-    dtype : dtype, optional
-        Type to use in computing the standard deviation. For arrays of
-        integer type the default is float64, for arrays of float types it
-        is the same as the array type.
-    out : ndarray, optional
-        Alternative output array in which to place the result. It must have
-        the same shape as the expected output but the type (of the
-        calculated values) will be cast if necessary.
-    ddof : int, optional
-        Means Delta Degrees of Freedom. The divisor used in calculations
-        is ``N - ddof``, where ``N`` represents the number of non-NaN
-        elements. By default `ddof` is zero.
-    keepdims : bool, optional
-        If this is set to True, the axes which are reduced are left
-        in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `arr`.
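A minimal sketch of the masked computation that `nanvar` above performs, written out for a 1-D array; `x` here is an illustrative input, not part of this module (`nanstd`, defined here, then just takes the square root of this quantity):

    >>> x = np.array([1., np.nan, 3., 4.])
    >>> mask = np.isnan(x)
    >>> n = (~mask).sum()                         # number of non-NaN elements
    >>> avg = np.where(mask, 0, x).sum() / n      # mean over valid entries only
    >>> var = np.where(mask, 0, (x - avg)**2).sum() / n   # ddof=0 divisor is N
    >>> np.isclose(var, np.nanvar(x))
    True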
- - Returns - ------- - standard_deviation : ndarray, see dtype parameter above. - If `out` is None, return a new array containing the standard - deviation, otherwise return a reference to the output array. If - ddof is >= the number of non-NaN elements in a slice or the slice - contains only NaNs, then the result for that slice is NaN. - - See Also - -------- - var, mean, std - nanvar, nanmean - numpy.doc.ufuncs : Section "Output arguments" - - Notes - ----- - The standard deviation is the square root of the average of the squared - deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. - - The average squared deviation is normally calculated as - ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is - specified, the divisor ``N - ddof`` is used instead. In standard - statistical practice, ``ddof=1`` provides an unbiased estimator of the - variance of the infinite population. ``ddof=0`` provides a maximum - likelihood estimate of the variance for normally distributed variables. - The standard deviation computed in this function is the square root of - the estimated variance, so even with ``ddof=1``, it will not be an - unbiased estimate of the standard deviation per se. - - Note that, for complex numbers, `std` takes the absolute value before - squaring, so that the result is always real and nonnegative. - - For floating-point input, the *std* is computed using the same - precision the input has. Depending on the input data, this can cause - the results to be inaccurate, especially for float32 (see example - below). Specifying a higher-accuracy accumulator using the `dtype` - keyword can alleviate this issue. - - Examples - -------- - >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.nanstd(a) - 1.247219128924647 - >>> np.nanstd(a, axis=0) - array([ 1., 0.]) - >>> np.nanstd(a, axis=1) - array([ 0., 0.5]) - - """ - var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) - if isinstance(var, np.ndarray): - std = np.sqrt(var, out=var) - else: - std = var.dtype.type(np.sqrt(var)) - return std diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/npyio.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/npyio.py deleted file mode 100644 index 138b75510906f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/npyio.py +++ /dev/null @@ -1,1912 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import os -import re -import itertools -import warnings -import weakref -from operator import itemgetter - -import numpy as np -from . import format -from ._datasource import DataSource -from ._compiled_base import packbits, unpackbits -from ._iotools import ( - LineSplitter, NameValidator, StringConverter, ConverterError, - ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields, - flatten_dtype, easy_dtype, _bytes_to_name - ) - -from numpy.compat import ( - asbytes, asstr, asbytes_nested, bytes, basestring, unicode - ) - -if sys.version_info[0] >= 3: - import pickle -else: - import cPickle as pickle - from future_builtins import map - -loads = pickle.loads - -__all__ = [ - 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', - 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', - 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' - ] - - -def seek_gzip_factory(f): - """Use this factory to produce the class so that we can do a lazy - import on gzip. 
-
-    """
-    import gzip
-
-    class GzipFile(gzip.GzipFile):
-
-        def seek(self, offset, whence=0):
-            # figure out new position (we can only seek forwards)
-            if whence == 1:
-                offset = self.offset + offset
-
-            if whence not in [0, 1]:
-                raise IOError("Illegal argument")
-
-            if offset < self.offset:
-                # for negative seek, rewind and do positive seek
-                self.rewind()
-            count = offset - self.offset
-            for i in range(count // 1024):
-                self.read(1024)
-            self.read(count % 1024)
-
-        def tell(self):
-            return self.offset
-
-    if isinstance(f, str):
-        f = GzipFile(f)
-    elif isinstance(f, gzip.GzipFile):
-        # cast to our GzipFile if it's already a gzip.GzipFile
-
-        try:
-            name = f.name
-        except AttributeError:
-            # Backward compatibility for <= 2.5
-            name = f.filename
-        mode = f.mode
-
-        f = GzipFile(fileobj=f.fileobj, filename=name)
-        f.mode = mode
-
-    return f
-
-
-class BagObj(object):
-    """
-    BagObj(obj)
-
-    Convert attribute look-ups to getitems on the object passed in.
-
-    Parameters
-    ----------
-    obj : class instance
-        Object on which attribute look-up is performed.
-
-    Examples
-    --------
-    >>> from numpy.lib.npyio import BagObj as BO
-    >>> class BagDemo(object):
-    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
-    ...                                 # will call this method when any
-    ...                                 # attribute look-up is required
-    ...         result = "Doesn't matter what you want, "
-    ...         return result + "you're gonna get this"
-    ...
-    >>> demo_obj = BagDemo()
-    >>> bagobj = BO(demo_obj)
-    >>> bagobj.hello_there
-    "Doesn't matter what you want, you're gonna get this"
-    >>> bagobj.I_can_be_anything
-    "Doesn't matter what you want, you're gonna get this"
-
-    """
-
-    def __init__(self, obj):
-        # Use weakref to make NpzFile objects collectable by refcount
-        self._obj = weakref.proxy(obj)
-
-    def __getattribute__(self, key):
-        try:
-            return object.__getattribute__(self, '_obj')[key]
-        except KeyError:
-            raise AttributeError(key)
-
-
-def zipfile_factory(*args, **kwargs):
-    import zipfile
-    kwargs['allowZip64'] = True
-    return zipfile.ZipFile(*args, **kwargs)
-
-
-class NpzFile(object):
-    """
-    NpzFile(fid)
-
-    A dictionary-like object with lazy-loading of files in the zipped
-    archive provided on construction.
-
-    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
-    format. It assumes that files in the archive have a ``.npy`` extension,
-    other files are ignored.
-
-    The arrays and file strings are lazily loaded on either
-    getitem access using ``obj['key']`` or attribute lookup using
-    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
-    be obtained with ``obj.files`` and the ZipFile object itself using
-    ``obj.zip``.
-
-    Attributes
-    ----------
-    files : list of str
-        List of all files in the archive with a ``.npy`` extension.
-    zip : ZipFile instance
-        The ZipFile object initialized with the zipped archive.
-    f : BagObj instance
-        An object on which attribute access can be performed as an
-        alternative to getitem access on the `NpzFile` instance itself.
-
-    Parameters
-    ----------
-    fid : file or str
-        The zipped archive to open. This is either a file-like object
-        or a string containing the path to the archive.
-    own_fid : bool, optional
-        Whether NpzFile should close the file handle.
-        Requires that `fid` is a file-like object.
-
-    Examples
-    --------
-    >>> from tempfile import TemporaryFile
-    >>> outfile = TemporaryFile()
-    >>> x = np.arange(10)
-    >>> y = np.sin(x)
-    >>> np.savez(outfile, x=x, y=y)
-    >>> outfile.seek(0)
-
-    >>> npz = np.load(outfile)
-    >>> isinstance(npz, np.lib.npyio.NpzFile)
-    True
-    >>> npz.files
-    ['y', 'x']
-    >>> npz['x']  # getitem access
-    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-    >>> npz.f.x  # attribute lookup
-    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-
-    """
-
-    def __init__(self, fid, own_fid=False):
-        # Import is postponed to here since zipfile depends on gzip, an
-        # optional component of the so-called standard library.
-        _zip = zipfile_factory(fid)
-        self._files = _zip.namelist()
-        self.files = []
-        for x in self._files:
-            if x.endswith('.npy'):
-                self.files.append(x[:-4])
-            else:
-                self.files.append(x)
-        self.zip = _zip
-        self.f = BagObj(self)
-        if own_fid:
-            self.fid = fid
-        else:
-            self.fid = None
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        self.close()
-
-    def close(self):
-        """
-        Close the file.
-
-        """
-        if self.zip is not None:
-            self.zip.close()
-            self.zip = None
-        if self.fid is not None:
-            self.fid.close()
-            self.fid = None
-        self.f = None  # break reference cycle
-
-    def __del__(self):
-        self.close()
-
-    def __getitem__(self, key):
-        # FIXME: This seems like it will copy strings around
-        #   more than is strictly necessary. The zipfile
-        #   will read the string and then
-        #   the format.read_array will copy the string
-        #   to another place in memory.
-        #   It would be better if the zipfile could read
-        #   (or at least uncompress) the data
-        #   directly into the array memory.
-        member = 0
-        if key in self._files:
-            member = 1
-        elif key in self.files:
-            member = 1
-            key += '.npy'
-        if member:
-            bytes = self.zip.open(key)
-            magic = bytes.read(len(format.MAGIC_PREFIX))
-            bytes.close()
-            if magic == format.MAGIC_PREFIX:
-                bytes = self.zip.open(key)
-                return format.read_array(bytes)
-            else:
-                return self.zip.read(key)
-        else:
-            raise KeyError("%s is not a file in the archive" % key)
-
-    def __iter__(self):
-        return iter(self.files)
-
-    def items(self):
-        """
-        Return a list of tuples, with each tuple (filename, array in file).
-
-        """
-        return [(f, self[f]) for f in self.files]
-
-    def iteritems(self):
-        """Generator that returns tuples (filename, array in file)."""
-        for f in self.files:
-            yield (f, self[f])
-
-    def keys(self):
-        """Return files in the archive with a ``.npy`` extension."""
-        return self.files
-
-    def iterkeys(self):
-        """Return an iterator over the files in the archive."""
-        return self.__iter__()
-
-    def __contains__(self, key):
-        return self.files.__contains__(key)
-
-
-def load(file, mmap_mode=None):
-    """
-    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
-
-    Parameters
-    ----------
-    file : file-like object or string
-        The file to read. File-like objects must support the
-        ``seek()`` and ``read()`` methods. Pickled files require that the
-        file-like object support the ``readline()`` method as well.
-    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
-        If not None, then memory-map the file, using the given mode (see
-        `numpy.memmap` for a detailed description of the modes). A
-        memory-mapped array is kept on disk. However, it can be accessed
-        and sliced like any ndarray. Memory mapping is especially useful
-        for accessing small fragments of large files without reading the
-        entire file into memory.
-
-    Returns
-    -------
-    result : array, tuple, dict, etc.
-        Data stored in the file.
For ``.npz`` files, the returned instance - of NpzFile class must be closed to avoid leaking file descriptors. - - Raises - ------ - IOError - If the input file does not exist or cannot be read. - - See Also - -------- - save, savez, savez_compressed, loadtxt - memmap : Create a memory-map to an array stored in a file on disk. - - Notes - ----- - - If the file contains pickle data, then whatever object is stored - in the pickle is returned. - - If the file is a ``.npy`` file, then a single array is returned. - - If the file is a ``.npz`` file, then a dictionary-like object is - returned, containing ``{filename: array}`` key-value pairs, one for - each file in the archive. - - If the file is a ``.npz`` file, the returned value supports the - context manager protocol in a similar fashion to the open function:: - - with load('foo.npz') as data: - a = data['a'] - - The underlying file descriptor is closed when exiting the 'with' - block. - - Examples - -------- - Store data to disk, and load it again: - - >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) - >>> np.load('/tmp/123.npy') - array([[1, 2, 3], - [4, 5, 6]]) - - Store compressed data to disk, and load it again: - - >>> a=np.array([[1, 2, 3], [4, 5, 6]]) - >>> b=np.array([1, 2]) - >>> np.savez('/tmp/123.npz', a=a, b=b) - >>> data = np.load('/tmp/123.npz') - >>> data['a'] - array([[1, 2, 3], - [4, 5, 6]]) - >>> data['b'] - array([1, 2]) - >>> data.close() - - Mem-map the stored array, and then access the second row - directly from disk: - - >>> X = np.load('/tmp/123.npy', mmap_mode='r') - >>> X[1, :] - memmap([4, 5, 6]) - - """ - import gzip - - own_fid = False - if isinstance(file, basestring): - fid = open(file, "rb") - own_fid = True - elif isinstance(file, gzip.GzipFile): - fid = seek_gzip_factory(file) - else: - fid = file - - try: - # Code to distinguish from NumPy binary files and pickles. - _ZIP_PREFIX = asbytes('PK\x03\x04') - N = len(format.MAGIC_PREFIX) - magic = fid.read(N) - fid.seek(-N, 1) # back-up - if magic.startswith(_ZIP_PREFIX): - # zip-file (assume .npz) - # Transfer file ownership to NpzFile - tmp = own_fid - own_fid = False - return NpzFile(fid, own_fid=tmp) - elif magic == format.MAGIC_PREFIX: - # .npy file - if mmap_mode: - return format.open_memmap(file, mode=mmap_mode) - else: - return format.read_array(fid) - else: - # Try a pickle - try: - return pickle.load(fid) - except: - raise IOError( - "Failed to interpret file %s as a pickle" % repr(file)) - finally: - if own_fid: - fid.close() - - -def save(file, arr): - """ - Save an array to a binary file in NumPy ``.npy`` format. - - Parameters - ---------- - file : file or str - File or filename to which the data is saved. If file is a file-object, - then the filename is unchanged. If file is a string, a ``.npy`` - extension will be appended to the file name if it does not already - have one. - arr : array_like - Array data to be saved. - - See Also - -------- - savez : Save several arrays into a ``.npz`` archive - savetxt, load - - Notes - ----- - For a description of the ``.npy`` format, see `format`. 
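An aside on the dispatch in `load` above: the branch is chosen by peeking at the leading magic bytes and rewinding. A minimal sketch, assuming an open binary file handle `fid`; the helper name `classify` is illustrative, not part of this module:

    from numpy.lib import format

    def classify(fid):
        # Peek at the magic bytes, then rewind, exactly as load() does.
        n = len(format.MAGIC_PREFIX)
        magic = fid.read(n)
        fid.seek(-n, 1)
        if magic.startswith(b'PK\x03\x04'):
            return 'zip archive: treat as .npz'
        elif magic == format.MAGIC_PREFIX:
            return '.npy: read_array or open_memmap'
        else:
            return 'unknown: fall back to pickle'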
- - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - - >>> x = np.arange(10) - >>> np.save(outfile, x) - - >>> outfile.seek(0) # Only needed here to simulate closing & reopening file - >>> np.load(outfile) - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - """ - own_fid = False - if isinstance(file, basestring): - if not file.endswith('.npy'): - file = file + '.npy' - fid = open(file, "wb") - own_fid = True - else: - fid = file - - try: - arr = np.asanyarray(arr) - format.write_array(fid, arr) - finally: - if own_fid: - fid.close() - - -def savez(file, *args, **kwds): - """ - Save several arrays into a single file in uncompressed ``.npz`` format. - - If arguments are passed in with no keywords, the corresponding variable - names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword - arguments are given, the corresponding variable names, in the ``.npz`` - file will match the keyword names. - - Parameters - ---------- - file : str or file - Either the file name (string) or an open file (file-like object) - where the data will be saved. If file is a string, the ``.npz`` - extension will be appended to the file name if it is not already there. - args : Arguments, optional - Arrays to save to the file. Since it is not possible for Python to - know the names of the arrays outside `savez`, the arrays will be saved - with names "arr_0", "arr_1", and so on. These arguments can be any - expression. - kwds : Keyword arguments, optional - Arrays to save to the file. Arrays will be saved in the file with the - keyword names. - - Returns - ------- - None - - See Also - -------- - save : Save a single array to a binary file in NumPy format. - savetxt : Save an array to a file as plain text. - savez_compressed : Save several arrays into a compressed ``.npz`` archive - - Notes - ----- - The ``.npz`` file format is a zipped archive of files named after the - variables they contain. The archive is not compressed and each file - in the archive contains one variable in ``.npy`` format. For a - description of the ``.npy`` format, see `format`. - - When opening the saved ``.npz`` file with `load` a `NpzFile` object is - returned. This is a dictionary-like object which can be queried for - its list of arrays (with the ``.files`` attribute), and for the arrays - themselves. - - Examples - -------- - >>> from tempfile import TemporaryFile - >>> outfile = TemporaryFile() - >>> x = np.arange(10) - >>> y = np.sin(x) - - Using `savez` with \\*args, the arrays are saved with default names. - - >>> np.savez(outfile, x, y) - >>> outfile.seek(0) # Only needed here to simulate closing & reopening file - >>> npzfile = np.load(outfile) - >>> npzfile.files - ['arr_1', 'arr_0'] - >>> npzfile['arr_0'] - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - Using `savez` with \\**kwds, the arrays are saved with the keyword names. - - >>> outfile = TemporaryFile() - >>> np.savez(outfile, x=x, y=y) - >>> outfile.seek(0) - >>> npzfile = np.load(outfile) - >>> npzfile.files - ['y', 'x'] - >>> npzfile['x'] - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - """ - _savez(file, args, kwds, False) - - -def savez_compressed(file, *args, **kwds): - """ - Save several arrays into a single file in compressed ``.npz`` format. - - If keyword arguments are given, then filenames are taken from the keywords. - If arguments are passed in with no keywords, then stored file names are - arr_0, arr_1, etc. - - Parameters - ---------- - file : str - File name of ``.npz`` file. - args : Arguments - Function arguments. 
- kwds : Keyword arguments - Keywords. - - See Also - -------- - numpy.savez : Save several arrays into an uncompressed ``.npz`` file format - numpy.load : Load the files created by savez_compressed. - - """ - _savez(file, args, kwds, True) - - -def _savez(file, args, kwds, compress): - # Import is postponed to here since zipfile depends on gzip, an optional - # component of the so-called standard library. - import zipfile - # Import deferred for startup time improvement - import tempfile - - if isinstance(file, basestring): - if not file.endswith('.npz'): - file = file + '.npz' - - namedict = kwds - for i, val in enumerate(args): - key = 'arr_%d' % i - if key in namedict.keys(): - raise ValueError( - "Cannot use un-named variables and keyword %s" % key) - namedict[key] = val - - if compress: - compression = zipfile.ZIP_DEFLATED - else: - compression = zipfile.ZIP_STORED - - zipf = zipfile_factory(file, mode="w", compression=compression) - - # Stage arrays in a temporary file on disk, before writing to zip. - fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy') - os.close(fd) - try: - for key, val in namedict.items(): - fname = key + '.npy' - fid = open(tmpfile, 'wb') - try: - format.write_array(fid, np.asanyarray(val)) - fid.close() - fid = None - zipf.write(tmpfile, arcname=fname) - finally: - if fid: - fid.close() - finally: - os.remove(tmpfile) - - zipf.close() - - -def _getconv(dtype): - """ Find the correct dtype converter. Adapted from matplotlib """ - typ = dtype.type - if issubclass(typ, np.bool_): - return lambda x: bool(int(x)) - if issubclass(typ, np.uint64): - return np.uint64 - if issubclass(typ, np.int64): - return np.int64 - if issubclass(typ, np.integer): - return lambda x: int(float(x)) - elif issubclass(typ, np.floating): - return float - elif issubclass(typ, np.complex): - return complex - elif issubclass(typ, np.bytes_): - return bytes - else: - return str - - -def loadtxt(fname, dtype=float, comments='#', delimiter=None, - converters=None, skiprows=0, usecols=None, unpack=False, - ndmin=0): - """ - Load data from a text file. - - Each row in the text file must have the same number of values. - - Parameters - ---------- - fname : file or str - File, filename, or generator to read. If the filename extension is - ``.gz`` or ``.bz2``, the file is first decompressed. Note that - generators should return byte strings for Python 3k. - dtype : data-type, optional - Data-type of the resulting array; default: float. If this is a - record data-type, the resulting array will be 1-dimensional, and - each row will be interpreted as an element of the array. In this - case, the number of columns used must match the number of fields in - the data-type. - comments : str, optional - The character used to indicate the start of a comment; - default: '#'. - delimiter : str, optional - The string used to separate values. By default, this is any - whitespace. - converters : dict, optional - A dictionary mapping column number to a function that will convert - that column to a float. E.g., if column 0 is a date string: - ``converters = {0: datestr2num}``. Converters can also be used to - provide a default value for missing data (but see also `genfromtxt`): - ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. - skiprows : int, optional - Skip the first `skiprows` lines; default: 0. - usecols : sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. 
-        The default, None, results in all columns being read.
-    unpack : bool, optional
-        If True, the returned array is transposed, so that arguments may be
-        unpacked using ``x, y, z = loadtxt(...)``. When used with a record
-        data-type, arrays are returned for each field. Default is False.
-    ndmin : int, optional
-        The returned array will have at least `ndmin` dimensions.
-        Otherwise mono-dimensional axes will be squeezed.
-        Legal values: 0 (default), 1 or 2.
-
-        .. versionadded:: 1.6.0
-
-    Returns
-    -------
-    out : ndarray
-        Data read from the text file.
-
-    See Also
-    --------
-    load, fromstring, fromregex
-    genfromtxt : Load data with missing values handled as specified.
-    scipy.io.loadmat : reads MATLAB data files
-
-    Notes
-    -----
-    This function aims to be a fast reader for simply formatted files. The
-    `genfromtxt` function provides more sophisticated handling of, e.g.,
-    lines with missing values.
-
-    Examples
-    --------
-    >>> from StringIO import StringIO   # StringIO behaves like a file object
-    >>> c = StringIO("0 1\\n2 3")
-    >>> np.loadtxt(c)
-    array([[ 0.,  1.],
-           [ 2.,  3.]])
-
-    >>> d = StringIO("M 21 72\\nF 35 58")
-    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
-    ...                      'formats': ('S1', 'i4', 'f4')})
-    array([('M', 21, 72.0), ('F', 35, 58.0)],
-          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
-
-    >>> c = StringIO("1,0,2\\n3,0,4")
-    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
-    >>> x
-    array([ 1.,  3.])
-    >>> y
-    array([ 2.,  4.])
-
-    """
-    # Type conversions for Py3 convenience
-    comments = asbytes(comments)
-    user_converters = converters
-    if delimiter is not None:
-        delimiter = asbytes(delimiter)
-    if usecols is not None:
-        usecols = list(usecols)
-
-    fown = False
-    try:
-        if _is_string_like(fname):
-            fown = True
-            if fname.endswith('.gz'):
-                fh = iter(seek_gzip_factory(fname))
-            elif fname.endswith('.bz2'):
-                import bz2
-                fh = iter(bz2.BZ2File(fname))
-            elif sys.version_info[0] == 2:
-                fh = iter(open(fname, 'U'))
-            else:
-                fh = iter(open(fname))
-        else:
-            fh = iter(fname)
-    except TypeError:
-        raise ValueError('fname must be a string, file handle, or generator')
-    X = []
-
-    def flatten_dtype(dt):
-        """Unpack a structured data-type, and produce re-packing info."""
-        if dt.names is None:
-            # If the dtype is flattened, return.
-            # If the dtype has a shape, the dtype occurs
-            # in the list more than once.
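            # For example, a subarray dtype np.dtype(('f8', (2,))) has
            # dt.names None and dt.shape (2,), so this branch yields two
            # copies of the base dtype with packing [(2, list)]: two parsed
            # tokens later re-pack into a single field.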
- shape = dt.shape - if len(shape) == 0: - return ([dt.base], None) - else: - packing = [(shape[-1], list)] - if len(shape) > 1: - for dim in dt.shape[-2::-1]: - packing = [(dim*packing[0][0], packing*dim)] - return ([dt.base] * int(np.prod(dt.shape)), packing) - else: - types = [] - packing = [] - for field in dt.names: - tp, bytes = dt.fields[field] - flat_dt, flat_packing = flatten_dtype(tp) - types.extend(flat_dt) - # Avoid extra nesting for subarrays - if len(tp.shape) > 0: - packing.extend(flat_packing) - else: - packing.append((len(flat_dt), flat_packing)) - return (types, packing) - - def pack_items(items, packing): - """Pack items into nested lists based on re-packing info.""" - if packing is None: - return items[0] - elif packing is tuple: - return tuple(items) - elif packing is list: - return list(items) - else: - start = 0 - ret = [] - for length, subpacking in packing: - ret.append(pack_items(items[start:start+length], subpacking)) - start += length - return tuple(ret) - - def split_line(line): - """Chop off comments, strip, and split at delimiter.""" - line = asbytes(line).split(comments)[0].strip(asbytes('\r\n')) - if line: - return line.split(delimiter) - else: - return [] - - try: - # Make sure we're dealing with a proper dtype - dtype = np.dtype(dtype) - defconv = _getconv(dtype) - - # Skip the first `skiprows` lines - for i in range(skiprows): - next(fh) - - # Read until we find a line with some values, and use - # it to estimate the number of columns, N. - first_vals = None - try: - while not first_vals: - first_line = next(fh) - first_vals = split_line(first_line) - except StopIteration: - # End of lines reached - first_line = '' - first_vals = [] - warnings.warn('loadtxt: Empty input file: "%s"' % fname) - N = len(usecols or first_vals) - - dtype_types, packing = flatten_dtype(dtype) - if len(dtype_types) > 1: - # We're dealing with a structured array, each field of - # the dtype matches a column - converters = [_getconv(dt) for dt in dtype_types] - else: - # All fields have the same dtype - converters = [defconv for i in range(N)] - if N > 1: - packing = [(N, tuple)] - - # By preference, use the converters specified by the user - for i, conv in (user_converters or {}).items(): - if usecols: - try: - i = usecols.index(i) - except ValueError: - # Unused converter specified - continue - converters[i] = conv - - # Parse each line, including the first - for i, line in enumerate(itertools.chain([first_line], fh)): - vals = split_line(line) - if len(vals) == 0: - continue - if usecols: - vals = [vals[i] for i in usecols] - if len(vals) != N: - line_num = i + skiprows + 1 - raise ValueError("Wrong number of columns at line %d" - % line_num) - - # Convert each value according to its column and store - items = [conv(val) for (conv, val) in zip(converters, vals)] - # Then pack it according to the dtype's nesting - items = pack_items(items, packing) - X.append(items) - finally: - if fown: - fh.close() - - X = np.array(X, dtype) - # Multicolumn data are returned with shape (1, N, M), i.e. - # (1, 1, M) for a single row - remove the singleton dimension there - if X.ndim == 3 and X.shape[:2] == (1, 1): - X.shape = (1, -1) - - # Verify that the array has at least dimensions `ndmin`. 
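    # Net effect of the ndmin handling below: a file holding the single
    # row "1 2 3" parses to shape (1, 3); ndmin=0 squeezes it to (3,),
    # ndmin=1 keeps (3,), and ndmin=2 preserves the 2-D shape (1, 3).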
- # Check correctness of the values of `ndmin` - if ndmin not in [0, 1, 2]: - raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) - # Tweak the size and shape of the arrays - remove extraneous dimensions - if X.ndim > ndmin: - X = np.squeeze(X) - # and ensure we have the minimum number of dimensions asked for - # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 - if X.ndim < ndmin: - if ndmin == 1: - X = np.atleast_1d(X) - elif ndmin == 2: - X = np.atleast_2d(X).T - - if unpack: - if len(dtype_types) > 1: - # For structured arrays, return an array for each field. - return [X[field] for field in dtype.names] - else: - return X.T - else: - return X - - -def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', - footer='', comments='# '): - """ - Save an array to a text file. - - Parameters - ---------- - fname : filename or file handle - If the filename ends in ``.gz``, the file is automatically saved in - compressed gzip format. `loadtxt` understands gzipped files - transparently. - X : array_like - Data to be saved to a text file. - fmt : str or sequence of strs, optional - A single format (%10.5f), a sequence of formats, or a - multi-format string, e.g. 'Iteration %d -- %10.5f', in which - case `delimiter` is ignored. For complex `X`, the legal options - for `fmt` are: - a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted - like `' (%s+%sj)' % (fmt, fmt)` - b) a full string specifying every real and imaginary part, e.g. - `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns - c) a list of specifiers, one per column - in this case, the real - and imaginary part must have separate specifiers, - e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns - delimiter : str, optional - String or character separating columns. - newline : str, optional - String or character separating lines. - - .. versionadded:: 1.5.0 - header : str, optional - String that will be written at the beginning of the file. - - .. versionadded:: 1.7.0 - footer : str, optional - String that will be written at the end of the file. - - .. versionadded:: 1.7.0 - comments : str, optional - String that will be prepended to the ``header`` and ``footer`` strings, - to mark them as comments. Default: '# ', as expected by e.g. - ``numpy.loadtxt``. - - .. versionadded:: 1.7.0 - - - See Also - -------- - save : Save an array to a binary file in NumPy ``.npy`` format - savez : Save several arrays into an uncompressed ``.npz`` archive - savez_compressed : Save several arrays into a compressed ``.npz`` archive - - Notes - ----- - Further explanation of the `fmt` parameter - (``%[flag]width[.precision]specifier``): - - flags: - ``-`` : left justify - - ``+`` : Forces to precede result with + or -. - - ``0`` : Left pad the number with zeros instead of space (see width). - - width: - Minimum number of characters to be printed. The value is not truncated - if it has more characters. - - precision: - - For integer specifiers (eg. ``d,i,o,x``), the minimum number of - digits. - - For ``e, E`` and ``f`` specifiers, the number of digits to print - after the decimal point. - - For ``g`` and ``G``, the maximum number of significant digits. - - For ``s``, the maximum number of characters. - - specifiers: - ``c`` : character - - ``d`` or ``i`` : signed decimal integer - - ``e`` or ``E`` : scientific notation with ``e`` or ``E``. 
-
-        ``f`` : decimal floating point
-
-        ``g,G`` : use the shorter of ``e,E`` or ``f``
-
-        ``o`` : signed octal
-
-        ``s`` : string of characters
-
-        ``u`` : unsigned decimal integer
-
-        ``x,X`` : unsigned hexadecimal integer
-
-    This explanation of ``fmt`` is not complete, for an exhaustive
-    specification see [1]_.
-
-    References
-    ----------
-    .. [1] `Format Specification Mini-Language
-           <http://docs.python.org/library/string.html#format-specification-mini-language>`_,
-           Python Documentation.
-
-    Examples
-    --------
-    >>> x = y = z = np.arange(0.0,5.0,1.0)
-    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
-    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
-    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
-
-    """
-
-    # Py3 conversions first
-    if isinstance(fmt, bytes):
-        fmt = asstr(fmt)
-    delimiter = asstr(delimiter)
-
-    own_fh = False
-    if _is_string_like(fname):
-        own_fh = True
-        if fname.endswith('.gz'):
-            import gzip
-            fh = gzip.open(fname, 'wb')
-        else:
-            if sys.version_info[0] >= 3:
-                fh = open(fname, 'wb')
-            else:
-                fh = open(fname, 'w')
-    elif hasattr(fname, 'write'):
-        fh = fname
-    else:
-        raise ValueError('fname must be a string or file handle')
-
-    try:
-        X = np.asarray(X)
-
-        # Handle 1-dimensional arrays
-        if X.ndim == 1:
-            # Common case -- 1d array of numbers
-            if X.dtype.names is None:
-                X = np.atleast_2d(X).T
-                ncol = 1
-
-            # Complex dtype -- each field indicates a separate column
-            else:
-                ncol = len(X.dtype.descr)
-        else:
-            ncol = X.shape[1]
-
-        iscomplex_X = np.iscomplexobj(X)
-        # `fmt` can be a string with multiple insertion points or a
-        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
-        if type(fmt) in (list, tuple):
-            if len(fmt) != ncol:
-                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
-            format = asstr(delimiter).join(map(asstr, fmt))
-        elif isinstance(fmt, str):
-            n_fmt_chars = fmt.count('%')
-            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
-            if n_fmt_chars == 1:
-                if iscomplex_X:
-                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
-                else:
-                    fmt = [fmt, ] * ncol
-                format = delimiter.join(fmt)
-            elif iscomplex_X and n_fmt_chars != (2 * ncol):
-                raise error
-            elif ((not iscomplex_X) and n_fmt_chars != ncol):
-                raise error
-            else:
-                format = fmt
-        else:
-            raise ValueError('invalid fmt: %r' % (fmt,))
-
-        if len(header) > 0:
-            header = header.replace('\n', '\n' + comments)
-            fh.write(asbytes(comments + header + newline))
-        if iscomplex_X:
-            for row in X:
-                row2 = []
-                for number in row:
-                    row2.append(number.real)
-                    row2.append(number.imag)
-                fh.write(asbytes(format % tuple(row2) + newline))
-        else:
-            for row in X:
-                fh.write(asbytes(format % tuple(row) + newline))
-        if len(footer) > 0:
-            footer = footer.replace('\n', '\n' + comments)
-            fh.write(asbytes(comments + footer + newline))
-    finally:
-        if own_fh:
-            fh.close()
-
-
-def fromregex(file, regexp, dtype):
-    """
-    Construct an array from a text file, using regular expression parsing.
-
-    The returned array is always a structured array, and is constructed from
-    all matches of the regular expression in the file. Groups in the regular
-    expression are converted to fields of the structured array.
-
-    Parameters
-    ----------
-    file : str or file
-        File name or file object to read.
-    regexp : str or regexp
-        Regular expression used to parse the file.
-        Groups in the regular expression correspond to fields in the dtype.
-    dtype : dtype or list of dtypes
-        Dtype for the structured array.
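An aside on the single-format expansion in `savetxt` above, with illustrative values: one '%' specifier is replicated across the `ncol` columns, and for complex input it is first wrapped into a real/imaginary pair:

    >>> fmt, ncol, delimiter = '%.3f', 3, ','
    >>> delimiter.join([fmt] * ncol)              # real-valued branch
    '%.3f,%.3f,%.3f'
    >>> ' (%s+%sj)' % (fmt, fmt)                  # complex-valued wrapper
    ' (%.3f+%.3fj)'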
-
-    Returns
-    -------
-    output : ndarray
-        The output array, containing the part of the content of `file` that
-        was matched by `regexp`. `output` is always a structured array.
-
-    Raises
-    ------
-    TypeError
-        When `dtype` is not a valid dtype for a structured array.
-
-    See Also
-    --------
-    fromstring, loadtxt
-
-    Notes
-    -----
-    Dtypes for structured arrays can be specified in several forms, but all
-    forms specify at least the data type and field name. For details see
-    `doc.structured_arrays`.
-
-    Examples
-    --------
-    >>> f = open('test.dat', 'w')
-    >>> f.write("1312 foo\\n1534  bar\\n444   qux")
-    >>> f.close()
-
-    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
-    >>> output = np.fromregex('test.dat', regexp,
-    ...                       [('num', np.int64), ('key', 'S3')])
-    >>> output
-    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
-          dtype=[('num', '<i8'), ('key', '|S3')])
-    >>> output['num']
-    array([1312, 1534, 444], dtype=int64)
-
-    """
-    own_fh = False
-    if not hasattr(file, "read"):
-        file = open(file, 'rb')
-        own_fh = True
-
-    try:
-        if not hasattr(regexp, 'match'):
-            regexp = re.compile(asbytes(regexp))
-        if not isinstance(dtype, np.dtype):
-            dtype = np.dtype(dtype)
-
-        seq = regexp.findall(file.read())
-        if seq and not isinstance(seq[0], tuple):
-            # Only one group is in the regexp.
-            # Create the new array as a single data-type and then
-            # re-interpret as a single-field structured array.
-            newdtype = np.dtype(dtype[dtype.names[0]])
-            output = np.array(seq, dtype=newdtype)
-            output.dtype = dtype
-        else:
-            output = np.array(seq, dtype=dtype)
-
-        return output
-    finally:
-        if own_fh:
-            file.close()
-
-
-#####--------------------------------------------------------------------------
-#---- --- ASCII functions ---
-#####--------------------------------------------------------------------------
-
-
-def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
-               skiprows=0, skip_header=0, skip_footer=0, converters=None,
-               missing='', missing_values=None, filling_values=None,
-               usecols=None, names=None,
-               excludelist=None, deletechars=None, replace_space='_',
-               autostrip=False, case_sensitive=True, defaultfmt="f%i",
-               unpack=None, usemask=False, loose=True, invalid_raise=True):
-    """
-    Load data from a text file, with missing values handled as specified.
-
-    Each line past the first `skip_header` lines is split at the `delimiter`
-    character, and characters following the `comments` character are discarded.
-
-    Parameters
-    ----------
-    fname : file or str
-        File, filename, or generator to read. If the filename extension is
-        `.gz` or `.bz2`, the file is first decompressed. Note that
-        generators must return byte strings in Python 3k.
-    dtype : dtype, optional
-        Data type of the resulting array.
-        If None, the dtypes will be determined by the contents of each
-        column, individually.
-    comments : str, optional
-        The character used to indicate the start of a comment.
-        All the characters occurring on a line after a comment are discarded.
-    delimiter : str, int, or sequence, optional
-        The string used to separate values. By default, any consecutive
-        whitespaces act as delimiter. An integer or sequence of integers
-        can also be provided as width(s) of each field.
-    skip_rows : int, optional
-        `skip_rows` was deprecated in numpy 1.5, and will be removed in
-        numpy 2.0. Please use `skip_header` instead.
-    skip_header : int, optional
-        The number of lines to skip at the beginning of the file.
-    skip_footer : int, optional
-        The number of lines to skip at the end of the file.
-    converters : variable, optional
-        The set of functions that convert the data of a column to a value.
-        The converters can also be used to provide a default value
-        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
-    missing : variable, optional
-        `missing` was deprecated in numpy 1.5, and will be removed in
-        numpy 2.0. Please use `missing_values` instead.
-    missing_values : variable, optional
-        The set of strings corresponding to missing data.
-    filling_values : variable, optional
-        The set of values to be used as default when the data are missing.
-    usecols : sequence, optional
-        Which columns to read, with 0 being the first. For example,
-        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
-    names : {None, True, str, sequence}, optional
-        If `names` is True, the field names are read from the first valid line
-        after the first `skip_header` lines.
-        If `names` is a sequence or a single-string of comma-separated names,
-        the names will be used to define the field names in a structured dtype.
-        If `names` is None, the names of the dtype fields will be used, if any.
-    excludelist : sequence, optional
-        A list of names to exclude. This list is appended to the default list
-        ['return','file','print']. Excluded names are appended an underscore:
-        for example, `file` would become `file_`.
-    deletechars : str, optional
-        A string combining invalid characters that must be deleted from the
-        names.
-    defaultfmt : str, optional
-        A format used to define default field names, such as "f%i" or "f_%02i".
-    autostrip : bool, optional
-        Whether to automatically strip white spaces from the variables.
-    replace_space : char, optional
-        Character(s) used in replacement of white spaces in the variables
-        names. By default, use a '_'.
-    case_sensitive : {True, False, 'upper', 'lower'}, optional
-        If True, field names are case sensitive.
-        If False or 'upper', field names are converted to upper case.
-        If 'lower', field names are converted to lower case.
-    unpack : bool, optional
-        If True, the returned array is transposed, so that arguments may be
-        unpacked using ``x, y, z = loadtxt(...)``
-    usemask : bool, optional
-        If True, return a masked array.
-        If False, return a regular array.
-    loose : bool, optional
-        If True, do not raise errors for invalid values.
-    invalid_raise : bool, optional
-        If True, an exception is raised if an inconsistency is detected in the
-        number of columns.
-        If False, a warning is emitted and the offending lines are skipped.
-
-    Returns
-    -------
-    out : ndarray
-        Data read from the text file. If `usemask` is True, this is a
-        masked array.
-
-    See Also
-    --------
-    numpy.loadtxt : equivalent function when no data is missing.
-
-    Notes
-    -----
-    * When spaces are used as delimiters, or when no delimiter has been given
-      as input, there should not be any missing data between two fields.
-    * When the variables are named (either by a flexible dtype or with
-      `names`), there must not be any header in the file (else a ValueError
-      exception is raised).
-    * Individual values are not stripped of spaces by default.
-      When using a custom converter, make sure the function does remove spaces.
-
-    References
-    ----------
-    .. [1] Numpy User Guide, section `I/O with Numpy
-           <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
-
-    Examples
-    --------
-    >>> from StringIO import StringIO
-    >>> import numpy as np
-
-    Comma delimited file with mixed dtype
-
-    >>> s = StringIO("1,1.3,abcde")
-    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
-    ... ('mystring','S5')], delimiter=",")
-    >>> data
-    array((1, 1.3, 'abcde'),
-          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
-
-    Using dtype = None
-
-    >>> s.seek(0) # needed for StringIO example only
-    >>> data = np.genfromtxt(s, dtype=None,
-    ... names = ['myint','myfloat','mystring'], delimiter=",")
-    >>> data
-    array((1, 1.3, 'abcde'),
-          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
-
-    Specifying dtype and names
-
-    >>> s.seek(0)
-    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
-    ... names=['myint','myfloat','mystring'], delimiter=",")
-    >>> data
-    array((1, 1.3, 'abcde'),
-          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
-
-    An example with fixed-width columns:
-
-    >>> s = StringIO("11.3abcde")
-    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
-    ...     delimiter=[1,3,5])
-    >>> data
-    array((1, 1.3, 'abcde'),
-          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
-
-    """
-        # If the dtype is not None, make sure we update it
-        if (dtype is not None) and (len(dtype) > nbcols):
-            descr = dtype.descr
-            dtype = np.dtype([descr[_] for _ in usecols])
-            names = list(dtype.names)
-        # If `names` is not None, update the names
-        elif (names is not None) and (len(names) > nbcols):
-            names = [names[_] for _ in usecols]
-    elif (names is not None) and (dtype is not None):
-        names = list(dtype.names)
-
-    # Process the missing values ...............................
-    # Rename missing_values for convenience
-    user_missing_values = missing_values or ()
-
-    # Define the list of missing_values (one column: one list)
-    missing_values = [list([asbytes('')]) for _ in range(nbcols)]
-
-    # We have a dictionary: process it field by field
-    if isinstance(user_missing_values, dict):
-        # Loop on the items
-        for (key, val) in user_missing_values.items():
-            # Is the key a string ?
-            if _is_string_like(key):
-                try:
-                    # Transform it into an integer
-                    key = names.index(key)
-                except ValueError:
-                    # We couldn't find it: the name must have been dropped
-                    continue
-            # Redefine the key as needed if it's a column number
-            if usecols:
-                try:
-                    key = usecols.index(key)
-                except ValueError:
-                    pass
-            # Transform the value as a list of string
-            if isinstance(val, (list, tuple)):
-                val = [str(_) for _ in val]
-            else:
-                val = [str(val), ]
-            # Add the value(s) to the current list of missing
-            if key is None:
-                # None acts as default
-                for miss in missing_values:
-                    miss.extend(val)
-            else:
-                missing_values[key].extend(val)
-    # We have a sequence : each item matches a column
-    elif isinstance(user_missing_values, (list, tuple)):
-        for (value, entry) in zip(user_missing_values, missing_values):
-            value = str(value)
-            if value not in entry:
-                entry.append(value)
-    # We have a string : apply it to all entries
-    elif isinstance(user_missing_values, bytes):
-        user_value = user_missing_values.split(asbytes(","))
-        for entry in missing_values:
-            entry.extend(user_value)
-    # We have something else: apply it to all entries
-    else:
-        for entry in missing_values:
-            entry.extend([str(user_missing_values)])
-
-    # Process the deprecated `missing`
-    if missing != asbytes(''):
-        warnings.warn(
-            "The use of `missing` is deprecated, it will be removed in "
-            "Numpy 2.0.\nPlease use `missing_values` instead.",
-            DeprecationWarning)
-        values = [str(_) for _ in missing.split(asbytes(","))]
-        for entry in missing_values:
-            entry.extend(values)
-
-    # Process the filling_values ...............................
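    # Like missing_values above, filling_values may arrive as a dict keyed
    # by column index or field name, as a sequence with one default per
    # column, or as a single scalar applied to every column.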
- # Rename the input for convenience - user_filling_values = filling_values or [] - # Define the default - filling_values = [None] * nbcols - # We have a dictionary : update each entry individually - if isinstance(user_filling_values, dict): - for (key, val) in user_filling_values.items(): - if _is_string_like(key): - try: - # Transform it into an integer - key = names.index(key) - except ValueError: - # We couldn't find it: the name must have been dropped, - continue - # Redefine the key if it's a column number and usecols is defined - if usecols: - try: - key = usecols.index(key) - except ValueError: - pass - # Add the value to the list - filling_values[key] = val - # We have a sequence : update on a one-to-one basis - elif isinstance(user_filling_values, (list, tuple)): - n = len(user_filling_values) - if (n <= nbcols): - filling_values[:n] = user_filling_values - else: - filling_values = user_filling_values[:nbcols] - # We have something else : use it for all entries - else: - filling_values = [user_filling_values] * nbcols - - # Initialize the converters ................................ - if dtype is None: - # Note: we can't use a [...]*nbcols, as we would have 3 times the same - # ... converter, instead of 3 different converters. - converters = [StringConverter(None, missing_values=miss, default=fill) - for (miss, fill) in zip(missing_values, filling_values)] - else: - dtype_flat = flatten_dtype(dtype, flatten_base=True) - # Initialize the converters - if len(dtype_flat) > 1: - # Flexible type : get a converter from each dtype - zipit = zip(dtype_flat, missing_values, filling_values) - converters = [StringConverter(dt, locked=True, - missing_values=miss, default=fill) - for (dt, miss, fill) in zipit] - else: - # Set to a default converter (but w/ different missing values) - zipit = zip(missing_values, filling_values) - converters = [StringConverter(dtype, locked=True, - missing_values=miss, default=fill) - for (miss, fill) in zipit] - # Update the converters to use the user-defined ones - uc_update = [] - for (j, conv) in user_converters.items(): - # If the converter is specified by column names, use the index instead - if _is_string_like(j): - try: - j = names.index(j) - i = j - except ValueError: - continue - elif usecols: - try: - i = usecols.index(j) - except ValueError: - # Unused converter specified - continue - else: - i = j - # Find the value to test - first_line is not filtered by usecols: - if len(first_line): - testing_value = first_values[j] - else: - testing_value = None - converters[i].update(conv, locked=True, - testing_value=testing_value, - default=filling_values[i], - missing_values=missing_values[i],) - uc_update.append((i, conv)) - # Make sure we have the corrected keys in user_converters... - user_converters.update(uc_update) - - # Fixme: possible error as following variable never used. - #miss_chars = [_.missing_values for _ in converters] - - # Initialize the output lists ... - # ... rows - rows = [] - append_to_rows = rows.append - # ... masks - if usemask: - masks = [] - append_to_masks = masks.append - # ... 
invalid - invalid = [] - append_to_invalid = invalid.append - - # Parse each line - for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): - values = split_line(line) - nbvalues = len(values) - # Skip an empty line - if nbvalues == 0: - continue - # Select only the columns we need - if usecols: - try: - values = [values[_] for _ in usecols] - except IndexError: - append_to_invalid((i + skip_header + 1, nbvalues)) - continue - elif nbvalues != nbcols: - append_to_invalid((i + skip_header + 1, nbvalues)) - continue - # Store the values - append_to_rows(tuple(values)) - if usemask: - append_to_masks(tuple([v.strip() in m - for (v, m) in zip(values, missing_values)])) - - if own_fhd: - fhd.close() - - # Upgrade the converters (if needed) - if dtype is None: - for (i, converter) in enumerate(converters): - current_column = [itemgetter(i)(_m) for _m in rows] - try: - converter.iterupgrade(current_column) - except ConverterLockError: - errmsg = "Converter #%i is locked and cannot be upgraded: " % i - current_column = map(itemgetter(i), rows) - for (j, value) in enumerate(current_column): - try: - converter.upgrade(value) - except (ConverterError, ValueError): - errmsg += "(occurred line #%i for value '%s')" - errmsg %= (j + 1 + skip_header, value) - raise ConverterError(errmsg) - - # Check that we don't have invalid values - nbinvalid = len(invalid) - if nbinvalid > 0: - nbrows = len(rows) + nbinvalid - skip_footer - # Construct the error message - template = " Line #%%i (got %%i columns instead of %i)" % nbcols - if skip_footer > 0: - nbinvalid_skipped = len([_ for _ in invalid - if _[0] > nbrows + skip_header]) - invalid = invalid[:nbinvalid - nbinvalid_skipped] - skip_footer -= nbinvalid_skipped -# -# nbrows -= skip_footer -# errmsg = [template % (i, nb) -# for (i, nb) in invalid if i < nbrows] -# else: - errmsg = [template % (i, nb) - for (i, nb) in invalid] - if len(errmsg): - errmsg.insert(0, "Some errors were detected !") - errmsg = "\n".join(errmsg) - # Raise an exception ? - if invalid_raise: - raise ValueError(errmsg) - # Issue a warning ? - else: - warnings.warn(errmsg, ConversionWarning) - - # Strip the last skip_footer data - if skip_footer > 0: - rows = rows[:-skip_footer] - if usemask: - masks = masks[:-skip_footer] - - # Convert each value according to the converter: - # We want to modify the list in place to avoid creating a new one... - if loose: - rows = list( - zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] - for (i, conv) in enumerate(converters)])) - else: - rows = list( - zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] - for (i, conv) in enumerate(converters)])) - - # Reset the dtype - data = rows - if dtype is None: - # Get the dtypes from the types of the converters - column_types = [conv.type for conv in converters] - # Find the columns with strings... - strcolidx = [i for (i, v) in enumerate(column_types) - if v in (type('S'), np.string_)] - # ... and take the largest number of chars. 
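        # For example, a string column holding b'a' and b'abc' is widened
        # to dtype '|S3'.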
- for i in strcolidx: - column_types[i] = "|S%i" % max(len(row[i]) for row in data) - # - if names is None: - # If the dtype is uniform, don't define names, else use '' - base = set([c.type for c in converters if c._checked]) - if len(base) == 1: - (ddtype, mdtype) = (list(base)[0], np.bool) - else: - ddtype = [(defaultfmt % i, dt) - for (i, dt) in enumerate(column_types)] - if usemask: - mdtype = [(defaultfmt % i, np.bool) - for (i, dt) in enumerate(column_types)] - else: - ddtype = list(zip(names, column_types)) - mdtype = list(zip(names, [np.bool] * len(column_types))) - output = np.array(data, dtype=ddtype) - if usemask: - outputmask = np.array(masks, dtype=mdtype) - else: - # Overwrite the initial dtype names if needed - if names and dtype.names: - dtype.names = names - # Case 1. We have a structured type - if len(dtype_flat) > 1: - # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] - # First, create the array using a flattened dtype: - # [('a', int), ('b1', int), ('b2', float)] - # Then, view the array using the specified dtype. - if 'O' in (_.char for _ in dtype_flat): - if has_nested_fields(dtype): - raise NotImplementedError( - "Nested fields involving objects are not supported...") - else: - output = np.array(data, dtype=dtype) - else: - rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) - output = rows.view(dtype) - # Now, process the rowmasks the same way - if usemask: - rowmasks = np.array( - masks, dtype=np.dtype([('', np.bool) for t in dtype_flat])) - # Construct the new dtype - mdtype = make_mask_descr(dtype) - outputmask = rowmasks.view(mdtype) - # Case #2. We have a basic dtype - else: - # We used some user-defined converters - if user_converters: - ishomogeneous = True - descr = [] - for i, ttype in enumerate([conv.type for conv in converters]): - # Keep the dtype of the current converter - if i in user_converters: - ishomogeneous &= (ttype == dtype.type) - if ttype == np.string_: - ttype = "|S%i" % max(len(row[i]) for row in data) - descr.append(('', ttype)) - else: - descr.append(('', dtype)) - # So we changed the dtype ? - if not ishomogeneous: - # We have more than one field - if len(descr) > 1: - dtype = np.dtype(descr) - # We have only one field: drop the name if not needed. - else: - dtype = np.dtype(ttype) - # - output = np.array(data, dtype) - if usemask: - if dtype.names: - mdtype = [(_, np.bool) for _ in dtype.names] - else: - mdtype = np.bool - outputmask = np.array(masks, dtype=mdtype) - # Try to take care of the missing data we missed - names = output.dtype.names - if usemask and names: - for (name, conv) in zip(names or (), converters): - missing_values = [conv(_) for _ in conv.missing_values - if _ != asbytes('')] - for mval in missing_values: - outputmask[name] |= (output[name] == mval) - # Construct the final array - if usemask: - output = output.view(MaskedArray) - output._mask = outputmask - if unpack: - return output.squeeze().T - return output.squeeze() - - -def ndfromtxt(fname, **kwargs): - """ - Load ASCII data stored in a file and return it as a single array. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function. - - """ - kwargs['usemask'] = False - return genfromtxt(fname, **kwargs) - - -def mafromtxt(fname, **kwargs): - """ - Load ASCII data stored in a text file and return a masked array. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. 
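A small usage sketch of the missing-value and filling-value machinery that `genfromtxt` implements above; the data are illustrative:

    >>> from io import BytesIO
    >>> data = BytesIO(b"1,,3\n4,5,6")
    >>> np.genfromtxt(data, delimiter=",", filling_values=-1)
    array([[ 1., -1.,  3.],
           [ 4.,  5.,  6.]])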
- - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. - - """ - kwargs['usemask'] = True - return genfromtxt(fname, **kwargs) - - -def recfromtxt(fname, **kwargs): - """ - Load ASCII data from a file and return it in a record array. - - If ``usemask=False`` a standard `recarray` is returned, - if ``usemask=True`` a MaskedRecords array is returned. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - kwargs.setdefault("dtype", None) - usemask = kwargs.get('usemask', False) - output = genfromtxt(fname, **kwargs) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output - - -def recfromcsv(fname, **kwargs): - """ - Load ASCII data stored in a comma-separated file. - - The returned array is a record array (if ``usemask=False``, see - `recarray`) or a masked record array (if ``usemask=True``, - see `ma.mrecords.MaskedRecords`). - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - # Set default kwargs for genfromtxt as relevant to csv import. - kwargs.setdefault("case_sensitive", "lower") - kwargs.setdefault("names", True) - kwargs.setdefault("delimiter", ",") - kwargs.setdefault("dtype", None) - output = genfromtxt(fname, **kwargs) - - usemask = kwargs.get("usemask", False) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/polynomial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/polynomial.py deleted file mode 100644 index 6a1adc7730806..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/polynomial.py +++ /dev/null @@ -1,1271 +0,0 @@ -""" -Functions to operate on polynomials. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', - 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', - 'polyfit', 'RankWarning'] - -import re -import warnings -import numpy.core.numeric as NX - -from numpy.core import isscalar, abs, finfo, atleast_1d, hstack, dot -from numpy.lib.twodim_base import diag, vander -from numpy.lib.function_base import trim_zeros, sort_complex -from numpy.lib.type_check import iscomplex, real, imag -from numpy.linalg import eigvals, lstsq, inv - -class RankWarning(UserWarning): - """ - Issued by `polyfit` when the Vandermonde matrix is rank deficient. - - For more information, a way to suppress the warning, and an example of - `RankWarning` being issued, see `polyfit`. - - """ - pass - -def poly(seq_of_zeros): - """ - Find the coefficients of a polynomial with the given sequence of roots. - - Returns the coefficients of the polynomial whose leading coefficient - is one for the given sequence of zeros (multiple roots must be included - in the sequence as many times as their multiplicity; see Examples). 
- A square matrix (or array, which will be treated as a matrix) can also - be given, in which case the coefficients of the characteristic polynomial - of the matrix are returned. - - Parameters - ---------- - seq_of_zeros : array_like, shape (N,) or (N, N) - A sequence of polynomial roots, or a square array or matrix object. - - Returns - ------- - c : ndarray - 1D array of polynomial coefficients from highest to lowest degree: - - ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` - where c[0] always equals 1. - - Raises - ------ - ValueError - If input is the wrong shape (the input must be a 1-D or square - 2-D array). - - See Also - -------- - polyval : Evaluate a polynomial at a point. - roots : Return the roots of a polynomial. - polyfit : Least squares polynomial fit. - poly1d : A one-dimensional polynomial class. - - Notes - ----- - Specifying the roots of a polynomial still leaves one degree of - freedom, typically represented by an undetermined leading - coefficient. [1]_ In the case of this function, that coefficient - - the first one in the returned array - is always taken as one. (If - for some reason you have one other point, the only automatic way - presently to leverage that information is to use ``polyfit``.) - - The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` - matrix **A** is given by - - :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, - - where **I** is the `n`-by-`n` identity matrix. [2]_ - - References - ---------- - .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry, - Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. - - .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," - Academic Press, pg. 182, 1980. - - Examples - -------- - Given a sequence of a polynomial's zeros: - - >>> np.poly((0, 0, 0)) # Multiple root example - array([1, 0, 0, 0]) - - The line above represents z**3 + 0*z**2 + 0*z + 0. - - >>> np.poly((-1./2, 0, 1./2)) - array([ 1. , 0. , -0.25, 0. ]) - - The line above represents z**3 - z/4 - - >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) - array([ 1. , -0.77086955, 0.08618131, 0. ]) #random - - Given a square array object: - - >>> P = np.array([[0, 1./3], [-1./2, 0]]) - >>> np.poly(P) - array([ 1. , 0. , 0.16666667]) - - Or a square matrix object: - - >>> np.poly(np.matrix(P)) - array([ 1. , 0. , 0.16666667]) - - Note how in all cases the leading coefficient is always 1. - - """ - seq_of_zeros = atleast_1d(seq_of_zeros) - sh = seq_of_zeros.shape - if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: - seq_of_zeros = eigvals(seq_of_zeros) - elif len(sh) == 1: - pass - else: - raise ValueError("input must be 1d or non-empty square 2d array.") - - if len(seq_of_zeros) == 0: - return 1.0 - - a = [1] - for k in range(len(seq_of_zeros)): - a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full') - - if issubclass(a.dtype.type, NX.complexfloating): - # if complex roots are all complex conjugates, the roots are real. - roots = NX.asarray(seq_of_zeros, complex) - pos_roots = sort_complex(NX.compress(roots.imag > 0, roots)) - neg_roots = NX.conjugate(sort_complex( - NX.compress(roots.imag < 0, roots))) - if (len(pos_roots) == len(neg_roots) and - NX.alltrue(neg_roots == pos_roots)): - a = a.real.copy() - - return a - -def roots(p): - """ - Return the roots of a polynomial with coefficients given in p. - - The values in the rank-1 array `p` are coefficients of a polynomial. 
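# --- Editor's sketch (not part of the original file): roots() below works
# by building the companion matrix of the normalized polynomial and taking
# its eigenvalues; hand-rolled for the docstring's example coefficients:
import numpy as np

p = np.array([3.2, 2.0, 1.0])            # 3.2*x**2 + 2*x + 1
A = np.diag(np.ones(len(p) - 2), -1)     # ones on the sub-diagonal
A[0, :] = -p[1:] / p[0]                  # first row from -p[1:]/p[0]
ev = np.linalg.eigvals(A)                # matches np.roots(p)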
- If the length of `p` is n+1 then the polynomial is described by:: - - p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] - - Parameters - ---------- - p : array_like - Rank-1 array of polynomial coefficients. - - Returns - ------- - out : ndarray - An array containing the complex roots of the polynomial. - - Raises - ------ - ValueError - When `p` cannot be converted to a rank-1 array. - - See also - -------- - poly : Find the coefficients of a polynomial with a given sequence - of roots. - polyval : Evaluate a polynomial at a point. - polyfit : Least squares polynomial fit. - poly1d : A one-dimensional polynomial class. - - Notes - ----- - The algorithm relies on computing the eigenvalues of the - companion matrix [1]_. - - References - ---------- - .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: - Cambridge University Press, 1999, pp. 146-7. - - Examples - -------- - >>> coeff = [3.2, 2, 1] - >>> np.roots(coeff) - array([-0.3125+0.46351241j, -0.3125-0.46351241j]) - - """ - # If input is scalar, this makes it an array - p = atleast_1d(p) - if len(p.shape) != 1: - raise ValueError("Input must be a rank-1 array.") - - # find non-zero array entries - non_zero = NX.nonzero(NX.ravel(p))[0] - - # Return an empty array if polynomial is all zeros - if len(non_zero) == 0: - return NX.array([]) - - # find the number of trailing zeros -- this is the number of roots at 0. - trailing_zeros = len(p) - non_zero[-1] - 1 - - # strip leading and trailing zeros - p = p[int(non_zero[0]):int(non_zero[-1])+1] - - # casting: if incoming array isn't floating point, make it floating point. - if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): - p = p.astype(float) - - N = len(p) - if N > 1: - # build companion matrix and find its eigenvalues (the roots) - A = diag(NX.ones((N-2,), p.dtype), -1) - A[0,:] = -p[1:] / p[0] - roots = eigvals(A) - else: - roots = NX.array([]) - - # tack any zeros onto the back of the array - roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) - return roots - -def polyint(p, m=1, k=None): - """ - Return an antiderivative (indefinite integral) of a polynomial. - - The returned order `m` antiderivative `P` of polynomial `p` satisfies - :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` - integration constants `k`. The constants determine the low-order - polynomial part - - .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} - - of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. - - Parameters - ---------- - p : {array_like, poly1d} - Polynomial to differentiate. - A sequence is interpreted as polynomial coefficients, see `poly1d`. - m : int, optional - Order of the antiderivative. (Default: 1) - k : {None, list of `m` scalars, scalar}, optional - Integration constants. They are given in the order of integration: - those corresponding to highest-order terms come first. - - If ``None`` (default), all constants are assumed to be zero. - If `m = 1`, a single scalar can be given instead of a list. - - See Also - -------- - polyder : derivative of a polynomial - poly1d.integ : equivalent method - - Examples - -------- - The defining property of the antiderivative: - - >>> p = np.poly1d([1,1,1]) - >>> P = np.polyint(p) - >>> P - poly1d([ 0.33333333, 0.5 , 1. , 0. 
]) - >>> np.polyder(P) == p - True - - The integration constants default to zero, but can be specified: - - >>> P = np.polyint(p, 3) - >>> P(0) - 0.0 - >>> np.polyder(P)(0) - 0.0 - >>> np.polyder(P, 2)(0) - 0.0 - >>> P = np.polyint(p, 3, k=[6,5,3]) - >>> P - poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) - - Note that 3 = 6 / 2!, and that the constants are given in the order of - integrations. Constant of the highest-order polynomial term comes first: - - >>> np.polyder(P, 2)(0) - 6.0 - >>> np.polyder(P, 1)(0) - 5.0 - >>> P(0) - 3.0 - - """ - m = int(m) - if m < 0: - raise ValueError("Order of integral must be positive (see polyder)") - if k is None: - k = NX.zeros(m, float) - k = atleast_1d(k) - if len(k) == 1 and m > 1: - k = k[0]*NX.ones(m, float) - if len(k) < m: - raise ValueError( - "k must be a scalar or a rank-1 array of length 1 or >m.") - - truepoly = isinstance(p, poly1d) - p = NX.asarray(p) - if m == 0: - if truepoly: - return poly1d(p) - return p - else: - # Note: this must work also with object and integer arrays - y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) - val = polyint(y, m - 1, k=k[1:]) - if truepoly: - return poly1d(val) - return val - -def polyder(p, m=1): - """ - Return the derivative of the specified order of a polynomial. - - Parameters - ---------- - p : poly1d or sequence - Polynomial to differentiate. - A sequence is interpreted as polynomial coefficients, see `poly1d`. - m : int, optional - Order of differentiation (default: 1) - - Returns - ------- - der : poly1d - A new polynomial representing the derivative. - - See Also - -------- - polyint : Anti-derivative of a polynomial. - poly1d : Class for one-dimensional polynomials. - - Examples - -------- - The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: - - >>> p = np.poly1d([1,1,1,1]) - >>> p2 = np.polyder(p) - >>> p2 - poly1d([3, 2, 1]) - - which evaluates to: - - >>> p2(2.) - 17.0 - - We can verify this, approximating the derivative with - ``(f(x + h) - f(x))/h``: - - >>> (p(2. + 0.001) - p(2.)) / 0.001 - 17.007000999997857 - - The fourth-order derivative of a 3rd-order polynomial is zero: - - >>> np.polyder(p, 2) - poly1d([6, 2]) - >>> np.polyder(p, 3) - poly1d([6]) - >>> np.polyder(p, 4) - poly1d([ 0.]) - - """ - m = int(m) - if m < 0: - raise ValueError("Order of derivative must be positive (see polyint)") - - truepoly = isinstance(p, poly1d) - p = NX.asarray(p) - n = len(p) - 1 - y = p[:-1] * NX.arange(n, 0, -1) - if m == 0: - val = p - else: - val = polyder(y, m - 1) - if truepoly: - val = poly1d(val) - return val - -def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - """ - Least squares polynomial fit. - - Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` - to points `(x, y)`. Returns a vector of coefficients `p` that minimises - the squared error. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. 
- full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (M,), optional - weights to apply to the y-coordinates of the sample points. - cov : bool, optional - Return the estimate and the covariance matrix of the estimate - If full is True, then cov is not returned. - - Returns - ------- - p : ndarray, shape (M,) or (M, K) - Polynomial coefficients, highest power first. If `y` was 2-D, the - coefficients for `k`-th data set are in ``p[:,k]``. - - residuals, rank, singular_values, rcond : - Present only if `full` = True. Residuals of the least-squares fit, - the effective rank of the scaled Vandermonde coefficient matrix, - its singular values, and the specified value of `rcond`. For more - details, see `linalg.lstsq`. - - V : ndarray, shape (M,M) or (M,M,K) - Present only if `full` = False and `cov`=True. The covariance - matrix of the polynomial coefficient estimates. The diagonal of - this matrix are the variance estimates for each coefficient. If y - is a 2-D array, then the covariance matrix for the `k`-th data set - are in ``V[:,:,k]`` - - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. - - The warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', np.RankWarning) - - See Also - -------- - polyval : Computes polynomial values. - linalg.lstsq : Computes a least-squares fit. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution minimizes the squared error - - .. math :: - E = \\sum_{j=0}^k |p(x_j) - y_j|^2 - - in the equations:: - - x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] - x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] - ... - x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] - - The coefficient matrix of the coefficients `p` is a Vandermonde matrix. - - `polyfit` issues a `RankWarning` when the least-squares fit is badly - conditioned. This implies that the best fit is not well-defined due - to numerical error. The results may be improved by lowering the polynomial - degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter - can also be set to a value smaller than its default, but the resulting - fit may be spurious: including contributions from the small singular - values can add numerical noise to the result. - - Note that fitting polynomial coefficients is inherently badly conditioned - when the degree of the polynomial is large or the interval of sample points - is badly centered. The quality of the fit should always be checked in these - cases. When polynomial fits are not satisfactory, splines may be a good - alternative. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - .. 
[2] Wikipedia, "Polynomial interpolation", - http://en.wikipedia.org/wiki/Polynomial_interpolation - - Examples - -------- - >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) - >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) - >>> z = np.polyfit(x, y, 3) - >>> z - array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) - - It is convenient to use `poly1d` objects for dealing with polynomials: - - >>> p = np.poly1d(z) - >>> p(0.5) - 0.6143849206349179 - >>> p(3.5) - -0.34732142857143039 - >>> p(10) - 22.579365079365115 - - High-order polynomials may oscillate wildly: - - >>> p30 = np.poly1d(np.polyfit(x, y, 30)) - /... RankWarning: Polyfit may be poorly conditioned... - >>> p30(4) - -0.80000000000000204 - >>> p30(5) - -0.99999999999999445 - >>> p30(4.5) - -0.10547061179440398 - - Illustration: - - >>> import matplotlib.pyplot as plt - >>> xp = np.linspace(-2, 6, 100) - >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') - >>> plt.ylim(-2,2) - (-2, 2) - >>> plt.show() - - """ - order = int(deg) + 1 - x = NX.asarray(x) + 0.0 - y = NX.asarray(y) + 0.0 - - # check arguments. - if deg < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if x.shape[0] != y.shape[0]: - raise TypeError("expected x and y to have same length") - - # set rcond - if rcond is None: - rcond = len(x)*finfo(x.dtype).eps - - # set up least squares equation for powers of x - lhs = vander(x, order) - rhs = y - - # apply weighting - if w is not None: - w = NX.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected a 1-d array for weights") - if w.shape[0] != y.shape[0]: - raise TypeError("expected w and y to have the same length") - lhs *= w[:, NX.newaxis] - if rhs.ndim == 2: - rhs *= w[:, NX.newaxis] - else: - rhs *= w - - # scale lhs to improve condition number and solve - scale = NX.sqrt((lhs*lhs).sum(axis=0)) - lhs /= scale - c, resids, rank, s = lstsq(lhs, rhs, rcond) - c = (c.T/scale).T # broadcast scale coefficients - - # warn on rank reduction, which indicates an ill conditioned matrix - if rank != order and not full: - msg = "Polyfit may be poorly conditioned" - warnings.warn(msg, RankWarning) - - if full: - return c, resids, rank, s, rcond - elif cov: - Vbase = inv(dot(lhs.T, lhs)) - Vbase /= NX.outer(scale, scale) - # Some literature ignores the extra -2.0 factor in the denominator, but - # it is included here because the covariance of Multivariate Student-T - # (which is implied by a Bayesian uncertainty analysis) includes it. - # Plus, it gives a slightly more conservative estimate of uncertainty. - fac = resids / (len(x) - order - 2.0) - if y.ndim == 1: - return c, Vbase * fac - else: - return c, Vbase[:,:, NX.newaxis] * fac - else: - return c - - -def polyval(p, x): - """ - Evaluate a polynomial at specific values. - - If `p` is of length N, this function returns the value: - - ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` - - If `x` is a sequence, then `p(x)` is returned for each element of `x`. - If `x` is another polynomial then the composite polynomial `p(x(t))` - is returned. - - Parameters - ---------- - p : array_like or poly1d object - 1D array of polynomial coefficients (including coefficients equal - to zero) from highest degree to the constant term, or an - instance of poly1d. 
- x : array_like or poly1d object - A number, a 1D array of numbers, or an instance of poly1d, "at" - which to evaluate `p`. - - Returns - ------- - values : ndarray or poly1d - If `x` is a poly1d instance, the result is the composition of the two - polynomials, i.e., `x` is "substituted" in `p` and the simplified - result is returned. In addition, the type of `x` - array_like or - poly1d - governs the type of the output: `x` array_like => `values` - array_like, `x` a poly1d object => `values` is also. - - See Also - -------- - poly1d: A polynomial class. - - Notes - ----- - Horner's scheme [1]_ is used to evaluate the polynomial. Even so, - for polynomials of high degree the values may be inaccurate due to - rounding errors. Use carefully. - - References - ---------- - .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. - trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand - Reinhold Co., 1985, pg. 720. - - Examples - -------- - >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 - 76 - >>> np.polyval([3,0,1], np.poly1d(5)) - poly1d([ 76.]) - >>> np.polyval(np.poly1d([3,0,1]), 5) - 76 - >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) - poly1d([ 76.]) - - """ - p = NX.asarray(p) - if isinstance(x, poly1d): - y = 0 - else: - x = NX.asarray(x) - y = NX.zeros_like(x) - for i in range(len(p)): - y = x * y + p[i] - return y - -def polyadd(a1, a2): - """ - Find the sum of two polynomials. - - Returns the polynomial resulting from the sum of two input polynomials. - Each input must be either a poly1d object or a 1D sequence of polynomial - coefficients, from highest to lowest degree. - - Parameters - ---------- - a1, a2 : array_like or poly1d object - Input polynomials. - - Returns - ------- - out : ndarray or poly1d object - The sum of the inputs. If either input is a poly1d object, then the - output is also a poly1d object. Otherwise, it is a 1D array of - polynomial coefficients from highest to lowest degree. - - See Also - -------- - poly1d : A one-dimensional polynomial class. - poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval - - Examples - -------- - >>> np.polyadd([1, 2], [9, 5, 4]) - array([9, 6, 6]) - - Using poly1d objects: - - >>> p1 = np.poly1d([1, 2]) - >>> p2 = np.poly1d([9, 5, 4]) - >>> print p1 - 1 x + 2 - >>> print p2 - 2 - 9 x + 5 x + 4 - >>> print np.polyadd(p1, p2) - 2 - 9 x + 6 x + 6 - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1 = atleast_1d(a1) - a2 = atleast_1d(a2) - diff = len(a2) - len(a1) - if diff == 0: - val = a1 + a2 - elif diff > 0: - zr = NX.zeros(diff, a1.dtype) - val = NX.concatenate((zr, a1)) + a2 - else: - zr = NX.zeros(abs(diff), a2.dtype) - val = a1 + NX.concatenate((zr, a2)) - if truepoly: - val = poly1d(val) - return val - -def polysub(a1, a2): - """ - Difference (subtraction) of two polynomials. - - Given two polynomials `a1` and `a2`, returns ``a1 - a2``. - `a1` and `a2` can be either array_like sequences of the polynomials' - coefficients (including coefficients equal to zero), or `poly1d` objects. - - Parameters - ---------- - a1, a2 : array_like or poly1d - Minuend and subtrahend polynomials, respectively. - - Returns - ------- - out : ndarray or poly1d - Array or `poly1d` object of the difference polynomial's coefficients. - - See Also - -------- - polyval, polydiv, polymul, polyadd - - Examples - -------- - .. 
math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) - - >>> np.polysub([2, 10, -2], [3, 10, -4]) - array([-1, 0, 2]) - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1 = atleast_1d(a1) - a2 = atleast_1d(a2) - diff = len(a2) - len(a1) - if diff == 0: - val = a1 - a2 - elif diff > 0: - zr = NX.zeros(diff, a1.dtype) - val = NX.concatenate((zr, a1)) - a2 - else: - zr = NX.zeros(abs(diff), a2.dtype) - val = a1 - NX.concatenate((zr, a2)) - if truepoly: - val = poly1d(val) - return val - - -def polymul(a1, a2): - """ - Find the product of two polynomials. - - Finds the polynomial resulting from the multiplication of the two input - polynomials. Each input must be either a poly1d object or a 1D sequence - of polynomial coefficients, from highest to lowest degree. - - Parameters - ---------- - a1, a2 : array_like or poly1d object - Input polynomials. - - Returns - ------- - out : ndarray or poly1d object - The polynomial resulting from the multiplication of the inputs. If - either inputs is a poly1d object, then the output is also a poly1d - object. Otherwise, it is a 1D array of polynomial coefficients from - highest to lowest degree. - - See Also - -------- - poly1d : A one-dimensional polynomial class. - poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, - polyval - convolve : Array convolution. Same output as polymul, but has parameter - for overlap mode. - - Examples - -------- - >>> np.polymul([1, 2, 3], [9, 5, 1]) - array([ 9, 23, 38, 17, 3]) - - Using poly1d objects: - - >>> p1 = np.poly1d([1, 2, 3]) - >>> p2 = np.poly1d([9, 5, 1]) - >>> print p1 - 2 - 1 x + 2 x + 3 - >>> print p2 - 2 - 9 x + 5 x + 1 - >>> print np.polymul(p1, p2) - 4 3 2 - 9 x + 23 x + 38 x + 17 x + 3 - - """ - truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1, a2 = poly1d(a1), poly1d(a2) - val = NX.convolve(a1, a2) - if truepoly: - val = poly1d(val) - return val - -def polydiv(u, v): - """ - Returns the quotient and remainder of polynomial division. - - The input arrays are the coefficients (including any coefficients - equal to zero) of the "numerator" (dividend) and "denominator" - (divisor) polynomials, respectively. - - Parameters - ---------- - u : array_like or poly1d - Dividend polynomial's coefficients. - - v : array_like or poly1d - Divisor polynomial's coefficients. - - Returns - ------- - q : ndarray - Coefficients, including those equal to zero, of the quotient. - r : ndarray - Coefficients, including those equal to zero, of the remainder. - - See Also - -------- - poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub, - polyval - - Notes - ----- - Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need - not equal `v.ndim`. In other words, all four possible combinations - - ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, - ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. - - Examples - -------- - .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 - - >>> x = np.array([3.0, 5.0, 2.0]) - >>> y = np.array([2.0, 1.0]) - >>> np.polydiv(x, y) - (array([ 1.5 , 1.75]), array([ 0.25])) - - """ - truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d)) - u = atleast_1d(u) + 0.0 - v = atleast_1d(v) + 0.0 - # w has the common type - w = u[0] + v[0] - m = len(u) - 1 - n = len(v) - 1 - scale = 1. 
/ v[0] - q = NX.zeros((max(m - n + 1, 1),), w.dtype) - r = u.copy() - for k in range(0, m-n+1): - d = scale * r[k] - q[k] = d - r[k:k+n+1] -= d*v - while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): - r = r[1:] - if truepoly: - return poly1d(q), poly1d(r) - return q, r - -_poly_mat = re.compile(r"[*][*]([0-9]*)") -def _raise_power(astr, wrap=70): - n = 0 - line1 = '' - line2 = '' - output = ' ' - while True: - mat = _poly_mat.search(astr, n) - if mat is None: - break - span = mat.span() - power = mat.groups()[0] - partstr = astr[n:span[0]] - n = span[1] - toadd2 = partstr + ' '*(len(power)-1) - toadd1 = ' '*(len(partstr)-1) + power - if ((len(line2) + len(toadd2) > wrap) or - (len(line1) + len(toadd1) > wrap)): - output += line1 + "\n" + line2 + "\n " - line1 = toadd1 - line2 = toadd2 - else: - line2 += partstr + ' '*(len(power)-1) - line1 += ' '*(len(partstr)-1) + power - output += line1 + "\n" + line2 - return output + astr[n:] - - -class poly1d(object): - """ - A one-dimensional polynomial class. - - A convenience class, used to encapsulate "natural" operations on - polynomials so that said operations may take on their customary - form in code (see Examples). - - Parameters - ---------- - c_or_r : array_like - The polynomial's coefficients, in decreasing powers, or if - the value of the second parameter is True, the polynomial's - roots (values where the polynomial evaluates to 0). For example, - ``poly1d([1, 2, 3])`` returns an object that represents - :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns - one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. - r : bool, optional - If True, `c_or_r` specifies the polynomial's roots; the default - is False. - variable : str, optional - Changes the variable used when printing `p` from `x` to `variable` - (see Examples). 
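# --- Editor's sketch (not part of the original file): exercising the
# cov=True branch of polyfit defined earlier; per its docstring, the
# diagonal of the returned matrix estimates each coefficient's variance:
import numpy as np

x = np.linspace(0.0, 1.0, 50)
y = 2.0 * x + 1.0 + 0.05 * np.random.randn(50)
c, V = np.polyfit(x, y, 1, cov=True)
sigma = np.sqrt(np.diag(V))   # 1-sigma uncertainty per fitted coefficient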
- - Examples - -------- - Construct the polynomial :math:`x^2 + 2x + 3`: - - >>> p = np.poly1d([1, 2, 3]) - >>> print np.poly1d(p) - 2 - 1 x + 2 x + 3 - - Evaluate the polynomial at :math:`x = 0.5`: - - >>> p(0.5) - 4.25 - - Find the roots: - - >>> p.r - array([-1.+1.41421356j, -1.-1.41421356j]) - >>> p(p.r) - array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) - - These numbers in the previous line represent (0, 0) to machine precision - - Show the coefficients: - - >>> p.c - array([1, 2, 3]) - - Display the order (the leading zero-coefficients are removed): - - >>> p.order - 2 - - Show the coefficient of the k-th power in the polynomial - (which is equivalent to ``p.c[-(i+1)]``): - - >>> p[1] - 2 - - Polynomials can be added, subtracted, multiplied, and divided - (returns quotient and remainder): - - >>> p * p - poly1d([ 1, 4, 10, 12, 9]) - - >>> (p**3 + 4) / p - (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) - - ``asarray(p)`` gives the coefficient array, so polynomials can be - used in all functions that accept arrays: - - >>> p**2 # square of polynomial - poly1d([ 1, 4, 10, 12, 9]) - - >>> np.square(p) # square of individual coefficients - array([1, 4, 9]) - - The variable used in the string representation of `p` can be modified, - using the `variable` parameter: - - >>> p = np.poly1d([1,2,3], variable='z') - >>> print p - 2 - 1 z + 2 z + 3 - - Construct a polynomial from its roots: - - >>> np.poly1d([1, 2], True) - poly1d([ 1, -3, 2]) - - This is the same polynomial as obtained by: - - >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) - poly1d([ 1, -3, 2]) - - """ - coeffs = None - order = None - variable = None - __hash__ = None - - def __init__(self, c_or_r, r=0, variable=None): - if isinstance(c_or_r, poly1d): - for key in c_or_r.__dict__.keys(): - self.__dict__[key] = c_or_r.__dict__[key] - if variable is not None: - self.__dict__['variable'] = variable - return - if r: - c_or_r = poly(c_or_r) - c_or_r = atleast_1d(c_or_r) - if len(c_or_r.shape) > 1: - raise ValueError("Polynomial must be 1d only.") - c_or_r = trim_zeros(c_or_r, trim='f') - if len(c_or_r) == 0: - c_or_r = NX.array([0.]) - self.__dict__['coeffs'] = c_or_r - self.__dict__['order'] = len(c_or_r) - 1 - if variable is None: - variable = 'x' - self.__dict__['variable'] = variable - - def __array__(self, t=None): - if t: - return NX.asarray(self.coeffs, t) - else: - return NX.asarray(self.coeffs) - - def __repr__(self): - vals = repr(self.coeffs) - vals = vals[6:-1] - return "poly1d(%s)" % vals - - def __len__(self): - return self.order - - def __str__(self): - thestr = "0" - var = self.variable - - # Remove leading zeros - coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] - N = len(coeffs)-1 - - def fmt_float(q): - s = '%.4g' % q - if s.endswith('.0000'): - s = s[:-5] - return s - - for k in range(len(coeffs)): - if not iscomplex(coeffs[k]): - coefstr = fmt_float(real(coeffs[k])) - elif real(coeffs[k]) == 0: - coefstr = '%sj' % fmt_float(imag(coeffs[k])) - else: - coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), - fmt_float(imag(coeffs[k]))) - - power = (N-k) - if power == 0: - if coefstr != '0': - newstr = '%s' % (coefstr,) - else: - if k == 0: - newstr = '0' - else: - newstr = '' - elif power == 1: - if coefstr == '0': - newstr = '' - elif coefstr == 'b': - newstr = var - else: - newstr = '%s %s' % (coefstr, var) - else: - if coefstr == '0': - newstr = '' - elif coefstr == 'b': - newstr = '%s**%d' % (var, power,) - else: - newstr = '%s %s**%d' % (coefstr, var, power) - - if k > 0: - if newstr != '': - 
if newstr.startswith('-'): - thestr = "%s - %s" % (thestr, newstr[1:]) - else: - thestr = "%s + %s" % (thestr, newstr) - else: - thestr = newstr - return _raise_power(thestr) - - def __call__(self, val): - return polyval(self.coeffs, val) - - def __neg__(self): - return poly1d(-self.coeffs) - - def __pos__(self): - return self - - def __mul__(self, other): - if isscalar(other): - return poly1d(self.coeffs * other) - else: - other = poly1d(other) - return poly1d(polymul(self.coeffs, other.coeffs)) - - def __rmul__(self, other): - if isscalar(other): - return poly1d(other * self.coeffs) - else: - other = poly1d(other) - return poly1d(polymul(self.coeffs, other.coeffs)) - - def __add__(self, other): - other = poly1d(other) - return poly1d(polyadd(self.coeffs, other.coeffs)) - - def __radd__(self, other): - other = poly1d(other) - return poly1d(polyadd(self.coeffs, other.coeffs)) - - def __pow__(self, val): - if not isscalar(val) or int(val) != val or val < 0: - raise ValueError("Power to non-negative integers only.") - res = [1] - for _ in range(val): - res = polymul(self.coeffs, res) - return poly1d(res) - - def __sub__(self, other): - other = poly1d(other) - return poly1d(polysub(self.coeffs, other.coeffs)) - - def __rsub__(self, other): - other = poly1d(other) - return poly1d(polysub(other.coeffs, self.coeffs)) - - def __div__(self, other): - if isscalar(other): - return poly1d(self.coeffs/other) - else: - other = poly1d(other) - return polydiv(self, other) - - __truediv__ = __div__ - - def __rdiv__(self, other): - if isscalar(other): - return poly1d(other/self.coeffs) - else: - other = poly1d(other) - return polydiv(other, self) - - __rtruediv__ = __rdiv__ - - def __eq__(self, other): - if self.coeffs.shape != other.coeffs.shape: - return False - return (self.coeffs == other.coeffs).all() - - def __ne__(self, other): - return not self.__eq__(other) - - def __setattr__(self, key, val): - raise ValueError("Attributes cannot be changed this way.") - - def __getattr__(self, key): - if key in ['r', 'roots']: - return roots(self.coeffs) - elif key in ['c', 'coef', 'coefficients']: - return self.coeffs - elif key in ['o']: - return self.order - else: - try: - return self.__dict__[key] - except KeyError: - raise AttributeError( - "'%s' has no attribute '%s'" % (self.__class__, key)) - - def __getitem__(self, val): - ind = self.order - val - if val > self.order: - return 0 - if val < 0: - return 0 - return self.coeffs[ind] - - def __setitem__(self, key, val): - ind = self.order - key - if key < 0: - raise ValueError("Does not support negative powers.") - if key > self.order: - zr = NX.zeros(key-self.order, self.coeffs.dtype) - self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs)) - self.__dict__['order'] = key - ind = 0 - self.__dict__['coeffs'][ind] = val - return - - def __iter__(self): - return iter(self.coeffs) - - def integ(self, m=1, k=0): - """ - Return an antiderivative (indefinite integral) of this polynomial. - - Refer to `polyint` for full documentation. - - See Also - -------- - polyint : equivalent function - - """ - return poly1d(polyint(self.coeffs, m=m, k=k)) - - def deriv(self, m=1): - """ - Return a derivative of this polynomial. - - Refer to `polyder` for full documentation. 
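# --- Editor's sketch (not part of the original file): integ() and deriv()
# invert each other up to the integration constants:
import numpy as np

p = np.poly1d([1.0, 2.0, 3.0])
q = p.integ(k=5.0).deriv()   # differentiate the antiderivative
assert q == p                # the integration constant drops out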
- - See Also - -------- - polyder : equivalent function - - """ - return poly1d(polyder(self.coeffs, m=m)) - -# Stuff to do on module import - -warnings.simplefilter('always', RankWarning) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/recfunctions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/recfunctions.py deleted file mode 100644 index a61b1749b566f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/recfunctions.py +++ /dev/null @@ -1,1003 +0,0 @@ -""" -Collection of utilities to manipulate structured arrays. - -Most of these functions were initially implemented by John Hunter for -matplotlib. They have been rewritten and extended for convenience. - -""" -from __future__ import division, absolute_import, print_function - -import sys -import itertools -import numpy as np -import numpy.ma as ma -from numpy import ndarray, recarray -from numpy.ma import MaskedArray -from numpy.ma.mrecords import MaskedRecords -from numpy.lib._iotools import _is_string_like -from numpy.compat import basestring - -if sys.version_info[0] < 3: - from future_builtins import zip - -_check_fill_value = np.ma.core._check_fill_value - - -__all__ = [ - 'append_fields', 'drop_fields', 'find_duplicates', - 'get_fieldstructure', 'join_by', 'merge_arrays', - 'rec_append_fields', 'rec_drop_fields', 'rec_join', - 'recursive_fill_fields', 'rename_fields', 'stack_arrays', - ] - - -def recursive_fill_fields(input, output): - """ - Fills fields from output with fields from input, - with support for nested structures. - - Parameters - ---------- - input : ndarray - Input array. - output : ndarray - Output array. - - Notes - ----- - * `output` should be at least the same size as `input` - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) - >>> b = np.zeros((3,), dtype=a.dtype) - >>> rfn.recursive_fill_fields(a, b) - array([(1, 10.0), (2, 20.0), (0, 0.0)], - dtype=[('A', '>> from numpy.lib import recfunctions as rfn - >>> rfn.get_names(np.empty((1,), dtype=int)) is None - True - >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) - ('A', 'B') - >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) - >>> rfn.get_names(adtype) - ('a', ('b', ('ba', 'bb'))) - """ - listnames = [] - names = adtype.names - for name in names: - current = adtype[name] - if current.names: - listnames.append((name, tuple(get_names(current)))) - else: - listnames.append(name) - return tuple(listnames) or None - - -def get_names_flat(adtype): - """ - Returns the field names of the input datatype as a tuple. Nested structure - are flattend beforehand. - - Parameters - ---------- - adtype : dtype - Input datatype - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None - True - >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)])) - ('A', 'B') - >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) - >>> rfn.get_names_flat(adtype) - ('a', 'b', 'ba', 'bb') - """ - listnames = [] - names = adtype.names - for name in names: - listnames.append(name) - current = adtype[name] - if current.names: - listnames.extend(get_names_flat(current)) - return tuple(listnames) or None - - -def flatten_descr(ndtype): - """ - Flatten a structured data-type description. 
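# --- Editor's sketch (not part of the original file): recursive_fill_fields
# above also descends into nested dtypes, which its docstring example does
# not show:
import numpy as np
from numpy.lib import recfunctions as rfn

ndt = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
src = np.array([(1, (2.0, 3))], dtype=ndt)
dst = np.zeros(2, dtype=ndt)
dst = rfn.recursive_fill_fields(src, dst)   # row 0 copied, row 1 left zeroed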
- - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) - (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32'))) - - """ - names = ndtype.names - if names is None: - return ndtype.descr - else: - descr = [] - for field in names: - (typ, _) = ndtype.fields[field] - if typ.names: - descr.extend(flatten_descr(typ)) - else: - descr.append((field, typ)) - return tuple(descr) - - -def zip_descr(seqarrays, flatten=False): - """ - Combine the dtype description of a series of arrays. - - Parameters - ---------- - seqarrays : sequence of arrays - Sequence of arrays - flatten : {boolean}, optional - Whether to collapse nested descriptions. - """ - newdtype = [] - if flatten: - for a in seqarrays: - newdtype.extend(flatten_descr(a.dtype)) - else: - for a in seqarrays: - current = a.dtype - names = current.names or () - if len(names) > 1: - newdtype.append(('', current.descr)) - else: - newdtype.extend(current.descr) - return np.dtype(newdtype).descr - - -def get_fieldstructure(adtype, lastname=None, parents=None,): - """ - Returns a dictionary with fields indexing lists of their parent fields. - - This function is used to simplify access to fields nested in other fields. - - Parameters - ---------- - adtype : np.dtype - Input datatype - lastname : optional - Last processed field name (used internally during recursion). - parents : dictionary - Dictionary of parent fields (used interbally during recursion). - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = np.dtype([('A', int), - ... ('B', [('BA', int), - ... ('BB', [('BBA', int), ('BBB', int)])])]) - >>> rfn.get_fieldstructure(ndtype) - ... # XXX: possible regression, order of BBA and BBB is swapped - {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} - - """ - if parents is None: - parents = {} - names = adtype.names - for name in names: - current = adtype[name] - if current.names: - if lastname: - parents[name] = [lastname, ] - else: - parents[name] = [] - parents.update(get_fieldstructure(current, name, parents)) - else: - lastparent = [_ for _ in (parents.get(lastname, []) or [])] - if lastparent: - lastparent.append(lastname) - elif lastname: - lastparent = [lastname, ] - parents[name] = lastparent or [] - return parents or None - - -def _izip_fields_flat(iterable): - """ - Returns an iterator of concatenated fields from a sequence of arrays, - collapsing any nested structure. - - """ - for element in iterable: - if isinstance(element, np.void): - for f in _izip_fields_flat(tuple(element)): - yield f - else: - yield element - - -def _izip_fields(iterable): - """ - Returns an iterator of concatenated fields from a sequence of arrays. - - """ - for element in iterable: - if (hasattr(element, '__iter__') and - not isinstance(element, basestring)): - for f in _izip_fields(element): - yield f - elif isinstance(element, np.void) and len(tuple(element)) == 1: - for f in _izip_fields(element): - yield f - else: - yield element - - -def izip_records(seqarrays, fill_value=None, flatten=True): - """ - Returns an iterator of concatenated items from a sequence of arrays. - - Parameters - ---------- - seqarray : sequence of arrays - Sequence of arrays. - fill_value : {None, integer} - Value used to pad shorter iterables. 
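# --- Editor's sketch (not part of the original file): flatten_descr above
# collapses a nested description into flat (name, dtype) pairs; the exact
# dtype strings are platform dependent:
import numpy as np
from numpy.lib import recfunctions as rfn

ndt = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
flat = rfn.flatten_descr(ndt)   # (('a', ...), ('ba', ...), ('bb', ...))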
- flatten : {True, False}, - Whether to - """ - # OK, that's a complete ripoff from Python2.6 itertools.izip_longest - def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop): - "Yields the fill_value or raises IndexError" - yield counter() - # - fillers = itertools.repeat(fill_value) - iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays] - # Should we flatten the items, or just use a nested approach - if flatten: - zipfunc = _izip_fields_flat - else: - zipfunc = _izip_fields - # - try: - for tup in zip(*iters): - yield tuple(zipfunc(tup)) - except IndexError: - pass - - -def _fix_output(output, usemask=True, asrecarray=False): - """ - Private function: return a recarray, a ndarray, a MaskedArray - or a MaskedRecords depending on the input parameters - """ - if not isinstance(output, MaskedArray): - usemask = False - if usemask: - if asrecarray: - output = output.view(MaskedRecords) - else: - output = ma.filled(output) - if asrecarray: - output = output.view(recarray) - return output - - -def _fix_defaults(output, defaults=None): - """ - Update the fill_value and masked data of `output` - from the default given in a dictionary defaults. - """ - names = output.dtype.names - (data, mask, fill_value) = (output.data, output.mask, output.fill_value) - for (k, v) in (defaults or {}).items(): - if k in names: - fill_value[k] = v - data[k][mask[k]] = v - return output - - -def merge_arrays(seqarrays, fill_value=-1, flatten=False, - usemask=False, asrecarray=False): - """ - Merge arrays field by field. - - Parameters - ---------- - seqarrays : sequence of ndarrays - Sequence of arrays - fill_value : {float}, optional - Filling value used to pad missing data on the shorter arrays. - flatten : {False, True}, optional - Whether to collapse nested fields. - usemask : {False, True}, optional - Whether to return a masked array or not. - asrecarray : {False, True}, optional - Whether to return a recarray (MaskedRecords) or not. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) - masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)], - mask = [(False, False) (False, False) (True, False)], - fill_value = (999999, 1e+20), - dtype = [('f0', '>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])), - ... usemask=False) - array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]), - ... np.array([10., 20., 30.])), - ... usemask=False, asrecarray=True) - rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('a', '>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - ... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - >>> rfn.drop_fields(a, 'a') - array([((2.0, 3),), ((5.0, 6),)], - dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') - array([(1, (3,)), (4, (6,))], - dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) - array([(1,), (4,)], - dtype=[('a', '>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], - ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) - >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) - array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))], - dtype=[('A', ' 1: - data = merge_arrays(data, flatten=True, usemask=usemask, - fill_value=fill_value) - else: - data = data.pop() - # - output = ma.masked_all(max(len(base), len(data)), - dtype=base.dtype.descr + data.dtype.descr) - output = recursive_fill_fields(base, output) - output = recursive_fill_fields(data, output) - # - return _fix_output(output, usemask=usemask, asrecarray=asrecarray) - - -def rec_append_fields(base, names, data, dtypes=None): - """ - Add new fields to an existing array. - - The names of the fields are given with the `names` arguments, - the corresponding values with the `data` arguments. - If a single field is appended, `names`, `data` and `dtypes` do not have - to be lists but just values. - - Parameters - ---------- - base : array - Input array to extend. - names : string, sequence - String or sequence of strings corresponding to the names - of the new fields. - data : array or sequence of arrays - Array or sequence of arrays storing the fields to add to the base. - dtypes : sequence of datatypes, optional - Datatype or sequence of datatypes. - If None, the datatypes are estimated from the `data`. - - See Also - -------- - append_fields - - Returns - ------- - appended_array : np.recarray - """ - return append_fields(base, names, data=data, dtypes=dtypes, - asrecarray=True, usemask=False) - - -def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, - autoconvert=False): - """ - Superposes arrays fields by fields - - Parameters - ---------- - seqarrays : array or sequence - Sequence of input arrays. - defaults : dictionary, optional - Dictionary mapping field names to the corresponding default values. - usemask : {True, False}, optional - Whether to return a MaskedArray (or MaskedRecords is - `asrecarray==True`) or a ndarray. - asrecarray : {False, True}, optional - Whether to return a recarray (or MaskedRecords if `usemask==True`) - or just a flexible-type ndarray. - autoconvert : {False, True}, optional - Whether automatically cast the type of the field to the maximum. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> x = np.array([1, 2,]) - >>> rfn.stack_arrays(x) is x - True - >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) - >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - ... 
dtype=[('A', '|S3'), ('B', float), ('C', float)]) - >>> test = rfn.stack_arrays((z,zz)) - >>> test - masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0) - ('c', 30.0, 300.0)], - mask = [(False, False, True) (False, False, True) (False, False, False) - (False, False, False) (False, False, False)], - fill_value = ('N/A', 1e+20, 1e+20), - dtype = [('A', '|S3'), ('B', ' np.dtype(current_descr[-1]): - current_descr = list(current_descr) - current_descr[-1] = descr[1] - newdescr[nameidx] = tuple(current_descr) - elif descr[1] != current_descr[-1]: - raise TypeError("Incompatible type '%s' <> '%s'" % - (dict(newdescr)[name], descr[1])) - # Only one field: use concatenate - if len(newdescr) == 1: - output = ma.concatenate(seqarrays) - else: - # - output = ma.masked_all((np.sum(nrecords),), newdescr) - offset = np.cumsum(np.r_[0, nrecords]) - seen = [] - for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): - names = a.dtype.names - if names is None: - output['f%i' % len(seen)][i:j] = a - else: - for name in n: - output[name][i:j] = a[name] - if name not in seen: - seen.append(name) - # - return _fix_output(_fix_defaults(output, defaults), - usemask=usemask, asrecarray=asrecarray) - - -def find_duplicates(a, key=None, ignoremask=True, return_index=False): - """ - Find the duplicates in a structured array along a given key - - Parameters - ---------- - a : array-like - Input array - key : {string, None}, optional - Name of the fields along which to check the duplicates. - If None, the search is performed by records - ignoremask : {True, False}, optional - Whether masked data should be discarded or considered as duplicates. - return_index : {False, True}, optional - Whether to return the indices of the duplicated values. - - Examples - -------- - >>> from numpy.lib import recfunctions as rfn - >>> ndtype = [('a', int)] - >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], - ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) - ... # XXX: judging by the output, the ignoremask flag has no effect - """ - a = np.asanyarray(a).ravel() - # Get a dictionary of fields - fields = get_fieldstructure(a.dtype) - # Get the sorting data (by selecting the corresponding field) - base = a - if key: - for f in fields[key]: - base = base[f] - base = base[key] - # Get the sorting indices and the sorted data - sortidx = base.argsort() - sortedbase = base[sortidx] - sorteddata = sortedbase.filled() - # Compare the sorting data - flag = (sorteddata[:-1] == sorteddata[1:]) - # If masked data must be ignored, set the flag to false where needed - if ignoremask: - sortedmask = sortedbase.recordmask - flag[sortedmask[1:]] = False - flag = np.concatenate(([False], flag)) - # We need to take the point on the left as well (else we're missing it) - flag[:-1] = flag[:-1] + flag[1:] - duplicates = a[sortidx][flag] - if return_index: - return (duplicates, sortidx[flag]) - else: - return duplicates - - -def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', - defaults=None, usemask=True, asrecarray=False): - """ - Join arrays `r1` and `r2` on key `key`. - - The key should be either a string or a sequence of string corresponding - to the fields used to join the array. An exception is raised if the - `key` field cannot be found in the two input arrays. Neither `r1` nor - `r2` should have any duplicates along `key`: the presence of duplicates - will make the output quite unreliable. 
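# --- Editor's sketch (not part of the original file): a minimal inner join
# on a shared key field, with usemask=False to get a plain ndarray back:
import numpy as np
from numpy.lib import recfunctions as rfn

r1 = np.array([(1, 10.0), (2, 20.0)], dtype=[('key', int), ('x', float)])
r2 = np.array([(2, 200.0), (3, 300.0)], dtype=[('key', int), ('y', float)])
joined = rfn.join_by('key', r1, r2, jointype='inner', usemask=False)
# one record with key == 2, carrying both x and y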
Note that duplicates are not - looked for by the algorithm. - - Parameters - ---------- - key : {string, sequence} - A string or a sequence of strings corresponding to the fields used - for comparison. - r1, r2 : arrays - Structured arrays. - jointype : {'inner', 'outer', 'leftouter'}, optional - If 'inner', returns the elements common to both r1 and r2. - If 'outer', returns the common elements as well as the elements of - r1 not in r2 and the elements of not in r2. - If 'leftouter', returns the common elements and the elements of r1 - not in r2. - r1postfix : string, optional - String appended to the names of the fields of r1 that are present - in r2 but absent of the key. - r2postfix : string, optional - String appended to the names of the fields of r2 that are present - in r1 but absent of the key. - defaults : {dictionary}, optional - Dictionary mapping field names to the corresponding default values. - usemask : {True, False}, optional - Whether to return a MaskedArray (or MaskedRecords is - `asrecarray==True`) or a ndarray. - asrecarray : {False, True}, optional - Whether to return a recarray (or MaskedRecords if `usemask==True`) - or just a flexible-type ndarray. - - Notes - ----- - * The output is sorted along the key. - * A temporary array is formed by dropping the fields not in the key for - the two arrays and concatenating the result. This array is then - sorted, and the common entries selected. The output is constructed by - filling the fields with the selected entries. Matching is not - preserved if there are some duplicates... - - """ - # Check jointype - if jointype not in ('inner', 'outer', 'leftouter'): - raise ValueError( - "The 'jointype' argument should be in 'inner', " - "'outer' or 'leftouter' (got '%s' instead)" % jointype - ) - # If we have a single key, put it in a tuple - if isinstance(key, basestring): - key = (key,) - - # Check the keys - for name in key: - if name not in r1.dtype.names: - raise ValueError('r1 does not have key field %s' % name) - if name not in r2.dtype.names: - raise ValueError('r2 does not have key field %s' % name) - - # Make sure we work with ravelled arrays - r1 = r1.ravel() - r2 = r2.ravel() - # Fixme: nb2 below is never used. Commenting out for pyflakes. 
- # (nb1, nb2) = (len(r1), len(r2)) - nb1 = len(r1) - (r1names, r2names) = (r1.dtype.names, r2.dtype.names) - - # Check the names for collision - if (set.intersection(set(r1names), set(r2names)).difference(key) and - not (r1postfix or r2postfix)): - msg = "r1 and r2 contain common names, r1postfix and r2postfix " - msg += "can't be empty" - raise ValueError(msg) - - # Make temporary arrays of just the keys - r1k = drop_fields(r1, [n for n in r1names if n not in key]) - r2k = drop_fields(r2, [n for n in r2names if n not in key]) - - # Concatenate the two arrays for comparison - aux = ma.concatenate((r1k, r2k)) - idx_sort = aux.argsort(order=key) - aux = aux[idx_sort] - # - # Get the common keys - flag_in = ma.concatenate(([False], aux[1:] == aux[:-1])) - flag_in[:-1] = flag_in[1:] + flag_in[:-1] - idx_in = idx_sort[flag_in] - idx_1 = idx_in[(idx_in < nb1)] - idx_2 = idx_in[(idx_in >= nb1)] - nb1 - (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) - if jointype == 'inner': - (r1spc, r2spc) = (0, 0) - elif jointype == 'outer': - idx_out = idx_sort[~flag_in] - idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) - idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) - (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) - elif jointype == 'leftouter': - idx_out = idx_sort[~flag_in] - idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) - (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) - # Select the entries from each input - (s1, s2) = (r1[idx_1], r2[idx_2]) - # - # Build the new description of the output array ....... - # Start with the key fields - ndtype = [list(_) for _ in r1k.dtype.descr] - # Add the other fields - ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key) - # Find the new list of names (it may be different from r1names) - names = list(_[0] for _ in ndtype) - for desc in r2.dtype.descr: - desc = list(desc) - name = desc[0] - # Have we seen the current name already ? - if name in names: - nameidx = ndtype.index(desc) - current = ndtype[nameidx] - # The current field is part of the key: take the largest dtype - if name in key: - current[-1] = max(desc[1], current[-1]) - # The current field is not part of the key: add the suffixes - else: - current[0] += r1postfix - desc[0] += r2postfix - ndtype.insert(nameidx + 1, desc) - #... we haven't: just add the description to the current list - else: - names.extend(desc[0]) - ndtype.append(desc) - # Revert the elements to tuples - ndtype = [tuple(_) for _ in ndtype] - # Find the largest nb of common fields : - # r1cmn and r2cmn should be equal, but... 
- cmn = max(r1cmn, r2cmn) - # Construct an empty array - output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) - names = output.dtype.names - for f in r1names: - selected = s1[f] - if f not in names or (f in r2names and not r2postfix and f not in key): - f += r1postfix - current = output[f] - current[:r1cmn] = selected[:r1cmn] - if jointype in ('outer', 'leftouter'): - current[cmn:cmn + r1spc] = selected[r1cmn:] - for f in r2names: - selected = s2[f] - if f not in names or (f in r1names and not r1postfix and f not in key): - f += r2postfix - current = output[f] - current[:r2cmn] = selected[:r2cmn] - if (jointype == 'outer') and r2spc: - current[-r2spc:] = selected[r2cmn:] - # Sort and finalize the output - output.sort(order=key) - kwargs = dict(usemask=usemask, asrecarray=asrecarray) - return _fix_output(_fix_defaults(output, defaults), **kwargs) - - -def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', - defaults=None): - """ - Join arrays `r1` and `r2` on keys. - Alternative to join_by, that always returns a np.recarray. - - See Also - -------- - join_by : equivalent function - """ - kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, - defaults=defaults, usemask=False, asrecarray=True) - return join_by(key, r1, r2, **kwargs) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/scimath.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/scimath.py deleted file mode 100644 index e07caf805ed27..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/scimath.py +++ /dev/null @@ -1,566 +0,0 @@ -""" -Wrapper functions to more user-friendly calling of certain math functions -whose output data-type is different than the input data-type in certain -domains of the input. - -For example, for functions like `log` with branch cuts, the versions in this -module provide the mathematically valid answers in the complex plane:: - - >>> import math - >>> from numpy.lib import scimath - >>> scimath.log(-math.exp(1)) == (1+1j*math.pi) - True - -Similarly, `sqrt`, other base logarithms, `power` and trig functions are -correctly handled. See their respective docstrings for specific examples. - -""" -from __future__ import division, absolute_import, print_function - -import numpy.core.numeric as nx -import numpy.core.numerictypes as nt -from numpy.core.numeric import asarray, any -from numpy.lib.type_check import isreal - - -__all__ = [ - 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', - 'arctanh' - ] - - -_ln2 = nx.log(2.0) - - -def _tocomplex(arr): - """Convert its input `arr` to a complex array. - - The input is returned as a complex array of the smallest type that will fit - the original data: types like single, byte, short, etc. become csingle, - while others become cdouble. - - A copy of the input is always made. - - Parameters - ---------- - arr : array - - Returns - ------- - array - An array with the same input data as the input but in complex form. 
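# --- Editor's sketch (not part of the original file): the scimath wrappers
# promote negative real input to complex instead of returning nan:
import numpy as np

a = np.sqrt(-1.0)              # nan, with an invalid-value RuntimeWarning
b = np.lib.scimath.sqrt(-1.0)  # 1j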
- - Examples - -------- - - First, consider an input of type short: - - >>> a = np.array([1,2,3],np.short) - - >>> ac = np.lib.scimath._tocomplex(a); ac - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - - >>> ac.dtype - dtype('complex64') - - If the input is of type double, the output is correspondingly of the - complex double type as well: - - >>> b = np.array([1,2,3],np.double) - - >>> bc = np.lib.scimath._tocomplex(b); bc - array([ 1.+0.j, 2.+0.j, 3.+0.j]) - - >>> bc.dtype - dtype('complex128') - - Note that even if the input was complex to begin with, a copy is still - made, since the astype() method always copies: - - >>> c = np.array([1,2,3],np.csingle) - - >>> cc = np.lib.scimath._tocomplex(c); cc - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - - >>> c *= 2; c - array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) - - >>> cc - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) - """ - if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, - nt.ushort, nt.csingle)): - return arr.astype(nt.csingle) - else: - return arr.astype(nt.cdouble) - -def _fix_real_lt_zero(x): - """Convert `x` to complex if it has real, negative components. - - Otherwise, output is just the array version of the input (via asarray). - - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_real_lt_zero([1,2]) - array([1, 2]) - - >>> np.lib.scimath._fix_real_lt_zero([-1,2]) - array([-1.+0.j, 2.+0.j]) - - """ - x = asarray(x) - if any(isreal(x) & (x < 0)): - x = _tocomplex(x) - return x - -def _fix_int_lt_zero(x): - """Convert `x` to double if it has real, negative components. - - Otherwise, output is just the array version of the input (via asarray). - - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_int_lt_zero([1,2]) - array([1, 2]) - - >>> np.lib.scimath._fix_int_lt_zero([-1,2]) - array([-1., 2.]) - """ - x = asarray(x) - if any(isreal(x) & (x < 0)): - x = x * 1.0 - return x - -def _fix_real_abs_gt_1(x): - """Convert `x` to complex if it has real components x_i with abs(x_i)>1. - - Otherwise, output is just the array version of the input (via asarray). - - Parameters - ---------- - x : array_like - - Returns - ------- - array - - Examples - -------- - >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) - array([0, 1]) - - >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) - array([ 0.+0.j, 2.+0.j]) - """ - x = asarray(x) - if any(isreal(x) & (abs(x) > 1)): - x = _tocomplex(x) - return x - -def sqrt(x): - """ - Compute the square root of x. - - For negative input elements, a complex value is returned - (unlike `numpy.sqrt` which returns NaN). - - Parameters - ---------- - x : array_like - The input value(s). - - Returns - ------- - out : ndarray or scalar - The square root of `x`. If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.sqrt - - Examples - -------- - For real, non-negative inputs this works just like `numpy.sqrt`: - - >>> np.lib.scimath.sqrt(1) - 1.0 - >>> np.lib.scimath.sqrt([1, 4]) - array([ 1., 2.]) - - But it automatically handles negative inputs: - - >>> np.lib.scimath.sqrt(-1) - (0.0+1.0j) - >>> np.lib.scimath.sqrt([-1,4]) - array([ 0.+1.j, 2.+0.j]) - - """ - x = _fix_real_lt_zero(x) - return nx.sqrt(x) - -def log(x): - """ - Compute the natural logarithm of `x`. - - Return the "principal value" (for a description of this, see `numpy.log`) - of :math:`log_e(x)`. 
For real `x > 0`, this is a real number (``log(0)`` - returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the - complex principle value is returned. - - Parameters - ---------- - x : array_like - The value(s) whose log is (are) required. - - Returns - ------- - out : ndarray or scalar - The log of the `x` value(s). If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.log - - Notes - ----- - For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` - (note, however, that otherwise `numpy.log` and this `log` are identical, - i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, - notably, the complex principle value if ``x.imag != 0``). - - Examples - -------- - >>> np.emath.log(np.exp(1)) - 1.0 - - Negative arguments are handled "correctly" (recall that - ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): - - >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) - True - - """ - x = _fix_real_lt_zero(x) - return nx.log(x) - -def log10(x): - """ - Compute the logarithm base 10 of `x`. - - Return the "principal value" (for a description of this, see - `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this - is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` - returns ``inf``). Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose log base 10 is (are) required. - - Returns - ------- - out : ndarray or scalar - The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, - otherwise an array object is returned. - - See Also - -------- - numpy.log10 - - Notes - ----- - For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` - (note, however, that otherwise `numpy.log10` and this `log10` are - identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, - and, notably, the complex principle value if ``x.imag != 0``). - - Examples - -------- - - (We set the printing precision so the example can be auto-tested) - - >>> np.set_printoptions(precision=4) - - >>> np.emath.log10(10**1) - 1.0 - - >>> np.emath.log10([-10**1, -10**2, 10**2]) - array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - return nx.log10(x) - -def logn(n, x): - """ - Take log base n of x. - - If `x` contains negative inputs, the answer is computed and returned in the - complex domain. - - Parameters - ---------- - n : int - The base in which the log is taken. - x : array_like - The value(s) whose log base `n` is (are) required. - - Returns - ------- - out : ndarray or scalar - The log base `n` of the `x` value(s). If `x` was a scalar, so is - `out`, otherwise an array is returned. - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.lib.scimath.logn(2, [4, 8]) - array([ 2., 3.]) - >>> np.lib.scimath.logn(2, [-4, -8, 8]) - array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - n = _fix_real_lt_zero(n) - return nx.log(x)/nx.log(n) - -def log2(x): - """ - Compute the logarithm base 2 of `x`. - - Return the "principal value" (for a description of this, see - `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is - a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns - ``inf``). Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like - The value(s) whose log base 2 is (are) required. - - Returns - ------- - out : ndarray or scalar - The log base 2 of the `x` value(s). 
If `x` was a scalar, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.log2 - - Notes - ----- - For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` - (note, however, that otherwise `numpy.log2` and this `log2` are - identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, - and, notably, the complex principle value if ``x.imag != 0``). - - Examples - -------- - We set the printing precision so the example can be auto-tested: - - >>> np.set_printoptions(precision=4) - - >>> np.emath.log2(8) - 3.0 - >>> np.emath.log2([-4, -8, 8]) - array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) - - """ - x = _fix_real_lt_zero(x) - return nx.log2(x) - -def power(x, p): - """ - Return x to the power p, (x**p). - - If `x` contains negative values, the output is converted to the - complex domain. - - Parameters - ---------- - x : array_like - The input value(s). - p : array_like of ints - The power(s) to which `x` is raised. If `x` contains multiple values, - `p` has to either be a scalar, or contain the same number of values - as `x`. In the latter case, the result is - ``x[0]**p[0], x[1]**p[1], ...``. - - Returns - ------- - out : ndarray or scalar - The result of ``x**p``. If `x` and `p` are scalars, so is `out`, - otherwise an array is returned. - - See Also - -------- - numpy.power - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.lib.scimath.power([2, 4], 2) - array([ 4, 16]) - >>> np.lib.scimath.power([2, 4], -2) - array([ 0.25 , 0.0625]) - >>> np.lib.scimath.power([-2, 4], 2) - array([ 4.+0.j, 16.+0.j]) - - """ - x = _fix_real_lt_zero(x) - p = _fix_int_lt_zero(p) - return nx.power(x, p) - -def arccos(x): - """ - Compute the inverse cosine of x. - - Return the "principal value" (for a description of this, see - `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that - `abs(x) <= 1`, this is a real number in the closed interval - :math:`[0, \\pi]`. Otherwise, the complex principle value is returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose arccos is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so - is `out`, otherwise an array object is returned. - - See Also - -------- - numpy.arccos - - Notes - ----- - For an arccos() that returns ``NAN`` when real `x` is not in the - interval ``[-1,1]``, use `numpy.arccos`. - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.emath.arccos(1) # a scalar is returned - 0.0 - - >>> np.emath.arccos([1,2]) - array([ 0.-0.j , 0.+1.317j]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arccos(x) - -def arcsin(x): - """ - Compute the inverse sine of x. - - Return the "principal value" (for a description of this, see - `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that - `abs(x) <= 1`, this is a real number in the closed interval - :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is - returned. - - Parameters - ---------- - x : array_like or scalar - The value(s) whose arcsin is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse sine(s) of the `x` value(s). If `x` was a scalar, so - is `out`, otherwise an array object is returned. - - See Also - -------- - numpy.arcsin - - Notes - ----- - For an arcsin() that returns ``NAN`` when real `x` is not in the - interval ``[-1,1]``, use `numpy.arcsin`. 
- - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.emath.arcsin(0) - 0.0 - - >>> np.emath.arcsin([0,1]) - array([ 0. , 1.5708]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arcsin(x) - -def arctanh(x): - """ - Compute the inverse hyperbolic tangent of `x`. - - Return the "principal value" (for a description of this, see - `numpy.arctanh`) of `arctanh(x)`. For real `x` such that - `abs(x) < 1`, this is a real number. If `abs(x) > 1`, or if `x` is - complex, the result is complex. Finally, `x = 1` returns``inf`` and - `x=-1` returns ``-inf``. - - Parameters - ---------- - x : array_like - The value(s) whose arctanh is (are) required. - - Returns - ------- - out : ndarray or scalar - The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was - a scalar so is `out`, otherwise an array is returned. - - - See Also - -------- - numpy.arctanh - - Notes - ----- - For an arctanh() that returns ``NAN`` when real `x` is not in the - interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does - return +/-inf for `x = +/-1`). - - Examples - -------- - >>> np.set_printoptions(precision=4) - - >>> np.emath.arctanh(np.matrix(np.eye(2))) - array([[ Inf, 0.], - [ 0., Inf]]) - >>> np.emath.arctanh([1j]) - array([ 0.+0.7854j]) - - """ - x = _fix_real_abs_gt_1(x) - return nx.arctanh(x) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/setup.py deleted file mode 100644 index 68d99c33a78e2..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import division, print_function - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('lib', parent_package, top_path) - - config.add_include_dirs(join('..', 'core', 'include')) - - config.add_extension('_compiled_base', - sources=[join('src', '_compiled_base.c')] - ) - - config.add_data_dir('benchmarks') - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/shape_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/shape_base.py deleted file mode 100644 index 70fa3ab032c99..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/shape_base.py +++ /dev/null @@ -1,865 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings - -import numpy.core.numeric as _nx -from numpy.core.numeric import ( - asarray, zeros, outer, concatenate, isscalar, array, asanyarray - ) -from numpy.core.fromnumeric import product, reshape -from numpy.core import vstack, atleast_3d - - -__all__ = [ - 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', - 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', - 'apply_along_axis', 'kron', 'tile', 'get_array_wrap' - ] - - -def apply_along_axis(func1d, axis, arr, *args, **kwargs): - """ - Apply a function to 1-D slices along the given axis. - - Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a` - is a 1-D slice of `arr` along `axis`. - - Parameters - ---------- - func1d : function - This function should accept 1-D arrays. It is applied to 1-D - slices of `arr` along the specified axis. - axis : integer - Axis along which `arr` is sliced. - arr : ndarray - Input array. - args : any - Additional arguments to `func1d`. 
- kwargs: any - Additional named arguments to `func1d`. - - .. versionadded:: 1.9.0 - - - Returns - ------- - apply_along_axis : ndarray - The output array. The shape of `outarr` is identical to the shape of - `arr`, except along the `axis` dimension, where the length of `outarr` - is equal to the size of the return value of `func1d`. If `func1d` - returns a scalar `outarr` will have one fewer dimensions than `arr`. - - See Also - -------- - apply_over_axes : Apply a function repeatedly over multiple axes. - - Examples - -------- - >>> def my_func(a): - ... \"\"\"Average first and last element of a 1-D array\"\"\" - ... return (a[0] + a[-1]) * 0.5 - >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) - >>> np.apply_along_axis(my_func, 0, b) - array([ 4., 5., 6.]) - >>> np.apply_along_axis(my_func, 1, b) - array([ 2., 5., 8.]) - - For a function that doesn't return a scalar, the number of dimensions in - `outarr` is the same as `arr`. - - >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) - >>> np.apply_along_axis(sorted, 1, b) - array([[1, 7, 8], - [3, 4, 9], - [2, 5, 6]]) - - """ - arr = asarray(arr) - nd = arr.ndim - if axis < 0: - axis += nd - if (axis >= nd): - raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." - % (axis, nd)) - ind = [0]*(nd-1) - i = zeros(nd, 'O') - indlist = list(range(nd)) - indlist.remove(axis) - i[axis] = slice(None, None) - outshape = asarray(arr.shape).take(indlist) - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - # if res is a number, then we have a smaller output array - if isscalar(res): - outarr = zeros(outshape, asarray(res).dtype) - outarr[tuple(ind)] = res - Ntot = product(outshape) - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= outshape[n]) and (n > (1-nd)): - ind[n-1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - outarr[tuple(ind)] = res - k += 1 - return outarr - else: - Ntot = product(outshape) - holdshape = outshape - outshape = list(arr.shape) - outshape[axis] = len(res) - outarr = zeros(outshape, asarray(res).dtype) - outarr[tuple(i.tolist())] = res - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= holdshape[n]) and (n > (1-nd)): - ind[n-1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - outarr[tuple(i.tolist())] = res - k += 1 - return outarr - - -def apply_over_axes(func, a, axes): - """ - Apply a function repeatedly over multiple axes. - - `func` is called as `res = func(a, axis)`, where `axis` is the first - element of `axes`. The result `res` of the function call must have - either the same dimensions as `a` or one less dimension. If `res` - has one less dimension than `a`, a dimension is inserted before - `axis`. The call to `func` is then repeated for each axis in `axes`, - with `res` as the first argument. - - Parameters - ---------- - func : function - This function must take two arguments, `func(a, axis)`. - a : array_like - Input array. - axes : array_like - Axes over which `func` is applied; the elements must be integers. - - Returns - ------- - apply_over_axis : ndarray - The output array. The number of dimensions is the same as `a`, - but the shape can be different. This depends on whether `func` - changes the shape of its output with respect to its input. - - See Also - -------- - apply_along_axis : - Apply a function to 1-D slices of an array along the given axis. 
- - Notes - ------ - This function is equivalent to tuple axis arguments to reorderable ufuncs - with keepdims=True. Tuple axis arguments to ufuncs have been availabe since - version 1.7.0. - - Examples - -------- - >>> a = np.arange(24).reshape(2,3,4) - >>> a - array([[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23]]]) - - Sum over axes 0 and 2. The result has same number of dimensions - as the original array: - - >>> np.apply_over_axes(np.sum, a, [0,2]) - array([[[ 60], - [ 92], - [124]]]) - - Tuple axis arguments to ufuncs are equivalent: - - >>> np.sum(a, axis=(0,2), keepdims=True) - array([[[ 60], - [ 92], - [124]]]) - - """ - val = asarray(a) - N = a.ndim - if array(axes).ndim == 0: - axes = (axes,) - for axis in axes: - if axis < 0: - axis = N + axis - args = (val, axis) - res = func(*args) - if res.ndim == val.ndim: - val = res - else: - res = expand_dims(res, axis) - if res.ndim == val.ndim: - val = res - else: - raise ValueError("function is not returning " - "an array of the correct shape") - return val - -def expand_dims(a, axis): - """ - Expand the shape of an array. - - Insert a new axis, corresponding to a given position in the array shape. - - Parameters - ---------- - a : array_like - Input array. - axis : int - Position (amongst axes) where new axis is to be inserted. - - Returns - ------- - res : ndarray - Output array. The number of dimensions is one greater than that of - the input array. - - See Also - -------- - doc.indexing, atleast_1d, atleast_2d, atleast_3d - - Examples - -------- - >>> x = np.array([1,2]) - >>> x.shape - (2,) - - The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``: - - >>> y = np.expand_dims(x, axis=0) - >>> y - array([[1, 2]]) - >>> y.shape - (1, 2) - - >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis] - >>> y - array([[1], - [2]]) - >>> y.shape - (2, 1) - - Note that some examples may use ``None`` instead of ``np.newaxis``. These - are the same objects: - - >>> np.newaxis is None - True - - """ - a = asarray(a) - shape = a.shape - if axis < 0: - axis = axis + len(shape) + 1 - return a.reshape(shape[:axis] + (1,) + shape[axis:]) - -row_stack = vstack - -def column_stack(tup): - """ - Stack 1-D arrays as columns into a 2-D array. - - Take a sequence of 1-D arrays and stack them as columns - to make a single 2-D array. 2-D arrays are stacked as-is, - just like with `hstack`. 1-D arrays are turned into 2-D columns - first. - - Parameters - ---------- - tup : sequence of 1-D or 2-D arrays. - Arrays to stack. All of them must have the same first dimension. - - Returns - ------- - stacked : 2-D array - The array formed by stacking the given arrays. - - See Also - -------- - hstack, vstack, concatenate - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - - """ - arrays = [] - for v in tup: - arr = array(v, copy=False, subok=True) - if arr.ndim < 2: - arr = array(arr, copy=False, subok=True, ndmin=2).T - arrays.append(arr) - return _nx.concatenate(arrays, 1) - -def dstack(tup): - """ - Stack arrays in sequence depth wise (along third axis). - - Takes a sequence of arrays and stack them along the third axis - to make a single array. Rebuilds arrays divided by `dsplit`. - This is a simple way to stack 2D arrays (images) into a single - 3D array for processing. - - Parameters - ---------- - tup : sequence of arrays - Arrays to stack. 
All of them must have the same shape along all - but the third axis. - - Returns - ------- - stacked : ndarray - The array formed by stacking the given arrays. - - See Also - -------- - vstack : Stack along first axis. - hstack : Stack along second axis. - concatenate : Join arrays. - dsplit : Split array along third axis. - - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=2)``. - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) - - >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) - >>> np.dstack((a,b)) - array([[[1, 2]], - [[2, 3]], - [[3, 4]]]) - - """ - return _nx.concatenate([atleast_3d(_m) for _m in tup], 2) - -def _replace_zero_by_x_arrays(sub_arys): - for i in range(len(sub_arys)): - if len(_nx.shape(sub_arys[i])) == 0: - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - return sub_arys - -def array_split(ary, indices_or_sections, axis=0): - """ - Split an array into multiple sub-arrays. - - Please refer to the ``split`` documentation. The only difference - between these functions is that ``array_split`` allows - `indices_or_sections` to be an integer that does *not* equally - divide the axis. - - See Also - -------- - split : Split array into multiple sub-arrays of equal size. - - Examples - -------- - >>> x = np.arange(8.0) - >>> np.array_split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] - - """ - try: - Ntotal = ary.shape[axis] - except AttributeError: - Ntotal = len(ary) - try: - # handle scalar case. - Nsections = len(indices_or_sections) + 1 - div_points = [0] + list(indices_or_sections) + [Ntotal] - except TypeError: - # indices_or_sections is a scalar, not an array. - Nsections = int(indices_or_sections) - if Nsections <= 0: - raise ValueError('number sections must be larger than 0.') - Neach_section, extras = divmod(Ntotal, Nsections) - section_sizes = ([0] + - extras * [Neach_section+1] + - (Nsections-extras) * [Neach_section]) - div_points = _nx.array(section_sizes).cumsum() - - sub_arys = [] - sary = _nx.swapaxes(ary, axis, 0) - for i in range(Nsections): - st = div_points[i] - end = div_points[i + 1] - sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) - - # This "kludge" was introduced here to replace arrays shaped (0, 10) - # or similar with an array shaped (0,). - # There seems no need for this, so give a FutureWarning to remove later. - if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1: - warnings.warn("in the future np.array_split will retain the shape of " - "arrays with a zero size, instead of replacing them by " - "`array([])`, which always has a shape of (0,).", - FutureWarning) - sub_arys = _replace_zero_by_x_arrays(sub_arys) - - return sub_arys - -def split(ary,indices_or_sections,axis=0): - """ - Split an array into multiple sub-arrays. - - Parameters - ---------- - ary : ndarray - Array to be divided into sub-arrays. - indices_or_sections : int or 1-D array - If `indices_or_sections` is an integer, N, the array will be divided - into N equal arrays along `axis`. If such a split is not possible, - an error is raised. - - If `indices_or_sections` is a 1-D array of sorted integers, the entries - indicate where along `axis` the array is split. 
For example, - ``[2, 3]`` would, for ``axis=0``, result in - - - ary[:2] - - ary[2:3] - - ary[3:] - - If an index exceeds the dimension of the array along `axis`, - an empty sub-array is returned correspondingly. - axis : int, optional - The axis along which to split, default is 0. - - Returns - ------- - sub-arrays : list of ndarrays - A list of sub-arrays. - - Raises - ------ - ValueError - If `indices_or_sections` is given as an integer, but - a split does not result in equal division. - - See Also - -------- - array_split : Split an array into multiple sub-arrays of equal or - near-equal size. Does not raise an exception if - an equal division cannot be made. - hsplit : Split array into multiple sub-arrays horizontally (column-wise). - vsplit : Split array into multiple sub-arrays vertically (row wise). - dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). - concatenate : Join arrays together. - hstack : Stack arrays in sequence horizontally (column wise). - vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third dimension). - - Examples - -------- - >>> x = np.arange(9.0) - >>> np.split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])] - - >>> x = np.arange(8.0) - >>> np.split(x, [3, 5, 6, 10]) - [array([ 0., 1., 2.]), - array([ 3., 4.]), - array([ 5.]), - array([ 6., 7.]), - array([], dtype=float64)] - - """ - try: - len(indices_or_sections) - except TypeError: - sections = indices_or_sections - N = ary.shape[axis] - if N % sections: - raise ValueError( - 'array split does not result in an equal division') - res = array_split(ary, indices_or_sections, axis) - return res - -def hsplit(ary, indices_or_sections): - """ - Split an array into multiple sub-arrays horizontally (column-wise). - - Please refer to the `split` documentation. `hsplit` is equivalent - to `split` with ``axis=1``, the array is always split along the second - axis regardless of the array dimension. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. - - Examples - -------- - >>> x = np.arange(16.0).reshape(4, 4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) - >>> np.hsplit(x, 2) - [array([[ 0., 1.], - [ 4., 5.], - [ 8., 9.], - [ 12., 13.]]), - array([[ 2., 3.], - [ 6., 7.], - [ 10., 11.], - [ 14., 15.]])] - >>> np.hsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2.], - [ 4., 5., 6.], - [ 8., 9., 10.], - [ 12., 13., 14.]]), - array([[ 3.], - [ 7.], - [ 11.], - [ 15.]]), - array([], dtype=float64)] - - With a higher dimensional array the split is still along the second axis. - - >>> x = np.arange(8.0).reshape(2, 2, 2) - >>> x - array([[[ 0., 1.], - [ 2., 3.]], - [[ 4., 5.], - [ 6., 7.]]]) - >>> np.hsplit(x, 2) - [array([[[ 0., 1.]], - [[ 4., 5.]]]), - array([[[ 2., 3.]], - [[ 6., 7.]]])] - - """ - if len(_nx.shape(ary)) == 0: - raise ValueError('hsplit only works on arrays of 1 or more dimensions') - if len(ary.shape) > 1: - return split(ary, indices_or_sections, 1) - else: - return split(ary, indices_or_sections, 0) - -def vsplit(ary, indices_or_sections): - """ - Split an array into multiple sub-arrays vertically (row-wise). - - Please refer to the ``split`` documentation. ``vsplit`` is equivalent - to ``split`` with `axis=0` (default), the array is always split along the - first axis regardless of the array dimension. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. 
- - Examples - -------- - >>> x = np.arange(16.0).reshape(4, 4) - >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) - >>> np.vsplit(x, 2) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]]), - array([[ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]])] - >>> np.vsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]]), - array([[ 12., 13., 14., 15.]]), - array([], dtype=float64)] - - With a higher dimensional array the split is still along the first axis. - - >>> x = np.arange(8.0).reshape(2, 2, 2) - >>> x - array([[[ 0., 1.], - [ 2., 3.]], - [[ 4., 5.], - [ 6., 7.]]]) - >>> np.vsplit(x, 2) - [array([[[ 0., 1.], - [ 2., 3.]]]), - array([[[ 4., 5.], - [ 6., 7.]]])] - - """ - if len(_nx.shape(ary)) < 2: - raise ValueError('vsplit only works on arrays of 2 or more dimensions') - return split(ary, indices_or_sections, 0) - -def dsplit(ary, indices_or_sections): - """ - Split array into multiple sub-arrays along the 3rd axis (depth). - - Please refer to the `split` documentation. `dsplit` is equivalent - to `split` with ``axis=2``, the array is always split along the third - axis provided the array dimension is greater than or equal to 3. - - See Also - -------- - split : Split an array into multiple sub-arrays of equal size. - - Examples - -------- - >>> x = np.arange(16.0).reshape(2, 2, 4) - >>> x - array([[[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]], - [[ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]]) - >>> np.dsplit(x, 2) - [array([[[ 0., 1.], - [ 4., 5.]], - [[ 8., 9.], - [ 12., 13.]]]), - array([[[ 2., 3.], - [ 6., 7.]], - [[ 10., 11.], - [ 14., 15.]]])] - >>> np.dsplit(x, np.array([3, 6])) - [array([[[ 0., 1., 2.], - [ 4., 5., 6.]], - [[ 8., 9., 10.], - [ 12., 13., 14.]]]), - array([[[ 3.], - [ 7.]], - [[ 11.], - [ 15.]]]), - array([], dtype=float64)] - - """ - if len(_nx.shape(ary)) < 3: - raise ValueError('dsplit only works on arrays of 3 or more dimensions') - return split(ary, indices_or_sections, 2) - -def get_array_prepare(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. If no wrapper is found, return None - """ - wrappers = sorted((getattr(x, '__array_priority__', 0), -i, - x.__array_prepare__) for i, x in enumerate(args) - if hasattr(x, '__array_prepare__')) - if wrappers: - return wrappers[-1][-1] - return None - -def get_array_wrap(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. If no wrapper is found, return None - """ - wrappers = sorted((getattr(x, '__array_priority__', 0), -i, - x.__array_wrap__) for i, x in enumerate(args) - if hasattr(x, '__array_wrap__')) - if wrappers: - return wrappers[-1][-1] - return None - -def kron(a, b): - """ - Kronecker product of two arrays. - - Computes the Kronecker product, a composite array made of blocks of the - second array scaled by the first. - - Parameters - ---------- - a, b : array_like - - Returns - ------- - out : ndarray - - See Also - -------- - outer : The outer product - - Notes - ----- - The function assumes that the number of dimenensions of `a` and `b` - are the same, if necessary prepending the smallest with ones. - If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`, - the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`. 
- The elements are products of elements from `a` and `b`, organized - explicitly by:: - - kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] - - where:: - - kt = it * st + jt, t = 0,...,N - - In the common 2-D case (N=1), the block structure can be visualized:: - - [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], - [ ... ... ], - [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] - - - Examples - -------- - >>> np.kron([1,10,100], [5,6,7]) - array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) - >>> np.kron([5,6,7], [1,10,100]) - array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) - - >>> np.kron(np.eye(2), np.ones((2,2))) - array([[ 1., 1., 0., 0.], - [ 1., 1., 0., 0.], - [ 0., 0., 1., 1.], - [ 0., 0., 1., 1.]]) - - >>> a = np.arange(100).reshape((2,5,2,5)) - >>> b = np.arange(24).reshape((2,3,4)) - >>> c = np.kron(a,b) - >>> c.shape - (2, 10, 6, 20) - >>> I = (1,3,0,2) - >>> J = (0,2,1) - >>> J1 = (0,) + J # extend to ndim=4 - >>> S1 = (1,) + b.shape - >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) - >>> c[K] == a[I]*b[J] - True - - """ - b = asanyarray(b) - a = array(a, copy=False, subok=True, ndmin=b.ndim) - ndb, nda = b.ndim, a.ndim - if (nda == 0 or ndb == 0): - return _nx.multiply(a, b) - as_ = a.shape - bs = b.shape - if not a.flags.contiguous: - a = reshape(a, as_) - if not b.flags.contiguous: - b = reshape(b, bs) - nd = ndb - if (ndb != nda): - if (ndb > nda): - as_ = (1,)*(ndb-nda) + as_ - else: - bs = (1,)*(nda-ndb) + bs - nd = nda - result = outer(a, b).reshape(as_+bs) - axis = nd-1 - for _ in range(nd): - result = concatenate(result, axis=axis) - wrapper = get_array_prepare(a, b) - if wrapper is not None: - result = wrapper(result) - wrapper = get_array_wrap(a, b) - if wrapper is not None: - result = wrapper(result) - return result - - -def tile(A, reps): - """ - Construct an array by repeating A the number of times given by reps. - - If `reps` has length ``d``, the result will have dimension of - ``max(d, A.ndim)``. - - If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new - axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, - or shape (1, 1, 3) for 3-D replication. If this is not the desired - behavior, promote `A` to d-dimensions manually before calling this - function. - - If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. - Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as - (1, 1, 2, 2). - - Parameters - ---------- - A : array_like - The input array. - reps : array_like - The number of repetitions of `A` along each axis. - - Returns - ------- - c : ndarray - The tiled output array. - - See Also - -------- - repeat : Repeat elements of an array. 
- - Examples - -------- - >>> a = np.array([0, 1, 2]) - >>> np.tile(a, 2) - array([0, 1, 2, 0, 1, 2]) - >>> np.tile(a, (2, 2)) - array([[0, 1, 2, 0, 1, 2], - [0, 1, 2, 0, 1, 2]]) - >>> np.tile(a, (2, 1, 2)) - array([[[0, 1, 2, 0, 1, 2]], - [[0, 1, 2, 0, 1, 2]]]) - - >>> b = np.array([[1, 2], [3, 4]]) - >>> np.tile(b, 2) - array([[1, 2, 1, 2], - [3, 4, 3, 4]]) - >>> np.tile(b, (2, 1)) - array([[1, 2], - [3, 4], - [1, 2], - [3, 4]]) - - """ - try: - tup = tuple(reps) - except TypeError: - tup = (reps,) - d = len(tup) - c = _nx.array(A, copy=False, subok=True, ndmin=d) - shape = list(c.shape) - n = max(c.size, 1) - if (d < c.ndim): - tup = (1,)*(c.ndim-d) + tup - for i, nrep in enumerate(tup): - if nrep != 1: - c = c.reshape(-1, n).repeat(nrep, 0) - dim_in = shape[i] - dim_out = dim_in*nrep - shape[i] = dim_out - n //= max(dim_in, 1) - return c.reshape(shape) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/stride_tricks.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/stride_tricks.py deleted file mode 100644 index 12f8bbf131e39..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/stride_tricks.py +++ /dev/null @@ -1,123 +0,0 @@ -""" -Utilities that manipulate strides to achieve desirable effects. - -An explanation of strides can be found in the "ndarray.rst" file in the -NumPy reference guide. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np - -__all__ = ['broadcast_arrays'] - -class DummyArray(object): - """Dummy object that just exists to hang __array_interface__ dictionaries - and possibly keep alive a reference to a base array. - """ - - def __init__(self, interface, base=None): - self.__array_interface__ = interface - self.base = base - -def as_strided(x, shape=None, strides=None): - """ Make an ndarray from the given array with the given shape and strides. - """ - interface = dict(x.__array_interface__) - if shape is not None: - interface['shape'] = tuple(shape) - if strides is not None: - interface['strides'] = tuple(strides) - array = np.asarray(DummyArray(interface, base=x)) - # Make sure dtype is correct in case of custom dtype - if array.dtype.kind == 'V': - array.dtype = x.dtype - return array - -def broadcast_arrays(*args): - """ - Broadcast any number of arrays against each other. - - Parameters - ---------- - `*args` : array_likes - The arrays to broadcast. - - Returns - ------- - broadcasted : list of arrays - These arrays are views on the original arrays. They are typically - not contiguous. Furthermore, more than one element of a - broadcasted array may refer to a single memory location. If you - need to write to the arrays, make copies first. - - Examples - -------- - >>> x = np.array([[1,2,3]]) - >>> y = np.array([[1],[2],[3]]) - >>> np.broadcast_arrays(x, y) - [array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3]]), array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]])] - - Here is a useful idiom for getting contiguous copies instead of - non-contiguous views. - - >>> [np.array(a) for a in np.broadcast_arrays(x, y)] - [array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3]]), array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]])] - - """ - args = [np.asarray(_m) for _m in args] - shapes = [x.shape for x in args] - if len(set(shapes)) == 1: - # Common case where nothing needs to be broadcasted. 
- return args - shapes = [list(s) for s in shapes] - strides = [list(x.strides) for x in args] - nds = [len(s) for s in shapes] - biggest = max(nds) - # Go through each array and prepend dimensions of length 1 to each of - # the shapes in order to make the number of dimensions equal. - for i in range(len(args)): - diff = biggest - nds[i] - if diff > 0: - shapes[i] = [1] * diff + shapes[i] - strides[i] = [0] * diff + strides[i] - # Chech each dimension for compatibility. A dimension length of 1 is - # accepted as compatible with any other length. - common_shape = [] - for axis in range(biggest): - lengths = [s[axis] for s in shapes] - unique = set(lengths + [1]) - if len(unique) > 2: - # There must be at least two non-1 lengths for this axis. - raise ValueError("shape mismatch: two or more arrays have " - "incompatible dimensions on axis %r." % (axis,)) - elif len(unique) == 2: - # There is exactly one non-1 length. The common shape will take - # this value. - unique.remove(1) - new_length = unique.pop() - common_shape.append(new_length) - # For each array, if this axis is being broadcasted from a - # length of 1, then set its stride to 0 so that it repeats its - # data. - for i in range(len(args)): - if shapes[i][axis] == 1: - shapes[i][axis] = new_length - strides[i][axis] = 0 - else: - # Every array has a length of 1 on this axis. Strides can be - # left alone as nothing is broadcasted. - common_shape.append(1) - - # Construct the new arrays. - broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in - zip(args, shapes, strides)] - return broadcasted diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__datasource.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__datasource.py deleted file mode 100644 index 090f71f670c92..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__datasource.py +++ /dev/null @@ -1,351 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -from tempfile import mkdtemp, mkstemp, NamedTemporaryFile -from shutil import rmtree - -from numpy.compat import asbytes -from numpy.testing import ( - run_module_suite, TestCase, assert_ - ) -import numpy.lib._datasource as datasource - -if sys.version_info[0] >= 3: - import urllib.request as urllib_request - from urllib.parse import urlparse - from urllib.error import URLError -else: - import urllib2 as urllib_request - from urlparse import urlparse - from urllib2 import URLError - - -def urlopen_stub(url, data=None): - '''Stub to replace urlopen for testing.''' - if url == valid_httpurl(): - tmpfile = NamedTemporaryFile(prefix='urltmp_') - return tmpfile - else: - raise URLError('Name or service not known') - -# setup and teardown -old_urlopen = None - - -def setup(): - global old_urlopen - - old_urlopen = urllib_request.urlopen - urllib_request.urlopen = urlopen_stub - - -def teardown(): - urllib_request.urlopen = old_urlopen - -# A valid website for more robust testing -http_path = 'http://www.google.com/' -http_file = 'index.html' - -http_fakepath = 'http://fake.abc.web/site/' -http_fakefile = 'fake.txt' - -malicious_files = ['/etc/shadow', '../../shadow', - '..\\system.dat', 'c:\\windows\\system.dat'] - -magic_line = asbytes('three is the magic number') - - -# Utility functions used by many TestCases -def valid_textfile(filedir): - # Generate and return a valid temporary file. 
- fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) - os.close(fd) - return path - - -def invalid_textfile(filedir): - # Generate and return an invalid filename. - fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) - os.close(fd) - os.remove(path) - return path - - -def valid_httpurl(): - return http_path+http_file - - -def invalid_httpurl(): - return http_fakepath+http_fakefile - - -def valid_baseurl(): - return http_path - - -def invalid_baseurl(): - return http_fakepath - - -def valid_httpfile(): - return http_file - - -def invalid_httpfile(): - return http_fakefile - - -class TestDataSourceOpen(TestCase): - def setUp(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - fh = self.ds.open(valid_httpurl()) - assert_(fh) - fh.close() - - def test_InvalidHTTP(self): - url = invalid_httpurl() - self.assertRaises(IOError, self.ds.open, url) - try: - self.ds.open(url) - except IOError as e: - # Regression test for bug fixed in r4342. - assert_(e.errno is None) - - def test_InvalidHTTPCacheURLError(self): - self.assertRaises(URLError, self.ds._cache, invalid_httpurl()) - - def test_ValidFile(self): - local_file = valid_textfile(self.tmpdir) - fh = self.ds.open(local_file) - assert_(fh) - fh.close() - - def test_InvalidFile(self): - invalid_file = invalid_textfile(self.tmpdir) - self.assertRaises(IOError, self.ds.open, invalid_file) - - def test_ValidGzipFile(self): - try: - import gzip - except ImportError: - # We don't have the gzip capabilities to test. - import nose - raise nose.SkipTest - # Test datasource's internal file_opener for Gzip files. - filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') - fp = gzip.open(filepath, 'w') - fp.write(magic_line) - fp.close() - fp = self.ds.open(filepath) - result = fp.readline() - fp.close() - self.assertEqual(magic_line, result) - - def test_ValidBz2File(self): - try: - import bz2 - except ImportError: - # We don't have the bz2 capabilities to test. - import nose - raise nose.SkipTest - # Test datasource's internal file_opener for BZip2 files. 
- filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') - fp = bz2.BZ2File(filepath, 'w') - fp.write(magic_line) - fp.close() - fp = self.ds.open(filepath) - result = fp.readline() - fp.close() - self.assertEqual(magic_line, result) - - -class TestDataSourceExists(TestCase): - def setUp(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - assert_(self.ds.exists(valid_httpurl())) - - def test_InvalidHTTP(self): - self.assertEqual(self.ds.exists(invalid_httpurl()), False) - - def test_ValidFile(self): - # Test valid file in destpath - tmpfile = valid_textfile(self.tmpdir) - assert_(self.ds.exists(tmpfile)) - # Test valid local file not in destpath - localdir = mkdtemp() - tmpfile = valid_textfile(localdir) - assert_(self.ds.exists(tmpfile)) - rmtree(localdir) - - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - self.assertEqual(self.ds.exists(tmpfile), False) - - -class TestDataSourceAbspath(TestCase): - def setUp(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.ds = datasource.DataSource(self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.tmpdir, netloc, - upath.strip(os.sep).strip('/')) - self.assertEqual(local_path, self.ds.abspath(valid_httpurl())) - - def test_ValidFile(self): - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - # Test with filename only - self.assertEqual(tmpfile, self.ds.abspath(tmpfilename)) - # Test filename with complete path - self.assertEqual(tmpfile, self.ds.abspath(tmpfile)) - - def test_InvalidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) - invalidhttp = os.path.join(self.tmpdir, netloc, - upath.strip(os.sep).strip('/')) - self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl())) - - def test_InvalidFile(self): - invalidfile = valid_textfile(self.tmpdir) - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - # Test with filename only - self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename)) - # Test filename with complete path - self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile)) - - def test_sandboxing(self): - tmpfile = valid_textfile(self.tmpdir) - tmpfilename = os.path.split(tmpfile)[-1] - - tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) - - assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(tmpfile).startswith(self.tmpdir)) - assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) - for fn in malicious_files: - assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) - - def test_windows_os_sep(self): - orig_os_sep = os.sep - try: - os.sep = '\\' - self.test_ValidHTTP() - self.test_ValidFile() - self.test_InvalidHTTP() - self.test_InvalidFile() - self.test_sandboxing() - finally: - os.sep = orig_os_sep - - -class TestRepositoryAbspath(TestCase): - def setUp(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.repos._destpath, netloc, - 
upath.strip(os.sep).strip('/')) - filepath = self.repos.abspath(valid_httpfile()) - self.assertEqual(local_path, filepath) - - def test_sandboxing(self): - tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) - assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) - for fn in malicious_files: - assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) - - def test_windows_os_sep(self): - orig_os_sep = os.sep - try: - os.sep = '\\' - self.test_ValidHTTP() - self.test_sandboxing() - finally: - os.sep = orig_os_sep - - -class TestRepositoryExists(TestCase): - def setUp(self): - self.tmpdir = mkdtemp() - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def tearDown(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidFile(self): - # Create local temp file - tmpfile = valid_textfile(self.tmpdir) - assert_(self.repos.exists(tmpfile)) - - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - self.assertEqual(self.repos.exists(tmpfile), False) - - def test_RemoveHTTPFile(self): - assert_(self.repos.exists(valid_httpurl())) - - def test_CachedHTTPFile(self): - localfile = valid_httpurl() - # Create a locally cached temp file with an URL based - # directory structure. This is similar to what Repository.open - # would do. - scheme, netloc, upath, pms, qry, frg = urlparse(localfile) - local_path = os.path.join(self.repos._destpath, netloc) - os.mkdir(local_path, 0o0700) - tmpfile = valid_textfile(local_path) - assert_(self.repos.exists(tmpfile)) - - -class TestOpenFunc(TestCase): - def setUp(self): - self.tmpdir = mkdtemp() - - def tearDown(self): - rmtree(self.tmpdir) - - def test_DataSourceOpen(self): - local_file = valid_textfile(self.tmpdir) - # Test case where destpath is passed in - fp = datasource.open(local_file, destpath=self.tmpdir) - assert_(fp) - fp.close() - # Test case where default destpath is used - fp = datasource.open(local_file) - assert_(fp) - fp.close() - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__iotools.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__iotools.py deleted file mode 100644 index 4db19382a71ca..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__iotools.py +++ /dev/null @@ -1,326 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import time -from datetime import date - -import numpy as np -from numpy.compat import asbytes, asbytes_nested -from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal - ) -from numpy.lib._iotools import ( - LineSplitter, NameValidator, StringConverter, - has_nested_fields, easy_dtype, flatten_dtype - ) - - -class TestLineSplitter(TestCase): - "Tests the LineSplitter class." 
- - def test_no_delimiter(self): - "Test LineSplitter w/o delimiter" - strg = asbytes(" 1 2 3 4 5 # test") - test = LineSplitter()(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5'])) - test = LineSplitter('')(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5'])) - - def test_space_delimiter(self): - "Test space delimiter" - strg = asbytes(" 1 2 3 4 5 # test") - test = LineSplitter(asbytes(' '))(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) - test = LineSplitter(asbytes(' '))(strg) - assert_equal(test, asbytes_nested(['1 2 3 4', '5'])) - - def test_tab_delimiter(self): - "Test tab delimiter" - strg = asbytes(" 1\t 2\t 3\t 4\t 5 6") - test = LineSplitter(asbytes('\t'))(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5 6'])) - strg = asbytes(" 1 2\t 3 4\t 5 6") - test = LineSplitter(asbytes('\t'))(strg) - assert_equal(test, asbytes_nested(['1 2', '3 4', '5 6'])) - - def test_other_delimiter(self): - "Test LineSplitter on delimiter" - strg = asbytes("1,2,3,4,,5") - test = LineSplitter(asbytes(','))(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) - # - strg = asbytes(" 1,2,3,4,,5 # test") - test = LineSplitter(asbytes(','))(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5'])) - - def test_constant_fixed_width(self): - "Test LineSplitter w/ fixed-width fields" - strg = asbytes(" 1 2 3 4 5 # test") - test = LineSplitter(3)(strg) - assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5', ''])) - # - strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter(20)(strg) - assert_equal(test, asbytes_nested(['1 3 4 5 6'])) - # - strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter(30)(strg) - assert_equal(test, asbytes_nested(['1 3 4 5 6'])) - - def test_variable_fixed_width(self): - strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter((3, 6, 6, 3))(strg) - assert_equal(test, asbytes_nested(['1', '3', '4 5', '6'])) - # - strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter((6, 6, 9))(strg) - assert_equal(test, asbytes_nested(['1', '3 4', '5 6'])) - -#------------------------------------------------------------------------------- - - -class TestNameValidator(TestCase): - - def test_case_sensitivity(self): - "Test case sensitivity" - names = ['A', 'a', 'b', 'c'] - test = NameValidator().validate(names) - assert_equal(test, ['A', 'a', 'b', 'c']) - test = NameValidator(case_sensitive=False).validate(names) - assert_equal(test, ['A', 'A_1', 'B', 'C']) - test = NameValidator(case_sensitive='upper').validate(names) - assert_equal(test, ['A', 'A_1', 'B', 'C']) - test = NameValidator(case_sensitive='lower').validate(names) - assert_equal(test, ['a', 'a_1', 'b', 'c']) - - def test_excludelist(self): - "Test excludelist" - names = ['dates', 'data', 'Other Data', 'mask'] - validator = NameValidator(excludelist=['dates', 'data', 'mask']) - test = validator.validate(names) - assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) - - def test_missing_names(self): - "Test validate missing names" - namelist = ('a', 'b', 'c') - validator = NameValidator() - assert_equal(validator(namelist), ['a', 'b', 'c']) - namelist = ('', 'b', 'c') - assert_equal(validator(namelist), ['f0', 'b', 'c']) - namelist = ('a', 'b', '') - assert_equal(validator(namelist), ['a', 'b', 'f0']) - namelist = ('', 'f0', '') - assert_equal(validator(namelist), ['f1', 'f0', 'f2']) - - def test_validate_nb_names(self): - "Test validate nb names" - namelist = ('a', 'b', 'c') - validator = NameValidator() 
- assert_equal(validator(namelist, nbfields=1), ('a',)) - assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), - ['a', 'b', 'c', 'g0', 'g1']) - - def test_validate_wo_names(self): - "Test validate no names" - namelist = None - validator = NameValidator() - assert_(validator(namelist) is None) - assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) - -#------------------------------------------------------------------------------- - - -def _bytes_to_date(s): - if sys.version_info[0] >= 3: - return date(*time.strptime(s.decode('latin1'), "%Y-%m-%d")[:3]) - else: - return date(*time.strptime(s, "%Y-%m-%d")[:3]) - - -class TestStringConverter(TestCase): - "Test StringConverter" - - def test_creation(self): - "Test creation of a StringConverter" - converter = StringConverter(int, -99999) - assert_equal(converter._status, 1) - assert_equal(converter.default, -99999) - - def test_upgrade(self): - "Tests the upgrade method." - converter = StringConverter() - assert_equal(converter._status, 0) - converter.upgrade(asbytes('0')) - assert_equal(converter._status, 1) - converter.upgrade(asbytes('0.')) - assert_equal(converter._status, 2) - converter.upgrade(asbytes('0j')) - assert_equal(converter._status, 3) - converter.upgrade(asbytes('a')) - assert_equal(converter._status, len(converter._mapper) - 1) - - def test_missing(self): - "Tests the use of missing values." - converter = StringConverter(missing_values=(asbytes('missing'), - asbytes('missed'))) - converter.upgrade(asbytes('0')) - assert_equal(converter(asbytes('0')), 0) - assert_equal(converter(asbytes('')), converter.default) - assert_equal(converter(asbytes('missing')), converter.default) - assert_equal(converter(asbytes('missed')), converter.default) - try: - converter('miss') - except ValueError: - pass - - def test_upgrademapper(self): - "Tests updatemapper" - dateparser = _bytes_to_date - StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) - convert = StringConverter(dateparser, date(2000, 1, 1)) - test = convert(asbytes('2001-01-01')) - assert_equal(test, date(2001, 1, 1)) - test = convert(asbytes('2009-01-01')) - assert_equal(test, date(2009, 1, 1)) - test = convert(asbytes('')) - assert_equal(test, date(2000, 1, 1)) - - def test_string_to_object(self): - "Make sure that string-to-object functions are properly recognized" - conv = StringConverter(_bytes_to_date) - assert_equal(conv._mapper[-2][0](0), 0j) - assert_(hasattr(conv, 'default')) - - def test_keep_default(self): - "Make sure we don't lose an explicit default" - converter = StringConverter(None, missing_values=asbytes(''), - default=-999) - converter.upgrade(asbytes('3.14159265')) - assert_equal(converter.default, -999) - assert_equal(converter.type, np.dtype(float)) - # - converter = StringConverter( - None, missing_values=asbytes(''), default=0) - converter.upgrade(asbytes('3.14159265')) - assert_equal(converter.default, 0) - assert_equal(converter.type, np.dtype(float)) - - def test_keep_default_zero(self): - "Check that we don't lose a default of 0" - converter = StringConverter(int, default=0, - missing_values=asbytes("N/A")) - assert_equal(converter.default, 0) - - def test_keep_missing_values(self): - "Check that we're not losing missing values" - converter = StringConverter(int, default=0, - missing_values=asbytes("N/A")) - assert_equal( - converter.missing_values, set(asbytes_nested(['', 'N/A']))) - - def test_int64_dtype(self): - "Check that int64 integer types can be specified" - converter = StringConverter(np.int64, default=0) - val = 
asbytes("-9223372036854775807") - assert_(converter(val) == -9223372036854775807) - val = asbytes("9223372036854775807") - assert_(converter(val) == 9223372036854775807) - - def test_uint64_dtype(self): - "Check that uint64 integer types can be specified" - converter = StringConverter(np.uint64, default=0) - val = asbytes("9223372043271415339") - assert_(converter(val) == 9223372043271415339) - - -class TestMiscFunctions(TestCase): - - def test_has_nested_dtype(self): - "Test has_nested_dtype" - ndtype = np.dtype(np.float) - assert_equal(has_nested_fields(ndtype), False) - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - assert_equal(has_nested_fields(ndtype), False) - ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - assert_equal(has_nested_fields(ndtype), True) - - def test_easy_dtype(self): - "Test ndtype on dtypes" - # Simple case - ndtype = float - assert_equal(easy_dtype(ndtype), np.dtype(float)) - # As string w/o names - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype), - np.dtype([('f0', "i4"), ('f1', "f8")])) - # As string w/o names but different default format - assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), - np.dtype([('field_000', "i4"), ('field_001', "f8")])) - # As string w/ names - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names="a, b"), - np.dtype([('a', "i4"), ('b', "f8")])) - # As string w/ names (too many) - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([('a', "i4"), ('b', "f8")])) - # As string w/ names (not enough) - ndtype = "i4, f8" - assert_equal(easy_dtype(ndtype, names=", b"), - np.dtype([('f0', "i4"), ('b', "f8")])) - # ... (with different default format) - assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), - np.dtype([('a', "i4"), ('f00', "f8")])) - # As list of tuples w/o names - ndtype = [('A', int), ('B', float)] - assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) - # As list of tuples w/ names - assert_equal(easy_dtype(ndtype, names="a,b"), - np.dtype([('a', int), ('b', float)])) - # As list of tuples w/ not enough names - assert_equal(easy_dtype(ndtype, names="a"), - np.dtype([('a', int), ('f0', float)])) - # As list of tuples w/ too many names - assert_equal(easy_dtype(ndtype, names="a,b,c"), - np.dtype([('a', int), ('b', float)])) - # As list of types w/o names - ndtype = (int, float, float) - assert_equal(easy_dtype(ndtype), - np.dtype([('f0', int), ('f1', float), ('f2', float)])) - # As list of types w names - ndtype = (int, float, float) - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([('a', int), ('b', float), ('c', float)])) - # As simple dtype w/ names - ndtype = np.dtype(float) - assert_equal(easy_dtype(ndtype, names="a, b, c"), - np.dtype([(_, float) for _ in ('a', 'b', 'c')])) - # As simple dtype w/o names (but multiple fields) - ndtype = np.dtype(float) - assert_equal( - easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), - np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) - - def test_flatten_dtype(self): - "Testing flatten_dtype" - # Standard dtype - dt = np.dtype([("a", "f8"), ("b", "f8")]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, float]) - # Recursive dtype - dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) - # dtype with shaped fields - dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, int]) - dt_flat = 
flatten_dtype(dt, True) - assert_equal(dt_flat, [float] * 2 + [int] * 3) - # dtype w/ titles - dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) - dt_flat = flatten_dtype(dt) - assert_equal(dt_flat, [float, float]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__version.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__version.py deleted file mode 100644 index bbafe68eb3554..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test__version.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Tests for the NumpyVersion class. - -""" -from __future__ import division, absolute_import, print_function - -from numpy.testing import assert_, run_module_suite, assert_raises -from numpy.lib import NumpyVersion - - -def test_main_versions(): - assert_(NumpyVersion('1.8.0') == '1.8.0') - for ver in ['1.9.0', '2.0.0', '1.8.1']: - assert_(NumpyVersion('1.8.0') < ver) - - for ver in ['1.7.0', '1.7.1', '0.9.9']: - assert_(NumpyVersion('1.8.0') > ver) - - -def test_version_1_point_10(): - # regression test for gh-2998. - assert_(NumpyVersion('1.9.0') < '1.10.0') - assert_(NumpyVersion('1.11.0') < '1.11.1') - assert_(NumpyVersion('1.11.0') == '1.11.0') - assert_(NumpyVersion('1.99.11') < '1.99.12') - - -def test_alpha_beta_rc(): - assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') - for ver in ['1.8.0', '1.8.0rc2']: - assert_(NumpyVersion('1.8.0rc1') < ver) - - for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: - assert_(NumpyVersion('1.8.0rc1') > ver) - - assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') - - -def test_dev_version(): - assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') - for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: - assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) - - assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') - - -def test_dev_a_b_rc_mixed(): - assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') - assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') - - -def test_raises(): - for ver in ['1.9', '1,9.0', '1.7.x']: - assert_raises(ValueError, NumpyVersion, ver) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraypad.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraypad.py deleted file mode 100644 index f8ba8643abddc..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraypad.py +++ /dev/null @@ -1,560 +0,0 @@ -"""Tests for the pad functions. 
- -""" -from __future__ import division, absolute_import, print_function - -from numpy.testing import TestCase, run_module_suite, assert_array_equal -from numpy.testing import assert_raises, assert_array_almost_equal -import numpy as np -from numpy.lib import pad - - -class TestStatistic(TestCase): - def test_check_mean_stat_length(self): - a = np.arange(100).astype('f') - a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) - b = np.array( - [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, - 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, - 0.5, 0.5, 0.5, 0.5, 0.5, - - 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., - 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., - 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., - 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., - 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., - 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., - 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., - 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., - - 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., - 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. - ]) - assert_array_equal(a, b) - - def test_check_maximum_1(self): - a = np.arange(100) - a = pad(a, (25, 20), 'maximum') - b = np.array( - [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] - ) - assert_array_equal(a, b) - - def test_check_maximum_2(self): - a = np.arange(100) + 1 - a = pad(a, (25, 20), 'maximum') - b = np.array( - [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - 100, 100, 100, 100, 100, - - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, - 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, - 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, - 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - - 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] - ) - assert_array_equal(a, b) - - def test_check_minimum_1(self): - a = np.arange(100) - a = pad(a, (25, 20), 'minimum') - b = np.array( - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ) - assert_array_equal(a, b) - - def test_check_minimum_2(self): - a = np.arange(100) + 2 - a = pad(a, (25, 20), 
'minimum') - b = np.array( - [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, - - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] - ) - assert_array_equal(a, b) - - def test_check_median(self): - a = np.arange(100).astype('f') - a = pad(a, (25, 20), 'median') - b = np.array( - [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, - - 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., - 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., - 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., - 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., - 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., - 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., - 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., - 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., - - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] - ) - assert_array_equal(a, b) - - def test_check_median_01(self): - a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) - a = pad(a, 1, 'median') - b = np.array( - [[4, 4, 5, 4, 4], - - [3, 3, 1, 4, 3], - [5, 4, 5, 9, 5], - [8, 9, 8, 2, 8], - - [4, 4, 5, 4, 4]] - ) - assert_array_equal(a, b) - - def test_check_median_02(self): - a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) - a = pad(a.T, 1, 'median').T - b = np.array( - [[5, 4, 5, 4, 5], - - [3, 3, 1, 4, 3], - [5, 4, 5, 9, 5], - [8, 9, 8, 2, 8], - - [5, 4, 5, 4, 5]] - ) - assert_array_equal(a, b) - - def test_check_mean_shape_one(self): - a = [[4, 5, 6]] - a = pad(a, (5, 7), 'mean', stat_length=2) - b = np.array( - [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], - [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] - ) - assert_array_equal(a, b) - - def test_check_mean_2(self): - a = np.arange(100).astype('f') - a = pad(a, (25, 20), 'mean') - b = np.array( - [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, - - 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., - 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., - 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., - 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., - 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., - 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 
- 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., - 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., - - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, - 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] - ) - assert_array_equal(a, b) - - -class TestConstant(TestCase): - def test_check_constant(self): - a = np.arange(100) - a = pad(a, (25, 20), 'constant', constant_values=(10, 20)) - b = np.array( - [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] - ) - assert_array_equal(a, b) - - -class TestLinearRamp(TestCase): - def test_check_simple(self): - a = np.arange(100).astype('f') - a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) - b = np.array( - [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, - 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, - 0.80, 0.64, 0.48, 0.32, 0.16, - - 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, - 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, - 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, - 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, - 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, - 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, - 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, - 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, - 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, - 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, - - 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, - 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] 
- ) - assert_array_almost_equal(a, b, decimal=5) - - -class TestReflect(TestCase): - def test_check_simple(self): - a = np.arange(100) - a = pad(a, (25, 20), 'reflect') - b = np.array( - [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, - 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, - 5, 4, 3, 2, 1, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, - 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] - ) - assert_array_equal(a, b) - - def test_check_large_pad(self): - a = [[4, 5, 6], [6, 7, 8]] - a = pad(a, (5, 7), 'reflect') - b = np.array( - [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] - ) - assert_array_equal(a, b) - - def test_check_shape(self): - a = [[4, 5, 6]] - a = pad(a, (5, 7), 'reflect') - b = np.array( - [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], - [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] - ) - assert_array_equal(a, b) - - def test_check_01(self): - a = pad([1, 2, 3], 2, 'reflect') - b = np.array([3, 2, 1, 2, 3, 2, 1]) - assert_array_equal(a, b) - - def test_check_02(self): - a = pad([1, 2, 3], 3, 'reflect') - b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) - assert_array_equal(a, b) - - def test_check_03(self): - a = pad([1, 2, 3], 4, 'reflect') - b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) - assert_array_equal(a, b) - - -class TestWrap(TestCase): - def test_check_simple(self): - a = np.arange(100) - a = pad(a, (25, 20), 'wrap') - b = np.array( - [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, - 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, - 95, 96, 97, 98, 99, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - ) - 
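The reflect and wrap expectations spelled out above exercise np.pad's mirroring and periodic modes. A minimal sketch, separate from the deleted tests, contrasting the edge-extension modes on a small input:

import numpy as np

a = np.array([1, 2, 3, 4])
print(np.pad(a, 2, mode='reflect'))  # [3 2 1 2 3 4 3 2] -- mirrors without repeating the edge value
print(np.pad(a, 2, mode='wrap'))     # [3 4 1 2 3 4 1 2] -- tiles the array periodically
print(np.pad(a, 2, mode='edge'))     # [1 1 1 2 3 4 4 4] -- repeats the edge value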
assert_array_equal(a, b) - - def test_check_large_pad(self): - a = np.arange(12) - a = np.reshape(a, (3, 4)) - a = pad(a, (10, 12), 'wrap') - b = np.array( - [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11], - [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, - 3, 0, 1, 2, 3, 0, 1, 2, 3], - [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, - 7, 4, 5, 6, 7, 4, 5, 6, 7], - [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, - 11, 8, 9, 10, 11, 8, 9, 10, 11]] - ) - assert_array_equal(a, b) - - def test_check_01(self): - a = pad([1, 2, 3], 3, 'wrap') - b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) - assert_array_equal(a, b) - - def test_check_02(self): - a = pad([1, 2, 3], 4, 'wrap') - b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) - assert_array_equal(a, b) - - -class TestStatLen(TestCase): - def test_check_simple(self): - a = np.arange(30) - a = np.reshape(a, (6, 5)) - a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) - b = np.array( - [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], - [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], - - [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], - [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], - [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], - [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], - [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], - [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], - - [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], - [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], - [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] - ) - assert_array_equal(a, b) - - -class TestEdge(TestCase): - def test_check_simple(self): - a = np.arange(12) - a = np.reshape(a, (4, 3)) - a = pad(a, ((2, 3), (3, 2)), 'edge') - b = np.array( - [[0, 0, 0, 0, 1, 2, 2, 2], - 
[0, 0, 0, 0, 1, 2, 2, 2], - - [0, 0, 0, 0, 1, 2, 2, 2], - [3, 3, 3, 3, 4, 5, 5, 5], - [6, 6, 6, 6, 7, 8, 8, 8], - [9, 9, 9, 9, 10, 11, 11, 11], - - [9, 9, 9, 9, 10, 11, 11, 11], - [9, 9, 9, 9, 10, 11, 11, 11], - [9, 9, 9, 9, 10, 11, 11, 11]] - ) - assert_array_equal(a, b) - - -class TestZeroPadWidth(TestCase): - def test_zero_pad_width(self): - arr = np.arange(30) - arr = np.reshape(arr, (6, 5)) - for pad_width in (0, (0, 0), ((0, 0), (0, 0))): - assert_array_equal(arr, pad(arr, pad_width, mode='constant')) - - -class ValueError1(TestCase): - def test_check_simple(self): - arr = np.arange(30) - arr = np.reshape(arr, (6, 5)) - kwargs = dict(mode='mean', stat_length=(3, )) - assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)), - **kwargs) - - def test_check_negative_stat_length(self): - arr = np.arange(30) - arr = np.reshape(arr, (6, 5)) - kwargs = dict(mode='mean', stat_length=(-3, )) - assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)), - **kwargs) - - def test_check_negative_pad_width(self): - arr = np.arange(30) - arr = np.reshape(arr, (6, 5)) - kwargs = dict(mode='mean', stat_length=(3, )) - assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), - **kwargs) - - -class ValueError2(TestCase): - def test_check_simple(self): - arr = np.arange(30) - arr = np.reshape(arr, (6, 5)) - kwargs = dict(mode='mean', stat_length=(3, )) - assert_raises(ValueError, pad, arr, ((2, 3, 4), (3, 2)), - **kwargs) - - -class ValueError3(TestCase): - def test_check_simple(self): - arr = np.arange(30) - arr = np.reshape(arr, (6, 5)) - kwargs = dict(mode='mean', stat_length=(3, )) - assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), - **kwargs) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraysetops.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraysetops.py deleted file mode 100644 index e83f8552e2663..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arraysetops.py +++ /dev/null @@ -1,301 +0,0 @@ -"""Test functions for 1D array set operations. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import ( - run_module_suite, TestCase, assert_array_equal - ) -from numpy.lib.arraysetops import ( - ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d - ) - - -class TestSetOps(TestCase): - - def test_unique(self): - - def check_all(a, b, i1, i2, c, dt): - base_msg = 'check {0} failed for type {1}' - - msg = base_msg.format('values', dt) - v = unique(a) - assert_array_equal(v, b, msg) - - msg = base_msg.format('return_index', dt) - v, j = unique(a, 1, 0, 0) - assert_array_equal(v, b, msg) - assert_array_equal(j, i1, msg) - - msg = base_msg.format('return_inverse', dt) - v, j = unique(a, 0, 1, 0) - assert_array_equal(v, b, msg) - assert_array_equal(j, i2, msg) - - msg = base_msg.format('return_counts', dt) - v, j = unique(a, 0, 0, 1) - assert_array_equal(v, b, msg) - assert_array_equal(j, c, msg) - - msg = base_msg.format('return_index and return_inverse', dt) - v, j1, j2 = unique(a, 1, 1, 0) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, i2, msg) - - msg = base_msg.format('return_index and return_counts', dt) - v, j1, j2 = unique(a, 1, 0, 1) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, c, msg) - - msg = base_msg.format('return_inverse and return_counts', dt) - v, j1, j2 = unique(a, 0, 1, 1) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i2, msg) - assert_array_equal(j2, c, msg) - - msg = base_msg.format(('return_index, return_inverse ' - 'and return_counts'), dt) - v, j1, j2, j3 = unique(a, 1, 1, 1) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, i2, msg) - assert_array_equal(j3, c, msg) - - a = [5, 7, 1, 2, 1, 5, 7]*10 - b = [1, 2, 5, 7] - i1 = [2, 3, 0, 1] - i2 = [2, 3, 0, 1, 0, 2, 3]*10 - c = np.multiply([2, 1, 2, 2], 10) - - # test for numeric arrays - types = [] - types.extend(np.typecodes['AllInteger']) - types.extend(np.typecodes['AllFloat']) - types.append('datetime64[D]') - types.append('timedelta64[D]') - for dt in types: - aa = np.array(a, dt) - bb = np.array(b, dt) - check_all(aa, bb, i1, i2, c, dt) - - # test for object arrays - dt = 'O' - aa = np.empty(len(a), dt) - aa[:] = a - bb = np.empty(len(b), dt) - bb[:] = b - check_all(aa, bb, i1, i2, c, dt) - - # test for structured arrays - dt = [('', 'i'), ('', 'i')] - aa = np.array(list(zip(a, a)), dt) - bb = np.array(list(zip(b, b)), dt) - check_all(aa, bb, i1, i2, c, dt) - - # test for ticket #2799 - aa = [1. + 0.j, 1 - 1.j, 1] - assert_array_equal(np.unique(aa), [1. - 1.j, 1. 
+ 0.j]) - - # test for ticket #4785 - a = [(1, 2), (1, 2), (2, 3)] - unq = [1, 2, 3] - inv = [0, 1, 0, 1, 1, 2] - a1 = unique(a) - assert_array_equal(a1, unq) - a2, a2_inv = unique(a, return_inverse=True) - assert_array_equal(a2, unq) - assert_array_equal(a2_inv, inv) - - def test_intersect1d(self): - # unique inputs - a = np.array([5, 7, 1, 2]) - b = np.array([2, 4, 3, 1, 5]) - - ec = np.array([1, 2, 5]) - c = intersect1d(a, b, assume_unique=True) - assert_array_equal(c, ec) - - # non-unique inputs - a = np.array([5, 5, 7, 1, 2]) - b = np.array([2, 1, 4, 3, 3, 1, 5]) - - ed = np.array([1, 2, 5]) - c = intersect1d(a, b) - assert_array_equal(c, ed) - - assert_array_equal([], intersect1d([], [])) - - def test_setxor1d(self): - a = np.array([5, 7, 1, 2]) - b = np.array([2, 4, 3, 1, 5]) - - ec = np.array([3, 4, 7]) - c = setxor1d(a, b) - assert_array_equal(c, ec) - - a = np.array([1, 2, 3]) - b = np.array([6, 5, 4]) - - ec = np.array([1, 2, 3, 4, 5, 6]) - c = setxor1d(a, b) - assert_array_equal(c, ec) - - a = np.array([1, 8, 2, 3]) - b = np.array([6, 5, 4, 8]) - - ec = np.array([1, 2, 3, 4, 5, 6]) - c = setxor1d(a, b) - assert_array_equal(c, ec) - - assert_array_equal([], setxor1d([], [])) - - def test_ediff1d(self): - zero_elem = np.array([]) - one_elem = np.array([1]) - two_elem = np.array([1, 2]) - - assert_array_equal([], ediff1d(zero_elem)) - assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) - assert_array_equal([0], ediff1d(zero_elem, to_end=0)) - assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) - assert_array_equal([], ediff1d(one_elem)) - assert_array_equal([1], ediff1d(two_elem)) - - def test_in1d(self): - # we use two different sizes for the b array here to test the - # two different paths in in1d(). - for mult in (1, 10): - # One check without np.array, to make sure lists are handled correct - a = [5, 7, 1, 2] - b = [2, 4, 3, 1, 5] * mult - ec = np.array([True, False, True, True]) - c = in1d(a, b, assume_unique=True) - assert_array_equal(c, ec) - - a[0] = 8 - ec = np.array([False, False, True, True]) - c = in1d(a, b, assume_unique=True) - assert_array_equal(c, ec) - - a[0], a[3] = 4, 8 - ec = np.array([True, False, True, False]) - c = in1d(a, b, assume_unique=True) - assert_array_equal(c, ec) - - a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) - b = [2, 3, 4] * mult - ec = [False, True, False, True, True, True, True, True, True, False, - True, False, False, False] - c = in1d(a, b) - assert_array_equal(c, ec) - - b = b + [5, 5, 4] * mult - ec = [True, True, True, True, True, True, True, True, True, True, - True, False, True, True] - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5, 7, 1, 2]) - b = np.array([2, 4, 3, 1, 5] * mult) - ec = np.array([True, False, True, True]) - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5, 7, 1, 1, 2]) - b = np.array([2, 4, 3, 3, 1, 5] * mult) - ec = np.array([True, False, True, True, True]) - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5, 5]) - b = np.array([2, 2] * mult) - ec = np.array([False, False]) - c = in1d(a, b) - assert_array_equal(c, ec) - - a = np.array([5]) - b = np.array([2]) - ec = np.array([False]) - c = in1d(a, b) - assert_array_equal(c, ec) - - assert_array_equal(in1d([], []), []) - - def test_in1d_char_array(self): - a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b']) - b = np.array(['a', 'c']) - - ec = np.array([True, False, True, False, False, True, False, False]) - c = in1d(a, b) - - assert_array_equal(c, ec) - - def test_in1d_invert(self): - 
"Test in1d's invert parameter" - # We use two different sizes for the b array here to test the - # two different paths in in1d(). - for mult in (1, 10): - a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) - b = [2, 3, 4] * mult - assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) - - def test_in1d_ravel(self): - # Test that in1d ravels its input arrays. This is not documented - # behavior however. The test is to ensure consistentency. - a = np.arange(6).reshape(2, 3) - b = np.arange(3, 9).reshape(3, 2) - long_b = np.arange(3, 63).reshape(30, 2) - ec = np.array([False, False, False, True, True, True]) - - assert_array_equal(in1d(a, b, assume_unique=True), ec) - assert_array_equal(in1d(a, b, assume_unique=False), ec) - assert_array_equal(in1d(a, long_b, assume_unique=True), ec) - assert_array_equal(in1d(a, long_b, assume_unique=False), ec) - - def test_union1d(self): - a = np.array([5, 4, 7, 1, 2]) - b = np.array([2, 4, 3, 3, 2, 1, 5]) - - ec = np.array([1, 2, 3, 4, 5, 7]) - c = union1d(a, b) - assert_array_equal(c, ec) - - assert_array_equal([], union1d([], [])) - - def test_setdiff1d(self): - a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) - b = np.array([2, 4, 3, 3, 2, 1, 5]) - - ec = np.array([6, 7]) - c = setdiff1d(a, b) - assert_array_equal(c, ec) - - a = np.arange(21) - b = np.arange(19) - ec = np.array([19, 20]) - c = setdiff1d(a, b) - assert_array_equal(c, ec) - - assert_array_equal([], setdiff1d([], [])) - - def test_setdiff1d_char_array(self): - a = np.array(['a', 'b', 'c']) - b = np.array(['a', 'b', 's']) - assert_array_equal(setdiff1d(a, b), np.array(['c'])) - - def test_manyways(self): - a = np.array([5, 7, 1, 2, 8]) - b = np.array([9, 8, 2, 4, 3, 1, 5]) - - c1 = setxor1d(a, b) - aux1 = intersect1d(a, b) - aux2 = union1d(a, b) - c2 = setdiff1d(aux2, aux1) - assert_array_equal(c1, c2) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arrayterator.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arrayterator.py deleted file mode 100644 index 64ad7f4de4b53..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_arrayterator.py +++ /dev/null @@ -1,52 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from operator import mul -from functools import reduce - -import numpy as np -from numpy.random import randint -from numpy.lib import Arrayterator -from numpy.testing import assert_ - - -def test(): - np.random.seed(np.arange(10)) - - # Create a random array - ndims = randint(5)+1 - shape = tuple(randint(10)+1 for dim in range(ndims)) - els = reduce(mul, shape) - a = np.arange(els) - a.shape = shape - - buf_size = randint(2*els) - b = Arrayterator(a, buf_size) - - # Check that each block has at most ``buf_size`` elements - for block in b: - assert_(len(block.flat) <= (buf_size or els)) - - # Check that all elements are iterated correctly - assert_(list(b.flat) == list(a.flat)) - - # Slice arrayterator - start = [randint(dim) for dim in shape] - stop = [randint(dim)+1 for dim in shape] - step = [randint(dim)+1 for dim in shape] - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) - c = b[slice_] - d = a[slice_] - - # Check that each block has at most ``buf_size`` elements - for block in c: - assert_(len(block.flat) <= (buf_size or els)) - - # Check that the arrayterator is sliced correctly - assert_(np.all(c.__array__() == d)) - - # Check that all elements are iterated correctly - assert_(list(c.flat) == list(d.flat)) - -if __name__ == 
'__main__': - from numpy.testing import run_module_suite - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_financial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_financial.py deleted file mode 100644 index a4b9cfe2ed32a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_financial.py +++ /dev/null @@ -1,160 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_almost_equal - ) - - -class TestFinancial(TestCase): - def test_rate(self): - assert_almost_equal(np.rate(10, 0, -3500, 10000), - 0.1107, 4) - - def test_irr(self): - v = [-150000, 15000, 25000, 35000, 45000, 60000] - assert_almost_equal(np.irr(v), - 0.0524, 2) - v = [-100, 0, 0, 74] - assert_almost_equal(np.irr(v), - -0.0955, 2) - v = [-100, 39, 59, 55, 20] - assert_almost_equal(np.irr(v), - 0.28095, 2) - v = [-100, 100, 0, -7] - assert_almost_equal(np.irr(v), - -0.0833, 2) - v = [-100, 100, 0, 7] - assert_almost_equal(np.irr(v), - 0.06206, 2) - v = [-5, 10.5, 1, -8, 1] - assert_almost_equal(np.irr(v), - 0.0886, 2) - - def test_pv(self): - assert_almost_equal(np.pv(0.07, 20, 12000, 0), - -127128.17, 2) - - def test_fv(self): - assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), - 86609.36, 2) - - def test_pmt(self): - assert_almost_equal(np.pmt(0.08/12, 5*12, 15000), - -304.146, 3) - - def test_ppmt(self): - np.round(np.ppmt(0.1/12, 1, 60, 55000), 2) == 710.25 - - def test_ipmt(self): - np.round(np.ipmt(0.1/12, 1, 24, 2000), 2) == 16.67 - - def test_nper(self): - assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), - 21.54, 2) - - def test_nper2(self): - assert_almost_equal(np.nper(0.0, -2000, 0, 100000.), - 50.0, 1) - - def test_npv(self): - assert_almost_equal( - np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]), - 122.89, 2) - - def test_mirr(self): - val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000] - assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) - - val = [-120000, 39000, 30000, 21000, 37000, 46000] - assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6) - - val = [100, 200, -50, 300, -200] - assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4) - - val = [39000, 30000, 21000, 37000, 46000] - assert_(np.isnan(np.mirr(val, 0.10, 0.12))) - - def test_when(self): - #begin - assert_almost_equal(np.rate(10, 20, -3500, 10000, 1), - np.rate(10, 20, -3500, 10000, 'begin'), 4) - #end - assert_almost_equal(np.rate(10, 20, -3500, 10000), - np.rate(10, 20, -3500, 10000, 'end'), 4) - assert_almost_equal(np.rate(10, 20, -3500, 10000, 0), - np.rate(10, 20, -3500, 10000, 'end'), 4) - - # begin - assert_almost_equal(np.pv(0.07, 20, 12000, 0, 1), - np.pv(0.07, 20, 12000, 0, 'begin'), 2) - # end - assert_almost_equal(np.pv(0.07, 20, 12000, 0), - np.pv(0.07, 20, 12000, 0, 'end'), 2) - assert_almost_equal(np.pv(0.07, 20, 12000, 0, 0), - np.pv(0.07, 20, 12000, 0, 'end'), 2) - - # begin - assert_almost_equal(np.fv(0.075, 20, -2000, 0, 1), - np.fv(0.075, 20, -2000, 0, 'begin'), 4) - # end - assert_almost_equal(np.fv(0.075, 20, -2000, 0), - np.fv(0.075, 20, -2000, 0, 'end'), 4) - assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), - np.fv(0.075, 20, -2000, 0, 'end'), 4) - - # begin - assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 1), - np.pmt(0.08/12, 5*12, 15000., 0, 'begin'), 4) - # end - assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0), - np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4) - 
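test_when verifies that the integer codes 0 and 1 select the same payment timing as the strings 'end' and 'begin'. A minimal sketch, separate from the deleted tests, of the begin/end relationship for np.pmt (these financial functions were later split out of numpy into the numpy-financial package):

import numpy as np

rate, nper, pv = 0.08 / 12, 5 * 12, 15000
end = np.pmt(rate, nper, pv)                # payment due at period end (the default)
begin = np.pmt(rate, nper, pv, 0, 'begin')  # annuity-due payment
# A begin-of-period payment is one period of interest smaller in magnitude:
print(round(end, 3))                 # -304.146
print(round(begin * (1 + rate), 3))  # -304.146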
assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 0), - np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4) - - # begin - assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 1), - np.ppmt(0.1/12, 1, 60, 55000, 0, 'begin'), 4) - # end - assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0), - np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4) - assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 0), - np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4) - - # begin - assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 1), - np.ipmt(0.1/12, 1, 24, 2000, 0, 'begin'), 4) - # end - assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0), - np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4) - assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 0), - np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4) - - # begin - assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 1), - np.nper(0.075, -2000, 0, 100000., 'begin'), 4) - # end - assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), - np.nper(0.075, -2000, 0, 100000., 'end'), 4) - assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 0), - np.nper(0.075, -2000, 0, 100000., 'end'), 4) - - def test_broadcast(self): - assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]), - [21.5449442, 20.76156441], 4) - - assert_almost_equal(np.ipmt(0.1/12, list(range(5)), 24, 2000), - [-17.29165168, -16.66666667, -16.03647345, - -15.40102862, -14.76028842], 4) - - assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000), - [-74.998201, -75.62318601, -76.25337923, - -76.88882405, -77.52956425], 4) - - assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000, 0, - [0, 0, 1, 'end', 'begin']), - [-74.998201, -75.62318601, -75.62318601, - -76.88882405, -76.88882405], 4) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_format.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_format.py deleted file mode 100644 index c09386789fbee..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_format.py +++ /dev/null @@ -1,706 +0,0 @@ -from __future__ import division, absolute_import, print_function - -r''' Test the .npy file format. - -Set up: - - >>> import sys - >>> from io import BytesIO - >>> from numpy.lib import format - >>> - >>> scalars = [ - ... np.uint8, - ... np.int8, - ... np.uint16, - ... np.int16, - ... np.uint32, - ... np.int32, - ... np.uint64, - ... np.int64, - ... np.float32, - ... np.float64, - ... np.complex64, - ... np.complex128, - ... object, - ... ] - >>> - >>> basic_arrays = [] - >>> - >>> for scalar in scalars: - ... for endian in '<>': - ... dtype = np.dtype(scalar).newbyteorder(endian) - ... basic = np.arange(15).astype(dtype) - ... basic_arrays.extend([ - ... np.array([], dtype=dtype), - ... np.array(10, dtype=dtype), - ... basic, - ... basic.reshape((3,5)), - ... basic.reshape((3,5)).T, - ... basic.reshape((3,5))[::-1,::2], - ... ]) - ... - >>> - >>> Pdescr = [ - ... ('x', 'i4', (2,)), - ... ('y', 'f8', (2, 2)), - ... ('z', 'u1')] - >>> - >>> - >>> PbufferT = [ - ... ([3,2], [[6.,4.],[6.,4.]], 8), - ... ([4,3], [[7.,5.],[7.,5.]], 9), - ... ] - >>> - >>> - >>> Ndescr = [ - ... ('x', 'i4', (2,)), - ... ('Info', [ - ... ('value', 'c16'), - ... ('y2', 'f8'), - ... ('Info2', [ - ... ('name', 'S2'), - ... ('value', 'c16', (2,)), - ... ('y3', 'f8', (2,)), - ... ('z3', 'u4', (2,))]), - ... ('name', 'S2'), - ... ('z2', 'b1')]), - ... ('color', 'S2'), - ... ('info', [ - ... ('Name', 'U8'), - ... ('Value', 'c16')]), - ... ('y', 'f8', (2, 2)), - ... 
('z', 'u1')] - >>> - >>> - >>> NbufferT = [ - ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), - ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), - ... ] - >>> - >>> - >>> record_arrays = [ - ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), - ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), - ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), - ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), - ... ] - -Test the magic string writing. - - >>> format.magic(1, 0) - '\x93NUMPY\x01\x00' - >>> format.magic(0, 0) - '\x93NUMPY\x00\x00' - >>> format.magic(255, 255) - '\x93NUMPY\xff\xff' - >>> format.magic(2, 5) - '\x93NUMPY\x02\x05' - -Test the magic string reading. - - >>> format.read_magic(BytesIO(format.magic(1, 0))) - (1, 0) - >>> format.read_magic(BytesIO(format.magic(0, 0))) - (0, 0) - >>> format.read_magic(BytesIO(format.magic(255, 255))) - (255, 255) - >>> format.read_magic(BytesIO(format.magic(2, 5))) - (2, 5) - -Test the header writing. - - >>> for arr in basic_arrays + record_arrays: - ... f = BytesIO() - ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it - ... print repr(f.getvalue()) - ... - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i2', 
'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" - 
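The first two bytes of each expected header string above are a little-endian uint16 length prefix, not part of the header dict. A quick standard-library check of the arithmetic, separate from the deleted doctest:

import struct

# b'F\x00' decodes to 70.  With the 6-byte magic, 2 version bytes and the
# 2-byte length field, 6 + 2 + 2 + 70 = 80, a multiple of 16: format 1.0
# pads headers with spaces so the array data starts on a 16-byte boundary.
print(struct.unpack('<H', b'F\x00')[0])  # 70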
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" - "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" - "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" - "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" - "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" -''' - -import sys -import os -import shutil -import tempfile -import warnings -from io import BytesIO - -import numpy as np -from numpy.compat import asbytes, asbytes_nested -from numpy.testing import ( - run_module_suite, assert_, assert_array_equal, assert_raises, raises, - dec - ) -from numpy.lib import format - - -tempdir = None - -# Module-level setup. - - -def setup_module(): - global tempdir - tempdir = tempfile.mkdtemp() - - -def teardown_module(): - global tempdir - if tempdir is not None and os.path.isdir(tempdir): - shutil.rmtree(tempdir) - tempdir = None - - -# Generate some basic arrays to test with. -scalars = [ - np.uint8, - np.int8, - np.uint16, - np.int16, - np.uint32, - np.int32, - np.uint64, - np.int64, - np.float32, - np.float64, - np.complex64, - np.complex128, - object, -] -basic_arrays = [] -for scalar in scalars: - for endian in '<>': - dtype = np.dtype(scalar).newbyteorder(endian) - basic = np.arange(1500).astype(dtype) - basic_arrays.extend([ - # Empty - np.array([], dtype=dtype), - # Rank-0 - np.array(10, dtype=dtype), - # 1-D - basic, - # 2-D C-contiguous - basic.reshape((30, 50)), - # 2-D F-contiguous - basic.reshape((30, 50)).T, - # 2-D non-contiguous - basic.reshape((30, 50))[::-1, ::2], - ]) - -# More complicated record arrays. 
-# This is the structure of the table used for plain objects: -# -# +-+-+-+ -# |x|y|z| -# +-+-+-+ - -# Structure of a plain array description: -Pdescr = [ - ('x', 'i4', (2,)), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -# A plain list of tuples with values for testing: -PbufferT = [ - # x y z - ([3, 2], [[6., 4.], [6., 4.]], 8), - ([4, 3], [[7., 5.], [7., 5.]], 9), - ] - - -# This is the structure of the table used for nested objects (DON'T PANIC!): -# -# +-+---------------------------------+-----+----------+-+-+ -# |x|Info |color|info |y|z| -# | +-----+--+----------------+----+--+ +----+-----+ | | -# | |value|y2|Info2 |name|z2| |Name|Value| | | -# | | | +----+-----+--+--+ | | | | | | | -# | | | |name|value|y3|z3| | | | | | | | -# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ -# - -# The corresponding nested array description: -Ndescr = [ - ('x', 'i4', (2,)), - ('Info', [ - ('value', 'c16'), - ('y2', 'f8'), - ('Info2', [ - ('name', 'S2'), - ('value', 'c16', (2,)), - ('y3', 'f8', (2,)), - ('z3', 'u4', (2,))]), - ('name', 'S2'), - ('z2', 'b1')]), - ('color', 'S2'), - ('info', [ - ('Name', 'U8'), - ('Value', 'c16')]), - ('y', 'f8', (2, 2)), - ('z', 'u1')] - -NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 - ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), - 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), - ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), - 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), - ] - -record_arrays = [ - np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), - np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), - np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), - np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), -] - - -#BytesIO that reads a random number of bytes at a time -class BytesIOSRandomSize(BytesIO): - def read(self, size=None): - import random - size = random.randint(1, size) - return super(BytesIOSRandomSize, self).read(size) - - -def roundtrip(arr): - f = BytesIO() - format.write_array(f, arr) - f2 = BytesIO(f.getvalue()) - arr2 = format.read_array(f2) - return arr2 - - -def roundtrip_randsize(arr): - f = BytesIO() - format.write_array(f, arr) - f2 = BytesIOSRandomSize(f.getvalue()) - arr2 = format.read_array(f2) - return arr2 - - -def roundtrip_truncated(arr): - f = BytesIO() - format.write_array(f, arr) - #BytesIO is one byte short - f2 = BytesIO(f.getvalue()[0:-1]) - arr2 = format.read_array(f2) - return arr2 - - -def assert_equal_(o1, o2): - assert_(o1 == o2) - - -def test_roundtrip(): - for arr in basic_arrays + record_arrays: - arr2 = roundtrip(arr) - yield assert_array_equal, arr, arr2 - - -def test_roundtrip_randsize(): - for arr in basic_arrays + record_arrays: - if arr.dtype != object: - arr2 = roundtrip_randsize(arr) - yield assert_array_equal, arr, arr2 - - -def test_roundtrip_truncated(): - for arr in basic_arrays: - if arr.dtype != object: - yield assert_raises, ValueError, roundtrip_truncated, arr - - -def test_long_str(): - # check items larger than internal buffer size, gh-4027 - long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1))) - long_str_arr2 = roundtrip(long_str_arr) - assert_array_equal(long_str_arr, long_str_arr2) - - -@dec.slow -def test_memmap_roundtrip(): - # Fixme: test crashes nose on windows. - if not (sys.platform == 'win32' or sys.platform == 'cygwin'): - for arr in basic_arrays + record_arrays: - if arr.dtype.hasobject: - # Skip these since they can't be mmap'ed. 
- continue - # Write it out normally and through mmap. - nfn = os.path.join(tempdir, 'normal.npy') - mfn = os.path.join(tempdir, 'memmap.npy') - fp = open(nfn, 'wb') - try: - format.write_array(fp, arr) - finally: - fp.close() - - fortran_order = ( - arr.flags.f_contiguous and not arr.flags.c_contiguous) - ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype, - shape=arr.shape, fortran_order=fortran_order) - ma[...] = arr - del ma - - # Check that both of these files' contents are the same. - fp = open(nfn, 'rb') - normal_bytes = fp.read() - fp.close() - fp = open(mfn, 'rb') - memmap_bytes = fp.read() - fp.close() - yield assert_equal_, normal_bytes, memmap_bytes - - # Check that reading the file using memmap works. - ma = format.open_memmap(nfn, mode='r') - del ma - - -def test_compressed_roundtrip(): - arr = np.random.rand(200, 200) - npz_file = os.path.join(tempdir, 'compressed.npz') - np.savez_compressed(npz_file, arr=arr) - arr1 = np.load(npz_file)['arr'] - assert_array_equal(arr, arr1) - - -def test_version_2_0(): - f = BytesIO() - # requires more than 2 byte for header - dt = [(("%d" % i) * 100, float) for i in range(500)] - d = np.ones(1000, dtype=dt) - - format.write_array(f, d, version=(2, 0)) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) - format.write_array(f, d) - assert_(w[0].category is UserWarning) - - f.seek(0) - n = format.read_array(f) - assert_array_equal(d, n) - - # 1.0 requested but data cannot be saved this way - assert_raises(ValueError, format.write_array, f, d, (1, 0)) - - -def test_version_2_0_memmap(): - # requires more than 2 byte for header - dt = [(("%d" % i) * 100, float) for i in range(500)] - d = np.ones(1000, dtype=dt) - tf = tempfile.mktemp('', 'mmap', dir=tempdir) - - # 1.0 requested but data cannot be saved this way - assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype, - shape=d.shape, version=(1, 0)) - - ma = format.open_memmap(tf, mode='w+', dtype=d.dtype, - shape=d.shape, version=(2, 0)) - ma[...] = d - del ma - - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', UserWarning) - ma = format.open_memmap(tf, mode='w+', dtype=d.dtype, - shape=d.shape, version=None) - assert_(w[0].category is UserWarning) - ma[...] = d - del ma - - ma = format.open_memmap(tf, mode='r') - assert_array_equal(ma, d) - - -def test_write_version(): - f = BytesIO() - arr = np.arange(1) - # These should pass. - format.write_array(f, arr, version=(1, 0)) - format.write_array(f, arr) - - format.write_array(f, arr, version=None) - format.write_array(f, arr) - - format.write_array(f, arr, version=(2, 0)) - format.write_array(f, arr) - - # These should all fail. 
- bad_versions = [ - (1, 1), - (0, 0), - (0, 1), - (2, 2), - (255, 255), - ] - for version in bad_versions: - try: - format.write_array(f, arr, version=version) - except ValueError: - pass - else: - raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,)) - - -bad_version_magic = asbytes_nested([ - '\x93NUMPY\x01\x01', - '\x93NUMPY\x00\x00', - '\x93NUMPY\x00\x01', - '\x93NUMPY\x02\x00', - '\x93NUMPY\x02\x02', - '\x93NUMPY\xff\xff', -]) -malformed_magic = asbytes_nested([ - '\x92NUMPY\x01\x00', - '\x00NUMPY\x01\x00', - '\x93numpy\x01\x00', - '\x93MATLB\x01\x00', - '\x93NUMPY\x01', - '\x93NUMPY', - '', -]) - - -def test_read_magic_bad_magic(): - for magic in malformed_magic: - f = BytesIO(magic) - yield raises(ValueError)(format.read_magic), f - - -def test_read_version_1_0_bad_magic(): - for magic in bad_version_magic + malformed_magic: - f = BytesIO(magic) - yield raises(ValueError)(format.read_array), f - - -def test_bad_magic_args(): - assert_raises(ValueError, format.magic, -1, 1) - assert_raises(ValueError, format.magic, 256, 1) - assert_raises(ValueError, format.magic, 1, -1) - assert_raises(ValueError, format.magic, 1, 256) - - -def test_large_header(): - s = BytesIO() - d = {'a': 1, 'b': 2} - format.write_array_header_1_0(s, d) - - s = BytesIO() - d = {'a': 1, 'b': 2, 'c': 'x'*256*256} - assert_raises(ValueError, format.write_array_header_1_0, s, d) - - -def test_bad_header(): - # header of length less than 2 should fail - s = BytesIO() - assert_raises(ValueError, format.read_array_header_1_0, s) - s = BytesIO(asbytes('1')) - assert_raises(ValueError, format.read_array_header_1_0, s) - - # header shorter than indicated size should fail - s = BytesIO(asbytes('\x01\x00')) - assert_raises(ValueError, format.read_array_header_1_0, s) - - # headers without the exact keys required should fail - d = {"shape": (1, 2), - "descr": "x"} - s = BytesIO() - format.write_array_header_1_0(s, d) - assert_raises(ValueError, format.read_array_header_1_0, s) - - d = {"shape": (1, 2), - "fortran_order": False, - "descr": "x", - "extrakey": -1} - s = BytesIO() - format.write_array_header_1_0(s, d) - assert_raises(ValueError, format.read_array_header_1_0, s) - - -def test_large_file_support(): - from nose import SkipTest - if (sys.platform == 'win32' or sys.platform == 'cygwin'): - raise SkipTest("Unknown if Windows has sparse filesystems") - # try creating a large sparse file - tf_name = os.path.join(tempdir, 'sparse_file') - try: - # seek past end would work too, but linux truncate somewhat - # increases the chances that we have a sparse filesystem and can - # avoid actually writing 5GB - import subprocess as sp - sp.check_call(["truncate", "-s", "5368709120", tf_name]) - except: - raise SkipTest("Could not create 5GB large file") - # write a small array to the end - with open(tf_name, "wb") as f: - f.seek(5368709120) - d = np.arange(5) - np.save(f, d) - # read it back - with open(tf_name, "rb") as f: - f.seek(5368709120) - r = np.load(f) - assert_array_equal(r, d) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_function_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_function_base.py deleted file mode 100644 index 624b5f3eb58e3..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_function_base.py +++ /dev/null @@ -1,2131 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings - -import numpy as np -from 
numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_raises, - assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex - ) -from numpy.random import rand -from numpy.lib import * -from numpy.compat import long - - -class TestAny(TestCase): - def test_basic(self): - y1 = [0, 0, 1, 0] - y2 = [0, 0, 0, 0] - y3 = [1, 0, 1, 0] - assert_(np.any(y1)) - assert_(np.any(y3)) - assert_(not np.any(y2)) - - def test_nd(self): - y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]] - assert_(np.any(y1)) - assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0]) - assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1]) - - -class TestAll(TestCase): - def test_basic(self): - y1 = [0, 1, 1, 0] - y2 = [0, 0, 0, 0] - y3 = [1, 1, 1, 1] - assert_(not np.all(y1)) - assert_(np.all(y3)) - assert_(not np.all(y2)) - assert_(np.all(~np.array(y2))) - - def test_nd(self): - y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]] - assert_(not np.all(y1)) - assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1]) - assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1]) - - -class TestCopy(TestCase): - def test_basic(self): - a = np.array([[1, 2], [3, 4]]) - a_copy = np.copy(a) - assert_array_equal(a, a_copy) - a_copy[0, 0] = 10 - assert_equal(a[0, 0], 1) - assert_equal(a_copy[0, 0], 10) - - def test_order(self): - # It turns out that people rely on np.copy() preserving order by - # default; changing this broke scikit-learn: - # https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 - a = np.array([[1, 2], [3, 4]]) - assert_(a.flags.c_contiguous) - assert_(not a.flags.f_contiguous) - a_fort = np.array([[1, 2], [3, 4]], order="F") - assert_(not a_fort.flags.c_contiguous) - assert_(a_fort.flags.f_contiguous) - a_copy = np.copy(a) - assert_(a_copy.flags.c_contiguous) - assert_(not a_copy.flags.f_contiguous) - a_fort_copy = np.copy(a_fort) - assert_(not a_fort_copy.flags.c_contiguous) - assert_(a_fort_copy.flags.f_contiguous) - - -class TestAverage(TestCase): - def test_basic(self): - y1 = np.array([1, 2, 3]) - assert_(average(y1, axis=0) == 2.) - y2 = np.array([1., 2., 3.]) - assert_(average(y2, axis=0) == 2.) - y3 = [0., 0., 0.] - assert_(average(y3, axis=0) == 0.) - - y4 = np.ones((4, 4)) - y4[0, 1] = 0 - y4[1, 0] = 2 - assert_almost_equal(y4.mean(0), average(y4, 0)) - assert_almost_equal(y4.mean(1), average(y4, 1)) - - y5 = rand(5, 5) - assert_almost_equal(y5.mean(0), average(y5, 0)) - assert_almost_equal(y5.mean(1), average(y5, 1)) - - y6 = np.matrix(rand(5, 5)) - assert_array_equal(y6.mean(0), average(y6, 0)) - - def test_weights(self): - y = np.arange(10) - w = np.arange(10) - actual = average(y, weights=w) - desired = (np.arange(10) ** 2).sum()*1. / np.arange(10).sum() - assert_almost_equal(actual, desired) - - y1 = np.array([[1, 2, 3], [4, 5, 6]]) - w0 = [1, 2] - actual = average(y1, weights=w0, axis=0) - desired = np.array([3., 4., 5.]) - assert_almost_equal(actual, desired) - - w1 = [0, 0, 1] - actual = average(y1, weights=w1, axis=1) - desired = np.array([3., 6.]) - assert_almost_equal(actual, desired) - - # This should raise an error. Can we test for that ? - # assert_equal(average(y1, weights=w1), 9./2.) - - # 2D Case - w2 = [[0, 0, 1], [0, 0, 2]] - desired = np.array([3., 6.]) - assert_array_equal(average(y1, weights=w2, axis=1), desired) - assert_equal(average(y1, weights=w2), 5.) 
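The weights cases above check the weighted-mean identity avg = sum(w * y) / sum(w). A minimal sketch, separate from the deleted tests, including the returned=True form:

import numpy as np

y = np.array([[1, 2, 3], [4, 5, 6]])
avg, scl = np.average(y, axis=0, weights=[1, 2], returned=True)
print(avg)  # [ 3.  4.  5.]  == (1*[1,2,3] + 2*[4,5,6]) / (1 + 2)
print(scl)  # [ 3.  3.  3.]  -- the per-column sums of the weights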
- - def test_returned(self): - y = np.array([[1, 2, 3], [4, 5, 6]]) - - # No weights - avg, scl = average(y, returned=True) - assert_equal(scl, 6.) - - avg, scl = average(y, 0, returned=True) - assert_array_equal(scl, np.array([2., 2., 2.])) - - avg, scl = average(y, 1, returned=True) - assert_array_equal(scl, np.array([3., 3.])) - - # With weights - w0 = [1, 2] - avg, scl = average(y, weights=w0, axis=0, returned=True) - assert_array_equal(scl, np.array([3., 3., 3.])) - - w1 = [1, 2, 3] - avg, scl = average(y, weights=w1, axis=1, returned=True) - assert_array_equal(scl, np.array([6., 6.])) - - w2 = [[0, 0, 1], [1, 2, 3]] - avg, scl = average(y, weights=w2, axis=1, returned=True) - assert_array_equal(scl, np.array([1., 6.])) - - -class TestSelect(TestCase): - choices = [np.array([1, 2, 3]), - np.array([4, 5, 6]), - np.array([7, 8, 9])] - conditions = [np.array([False, False, False]), - np.array([False, True, False]), - np.array([False, False, True])] - - def _select(self, cond, values, default=0): - output = [] - for m in range(len(cond)): - output += [V[m] for V, C in zip(values, cond) if C[m]] or [default] - return output - - def test_basic(self): - choices = self.choices - conditions = self.conditions - assert_array_equal(select(conditions, choices, default=15), - self._select(conditions, choices, default=15)) - - assert_equal(len(choices), 3) - assert_equal(len(conditions), 3) - - def test_broadcasting(self): - conditions = [np.array(True), np.array([False, True, False])] - choices = [1, np.arange(12).reshape(4, 3)] - assert_array_equal(select(conditions, choices), np.ones((4, 3))) - # default can broadcast too: - assert_equal(select([True], [0], default=[0]).shape, (1,)) - - def test_return_dtype(self): - assert_equal(select(self.conditions, self.choices, 1j).dtype, - np.complex_) - # But the conditions need to be stronger than the scalar default - # if it is scalar.
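# A sketch of the promotion rule referenced above: np.select promotes the
# choice dtypes with a scalar default, so a complex default upgrades
# integer choices, e.g.
#     np.select([np.array([True])], [np.array([1])], default=1j).dtype
#     -> complex128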
- choices = [choice.astype(np.int8) for choice in self.choices] - assert_equal(select(self.conditions, choices).dtype, np.int8) - - d = np.array([1, 2, 3, np.nan, 5, 7]) - m = np.isnan(d) - assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0]) - - def test_deprecated_empty(self): - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") - assert_equal(select([], [], 3j), 3j) - - with warnings.catch_warnings(): - warnings.simplefilter("always") - assert_warns(DeprecationWarning, select, [], []) - warnings.simplefilter("error") - assert_raises(DeprecationWarning, select, [], []) - - def test_non_bool_deprecation(self): - choices = self.choices - conditions = self.conditions[:] - with warnings.catch_warnings(): - warnings.filterwarnings("always") - conditions[0] = conditions[0].astype(np.int_) - assert_warns(DeprecationWarning, select, conditions, choices) - conditions[0] = conditions[0].astype(np.uint8) - assert_warns(DeprecationWarning, select, conditions, choices) - warnings.filterwarnings("error") - assert_raises(DeprecationWarning, select, conditions, choices) - - def test_many_arguments(self): - # This used to be limited by NPY_MAXARGS == 32 - conditions = [np.array([False])] * 100 - choices = [np.array([1])] * 100 - select(conditions, choices) - - -class TestInsert(TestCase): - def test_basic(self): - a = [1, 2, 3] - assert_equal(insert(a, 0, 1), [1, 1, 2, 3]) - assert_equal(insert(a, 3, 1), [1, 2, 3, 1]) - assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3]) - assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3]) - assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9]) - assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3]) - assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9]) - b = np.array([0, 1], dtype=np.float64) - assert_equal(insert(b, 0, b[0]), [0., 0., 1.]) - assert_equal(insert(b, [], []), b) - # Bools will be treated differently in the future: - #assert_equal(insert(a, np.array([True]*4), 9), [9,1,9,2,9,3,9]) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', FutureWarning) - assert_equal( - insert(a, np.array([True]*4), 9), [1, 9, 9, 9, 9, 2, 3]) - assert_(w[0].category is FutureWarning) - - def test_multidim(self): - a = [[1, 1, 1]] - r = [[2, 2, 2], - [1, 1, 1]] - assert_equal(insert(a, 0, [1]), [1, 1, 1, 1]) - assert_equal(insert(a, 0, [2, 2, 2], axis=0), r) - assert_equal(insert(a, 0, 2, axis=0), r) - assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]]) - - a = np.array([[1, 1], [2, 2], [3, 3]]) - b = np.arange(1, 4).repeat(3).reshape(3, 3) - c = np.concatenate( - (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T, - a[:, 1:2]), axis=1) - assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b) - assert_equal(insert(a, [1], [1, 2, 3], axis=1), c) - # scalars behave differently, in this case exactly opposite: - assert_equal(insert(a, 1, [1, 2, 3], axis=1), b) - assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c) - - a = np.arange(4).reshape(2, 2) - assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a) - assert_equal(insert(a[:1, :], 1, a[1, :], axis=0), a) - - # negative axis value - a = np.arange(24).reshape((2, 3, 4)) - assert_equal(insert(a, 1, a[:, :, 3], axis=-1), - insert(a, 1, a[:, :, 3], axis=2)) - assert_equal(insert(a, 1, a[:, 2, :], axis=-2), - insert(a, 1, a[:, 2, :], axis=1)) - - # invalid axis value - assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=3) - assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=-4) - - def test_0d(self): - # This is an error in the future - a = np.array(1) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', DeprecationWarning) - assert_equal(insert(a, [], 2, axis=0), np.array(2)) - assert_(w[0].category is DeprecationWarning) - - def test_subclass(self): - class SubClass(np.ndarray): - pass - a = np.arange(10).view(SubClass) - assert_(isinstance(np.insert(a, 0, [0]), SubClass)) - assert_(isinstance(np.insert(a, [], []), SubClass)) - assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass)) - assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass)) - assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass)) - # This is an error in the future: - a = np.array(1).view(SubClass) - assert_(isinstance(np.insert(a, 0, [0]), SubClass)) - - def test_index_array_copied(self): - x = np.array([1, 1, 1]) - np.insert([0, 1, 2], x, [3, 4, 5]) - assert_equal(x, np.array([1, 1, 1])) - - def test_structured_array(self): - a = np.array([(1, 'a'), (2, 'b'), (3, 'c')], - dtype=[('foo', 'i'), ('bar', 'a1')]) - val = (4, 'd') - b = np.insert(a, 0, val) - assert_array_equal(b[0], np.array(val, dtype=b.dtype)) - val = [(4, 'd')] * 2 - b = np.insert(a, [0, 2], val) - assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype)) - - -class TestAmax(TestCase): - def test_basic(self): - a = [3, 4, 5, 10, -3, -5, 6.0] - assert_equal(np.amax(a), 10.0) - b = [[3, 6.0, 9.0], - [4, 10.0, 5.0], - [8, 3.0, 2.0]] - assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0]) - assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0]) - - -class TestAmin(TestCase): - def test_basic(self): - a = [3, 4, 5, 10, -3, -5, 6.0] - assert_equal(np.amin(a), -5.0) - b = [[3, 6.0, 9.0], - [4, 10.0, 5.0], - [8, 3.0, 2.0]] - assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0]) - assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0]) - - -class TestPtp(TestCase): - def test_basic(self): - a = [3, 4, 5, 10, -3, -5, 6.0] - assert_equal(np.ptp(a, axis=0), 15.0) - b = [[3, 6.0, 9.0], - [4, 10.0, 5.0], - [8, 3.0, 2.0]] - assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0]) - assert_equal(np.ptp(b, axis=-1), [6.0, 6.0, 6.0]) - - -class TestCumsum(TestCase): - def test_basic(self): - ba = [1, 2, 10, 11, 6, 5, 4] - ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] - for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32, - np.uint32, np.float32, np.float64, np.complex64, np.complex128]: - a = np.array(ba, ctype) - a2 = np.array(ba2, ctype) - - tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype) - assert_array_equal(np.cumsum(a, axis=0), tgt) - - tgt = np.array( - [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype) - assert_array_equal(np.cumsum(a2, axis=0), tgt) - - tgt = np.array( - [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype) - assert_array_equal(np.cumsum(a2, axis=1), tgt) - - -class TestProd(TestCase): - def test_basic(self): - ba = [1, 2, 10, 11, 6, 5, 4] - ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] - for ctype in [np.int16, np.uint16, np.int32, np.uint32, - np.float32, np.float64, np.complex64, np.complex128]: - a = np.array(ba, ctype) - a2 = np.array(ba2, ctype) - if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, prod, a) - self.assertRaises(ArithmeticError, prod, a2, 1) - self.assertRaises(ArithmeticError, prod, a) - else: -
assert_equal(np.prod(a, axis=0), 26400) - assert_array_equal(np.prod(a2, axis=0), - np.array([50, 36, 84, 180], ctype)) - assert_array_equal(np.prod(a2, axis=-1), - np.array([24, 1890, 600], ctype)) - - -class TestCumprod(TestCase): - def test_basic(self): - ba = [1, 2, 10, 11, 6, 5, 4] - ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] - for ctype in [np.int16, np.uint16, np.int32, np.uint32, - np.float32, np.float64, np.complex64, np.complex128]: - a = np.array(ba, ctype) - a2 = np.array(ba2, ctype) - if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, cumprod, a) - self.assertRaises(ArithmeticError, cumprod, a2, 1) - self.assertRaises(ArithmeticError, cumprod, a) - else: - assert_array_equal(np.cumprod(a, axis=-1), - np.array([1, 2, 20, 220, - 1320, 6600, 26400], ctype)) - assert_array_equal(np.cumprod(a2, axis=0), - np.array([[1, 2, 3, 4], - [5, 12, 21, 36], - [50, 36, 84, 180]], ctype)) - assert_array_equal(np.cumprod(a2, axis=-1), - np.array([[1, 2, 6, 24], - [5, 30, 210, 1890], - [10, 30, 120, 600]], ctype)) - - -class TestDiff(TestCase): - def test_basic(self): - x = [1, 4, 6, 7, 12] - out = np.array([3, 2, 1, 5]) - out2 = np.array([-1, -1, 4]) - out3 = np.array([0, 5]) - assert_array_equal(diff(x), out) - assert_array_equal(diff(x, n=2), out2) - assert_array_equal(diff(x, n=3), out3) - - def test_nd(self): - x = 20 * rand(10, 20, 30) - out1 = x[:, :, 1:] - x[:, :, :-1] - out2 = out1[:, :, 1:] - out1[:, :, :-1] - out3 = x[1:, :, :] - x[:-1, :, :] - out4 = out3[1:, :, :] - out3[:-1, :, :] - assert_array_equal(diff(x), out1) - assert_array_equal(diff(x, n=2), out2) - assert_array_equal(diff(x, axis=0), out3) - assert_array_equal(diff(x, n=2, axis=0), out4) - - -class TestDelete(TestCase): - def setUp(self): - self.a = np.arange(5) - self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) - - def _check_inverse_of_slicing(self, indices): - a_del = delete(self.a, indices) - nd_a_del = delete(self.nd_a, indices, axis=1) - msg = 'Delete failed for obj: %r' % indices - # NOTE: The cast should be removed after warning phase for bools - if not isinstance(indices, (slice, int, long, np.integer)): - indices = np.asarray(indices, dtype=np.intp) - indices = indices[(indices >= 0) & (indices < 5)] - assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, - err_msg=msg) - xor = setxor1d(nd_a_del[0, :, 0], self.nd_a[0, indices, 0]) - assert_array_equal(xor, self.nd_a[0, :, 0], err_msg=msg) - - def test_slices(self): - lims = [-6, -2, 0, 1, 2, 4, 5] - steps = [-3, -1, 1, 3] - for start in lims: - for stop in lims: - for step in steps: - s = slice(start, stop, step) - self._check_inverse_of_slicing(s) - - def test_fancy(self): - # Deprecation/FutureWarning tests should be kept after change. 
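# Background for the warnings exercised below (1.9-era semantics, sketched
# from the asserts): out-of-range indices such as delete(a, [100]) are
# deprecated rather than silently ignored, and a boolean `obj` is still
# interpreted as integer indices (FutureWarning), pending a switch to
# boolean-mask semantics.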
- self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) - with warnings.catch_warnings(): - warnings.filterwarnings('error', category=DeprecationWarning) - assert_raises(DeprecationWarning, delete, self.a, [100]) - assert_raises(DeprecationWarning, delete, self.a, [-100]) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', category=FutureWarning) - self._check_inverse_of_slicing([0, -1, 2, 2]) - obj = np.array([True, False, False], dtype=bool) - self._check_inverse_of_slicing(obj) - assert_(w[0].category is FutureWarning) - assert_(w[1].category is FutureWarning) - - def test_single(self): - self._check_inverse_of_slicing(0) - self._check_inverse_of_slicing(-4) - - def test_0d(self): - a = np.array(1) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', DeprecationWarning) - assert_equal(delete(a, [], axis=0), a) - assert_(w[0].category is DeprecationWarning) - - def test_subclass(self): - class SubClass(np.ndarray): - pass - a = self.a.view(SubClass) - assert_(isinstance(delete(a, 0), SubClass)) - assert_(isinstance(delete(a, []), SubClass)) - assert_(isinstance(delete(a, [0, 1]), SubClass)) - assert_(isinstance(delete(a, slice(1, 2)), SubClass)) - assert_(isinstance(delete(a, slice(1, -2)), SubClass)) - - -class TestGradient(TestCase): - def test_basic(self): - v = [[1, 1], [3, 4]] - x = np.array(v) - dx = [np.array([[2., 3.], [2., 3.]]), - np.array([[0., 0.], [1., 1.]])] - assert_array_equal(gradient(x), dx) - assert_array_equal(gradient(v), dx) - - def test_badargs(self): - # for 2D array, gradient can take 0, 1, or 2 extra args - x = np.array([[1, 1], [3, 4]]) - assert_raises(SyntaxError, gradient, x, np.array([1., 1.]), - np.array([1., 1.]), np.array([1., 1.])) - - def test_masked(self): - # Make sure that gradient supports subclasses like masked arrays - x = np.ma.array([[1, 1], [3, 4]]) - assert_equal(type(gradient(x)[0]), type(x)) - - def test_datetime64(self): - # Make sure gradient() can handle special types like datetime64 - x = np.array( - ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12', - '1910-10-12', '1910-12-12', '1912-12-12'], - dtype='datetime64[D]') - dx = np.array( - [-7, -3, 0, 31, 61, 396, 1066], - dtype='timedelta64[D]') - assert_array_equal(gradient(x), dx) - assert_(dx.dtype == np.dtype('timedelta64[D]')) - - def test_timedelta64(self): - # Make sure gradient() can handle special types like timedelta64 - x = np.array( - [-5, -3, 10, 12, 61, 321, 300], - dtype='timedelta64[D]') - dx = np.array( - [-3, 7, 7, 25, 154, 119, -161], - dtype='timedelta64[D]') - assert_array_equal(gradient(x), dx) - assert_(dx.dtype == np.dtype('timedelta64[D]')) - - def test_second_order_accurate(self): - # Testing that the relative numerical error is less than 3% for - # this example problem. This corresponds to second order - # accurate finite differences for all interior and boundary - # points.
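# For reference, a sketch of the second-order scheme this exercises:
# central differences in the interior,
#     dy[i] = (y[i+1] - y[i-1]) / (2 * dx)
# and one-sided second-order differences at the two boundary points,
#     dy[0] = (-3*y[0] + 4*y[1] - y[2]) / (2 * dx)
#     dy[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / (2 * dx)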
- x = np.linspace(0, 1, 10) - dx = x[1] - x[0] - y = 2 * x ** 3 + 4 * x ** 2 + 2 * x - analytical = 6 * x ** 2 + 8 * x + 2 - num_error = np.abs((np.gradient(y, dx) / analytical) - 1) - assert_(np.all(num_error < 0.03) == True) - - -class TestAngle(TestCase): - def test_basic(self): - x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2, - 1, 1j, -1, -1j, 1 - 3j, -1 + 3j] - y = angle(x) - yo = [ - np.arctan(3.0 / 1.0), - np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0, - -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)] - z = angle(x, deg=1) - zo = np.array(yo) * 180 / np.pi - assert_array_almost_equal(y, yo, 11) - assert_array_almost_equal(z, zo, 11) - - -class TestTrimZeros(TestCase): - """ only testing for integer splits. - """ - def test_basic(self): - a = np.array([0, 0, 1, 2, 3, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, np.array([1, 2, 3, 4])) - - def test_leading_skip(self): - a = np.array([0, 0, 1, 0, 2, 3, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, np.array([1, 0, 2, 3, 4])) - - def test_trailing_skip(self): - a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0]) - res = trim_zeros(a) - assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4])) - - -class TestExtins(TestCase): - def test_basic(self): - a = np.array([1, 3, 2, 1, 2, 3, 3]) - b = extract(a > 1, a) - assert_array_equal(b, [3, 2, 2, 3, 3]) - - def test_place(self): - a = np.array([1, 4, 3, 2, 5, 8, 7]) - place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) - assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) - - def test_both(self): - a = rand(10) - mask = a > 0.5 - ac = a.copy() - c = extract(mask, a) - place(a, mask, 0) - place(a, mask, c) - assert_array_equal(a, ac) - - -class TestVectorize(TestCase): - def test_simple(self): - def addsubtract(a, b): - if a > b: - return a - b - else: - return a + b - f = vectorize(addsubtract) - r = f([0, 3, 6, 9], [1, 3, 5, 7]) - assert_array_equal(r, [1, 6, 1, 2]) - - def test_scalar(self): - def addsubtract(a, b): - if a > b: - return a - b - else: - return a + b - f = vectorize(addsubtract) - r = f([0, 3, 6, 9], 5) - assert_array_equal(r, [5, 8, 1, 4]) - - def test_large(self): - x = np.linspace(-3, 2, 10000) - f = vectorize(lambda x: x) - y = f(x) - assert_array_equal(y, x) - - def test_ufunc(self): - import math - f = vectorize(math.cos) - args = np.array([0, 0.5*np.pi, np.pi, 1.5*np.pi, 2*np.pi]) - r1 = f(args) - r2 = np.cos(args) - assert_array_equal(r1, r2) - - def test_keywords(self): - import math - - def foo(a, b=1): - return a + b - f = vectorize(foo) - args = np.array([1, 2, 3]) - r1 = f(args) - r2 = np.array([2, 3, 4]) - assert_array_equal(r1, r2) - r1 = f(args, 2) - r2 = np.array([3, 4, 5]) - assert_array_equal(r1, r2) - - def test_keywords_no_func_code(self): - # This needs to test a function that has keywords but - # no func_code attribute, since otherwise vectorize will - # inspect the func_code. 
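# Background (sketch): random.randrange takes keyword-style arguments but
# does not expose a func_code attribute the way a plain Python function
# does, so np.vectorize must cope without inspecting one, e.g.
#     f = np.vectorize(random.randrange)   # construction must not raise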
- import random - try: - f = vectorize(random.randrange) - except: - raise AssertionError() - - def test_keywords2_ticket_2100(self): - r"""Test kwarg support: enhancement ticket 2100""" - import math - - def foo(a, b=1): - return a + b - f = vectorize(foo) - args = np.array([1, 2, 3]) - r1 = f(a=args) - r2 = np.array([2, 3, 4]) - assert_array_equal(r1, r2) - r1 = f(b=1, a=args) - assert_array_equal(r1, r2) - r1 = f(args, b=2) - r2 = np.array([3, 4, 5]) - assert_array_equal(r1, r2) - - def test_keywords3_ticket_2100(self): - """Test excluded with mixed positional and kwargs: ticket 2100""" - def mypolyval(x, p): - _p = list(p) - res = _p.pop(0) - while _p: - res = res*x + _p.pop(0) - return res - vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) - ans = [3, 6] - assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) - assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) - assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) - - def test_keywords4_ticket_2100(self): - """Test vectorizing function with no positional args.""" - @vectorize - def f(**kw): - res = 1.0 - for _k in kw: - res *= kw[_k] - return res - assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) - - def test_keywords5_ticket_2100(self): - """Test vectorizing function with no kwargs args.""" - @vectorize - def f(*v): - return np.prod(v) - assert_array_equal(f([1, 2], [3, 4]), [3, 8]) - - def test_coverage1_ticket_2100(self): - def foo(): - return 1 - f = vectorize(foo) - assert_array_equal(f(), 1) - - def test_assigning_docstring(self): - def foo(x): - return x - doc = "Provided documentation" - f = vectorize(foo, doc=doc) - assert_equal(f.__doc__, doc) - - def test_UnboundMethod_ticket_1156(self): - """Regression test for issue 1156""" - class Foo: - b = 2 - - def bar(self, a): - return a**self.b - assert_array_equal(vectorize(Foo().bar)(np.arange(9)), - np.arange(9)**2) - assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), - np.arange(9)**2) - - def test_execution_order_ticket_1487(self): - """Regression test for dependence on execution order: issue 1487""" - f1 = vectorize(lambda x: x) - res1a = f1(np.arange(3)) - res1b = f1(np.arange(0.1, 3)) - f2 = vectorize(lambda x: x) - res2b = f2(np.arange(0.1, 3)) - res2a = f2(np.arange(3)) - assert_equal(res1a, res2a) - assert_equal(res1b, res2b) - - def test_string_ticket_1892(self): - """Test vectorization over strings: issue 1892.""" - f = np.vectorize(lambda x: x) - s = '0123456789'*10 - assert_equal(s, f(s)) - #z = f(np.array([s,s])) - #assert_array_equal([s,s], f(s)) - - def test_cache(self): - """Ensure that vectorized func called exactly once per argument.""" - _calls = [0] - - @vectorize - def f(x): - _calls[0] += 1 - return x**2 - f.cache = True - x = np.arange(5) - assert_array_equal(f(x), x*x) - assert_equal(_calls[0], len(x)) - - def test_otypes(self): - f = np.vectorize(lambda x: x) - f.otypes = 'i' - x = np.arange(5) - assert_array_equal(f(x), x) - - -class TestDigitize(TestCase): - def test_forward(self): - x = np.arange(-6, 5) - bins = np.arange(-5, 5) - assert_array_equal(digitize(x, bins), np.arange(11)) - - def test_reverse(self): - x = np.arange(5, -6, -1) - bins = np.arange(5, -5, -1) - assert_array_equal(digitize(x, bins), np.arange(11)) - - def test_random(self): - x = rand(10) - bin = np.linspace(x.min(), x.max(), 10) - assert_(np.all(digitize(x, bin) != 0)) - - def test_right_basic(self): - x = [1, 5, 4, 10, 8, 11, 0] - bins = [1, 5, 10] - default_answer = [1, 2, 1, 3, 2, 3, 0] - assert_array_equal(digitize(x, bins), default_answer) - 
right_answer = [0, 1, 1, 2, 2, 3, 0] - assert_array_equal(digitize(x, bins, True), right_answer) - - def test_right_open(self): - x = np.arange(-6, 5) - bins = np.arange(-6, 4) - assert_array_equal(digitize(x, bins, True), np.arange(11)) - - def test_right_open_reverse(self): - x = np.arange(5, -6, -1) - bins = np.arange(4, -6, -1) - assert_array_equal(digitize(x, bins, True), np.arange(11)) - - def test_right_open_random(self): - x = rand(10) - bins = np.linspace(x.min(), x.max(), 10) - assert_(np.all(digitize(x, bins, True) != 10)) - - def test_monotonic(self): - x = [-1, 0, 1, 2] - bins = [0, 0, 1] - assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) - assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) - bins = [1, 1, 0] - assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0]) - assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) - bins = [1, 1, 1, 1] - assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) - assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) - bins = [0, 0, 1, 0] - assert_raises(ValueError, digitize, x, bins) - bins = [1, 1, 0, 1] - assert_raises(ValueError, digitize, x, bins) - - -class TestUnwrap(TestCase): - def test_simple(self): - #check that unwrap removes jumps greater than 2*pi - assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) - #check that unwrap maintains continuity - assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) - - -class TestFilterwindows(TestCase): - def test_hanning(self): - #check symmetry - w = hanning(10) - assert_array_almost_equal(w, flipud(w), 7) - #check known value - assert_almost_equal(np.sum(w, axis=0), 4.500, 4) - - def test_hamming(self): - #check symmetry - w = hamming(10) - assert_array_almost_equal(w, flipud(w), 7) - #check known value - assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) - - def test_bartlett(self): - #check symmetry - w = bartlett(10) - assert_array_almost_equal(w, flipud(w), 7) - #check known value - assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) - - def test_blackman(self): - #check symmetry - w = blackman(10) - assert_array_almost_equal(w, flipud(w), 7) - #check known value - assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) - - -class TestTrapz(TestCase): - def test_simple(self): - x = np.arange(-10, 10, .1) - r = trapz(np.exp(-.5*x**2) / np.sqrt(2*np.pi), dx=0.1) - #check integral of normal equals 1 - assert_almost_equal(r, 1, 7) - - def test_ndim(self): - x = np.linspace(0, 1, 3) - y = np.linspace(0, 2, 8) - z = np.linspace(0, 3, 13) - - wx = np.ones_like(x) * (x[1] - x[0]) - wx[0] /= 2 - wx[-1] /= 2 - wy = np.ones_like(y) * (y[1] - y[0]) - wy[0] /= 2 - wy[-1] /= 2 - wz = np.ones_like(z) * (z[1] - z[0]) - wz[0] /= 2 - wz[-1] /= 2 - - q = x[:, None, None] + y[None, :, None] + z[None, None, :] - - qx = (q * wx[:, None, None]).sum(axis=0) - qy = (q * wy[None, :, None]).sum(axis=1) - qz = (q * wz[None, None, :]).sum(axis=2) - - # n-d `x` - r = trapz(q, x=x[:, None, None], axis=0) - assert_almost_equal(r, qx) - r = trapz(q, x=y[None, :, None], axis=1) - assert_almost_equal(r, qy) - r = trapz(q, x=z[None, None, :], axis=2) - assert_almost_equal(r, qz) - - # 1-d `x` - r = trapz(q, x=x, axis=0) - assert_almost_equal(r, qx) - r = trapz(q, x=y, axis=1) - assert_almost_equal(r, qy) - r = trapz(q, x=z, axis=2) - assert_almost_equal(r, qz) - - def test_masked(self): - #Testing that masked arrays behave as if the function is 0 where - #masked - x = np.arange(5) - y = x * x - mask = x == 2 - ym = np.ma.array(y, mask=mask) - r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) -
assert_almost_equal(trapz(ym, x), r) - - xm = np.ma.array(x, mask=mask) - assert_almost_equal(trapz(ym, xm), r) - - xm = np.ma.array(x, mask=mask) - assert_almost_equal(trapz(y, xm), r) - - def test_matrix(self): - #Test to make sure matrices give the same answer as ndarrays - x = np.linspace(0, 5) - y = x * x - r = trapz(y, x) - mx = np.matrix(x) - my = np.matrix(y) - mr = trapz(my, mx) - assert_almost_equal(mr, r) - - -class TestSinc(TestCase): - def test_simple(self): - assert_(sinc(0) == 1) - w = sinc(np.linspace(-1, 1, 100)) - #check symmetry - assert_array_almost_equal(w, flipud(w), 7) - - def test_array_like(self): - x = [0, 0.5] - y1 = sinc(np.array(x)) - y2 = sinc(list(x)) - y3 = sinc(tuple(x)) - assert_array_equal(y1, y2) - assert_array_equal(y1, y3) - - -class TestHistogram(TestCase): - def setUp(self): - pass - - def tearDown(self): - pass - - def test_simple(self): - n = 100 - v = rand(n) - (a, b) = histogram(v) - #check if the sum of the bins equals the number of samples - assert_equal(np.sum(a, axis=0), n) - #check that the bin counts are evenly spaced when the data is from a - # linear function - (a, b) = histogram(np.linspace(0, 10, 100)) - assert_array_equal(a, 10) - - def test_one_bin(self): - # Ticket 632 - hist, edges = histogram([1, 2, 3, 4], [1, 2]) - assert_array_equal(hist, [2, ]) - assert_array_equal(edges, [1, 2]) - assert_raises(ValueError, histogram, [1, 2], bins=0) - h, e = histogram([1, 2], bins=1) - assert_equal(h, np.array([2])) - assert_allclose(e, np.array([1., 2.])) - - def test_normed(self): - # Check that the integral of the density equals 1. - n = 100 - v = rand(n) - a, b = histogram(v, normed=True) - area = np.sum(a * diff(b)) - assert_almost_equal(area, 1) - - # Check with non-constant bin widths (buggy but backwards compatible) - v = np.arange(10) - bins = [0, 1, 5, 9, 10] - a, b = histogram(v, bins, normed=True) - area = np.sum(a * diff(b)) - assert_almost_equal(area, 1) - - def test_density(self): - # Check that the integral of the density equals 1. - n = 100 - v = rand(n) - a, b = histogram(v, density=True) - area = np.sum(a * diff(b)) - assert_almost_equal(area, 1) - - # Check with non-constant bin widths - v = np.arange(10) - bins = [0, 1, 3, 6, 10] - a, b = histogram(v, bins, density=True) - assert_array_equal(a, .1) - assert_equal(np.sum(a*diff(b)), 1) - - # Variable bin widths are especially useful to deal with - # infinities. - v = np.arange(10) - bins = [0, 1, 3, 6, np.inf] - a, b = histogram(v, bins, density=True) - assert_array_equal(a, [.1, .1, .1, 0.]) - - # Taken from a bug report from N. Becker on the numpy-discussion - # mailing list Aug. 6, 2010.
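# Sketch of the expected numbers below: density divides each count by
# (total samples * bin width), so for samples [1, 2, 3, 4] with edges
# [0.5, 1.5, inf] the first bin gives 1 / (4 * 1.0) = 0.25 and the
# second gives 3 / (4 * inf) = 0.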
- counts, dmy = np.histogram( - [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) - assert_equal(counts, [.25, 0]) - - def test_outliers(self): - # Check that outliers are not tallied - a = np.arange(10) + .5 - - # Lower outliers - h, b = histogram(a, range=[0, 9]) - assert_equal(h.sum(), 9) - - # Upper outliers - h, b = histogram(a, range=[1, 10]) - assert_equal(h.sum(), 9) - - # Normalization - h, b = histogram(a, range=[1, 9], normed=True) - assert_almost_equal((h * diff(b)).sum(), 1, decimal=15) - - # Weights - w = np.arange(10) + .5 - h, b = histogram(a, range=[1, 9], weights=w, normed=True) - assert_equal((h * diff(b)).sum(), 1) - - h, b = histogram(a, bins=8, range=[1, 9], weights=w) - assert_equal(h, w[1:-1]) - - def test_type(self): - # Check the type of the returned histogram - a = np.arange(10) + .5 - h, b = histogram(a) - assert_(issubdtype(h.dtype, int)) - - h, b = histogram(a, normed=True) - assert_(issubdtype(h.dtype, float)) - - h, b = histogram(a, weights=np.ones(10, int)) - assert_(issubdtype(h.dtype, int)) - - h, b = histogram(a, weights=np.ones(10, float)) - assert_(issubdtype(h.dtype, float)) - - def test_f32_rounding(self): - # gh-4799, check that the rounding of the edges works with float32 - x = np.array([276.318359 , -69.593948 , 21.329449], dtype=np.float32) - y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) - counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) - assert_equal(counts_hist.sum(), 3.) - - def test_weights(self): - v = rand(100) - w = np.ones(100) * 5 - a, b = histogram(v) - na, nb = histogram(v, normed=True) - wa, wb = histogram(v, weights=w) - nwa, nwb = histogram(v, weights=w, normed=True) - assert_array_almost_equal(a * 5, wa) - assert_array_almost_equal(na, nwa) - - # Check weights are properly applied. - v = np.linspace(0, 10, 10) - w = np.concatenate((np.zeros(5), np.ones(5))) - wa, wb = histogram(v, bins=np.arange(11), weights=w) - assert_array_almost_equal(wa, w) - - # Check with integer weights - wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) - assert_array_equal(wa, [4, 5, 0, 1]) - wa, wb = histogram( - [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True) - assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) - - # Check weights with non-uniform bin widths - a, b = histogram( - np.arange(9), [0, 1, 3, 6, 10], - weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) - assert_almost_equal(a, [.2, .1, .1, .075]) - - def test_empty(self): - a, b = histogram([], bins=([0, 1])) - assert_array_equal(a, np.array([0])) - assert_array_equal(b, np.array([0, 1])) - - -class TestHistogramdd(TestCase): - def test_simple(self): - x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], - [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) - H, edges = histogramdd(x, (2, 3, 3), - range=[[-1, 1], [0, 3], [0, 3]]) - answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], - [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) - assert_array_equal(H, answer) - - # Check normalization - ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] - H, edges = histogramdd(x, bins=ed, normed=True) - assert_(np.all(H == answer / 12.)) - - # Check that H has the correct shape. - H, edges = histogramdd(x, (2, 3, 4), - range=[[-1, 1], [0, 3], [0, 4]], - normed=True) - answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], - [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) - assert_array_almost_equal(H, answer / 6., 4) - # Check that a sequence of arrays is accepted and H has the correct - # shape. 
- z = [np.squeeze(y) for y in split(x, 3, axis=1)] - H, edges = histogramdd( - z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) - answer = np.array([[[0, 0], [0, 0], [0, 0]], - [[0, 1], [0, 0], [1, 0]], - [[0, 1], [0, 0], [0, 0]], - [[0, 0], [0, 0], [0, 0]]]) - assert_array_equal(H, answer) - - Z = np.zeros((5, 5, 5)) - Z[list(range(5)), list(range(5)), list(range(5))] = 1. - H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) - assert_array_equal(H, Z) - - def test_shape_3d(self): - # All possible permutations for bins of different lengths in 3D. - bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), - (4, 5, 6)) - r = rand(10, 3) - for b in bins: - H, edges = histogramdd(r, b) - assert_(H.shape == b) - - def test_shape_4d(self): - # All possible permutations for bins of different lengths in 4D. - bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), - (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), - (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), - (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), - (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), - (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) - - r = rand(10, 4) - for b in bins: - H, edges = histogramdd(r, b) - assert_(H.shape == b) - - def test_weights(self): - v = rand(100, 2) - hist, edges = histogramdd(v) - n_hist, edges = histogramdd(v, normed=True) - w_hist, edges = histogramdd(v, weights=np.ones(100)) - assert_array_equal(w_hist, hist) - w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True) - assert_array_equal(w_hist, n_hist) - w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) - assert_array_equal(w_hist, 2 * hist) - - def test_identical_samples(self): - x = np.zeros((10, 2), int) - hist, edges = histogramdd(x, bins=2) - assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) - - def test_empty(self): - a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) - assert_array_max_ulp(a, np.array([[0.]])) - a, b = np.histogramdd([[], [], []], bins=2) - assert_array_max_ulp(a, np.zeros((2, 2, 2))) - - def test_bins_errors(self): - """There are two ways to specify bins. Check for the right errors when - mixing those.""" - x = np.arange(8).reshape(2, 4) - assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) - assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) - assert_raises( - ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]]) - assert_raises( - ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) - assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) - - def test_inf_edges(self): - """Test using +/-inf bin edges works. See #1788.""" - with np.errstate(invalid='ignore'): - x = np.arange(6).reshape(3, 2) - expected = np.array([[1, 0], [0, 1], [0, 1]]) - h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) - assert_allclose(h, expected) - h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) - assert_allclose(h, expected) - h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) - assert_allclose(h, expected) - - def test_rightmost_binedge(self): - """Test event very close to rightmost binedge. - See Github issue #4266""" - x = [0.9999999995] - bins = [[0.,0.5,1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) - x = [1.0] - bins = [[0.,0.5,1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) 
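# Note: the rightmost edge is closed, so x == 1.0 above lands in the last
# bin; the remaining cases check the gh-4266 rounding fix, where a value a
# bare rounding error past the last edge (1.0000000001) is still counted,
# while a clearly outside value (1.0001) is dropped.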
- x = [1.0000000001] - bins = [[0.,0.5,1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) - x = [1.0001] - bins = [[0.,0.5,1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 0.0) - - -class TestUnique(TestCase): - def test_simple(self): - x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) - assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) - assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) - x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] - assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) - x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) - assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) - - -class TestCheckFinite(TestCase): - def test_simple(self): - a = [1, 2, 3] - b = [1, 2, np.inf] - c = [1, 2, np.nan] - np.lib.asarray_chkfinite(a) - assert_raises(ValueError, np.lib.asarray_chkfinite, b) - assert_raises(ValueError, np.lib.asarray_chkfinite, c) - - def test_dtype_order(self): - """Regression test for missing dtype and order arguments""" - a = [1, 2, 3] - a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64) - assert_(a.dtype == np.float64) - - -class TestCorrCoef(TestCase): - A = np.array( - [[0.15391142, 0.18045767, 0.14197213], - [0.70461506, 0.96474128, 0.27906989], - [0.9297531, 0.32296769, 0.19267156]]) - B = np.array( - [[0.10377691, 0.5417086, 0.49807457], - [0.82872117, 0.77801674, 0.39226705], - [0.9314666, 0.66800209, 0.03538394]]) - res1 = np.array( - [[1., 0.9379533, -0.04931983], - [0.9379533, 1., 0.30007991], - [-0.04931983, 0.30007991, 1.]]) - res2 = np.array( - [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], - [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], - [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], - [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], - [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], - [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) - - def test_non_array(self): - assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), - [[1., -1.], [-1., 1.]]) - - def test_simple(self): - assert_almost_equal(corrcoef(self.A), self.res1) - assert_almost_equal(corrcoef(self.A, self.B), self.res2) - - def test_ddof(self): - assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) - - def test_complex(self): - x = np.array([[1, 2, 3], [1j, 2j, 3j]]) - assert_allclose(corrcoef(x), np.array([[1., -1.j], [1.j, 1.]])) - - def test_xy(self): - x = np.array([[1, 2, 3]]) - y = np.array([[1j, 2j, 3j]]) - assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) - - def test_empty(self): - with warnings.catch_warnings(record=True): - warnings.simplefilter('always', RuntimeWarning) - assert_array_equal(corrcoef(np.array([])), np.nan) - assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), - np.array([]).reshape(0, 0)) - assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), - np.array([[np.nan, np.nan], [np.nan, np.nan]])) - - def test_wrong_ddof(self): - x = np.array([[0, 2], [1, 1], [2, 0]]).T - with warnings.catch_warnings(record=True): - warnings.simplefilter('always', RuntimeWarning) - assert_array_equal(corrcoef(x, ddof=5), - np.array([[np.nan, np.nan], [np.nan, np.nan]])) - - -class TestCov(TestCase): - def test_basic(self): - x = np.array([[0, 2], [1, 1], [2, 0]]).T - assert_allclose(cov(x), np.array([[1., -1.], [-1., 1.]])) - - def test_complex(self): - x 
= np.array([[1, 2, 3], [1j, 2j, 3j]]) - assert_allclose(cov(x), np.array([[1., -1.j], [1.j, 1.]])) - - def test_xy(self): - x = np.array([[1, 2, 3]]) - y = np.array([[1j, 2j, 3j]]) - assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) - - def test_empty(self): - with warnings.catch_warnings(record=True): - warnings.simplefilter('always', RuntimeWarning) - assert_array_equal(cov(np.array([])), np.nan) - assert_array_equal(cov(np.array([]).reshape(0, 2)), - np.array([]).reshape(0, 0)) - assert_array_equal(cov(np.array([]).reshape(2, 0)), - np.array([[np.nan, np.nan], [np.nan, np.nan]])) - - def test_wrong_ddof(self): - x = np.array([[0, 2], [1, 1], [2, 0]]).T - with warnings.catch_warnings(record=True): - warnings.simplefilter('always', RuntimeWarning) - assert_array_equal(cov(x, ddof=5), - np.array([[np.inf, -np.inf], [-np.inf, np.inf]])) - - -class Test_I0(TestCase): - def test_simple(self): - assert_almost_equal( - i0(0.5), - np.array(1.0634833707413234)) - - A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549]) - assert_almost_equal( - i0(A), - np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049])) - - B = np.array([[0.827002, 0.99959078], - [0.89694769, 0.39298162], - [0.37954418, 0.05206293], - [0.36465447, 0.72446427], - [0.48164949, 0.50324519]]) - assert_almost_equal( - i0(B), - np.array([[1.17843223, 1.26583466], - [1.21147086, 1.03898290], - [1.03633899, 1.00067775], - [1.03352052, 1.13557954], - [1.05884290, 1.06432317]])) - - -class TestKaiser(TestCase): - def test_simple(self): - assert_(np.isfinite(kaiser(1, 1.0))) - assert_almost_equal(kaiser(0, 1.0), - np.array([])) - assert_almost_equal(kaiser(2, 1.0), - np.array([0.78984831, 0.78984831])) - assert_almost_equal(kaiser(5, 1.0), - np.array([0.78984831, 0.94503323, 1., - 0.94503323, 0.78984831])) - assert_almost_equal(kaiser(5, 1.56789), - np.array([0.58285404, 0.88409679, 1., - 0.88409679, 0.58285404])) - - def test_int_beta(self): - kaiser(3, 4) - - -class TestMsort(TestCase): - def test_simple(self): - A = np.array([[0.44567325, 0.79115165, 0.54900530], - [0.36844147, 0.37325583, 0.96098397], - [0.64864341, 0.52929049, 0.39172155]]) - assert_almost_equal( - msort(A), - np.array([[0.36844147, 0.37325583, 0.39172155], - [0.44567325, 0.52929049, 0.54900530], - [0.64864341, 0.79115165, 0.96098397]])) - - -class TestMeshgrid(TestCase): - def test_simple(self): - [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) - assert_array_equal(X, np.array([[1, 2, 3], - [1, 2, 3], - [1, 2, 3], - [1, 2, 3]])) - assert_array_equal(Y, np.array([[4, 4, 4], - [5, 5, 5], - [6, 6, 6], - [7, 7, 7]])) - - def test_single_input(self): - [X] = meshgrid([1, 2, 3, 4]) - assert_array_equal(X, np.array([1, 2, 3, 4])) - - def test_no_input(self): - args = [] - assert_array_equal([], meshgrid(*args)) - - def test_indexing(self): - x = [1, 2, 3] - y = [4, 5, 6, 7] - [X, Y] = meshgrid(x, y, indexing='ij') - assert_array_equal(X, np.array([[1, 1, 1, 1], - [2, 2, 2, 2], - [3, 3, 3, 3]])) - assert_array_equal(Y, np.array([[4, 5, 6, 7], - [4, 5, 6, 7], - [4, 5, 6, 7]])) - - # Test expected shapes: - z = [8, 9] - assert_(meshgrid(x, y)[0].shape == (4, 3)) - assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) - assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) - assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) - - assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') - - def test_sparse(self): - [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) - assert_array_equal(X, np.array([[1, 2, 3]])) - assert_array_equal(Y, 
np.array([[4], [5], [6], [7]])) - - def test_invalid_arguments(self): - # Test that meshgrid complains about invalid arguments - # Regression test for issue #4755: - # https://github.com/numpy/numpy/issues/4755 - assert_raises(TypeError, meshgrid, - [1, 2, 3], [4, 5, 6, 7], indices='ij') - - -class TestPiecewise(TestCase): - def test_simple(self): - # Condition is single bool list - x = piecewise([0, 0], [True, False], [1]) - assert_array_equal(x, [1, 0]) - - # List of conditions: single bool list - x = piecewise([0, 0], [[True, False]], [1]) - assert_array_equal(x, [1, 0]) - - # Conditions is single bool array - x = piecewise([0, 0], np.array([True, False]), [1]) - assert_array_equal(x, [1, 0]) - - # Condition is single int array - x = piecewise([0, 0], np.array([1, 0]), [1]) - assert_array_equal(x, [1, 0]) - - # List of conditions: int array - x = piecewise([0, 0], [np.array([1, 0])], [1]) - assert_array_equal(x, [1, 0]) - - x = piecewise([0, 0], [[False, True]], [lambda x:-1]) - assert_array_equal(x, [0, -1]) - - def test_two_conditions(self): - x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) - assert_array_equal(x, [3, 4]) - - def test_default(self): - # No value specified for x[1], should be 0 - x = piecewise([1, 2], [True, False], [2]) - assert_array_equal(x, [2, 0]) - - # Should set x[1] to 3 - x = piecewise([1, 2], [True, False], [2, 3]) - assert_array_equal(x, [2, 3]) - - def test_0d(self): - x = np.array(3) - y = piecewise(x, x > 3, [4, 0]) - assert_(y.ndim == 0) - assert_(y == 0) - - x = 5 - y = piecewise(x, [[True], [False]], [1, 0]) - assert_(y.ndim == 0) - assert_(y == 1) - - def test_0d_comparison(self): - x = 3 - y = piecewise(x, [x <= 3, x > 3], [4, 0]) - - -class TestBincount(TestCase): - def test_simple(self): - y = np.bincount(np.arange(4)) - assert_array_equal(y, np.ones(4)) - - def test_simple2(self): - y = np.bincount(np.array([1, 5, 2, 4, 1])) - assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) - - def test_simple_weight(self): - x = np.arange(4) - w = np.array([0.2, 0.3, 0.5, 0.1]) - y = np.bincount(x, w) - assert_array_equal(y, w) - - def test_simple_weight2(self): - x = np.array([1, 2, 4, 5, 2]) - w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) - y = np.bincount(x, w) - assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) - - def test_with_minlength(self): - x = np.array([0, 1, 0, 1, 1]) - y = np.bincount(x, minlength=3) - assert_array_equal(y, np.array([2, 3, 0])) - - def test_with_minlength_smaller_than_maxvalue(self): - x = np.array([0, 1, 1, 2, 2, 3, 3]) - y = np.bincount(x, minlength=2) - assert_array_equal(y, np.array([1, 2, 2, 2])) - - def test_with_minlength_and_weights(self): - x = np.array([1, 2, 4, 5, 2]) - w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) - y = np.bincount(x, w, 8) - assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) - - def test_empty(self): - x = np.array([], dtype=int) - y = np.bincount(x) - assert_array_equal(x, y) - - def test_empty_with_minlength(self): - x = np.array([], dtype=int) - y = np.bincount(x, minlength=5) - assert_array_equal(y, np.zeros(5, dtype=int)) - - def test_with_incorrect_minlength(self): - x = np.array([], dtype=int) - assert_raises_regex(TypeError, "an integer is required", - lambda: np.bincount(x, minlength="foobar")) - assert_raises_regex(ValueError, "must be positive", - lambda: np.bincount(x, minlength=-1)) - assert_raises_regex(ValueError, "must be positive", - lambda: np.bincount(x, minlength=0)) - - x = np.arange(5) - assert_raises_regex(TypeError, "an integer is required", - 
lambda: np.bincount(x, minlength="foobar")) - assert_raises_regex(ValueError, "minlength must be positive", - lambda: np.bincount(x, minlength=-1)) - assert_raises_regex(ValueError, "minlength must be positive", - lambda: np.bincount(x, minlength=0)) - - -class TestInterp(TestCase): - def test_exceptions(self): - assert_raises(ValueError, interp, 0, [], []) - assert_raises(ValueError, interp, 0, [0], [1, 2]) - - def test_basic(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = np.linspace(0, 1, 50) - assert_almost_equal(np.interp(x0, x, y), x0) - - def test_right_left_behavior(self): - assert_equal(interp([-1, 0, 1], [0], [1]), [1, 1, 1]) - assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0, 1, 1]) - assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1, 1, 0]) - assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0, 1, 0]) - - def test_scalar_interpolation_point(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = 0 - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = .3 - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.float32(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.float64(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.nan - assert_almost_equal(np.interp(x0, x, y), x0) - - def test_zero_dimensional_interpolation_point(self): - x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) - x0 = np.array(.3) - assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.array(.3, dtype=object) - assert_almost_equal(np.interp(x0, x, y), .3) - - def test_if_len_x_is_small(self): - xp = np.arange(0, 10, 0.0001) - fp = np.sin(xp) - assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) - - -def compare_results(res, desired): - for i in range(len(desired)): - assert_array_equal(res[i], desired[i]) - - -class TestScoreatpercentile(TestCase): - - def test_basic(self): - x = np.arange(8) * 0.5 - assert_equal(np.percentile(x, 0), 0.) 
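# Sketch of the default 'linear' rule behind these numbers: percentile q
# of a sorted length-n sample interpolates at position (n - 1) * q / 100,
# so for x = np.arange(8) * 0.5 (n = 8), q=50 gives position 3.5 and
# (x[3] + x[4]) / 2 = 1.75, while q=100 gives position 7 and x[7] = 3.5.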
- assert_equal(np.percentile(x, 100), 3.5) - assert_equal(np.percentile(x, 50), 1.75) - - def test_api(self): - d = np.ones(5) - np.percentile(d, 5, None, None, False) - np.percentile(d, 5, None, None, False, 'linear') - o = np.ones((1,)) - np.percentile(d, 5, None, o, False, 'linear') - - def test_2D(self): - x = np.array([[1, 1, 1], - [1, 1, 1], - [4, 4, 3], - [1, 1, 1], - [1, 1, 1]]) - assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) - - def test_linear(self): - - # Test defaults - assert_equal(np.percentile(range(10), 50), 4.5) - - # explicitly specify interpolation 'linear' (the default) - assert_equal(np.percentile(range(10), 50, - interpolation='linear'), 4.5) - - def test_lower_higher(self): - - # interpolation 'lower'/'higher' - assert_equal(np.percentile(range(10), 50, - interpolation='lower'), 4) - assert_equal(np.percentile(range(10), 50, - interpolation='higher'), 5) - - def test_midpoint(self): - assert_equal(np.percentile(range(10), 51, - interpolation='midpoint'), 4.5) - - def test_nearest(self): - assert_equal(np.percentile(range(10), 51, - interpolation='nearest'), 5) - assert_equal(np.percentile(range(10), 49, - interpolation='nearest'), 4) - - def test_sequence(self): - x = np.arange(8) * 0.5 - assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) - - def test_axis(self): - x = np.arange(12).reshape(3, 4) - - assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) - - r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] - assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) - - r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] - assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) - - # ensure qth axis is always first as with np.array(old_percentile(..)) - x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - assert_equal(np.percentile(x, (25, 50)).shape, (2,)) - assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) - assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) - assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) - assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) - assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) - assert_equal(np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) - assert_equal(np.percentile(x, (25, 50), - interpolation="higher").shape, (2,)) - assert_equal(np.percentile(x, (25, 50, 75), - interpolation="higher").shape, (3,)) - assert_equal(np.percentile(x, (25, 50), axis=0, - interpolation="higher").shape, (2, 4, 5, 6)) - assert_equal(np.percentile(x, (25, 50), axis=1, - interpolation="higher").shape, (2, 3, 5, 6)) - assert_equal(np.percentile(x, (25, 50), axis=2, - interpolation="higher").shape, (2, 3, 4, 6)) - assert_equal(np.percentile(x, (25, 50), axis=3, - interpolation="higher").shape, (2, 3, 4, 5)) - assert_equal(np.percentile(x, (25, 50, 75), axis=1, - interpolation="higher").shape, (3, 3, 5, 6)) - - def test_scalar_q(self): - # test for no empty dimensions for compatibility with old percentile - x = np.arange(12).reshape(3, 4) - assert_equal(np.percentile(x, 50), 5.5) - self.assertTrue(np.isscalar(np.percentile(x, 50))) - r0 = np.array([ 4., 5., 6., 7.]) - assert_equal(np.percentile(x, 50, axis=0), r0) - assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) - r1 = np.array([ 1.5, 5.5, 9.5]) - assert_almost_equal(np.percentile(x, 50, axis=1), r1) - assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) - - out = np.empty(1) - assert_equal(np.percentile(x, 50, out=out), 5.5) -
assert_equal(out, 5.5) - out = np.empty(4) - assert_equal(np.percentile(x, 50, axis=0, out=out), r0) - assert_equal(out, r0) - out = np.empty(3) - assert_equal(np.percentile(x, 50, axis=1, out=out), r1) - assert_equal(out, r1) - - # test for no empty dimensions for compatibility with old percentile - x = np.arange(12).reshape(3, 4) - assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) - self.assertTrue(np.isscalar(np.percentile(x, 50))) - r0 = np.array([ 4., 5., 6., 7.]) - c0 = np.percentile(x, 50, interpolation='lower', axis=0) - assert_equal(c0, r0) - assert_equal(c0.shape, r0.shape) - r1 = np.array([ 1., 5., 9.]) - c1 = np.percentile(x, 50, interpolation='lower', axis=1) - assert_almost_equal(c1, r1) - assert_equal(c1.shape, r1.shape) - - out = np.empty((), dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', out=out) - assert_equal(c, 5) - assert_equal(out, 5) - out = np.empty(4, dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', axis=0, out=out) - assert_equal(c, r0) - assert_equal(out, r0) - out = np.empty(3, dtype=x.dtype) - c = np.percentile(x, 50, interpolation='lower', axis=1, out=out) - assert_equal(c, r1) - assert_equal(out, r1) - - def test_exception(self): - assert_raises(ValueError, np.percentile, [1, 2], 56, - interpolation='foobar') - assert_raises(ValueError, np.percentile, [1], 101) - assert_raises(ValueError, np.percentile, [1], -1) - assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) - assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) - - def test_percentile_list(self): - assert_equal(np.percentile([1, 2, 3], 0), 1) - - def test_percentile_out(self): - x = np.array([1, 2, 3]) - y = np.zeros((3,)) - p = (1, 2, 3) - np.percentile(x, p, out=y) - assert_equal(y, np.percentile(x, p)) - - x = np.array([[1, 2, 3], - [4, 5, 6]]) - - y = np.zeros((3, 3)) - np.percentile(x, p, axis=0, out=y) - assert_equal(y, np.percentile(x, p, axis=0)) - - y = np.zeros((3, 2)) - np.percentile(x, p, axis=1, out=y) - assert_equal(y, np.percentile(x, p, axis=1)) - - x = np.arange(12).reshape(3, 4) - # q.dim > 1, float - r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) - out = np.empty((2, 4)) - assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0) - assert_equal(out, r0) - r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) - out = np.empty((2, 3)) - assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) - assert_equal(out, r1) - - # q.dim > 1, int - r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) - out = np.empty((2, 4), dtype=x.dtype) - c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out) - assert_equal(c, r0) - assert_equal(out, r0) - r1 = np.array([[0, 4, 8], [1, 5, 9]]) - out = np.empty((2, 3), dtype=x.dtype) - c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out) - assert_equal(c, r1) - assert_equal(out, r1) - - def test_percentile_empty_dim(self): - # empty dims are preserved - d = np.arange(11*2).reshape(11, 1, 2, 1) - assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) - assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) - assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) - assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) - assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) - assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) - assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) - assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) - -
assert_array_equal(np.percentile(d, 50, axis=2, - interpolation='midpoint').shape, - (11, 1, 1)) - assert_array_equal(np.percentile(d, 50, axis=-2, - interpolation='midpoint').shape, - (11, 1, 1)) - - assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, - (2, 1, 2, 1)) - assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, - (2, 11, 2, 1)) - assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, - (2, 11, 1, 1)) - assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, - (2, 11, 1, 2)) - - - def test_percentile_no_overwrite(self): - a = np.array([2, 3, 4, 1]) - np.percentile(a, [50], overwrite_input=False) - assert_equal(a, np.array([2, 3, 4, 1])) - - a = np.array([2, 3, 4, 1]) - np.percentile(a, [50]) - assert_equal(a, np.array([2, 3, 4, 1])) - - def test_no_p_overwrite(self): - p = np.linspace(0., 100., num=5) - np.percentile(np.arange(100.), p, interpolation="midpoint") - assert_array_equal(p, np.linspace(0., 100., num=5)) - p = np.linspace(0., 100., num=5).tolist() - np.percentile(np.arange(100.), p, interpolation="midpoint") - assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) - - def test_percentile_overwrite(self): - a = np.array([2, 3, 4, 1]) - b = np.percentile(a, [50], overwrite_input=True) - assert_equal(b, np.array([2.5])) - - b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) - assert_equal(b, np.array([2.5])) - - def test_extended_axis(self): - o = np.random.normal(size=(71, 23)) - x = np.dstack([o] * 10) - assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) - x = np.rollaxis(x, -1, 0) - assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) - x = x.swapaxes(0, 1).copy() - assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) - x = x.swapaxes(0, 1).copy() - - assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), - np.percentile(x, [25, 60], axis=None)) - assert_equal(np.percentile(x, [25, 60], axis=(0,)), - np.percentile(x, [25, 60], axis=0)) - - d = np.arange(3 * 5 * 7 * 11).reshape(3, 5, 7, 11) - np.random.shuffle(d) - assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], - np.percentile(d[:, :, :, 0].flatten(), 25)) - assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], - np.percentile(d[:, :, 1, :].flatten(), [10, 90])) - assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], - np.percentile(d[:, :, 2, :].flatten(), 25)) - assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], - np.percentile(d[2, :, :, :].flatten(), 25)) - assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], - np.percentile(d[2, 1, :, :].flatten(), 25)) - assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], - np.percentile(d[2, :, :, 1].flatten(), 25)) - assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], - np.percentile(d[2, :, 2, :].flatten(), 25)) - - def test_extended_axis_invalid(self): - d = np.ones((3, 5, 7, 11)) - assert_raises(IndexError, np.percentile, d, axis=-5, q=25) - assert_raises(IndexError, np.percentile, d, axis=(0, -5), q=25) - assert_raises(IndexError, np.percentile, d, axis=4, q=25) - assert_raises(IndexError, np.percentile, d, axis=(0, 4), q=25) - assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) - - def test_keepdims(self): - d = np.ones((3, 5, 7, 11)) - assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, - (1, 1, 1, 1)) - assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, - (1, 1, 7, 11)) - assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, - (1, 5, 7, 1)) - 
assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, - (3, 1, 7, 11)) - assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, - (1, 1, 1, 1)) - assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, - (1, 1, 7, 1)) - - assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), - keepdims=True).shape, (2, 1, 1, 7, 1)) - assert_equal(np.percentile(d, [1, 7], axis=(0, 3), - keepdims=True).shape, (2, 1, 5, 7, 1)) - - -class TestMedian(TestCase): - def test_basic(self): - a0 = np.array(1) - a1 = np.arange(2) - a2 = np.arange(6).reshape(2, 3) - assert_equal(np.median(a0), 1) - assert_allclose(np.median(a1), 0.5) - assert_allclose(np.median(a2), 2.5) - assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) - assert_equal(np.median(a2, axis=1), [1, 4]) - assert_allclose(np.median(a2, axis=None), 2.5) - - a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) - assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) - a = np.array([0.0463301, 0.0444502, 0.141249]) - assert_equal(a[0], np.median(a)) - a = np.array([0.0444502, 0.141249, 0.0463301]) - assert_equal(a[-1], np.median(a)) - # check array scalar result - assert_equal(np.median(a).ndim, 0) - a[1] = np.nan - assert_equal(np.median(a).ndim, 0) - - def test_axis_keyword(self): - a3 = np.array([[2, 3], - [0, 1], - [6, 7], - [4, 5]]) - for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: - orig = a.copy() - np.median(a, axis=None) - for ax in range(a.ndim): - np.median(a, axis=ax) - assert_array_equal(a, orig) - - assert_allclose(np.median(a3, axis=0), [3, 4]) - assert_allclose(np.median(a3.T, axis=1), [3, 4]) - assert_allclose(np.median(a3), 3.5) - assert_allclose(np.median(a3, axis=None), 3.5) - assert_allclose(np.median(a3.T), 3.5) - - def test_overwrite_keyword(self): - a3 = np.array([[2, 3], - [0, 1], - [6, 7], - [4, 5]]) - a0 = np.array(1) - a1 = np.arange(2) - a2 = np.arange(6).reshape(2, 3) - assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) - assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) - assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) - assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), - [1.5, 2.5, 3.5]) - assert_allclose( - np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) - assert_allclose( - np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) - assert_allclose( - np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) - assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), - [3, 4]) - - a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) - # shuffle each 2-d slice in place; a bare map() is lazy on Python 3 - # and would never run - for a4_slice in a4: - np.random.shuffle(a4_slice) - assert_allclose(np.median(a4, axis=None), - np.median(a4.copy(), axis=None, overwrite_input=True)) - assert_allclose(np.median(a4, axis=0), - np.median(a4.copy(), axis=0, overwrite_input=True)) - assert_allclose(np.median(a4, axis=1), - np.median(a4.copy(), axis=1, overwrite_input=True)) - assert_allclose(np.median(a4, axis=2), - np.median(a4.copy(), axis=2, overwrite_input=True)) - - def test_array_like(self): - x = [1, 2, 3] - assert_almost_equal(np.median(x), 2) - x2 = [x] - assert_almost_equal(np.median(x2), 2) - assert_allclose(np.median(x2, axis=0), x) - - def test_subclass(self): - # gh-3846 - class MySubClass(np.ndarray): - def __new__(cls, input_array, info=None): - obj = np.asarray(input_array).view(cls) - obj.info = info - return obj - - def mean(self, axis=None, dtype=None, out=None): - return -7 - - a = MySubClass([1, 2, 3]) - assert_equal(np.median(a), -7) - - def test_object(self): - o =
np.arange(7.) - assert_(type(np.median(o.astype(object))) is float) - o[2] = np.nan - assert_(type(np.median(o.astype(object))) is float) - - def test_extended_axis(self): - o = np.random.normal(size=(71, 23)) - x = np.dstack([o] * 10) - assert_equal(np.median(x, axis=(0, 1)), np.median(o)) - x = np.rollaxis(x, -1, 0) - assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) - x = x.swapaxes(0, 1).copy() - assert_equal(np.median(x, axis=(0, -1)), np.median(o)) - - assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) - assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) - assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) - - d = np.arange(3 * 5 * 7 * 11).reshape(3, 5, 7, 11) - np.random.shuffle(d) - assert_equal(np.median(d, axis=(0, 1, 2))[0], - np.median(d[:, :, :, 0].flatten())) - assert_equal(np.median(d, axis=(0, 1, 3))[1], - np.median(d[:, :, 1, :].flatten())) - assert_equal(np.median(d, axis=(3, 1, -4))[2], - np.median(d[:, :, 2, :].flatten())) - assert_equal(np.median(d, axis=(3, 1, 2))[2], - np.median(d[2, :, :, :].flatten())) - assert_equal(np.median(d, axis=(3, 2))[2, 1], - np.median(d[2, 1, :, :].flatten())) - assert_equal(np.median(d, axis=(1, -2))[2, 1], - np.median(d[2, :, :, 1].flatten())) - assert_equal(np.median(d, axis=(1, 3))[2, 2], - np.median(d[2, :, 2, :].flatten())) - - def test_extended_axis_invalid(self): - d = np.ones((3, 5, 7, 11)) - assert_raises(IndexError, np.median, d, axis=-5) - assert_raises(IndexError, np.median, d, axis=(0, -5)) - assert_raises(IndexError, np.median, d, axis=4) - assert_raises(IndexError, np.median, d, axis=(0, 4)) - assert_raises(ValueError, np.median, d, axis=(1, 1)) - - def test_keepdims(self): - d = np.ones((3, 5, 7, 11)) - assert_equal(np.median(d, axis=None, keepdims=True).shape, - (1, 1, 1, 1)) - assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, - (1, 1, 7, 11)) - assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, - (1, 5, 7, 1)) - assert_equal(np.median(d, axis=(1,), keepdims=True).shape, - (3, 1, 7, 11)) - assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, - (1, 1, 1, 1)) - assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, - (1, 1, 7, 1)) - - - -class TestAdd_newdoc_ufunc(TestCase): - - def test_ufunc_arg(self): - assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") - assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah") - - def test_string_arg(self): - assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) - - -class TestAdd_newdoc(TestCase): - def test_add_doc(self): - # test np.add_newdoc - tgt = "Current flat index into the array."
- self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt) - self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300) - self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_index_tricks.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_index_tricks.py deleted file mode 100644 index 97047c53aa388..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_index_tricks.py +++ /dev/null @@ -1,289 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_raises - ) -from numpy.lib.index_tricks import ( - mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, - index_exp, ndindex, r_, s_ - ) - - -class TestRavelUnravelIndex(TestCase): - def test_basic(self): - assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) - assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) - assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) - assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) - assert_raises(ValueError, np.unravel_index, -1, (2, 2)) - assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) - assert_raises(ValueError, np.unravel_index, 4, (2, 2)) - assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) - assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) - assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) - assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) - assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) - - assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) - assert_equal( - np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) - - arr = np.array([[3, 6, 6], [4, 5, 1]]) - assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) - assert_equal( - np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) - assert_equal( - np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) - assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), - [12, 13, 13]) - assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) - - assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), - [[3, 6, 6], [4, 5, 1]]) - assert_equal( - np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), - [[3, 6, 6], [4, 5, 1]]) - assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) - - def test_dtypes(self): - # Test with different data types - for dtype in [np.int16, np.uint16, np.int32, - np.uint32, np.int64, np.uint64]: - coords = np.array( - [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) - shape = (5, 8) - uncoords = 8*coords[0]+coords[1] - assert_equal(np.ravel_multi_index(coords, shape), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape)) - uncoords = coords[0]+5*coords[1] - assert_equal( - np.ravel_multi_index(coords, shape, order='F'), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) - - coords = np.array( - [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], - dtype=dtype) - shape = (5, 8, 10) - uncoords = 10*(8*coords[0]+coords[1])+coords[2] - assert_equal(np.ravel_multi_index(coords, shape), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape)) - uncoords = coords[0]+5*(coords[1]+8*coords[2]) - assert_equal( - 
np.ravel_multi_index(coords, shape, order='F'), uncoords) - assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) - - def test_clipmodes(self): - # Test clipmodes - assert_equal( - np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), - np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) - assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), - mode=( - 'wrap', 'raise', 'clip', 'raise')), - np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) - assert_raises( - ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) - - -class TestGrid(TestCase): - def test_basic(self): - a = mgrid[-1:1:10j] - b = mgrid[-1:1:0.1] - assert_(a.shape == (10,)) - assert_(b.shape == (20,)) - assert_(a[0] == -1) - assert_almost_equal(a[-1], 1) - assert_(b[0] == -1) - assert_almost_equal(b[1]-b[0], 0.1, 11) - assert_almost_equal(b[-1], b[0]+19*0.1, 11) - assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) - - def test_linspace_equivalence(self): - y, st = np.linspace(2, 10, retstep=1) - assert_almost_equal(st, 8/49.0) - assert_array_almost_equal(y, mgrid[2:10:50j], 13) - - def test_nd(self): - c = mgrid[-1:1:10j, -2:2:10j] - d = mgrid[-1:1:0.1, -2:2:0.2] - assert_(c.shape == (2, 10, 10)) - assert_(d.shape == (2, 20, 20)) - assert_array_equal(c[0][0, :], -np.ones(10, 'd')) - assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) - assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) - assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) - assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], - 0.1*np.ones(20, 'd'), 11) - assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], - 0.2*np.ones(20, 'd'), 11) - - -class TestConcatenator(TestCase): - def test_1d(self): - assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) - b = np.ones(5) - c = r_[b, 0, 0, b] - assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) - - def test_mixed_type(self): - g = r_[10.1, 1:10] - assert_(g.dtype == 'f8') - - def test_more_mixed_type(self): - g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] - assert_(g.dtype == 'f8') - - def test_2d(self): - b = np.random.rand(5, 5) - c = np.random.rand(5, 5) - d = r_['1', b, c] # append columns - assert_(d.shape == (5, 10)) - assert_array_equal(d[:, :5], b) - assert_array_equal(d[:, 5:], c) - d = r_[b, c] - assert_(d.shape == (10, 5)) - assert_array_equal(d[:5, :], b) - assert_array_equal(d[5:, :], c) - - -class TestNdenumerate(TestCase): - def test_basic(self): - a = np.array([[1, 2], [3, 4]]) - assert_equal(list(ndenumerate(a)), - [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) - - -class TestIndexExpression(TestCase): - def test_regression_1(self): - # ticket #1196 - a = np.arange(2) - assert_equal(a[:-1], a[s_[:-1]]) - assert_equal(a[:-1], a[index_exp[:-1]]) - - def test_simple_1(self): - a = np.random.rand(4, 5, 6) - - assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) - assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) - - -def test_c_(): - a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] - assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) - - -def test_fill_diagonal(): - a = np.zeros((3, 3), int) - fill_diagonal(a, 5) - yield (assert_array_equal, a, - np.array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5]])) - - #Test tall matrix - a = np.zeros((10, 3), int) - fill_diagonal(a, 5) - yield (assert_array_equal, a, - np.array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0]])) - - #Test tall matrix wrap - a = np.zeros((10, 3), int) - 
fill_diagonal(a, 5, True) - yield (assert_array_equal, a, - np.array([[5, 0, 0], - [0, 5, 0], - [0, 0, 5], - [0, 0, 0], - [5, 0, 0], - [0, 5, 0], - [0, 0, 5], - [0, 0, 0], - [5, 0, 0], - [0, 5, 0]])) - - #Test wide matrix - a = np.zeros((3, 10), int) - fill_diagonal(a, 5) - yield (assert_array_equal, a, - np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])) - - # The same function can operate on a 4-d array: - a = np.zeros((3, 3, 3, 3), int) - fill_diagonal(a, 4) - i = np.array([0, 1, 2]) - yield (assert_equal, np.where(a != 0), (i, i, i, i)) - - -def test_diag_indices(): - di = diag_indices(4) - a = np.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - a[di] = 100 - yield (assert_array_equal, a, - np.array([[100, 2, 3, 4], - [5, 100, 7, 8], - [9, 10, 100, 12], - [13, 14, 15, 100]])) - - # Now, we create indices to manipulate a 3-d array: - d3 = diag_indices(2, 3) - - # And use it to set the diagonal of a zeros array to 1: - a = np.zeros((2, 2, 2), int) - a[d3] = 1 - yield (assert_array_equal, a, - np.array([[[1, 0], - [0, 0]], - - [[0, 0], - [0, 1]]])) - - -def test_diag_indices_from(): - x = np.random.random((4, 4)) - r, c = diag_indices_from(x) - assert_array_equal(r, np.arange(4)) - assert_array_equal(c, np.arange(4)) - - -def test_ndindex(): - x = list(ndindex(1, 2, 3)) - expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] - assert_array_equal(x, expected) - - x = list(ndindex((1, 2, 3))) - assert_array_equal(x, expected) - - # Test use of scalars and tuples - x = list(ndindex((3,))) - assert_array_equal(x, list(ndindex(3))) - - # Make sure size argument is optional - x = list(ndindex()) - assert_equal(x, [()]) - - x = list(ndindex(())) - assert_equal(x, [()]) - - # Make sure 0-sized ndindex works correctly - x = list(ndindex(*[0])) - assert_equal(x, []) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_io.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_io.py deleted file mode 100644 index c11cd004149c0..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_io.py +++ /dev/null @@ -1,1736 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import gzip -import os -import threading -from tempfile import mkstemp, NamedTemporaryFile -import time -import warnings -import gc -from io import BytesIO -from datetime import datetime - -import numpy as np -import numpy.ma as ma -from numpy.lib._iotools import (ConverterError, ConverterLockError, - ConversionWarning) -from numpy.compat import asbytes, asbytes_nested, bytes, asstr -from nose import SkipTest -from numpy.ma.testutils import ( - TestCase, assert_equal, assert_array_equal, - assert_raises, assert_raises_regex, run_module_suite -) -from numpy.testing import assert_warns, assert_, build_err_msg -from numpy.testing.utils import tempdir - - -class TextIO(BytesIO): - """Helper IO class. - - Writes encode strings to bytes if needed, reads return bytes. - This makes it easier to emulate files opened in binary mode - without needing to explicitly convert strings to bytes in - setting up the test data. 
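-
- An illustrative round-trip (editor's sketch; this example is not in
- the original docstring, and it leans on asbytes from numpy.compat,
- imported above):
-
- >>> c = TextIO('1 2\n3 4')
- >>> c.read() == asbytes('1 2\n3 4')
- True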
- - """ - def __init__(self, s=""): - BytesIO.__init__(self, asbytes(s)) - - def write(self, s): - BytesIO.write(self, asbytes(s)) - - def writelines(self, lines): - BytesIO.writelines(self, [asbytes(s) for s in lines]) - - -MAJVER, MINVER = sys.version_info[:2] -IS_64BIT = sys.maxsize > 2**32 - - -def strptime(s, fmt=None): - """This function is available in the datetime module only - from Python >= 2.5. - - """ - if sys.version_info[0] >= 3: - return datetime(*time.strptime(s.decode('latin1'), fmt)[:3]) - else: - return datetime(*time.strptime(s, fmt)[:3]) - - -class RoundtripTest(object): - def roundtrip(self, save_func, *args, **kwargs): - """ - save_func : callable - Function used to save arrays to file. - file_on_disk : bool - If true, store the file on disk, instead of in a - string buffer. - save_kwds : dict - Parameters passed to `save_func`. - load_kwds : dict - Parameters passed to `numpy.load`. - args : tuple of arrays - Arrays stored to file. - - """ - save_kwds = kwargs.get('save_kwds', {}) - load_kwds = kwargs.get('load_kwds', {}) - file_on_disk = kwargs.get('file_on_disk', False) - - if file_on_disk: - target_file = NamedTemporaryFile(delete=False) - load_file = target_file.name - else: - target_file = BytesIO() - load_file = target_file - - try: - arr = args - - save_func(target_file, *arr, **save_kwds) - target_file.flush() - target_file.seek(0) - - if sys.platform == 'win32' and not isinstance(target_file, BytesIO): - target_file.close() - - arr_reloaded = np.load(load_file, **load_kwds) - - self.arr = arr - self.arr_reloaded = arr_reloaded - finally: - if not isinstance(target_file, BytesIO): - target_file.close() - # holds an open file descriptor so it can't be deleted on win - if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): - os.remove(target_file.name) - - def check_roundtrips(self, a): - self.roundtrip(a) - self.roundtrip(a, file_on_disk=True) - self.roundtrip(np.asfortranarray(a)) - self.roundtrip(np.asfortranarray(a), file_on_disk=True) - if a.shape[0] > 1: - # neither C nor Fortran contiguous for 2D arrays or more - self.roundtrip(np.asfortranarray(a)[1:]) - self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) - - def test_array(self): - a = np.array([], float) - self.check_roundtrips(a) - - a = np.array([[1, 2], [3, 4]], float) - self.check_roundtrips(a) - - a = np.array([[1, 2], [3, 4]], int) - self.check_roundtrips(a) - - a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) - self.check_roundtrips(a) - - a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) - self.check_roundtrips(a) - - def test_array_object(self): - if sys.version_info[:2] >= (2, 7): - a = np.array([], object) - self.check_roundtrips(a) - - a = np.array([[1, 2], [3, 4]], object) - self.check_roundtrips(a) - # Fails with UnpicklingError: could not find MARK on Python 2.6 - - def test_1D(self): - a = np.array([1, 2, 3, 4], int) - self.roundtrip(a) - - @np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32") - def test_mmap(self): - a = np.array([[1, 2.5], [4, 7.3]]) - self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) - - a = np.asfortranarray([[1, 2.5], [4, 7.3]]) - self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) - - def test_record(self): - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - self.check_roundtrips(a) - - def test_format_2_0(self): - dt = [(("%d" % i) * 100, float) for i in range(500)] - a = np.ones(1000, dtype=dt) - with warnings.catch_warnings(record=True): - 
warnings.filterwarnings('always', '', UserWarning) - self.check_roundtrips(a) - - -class TestSaveLoad(RoundtripTest, TestCase): - def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.save, *args, **kwargs) - assert_equal(self.arr[0], self.arr_reloaded) - assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) - assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) - - -class TestSavezLoad(RoundtripTest, TestCase): - def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) - try: - for n, arr in enumerate(self.arr): - reloaded = self.arr_reloaded['arr_%d' % n] - assert_equal(arr, reloaded) - assert_equal(arr.dtype, reloaded.dtype) - assert_equal(arr.flags.fnc, reloaded.flags.fnc) - finally: - # delete tempfile, must be done here on windows - if self.arr_reloaded.fid: - self.arr_reloaded.fid.close() - os.remove(self.arr_reloaded.fid.name) - - @np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems") - @np.testing.dec.slow - def test_big_arrays(self): - L = (1 << 31) + 100000 - a = np.empty(L, dtype=np.uint8) - with tempdir(prefix="numpy_test_big_arrays_") as tmpdir: - tmp = os.path.join(tmpdir, "file.npz") - np.savez(tmp, a=a) - del a - npfile = np.load(tmp) - a = npfile['a'] - npfile.close() - - def test_multiple_arrays(self): - a = np.array([[1, 2], [3, 4]], float) - b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) - self.roundtrip(a, b) - - def test_named_arrays(self): - a = np.array([[1, 2], [3, 4]], float) - b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) - c = BytesIO() - np.savez(c, file_a=a, file_b=b) - c.seek(0) - l = np.load(c) - assert_equal(a, l['file_a']) - assert_equal(b, l['file_b']) - - def test_savez_filename_clashes(self): - # Test that issue #852 is fixed - # and savez functions in multithreaded environment - - def writer(error_list): - fd, tmp = mkstemp(suffix='.npz') - os.close(fd) - try: - arr = np.random.randn(500, 500) - try: - np.savez(tmp, arr=arr) - except OSError as err: - error_list.append(err) - finally: - os.remove(tmp) - - errors = [] - threads = [threading.Thread(target=writer, args=(errors,)) - for j in range(3)] - for t in threads: - t.start() - for t in threads: - t.join() - - if errors: - raise AssertionError(errors) - - def test_not_closing_opened_fid(self): - # Test that issue #2178 is fixed: - # verify could seek on 'loaded' file - - fd, tmp = mkstemp(suffix='.npz') - os.close(fd) - try: - fp = open(tmp, 'wb') - np.savez(fp, data='LOVELY LOAD') - fp.close() - - fp = open(tmp, 'rb', 10000) - fp.seek(0) - assert_(not fp.closed) - _ = np.load(fp)['data'] - assert_(not fp.closed) - # must not get closed by .load(opened fp) - fp.seek(0) - assert_(not fp.closed) - - finally: - fp.close() - os.remove(tmp) - - def test_closing_fid(self): - # Test that issue #1517 (too many opened files) remains closed - # It might be a "weak" test since failed to get triggered on - # e.g. Debian sid of 2012 Jul 05 but was reported to - # trigger the failure on Ubuntu 10.04: - # http://projects.scipy.org/numpy/ticket/1517#comment:2 - fd, tmp = mkstemp(suffix='.npz') - os.close(fd) - - try: - fp = open(tmp, 'wb') - np.savez(fp, data='LOVELY LOAD') - fp.close() - # We need to check if the garbage collector can properly close - # numpy npz file returned by np.load when their reference count - # goes to zero. Python 3 running in debug mode raises a - # ResourceWarning when file closing is left to the garbage - # collector, so we catch the warnings. 
Because ResourceWarning - # is unknown in Python < 3.x, we take the easy way out and - # catch all warnings. - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - for i in range(1, 1025): - try: - np.load(tmp)["data"] - except Exception as e: - msg = "Failed to load data from a file: %s" % e - raise AssertionError(msg) - finally: - os.remove(tmp) - - def test_closing_zipfile_after_load(self): - # Check that zipfile owns file and can close it. - # This needs to pass a file name to load for the - # test. - with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir: - fd, tmp = mkstemp(suffix='.npz', dir=tmpdir) - os.close(fd) - np.savez(tmp, lab='place holder') - data = np.load(tmp) - fp = data.zip.fp - data.close() - assert_(fp.closed) - - -class TestSaveTxt(TestCase): - def test_array(self): - a = np.array([[1, 2], [3, 4]], float) - fmt = "%.18e" - c = BytesIO() - np.savetxt(c, a, fmt=fmt) - c.seek(0) - assert_equal(c.readlines(), - [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), - asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) - - a = np.array([[1, 2], [3, 4]], int) - c = BytesIO() - np.savetxt(c, a, fmt='%d') - c.seek(0) - assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) - - def test_1D(self): - a = np.array([1, 2, 3, 4], int) - c = BytesIO() - np.savetxt(c, a, fmt='%d') - c.seek(0) - lines = c.readlines() - assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) - - def test_record(self): - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - c = BytesIO() - np.savetxt(c, a, fmt='%d') - c.seek(0) - assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) - - def test_delimiter(self): - a = np.array([[1., 2.], [3., 4.]]) - c = BytesIO() - np.savetxt(c, a, delimiter=',', fmt='%d') - c.seek(0) - assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) - - def test_format(self): - a = np.array([(1, 2), (3, 4)]) - c = BytesIO() - # Sequence of formats - np.savetxt(c, a, fmt=['%02d', '%3.1f']) - c.seek(0) - assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) - - # A single multiformat string - c = BytesIO() - np.savetxt(c, a, fmt='%02d : %3.1f') - c.seek(0) - lines = c.readlines() - assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) - - # Specify delimiter, should be overridden - c = BytesIO() - np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') - c.seek(0) - lines = c.readlines() - assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) - - # Bad fmt, should raise a ValueError - c = BytesIO() - assert_raises(ValueError, np.savetxt, c, a, fmt=99) - - def test_header_footer(self): - """ - Test the functionality of the header and footer keyword arguments.
- """ - c = BytesIO() - a = np.array([(1, 2), (3, 4)], dtype=np.int) - test_header_footer = 'Test header / footer' - # Test the header keyword argument - np.savetxt(c, a, fmt='%1d', header=test_header_footer) - c.seek(0) - assert_equal(c.read(), - asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) - # Test the footer keyword argument - c = BytesIO() - np.savetxt(c, a, fmt='%1d', footer=test_header_footer) - c.seek(0) - assert_equal(c.read(), - asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) - # Test the commentstr keyword argument used on the header - c = BytesIO() - commentstr = '% ' - np.savetxt(c, a, fmt='%1d', - header=test_header_footer, comments=commentstr) - c.seek(0) - assert_equal(c.read(), - asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) - # Test the commentstr keyword argument used on the footer - c = BytesIO() - commentstr = '% ' - np.savetxt(c, a, fmt='%1d', - footer=test_header_footer, comments=commentstr) - c.seek(0) - assert_equal(c.read(), - asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) - - def test_file_roundtrip(self): - f, name = mkstemp() - os.close(f) - try: - a = np.array([(1, 2), (3, 4)]) - np.savetxt(name, a) - b = np.loadtxt(name) - assert_array_equal(a, b) - finally: - os.unlink(name) - - def test_complex_arrays(self): - ncols = 2 - nrows = 2 - a = np.zeros((ncols, nrows), dtype=np.complex128) - re = np.pi - im = np.e - a[:] = re + 1.0j * im - - # One format only - c = BytesIO() - np.savetxt(c, a, fmt=' %+.3e') - c.seek(0) - lines = c.readlines() - assert_equal( - lines, - [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', - b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) - - # One format for each real and imaginary part - c = BytesIO() - np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) - c.seek(0) - lines = c.readlines() - assert_equal( - lines, - [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', - b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) - - # One format for each complex number - c = BytesIO() - np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) - c.seek(0) - lines = c.readlines() - assert_equal( - lines, - [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', - b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) - - def test_custom_writer(self): - - class CustomWriter(list): - def write(self, text): - self.extend(text.split(b'\n')) - - w = CustomWriter() - a = np.array([(1, 2), (3, 4)]) - np.savetxt(w, a) - b = np.loadtxt(w) - assert_array_equal(a, b) - - -class TestLoadTxt(TestCase): - def test_record(self): - c = TextIO() - c.write('1 2\n3 4') - c.seek(0) - x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) - a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - assert_array_equal(x, a) - - d = TextIO() - d.write('M 64.0 75.0\nF 25.0 60.0') - d.seek(0) - mydescriptor = {'names': ('gender', 'age', 'weight'), - 'formats': ('S1', 'i4', 'f4')} - b = np.array([('M', 64.0, 75.0), - ('F', 25.0, 60.0)], dtype=mydescriptor) - y = np.loadtxt(d, dtype=mydescriptor) - assert_array_equal(y, b) - - def test_array(self): - c = TextIO() - c.write('1 2\n3 4') - - c.seek(0) - x = np.loadtxt(c, dtype=np.int) - a = np.array([[1, 2], [3, 4]], int) - assert_array_equal(x, a) - - c.seek(0) - x = np.loadtxt(c, dtype=float) - a = np.array([[1, 2], [3, 4]], float) - assert_array_equal(x, a) - - def test_1D(self): - c = TextIO() - c.write('1\n2\n3\n4\n') - c.seek(0) - x = np.loadtxt(c, dtype=int) - a = np.array([1, 2, 3, 4], int) - assert_array_equal(x, a) - - c = TextIO() - 
c.write('1,2,3,4\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',') - a = np.array([1, 2, 3, 4], int) - assert_array_equal(x, a) - - def test_missing(self): - c = TextIO() - c.write('1,2,3,,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - converters={3: lambda s: int(s or - 999)}) - a = np.array([1, 2, 3, -999, 5], int) - assert_array_equal(x, a) - - def test_converters_with_usecols(self): - c = TextIO() - c.write('1,2,3,,5\n6,7,8,9,10\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - converters={3: lambda s: int(s or - 999)}, - usecols=(1, 3,)) - a = np.array([[2, -999], [7, 9]], int) - assert_array_equal(x, a) - - def test_comments(self): - c = TextIO() - c.write('# comment\n1,2,3,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - comments='#') - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - def test_skiprows(self): - c = TextIO() - c.write('comment\n1,2,3,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - skiprows=1) - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - c = TextIO() - c.write('# comment\n1,2,3,5\n') - c.seek(0) - x = np.loadtxt(c, dtype=int, delimiter=',', - skiprows=1) - a = np.array([1, 2, 3, 5], int) - assert_array_equal(x, a) - - def test_usecols(self): - a = np.array([[1, 2], [3, 4]], float) - c = BytesIO() - np.savetxt(c, a) - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=(1,)) - assert_array_equal(x, a[:, 1]) - - a = np.array([[1, 2, 3], [3, 4, 5]], float) - c = BytesIO() - np.savetxt(c, a) - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=(1, 2)) - assert_array_equal(x, a[:, 1:]) - - # Testing with arrays instead of tuples. - c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) - assert_array_equal(x, a[:, 1:]) - - # Checking with dtypes defined converters. 
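- # (Editor's note, not original test code: below, usecols=(0, 2) picks
- # the first and third whitespace-separated columns, and the structured
- # dtype built from zip(names, dtypes) maps them, in order, onto the
- # 'stid' (S4) and 'temp' (f8) fields, so the middle column is dropped.)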
- data = '''JOE 70.1 25.3 - BOB 60.5 27.9 - ''' - c = TextIO(data) - names = ['stid', 'temp'] - dtypes = ['S4', 'f8'] - arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) - assert_equal(arr['stid'], [b"JOE", b"BOB"]) - assert_equal(arr['temp'], [25.3, 27.9]) - - def test_fancy_dtype(self): - c = TextIO() - c.write('1,2,3.0\n4,5,6.0\n') - c.seek(0) - dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) - x = np.loadtxt(c, dtype=dt, delimiter=',') - a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) - assert_array_equal(x, a) - - def test_shaped_dtype(self): - c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") - dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ('block', int, (2, 3))]) - x = np.loadtxt(c, dtype=dt) - a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], - dtype=dt) - assert_array_equal(x, a) - - def test_3d_shaped_dtype(self): - c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") - dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), - ('block', int, (2, 2, 3))]) - x = np.loadtxt(c, dtype=dt) - a = np.array([('aaaa', 1.0, 8.0, - [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], - dtype=dt) - assert_array_equal(x, a) - - def test_empty_file(self): - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", - message="loadtxt: Empty input file:") - c = TextIO() - x = np.loadtxt(c) - assert_equal(x.shape, (0,)) - x = np.loadtxt(c, dtype=np.int64) - assert_equal(x.shape, (0,)) - assert_(x.dtype == np.int64) - - def test_unused_converter(self): - c = TextIO() - c.writelines(['1 21\n', '3 42\n']) - c.seek(0) - data = np.loadtxt(c, usecols=(1,), - converters={0: lambda s: int(s, 16)}) - assert_array_equal(data, [21, 42]) - - c.seek(0) - data = np.loadtxt(c, usecols=(1,), - converters={1: lambda s: int(s, 16)}) - assert_array_equal(data, [33, 66]) - - def test_dtype_with_object(self): - "Test using an explicit dtype with an object" - from datetime import date - import time - data = """ 1; 2001-01-01 - 2; 2002-01-31 """ - ndtype = [('idx', int), ('code', np.object)] - func = lambda s: strptime(s.strip(), "%Y-%m-%d") - converters = {1: func} - test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, - converters=converters) - control = np.array( - [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], - dtype=ndtype) - assert_equal(test, control) - - def test_uint64_type(self): - tgt = (9223372043271415339, 9223372043271415853) - c = TextIO() - c.write("%s %s" % tgt) - c.seek(0) - res = np.loadtxt(c, dtype=np.uint64) - assert_equal(res, tgt) - - def test_int64_type(self): - tgt = (-9223372036854775807, 9223372036854775807) - c = TextIO() - c.write("%s %s" % tgt) - c.seek(0) - res = np.loadtxt(c, dtype=np.int64) - assert_equal(res, tgt) - - def test_universal_newline(self): - f, name = mkstemp() - os.write(f, b'1 21\r3 42\r') - os.close(f) - - try: - data = np.loadtxt(name) - assert_array_equal(data, [[1, 21], [3, 42]]) - finally: - os.unlink(name) - - def test_empty_field_after_tab(self): - c = TextIO() - c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') - c.seek(0) - dt = {'names': ('x', 'y', 'z', 'comment'), - 'formats': ('= 3: - # python 3k is known to fail for '\r' - linesep = ('\n', '\r\n') - else: - linesep = ('\n', '\r\n', '\r') - - for sep in linesep: - data = '0 1 2' + sep + '3 4 5' - f, name = mkstemp() - # We can't use NamedTemporaryFile on windows, because we cannot - # reopen the file. 
- try: - os.write(f, asbytes(data)) - assert_array_equal(np.genfromtxt(name), wanted) - finally: - os.close(f) - os.unlink(name) - - def test_gft_using_generator(self): - # gft doesn't work with unicode. - def count(): - for i in range(10): - yield asbytes("%d" % i) - - res = np.genfromtxt(count()) - assert_array_equal(res, np.arange(10)) - - -def test_gzip_load(): - a = np.random.random((5, 5)) - - s = BytesIO() - f = gzip.GzipFile(fileobj=s, mode="w") - - np.save(f, a) - f.close() - s.seek(0) - - f = gzip.GzipFile(fileobj=s, mode="r") - assert_array_equal(np.load(f), a) - - -def test_gzip_loadtxt(): - # Thanks to another Windows brokenness, we can't use - # NamedTemporaryFile: a file created from this function cannot be - # reopened by another open call. So we first build the gzipped string - # of the test reference array, write it to a securely opened file, - # and then read that file back with loadtxt - s = BytesIO() - g = gzip.GzipFile(fileobj=s, mode='w') - g.write(b'1 2 3\n') - g.close() - s.seek(0) - - f, name = mkstemp(suffix='.gz') - try: - os.write(f, s.read()) - s.close() - assert_array_equal(np.loadtxt(name), [1, 2, 3]) - finally: - os.close(f) - os.unlink(name) - - -def test_gzip_loadtxt_from_string(): - s = BytesIO() - f = gzip.GzipFile(fileobj=s, mode="w") - f.write(b'1 2 3\n') - f.close() - s.seek(0) - - f = gzip.GzipFile(fileobj=s, mode="r") - assert_array_equal(np.loadtxt(f), [1, 2, 3]) - - -def test_npzfile_dict(): - s = BytesIO() - x = np.zeros((3, 3)) - y = np.zeros((3, 3)) - - np.savez(s, x=x, y=y) - s.seek(0) - - z = np.load(s) - - assert_('x' in z) - assert_('y' in z) - assert_('x' in z.keys()) - assert_('y' in z.keys()) - - for f, a in z.items(): - assert_(f in ['x', 'y']) - assert_equal(a.shape, (3, 3)) - - assert_(len(z.items()) == 2) - - for f in z: - assert_(f in ['x', 'y']) - - assert_('x' in z.keys()) - - -def test_load_refcount(): - # Check that objects returned by np.load are directly freed based on - # their refcount, rather than needing the gc to collect them. - - f = BytesIO() - np.savez(f, [1, 2, 3]) - f.seek(0) - - gc.collect() - n_before = len(gc.get_objects()) - np.load(f) - n_after = len(gc.get_objects()) - - assert_equal(n_before, n_after) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_nanfunctions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_nanfunctions.py deleted file mode 100644 index 3da6b51490f65..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_nanfunctions.py +++ /dev/null @@ -1,758 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings - -import numpy as np -from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal, - assert_raises, assert_array_equal - ) - - -# Test data -_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], - [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], - [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], - [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) - - -# Rows of _ndat with nans removed -_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), - np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), - np.array([0.1042, -0.5954]), - np.array([0.1610, 0.1859, 0.3146])] - - -class TestNanFunctions_MinMax(TestCase): - - nanfuncs = [np.nanmin, np.nanmax] - stdfuncs = [np.min, np.max] - - def test_mutation(self): - # Check that passed array is not modified.
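- # (Editor's note, not original test code: this copy-call-compare
- # pattern recurs in the test_mutation methods throughout this file;
- # the nan-aware reductions may sort or mask internally, and comparing
- # against the untouched _ndat fixture guards against any in-place
- # modification of the caller's data.)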
ndat = _ndat.copy() - for f in self.nanfuncs: - f(ndat) - assert_equal(ndat, _ndat) - - def test_keepdims(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for axis in [None, 0, 1]: - tgt = rf(mat, axis=axis, keepdims=True) - res = nf(mat, axis=axis, keepdims=True) - assert_(res.ndim == tgt.ndim) - - def test_out(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - resout = np.zeros(3) - tgt = rf(mat, axis=1) - res = nf(mat, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_dtype_from_input(self): - codes = 'efdgFDG' - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for c in codes: - mat = np.eye(3, dtype=c) - tgt = rf(mat, axis=1).dtype.type - res = nf(mat, axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = rf(mat, axis=None).dtype.type - res = nf(mat, axis=None).dtype.type - assert_(res is tgt) - - def test_result_values(self): - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - tgt = [rf(d) for d in _rdat] - res = nf(_ndat, axis=1) - assert_almost_equal(res, tgt) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(mat, axis=axis)).all()) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalars - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(np.nan))) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_masked(self): - mat = np.ma.fix_invalid(_ndat) - msk = mat._mask.copy() - for f in [np.nanmin]: - res = f(mat, axis=1) - tgt = f(_ndat, axis=1) - assert_equal(res, tgt) - assert_equal(mat._mask, msk) - assert_(not np.isinf(mat).any()) - - def test_scalar(self): - for f in self.nanfuncs: - assert_(f(0.) == 0.) - - def test_matrices(self): - # Check that it works and that type and - # shape are preserved - mat = np.matrix(np.eye(3)) - for f in self.nanfuncs: - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 1)) - res = f(mat) - assert_(np.isscalar(res)) - # check that rows of nan are dealt with for subclasses (#4628) - mat[1] = np.nan - for f in self.nanfuncs: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(not np.any(np.isnan(res))) - assert_(len(w) == 0) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) - and not np.isnan(res[2, 0])) - assert_(len(w) == 1, 'no warning raised') - assert_(issubclass(w[0].category, RuntimeWarning)) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = f(mat) - assert_(np.isscalar(res)) - assert_(not np.isnan(res)) - assert_(len(w) == 0) - - -class TestNanFunctions_ArgminArgmax(TestCase): - - nanfuncs = [np.nanargmin, np.nanargmax] - - def test_mutation(self): - # Check that passed array is not modified.
- ndat = _ndat.copy() - for f in self.nanfuncs: - f(ndat) - assert_equal(ndat, _ndat) - - def test_result_values(self): - for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): - for row in _ndat: - with warnings.catch_warnings(record=True): - warnings.simplefilter('always') - ind = f(row) - val = row[ind] - # comparing with NaN is tricky as the result - # is always false except for NaN != NaN - assert_(not np.isnan(val)) - assert_(not fcmp(val, row).any()) - assert_(not np.equal(val, row[:ind]).any()) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - assert_raises(ValueError, f, mat, axis=axis) - assert_raises(ValueError, f, np.nan) - - def test_empty(self): - mat = np.zeros((0, 3)) - for f in self.nanfuncs: - for axis in [0, None]: - assert_raises(ValueError, f, mat, axis=axis) - for axis in [1]: - res = f(mat, axis=axis) - assert_equal(res, np.zeros(0)) - - def test_scalar(self): - for f in self.nanfuncs: - assert_(f(0.) == 0.) - - def test_matrices(self): - # Check that it works and that type and - # shape are preserved - mat = np.matrix(np.eye(3)) - for f in self.nanfuncs: - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 1)) - res = f(mat) - assert_(np.isscalar(res)) - - -class TestNanFunctions_IntTypes(TestCase): - - int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, - np.uint16, np.uint32, np.uint64) - - mat = np.array([127, 39, 93, 87, 46]) - - def integer_arrays(self): - for dtype in self.int_types: - yield self.mat.astype(dtype) - - def test_nanmin(self): - tgt = np.min(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmin(mat), tgt) - - def test_nanmax(self): - tgt = np.max(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmax(mat), tgt) - - def test_nanargmin(self): - tgt = np.argmin(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanargmin(mat), tgt) - - def test_nanargmax(self): - tgt = np.argmax(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanargmax(mat), tgt) - - def test_nansum(self): - tgt = np.sum(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nansum(mat), tgt) - - def test_nanmean(self): - tgt = np.mean(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanmean(mat), tgt) - - def test_nanvar(self): - tgt = np.var(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanvar(mat), tgt) - - tgt = np.var(mat, ddof=1) - for mat in self.integer_arrays(): - assert_equal(np.nanvar(mat, ddof=1), tgt) - - def test_nanstd(self): - tgt = np.std(self.mat) - for mat in self.integer_arrays(): - assert_equal(np.nanstd(mat), tgt) - - tgt = np.std(self.mat, ddof=1) - for mat in self.integer_arrays(): - assert_equal(np.nanstd(mat, ddof=1), tgt) - - -class TestNanFunctions_Sum(TestCase): - - def test_mutation(self): - # Check that passed array is not modified. 
- ndat = _ndat.copy() - np.nansum(ndat) - assert_equal(ndat, _ndat) - - def test_keepdims(self): - mat = np.eye(3) - for axis in [None, 0, 1]: - tgt = np.sum(mat, axis=axis, keepdims=True) - res = np.nansum(mat, axis=axis, keepdims=True) - assert_(res.ndim == tgt.ndim) - - def test_out(self): - mat = np.eye(3) - resout = np.zeros(3) - tgt = np.sum(mat, axis=1) - res = np.nansum(mat, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_dtype_from_dtype(self): - mat = np.eye(3) - codes = 'efdgFDG' - for c in codes: - tgt = np.sum(mat, dtype=np.dtype(c), axis=1).dtype.type - res = np.nansum(mat, dtype=np.dtype(c), axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = np.sum(mat, dtype=np.dtype(c), axis=None).dtype.type - res = np.nansum(mat, dtype=np.dtype(c), axis=None).dtype.type - assert_(res is tgt) - - def test_dtype_from_char(self): - mat = np.eye(3) - codes = 'efdgFDG' - for c in codes: - tgt = np.sum(mat, dtype=c, axis=1).dtype.type - res = np.nansum(mat, dtype=c, axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = np.sum(mat, dtype=c, axis=None).dtype.type - res = np.nansum(mat, dtype=c, axis=None).dtype.type - assert_(res is tgt) - - def test_dtype_from_input(self): - codes = 'efdgFDG' - for c in codes: - mat = np.eye(3, dtype=c) - tgt = np.sum(mat, axis=1).dtype.type - res = np.nansum(mat, axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = np.sum(mat, axis=None).dtype.type - res = np.nansum(mat, axis=None).dtype.type - assert_(res is tgt) - - def test_result_values(self): - tgt = [np.sum(d) for d in _rdat] - res = np.nansum(_ndat, axis=1) - assert_almost_equal(res, tgt) - - def test_allnans(self): - # Check for FutureWarning - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - res = np.nansum([np.nan]*3, axis=None) - assert_(res == 0, 'result is not 0') - assert_(len(w) == 0, 'warning raised') - # Check scalar - res = np.nansum(np.nan) - assert_(res == 0, 'result is not 0') - assert_(len(w) == 0, 'warning raised') - # Check there is no warning for not all-nan - np.nansum([0]*3, axis=None) - assert_(len(w) == 0, 'unwanted warning raised') - - def test_empty(self): - mat = np.zeros((0, 3)) - tgt = [0]*3 - res = np.nansum(mat, axis=0) - assert_equal(res, tgt) - tgt = [] - res = np.nansum(mat, axis=1) - assert_equal(res, tgt) - tgt = 0 - res = np.nansum(mat, axis=None) - assert_equal(res, tgt) - - def test_scalar(self): - assert_(np.nansum(0.) == 0.) - - def test_matrices(self): - # Check that it works and that type and - # shape are preserved - mat = np.matrix(np.eye(3)) - res = np.nansum(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = np.nansum(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 1)) - res = np.nansum(mat) - assert_(np.isscalar(res)) - - -class TestNanFunctions_MeanVarStd(TestCase): - - nanfuncs = [np.nanmean, np.nanvar, np.nanstd] - stdfuncs = [np.mean, np.var, np.std] - - def test_mutation(self): - # Check that passed array is not modified. 
ndat = _ndat.copy() - for f in self.nanfuncs: - f(ndat) - assert_equal(ndat, _ndat) - - def test_dtype_error(self): - for f in self.nanfuncs: - for dtype in [np.bool_, np.int_, np.object]: - assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) - - def test_out_dtype_error(self): - for f in self.nanfuncs: - for dtype in [np.bool_, np.int_, np.object]: - out = np.empty(_ndat.shape[0], dtype=dtype) - assert_raises(TypeError, f, _ndat, axis=1, out=out) - - def test_keepdims(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for axis in [None, 0, 1]: - tgt = rf(mat, axis=axis, keepdims=True) - res = nf(mat, axis=axis, keepdims=True) - assert_(res.ndim == tgt.ndim) - - def test_out(self): - mat = np.eye(3) - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - resout = np.zeros(3) - tgt = rf(mat, axis=1) - res = nf(mat, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_dtype_from_dtype(self): - mat = np.eye(3) - codes = 'efdgFDG' - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for c in codes: - tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type - res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type - res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type - assert_(res is tgt) - - def test_dtype_from_char(self): - mat = np.eye(3) - codes = 'efdgFDG' - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for c in codes: - tgt = rf(mat, dtype=c, axis=1).dtype.type - res = nf(mat, dtype=c, axis=1).dtype.type - assert_(res is tgt) - # scalar case - tgt = rf(mat, dtype=c, axis=None).dtype.type - res = nf(mat, dtype=c, axis=None).dtype.type - assert_(res is tgt) - - def test_dtype_from_input(self): - codes = 'efdgFDG' - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - for c in codes: - mat = np.eye(3, dtype=c) - tgt = rf(mat, axis=1).dtype.type - res = nf(mat, axis=1).dtype.type - assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) - # scalar case - tgt = rf(mat, axis=None).dtype.type - res = nf(mat, axis=None).dtype.type - assert_(res is tgt) - - def test_ddof(self): - nanfuncs = [np.nanvar, np.nanstd] - stdfuncs = [np.var, np.std] - for nf, rf in zip(nanfuncs, stdfuncs): - for ddof in [0, 1]: - tgt = [rf(d, ddof=ddof) for d in _rdat] - res = nf(_ndat, axis=1, ddof=ddof) - assert_almost_equal(res, tgt) - - def test_ddof_too_big(self): - nanfuncs = [np.nanvar, np.nanstd] - stdfuncs = [np.var, np.std] - dsize = [len(d) for d in _rdat] - for nf, rf in zip(nanfuncs, stdfuncs): - for ddof in range(5): - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - tgt = [ddof >= d for d in dsize] - res = nf(_ndat, axis=1, ddof=ddof) - assert_equal(np.isnan(res), tgt) - if any(tgt): - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - else: - assert_(len(w) == 0) - - def test_result_values(self): - for nf, rf in zip(self.nanfuncs, self.stdfuncs): - tgt = [rf(d) for d in _rdat] - res = nf(_ndat, axis=1) - assert_almost_equal(res, tgt) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for f in self.nanfuncs: - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(mat, axis=axis)).all()) - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalar - assert_(np.isnan(f(np.nan))) - assert_(len(w) == 2) - assert_(issubclass(w[0].category, RuntimeWarning)) - -
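- # (Editor's note, not original test code: in test_allnans above the
- # recorded-warnings list w is shared between the array calls and the
- # scalar call, so the expected count grows from 1 to 2; the
- # simplefilter('always') line is what keeps the second RuntimeWarning
- # from being suppressed by the default once-per-location filter.)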
def test_empty(self): - mat = np.zeros((0, 3)) - for f in self.nanfuncs: - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(f(mat, axis=axis)).all()) - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(f(mat, axis=axis), np.zeros([])) - assert_(len(w) == 0) - - def test_scalar(self): - for f in self.nanfuncs: - assert_(f(0.) == 0.) - - def test_matrices(self): - # Check that it works and that type and - # shape are preserved - mat = np.matrix(np.eye(3)) - for f in self.nanfuncs: - res = f(mat, axis=0) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3)) - res = f(mat, axis=1) - assert_(isinstance(res, np.matrix)) - assert_(res.shape == (3, 1)) - res = f(mat) - assert_(np.isscalar(res)) - - -class TestNanFunctions_Median(TestCase): - - def test_mutation(self): - # Check that passed array is not modified. - ndat = _ndat.copy() - np.nanmedian(ndat) - assert_equal(ndat, _ndat) - - def test_keepdims(self): - mat = np.eye(3) - for axis in [None, 0, 1]: - tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) - res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) - assert_(res.ndim == tgt.ndim) - - d = np.ones((3, 5, 7, 11)) - # Randomly set some elements to NaN: - w = np.random.random((4, 200)) * np.array(d.shape)[:, None] - w = w.astype(np.intp) - d[tuple(w)] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always', RuntimeWarning) - res = np.nanmedian(d, axis=None, keepdims=True) - assert_equal(res.shape, (1, 1, 1, 1)) - res = np.nanmedian(d, axis=(0, 1), keepdims=True) - assert_equal(res.shape, (1, 1, 7, 11)) - res = np.nanmedian(d, axis=(0, 3), keepdims=True) - assert_equal(res.shape, (1, 5, 7, 1)) - res = np.nanmedian(d, axis=(1,), keepdims=True) - assert_equal(res.shape, (3, 1, 7, 11)) - res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) - assert_equal(res.shape, (1, 1, 1, 1)) - res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) - assert_equal(res.shape, (1, 1, 7, 1)) - - def test_out(self): - mat = np.random.rand(3, 3) - nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) - resout = np.zeros(3) - tgt = np.median(mat, axis=1) - res = np.nanmedian(nan_mat, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - # 0-d output: - resout = np.zeros(()) - tgt = np.median(mat, axis=None) - res = np.nanmedian(nan_mat, axis=None, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_small_large(self): - # test the small and large code paths, current cutoff 400 elements - for s in [5, 20, 51, 200, 1000]: - d = np.random.randn(4, s) - # Randomly set some elements to NaN: - w = np.random.randint(0, d.size, size=d.size // 5) - d.ravel()[w] = np.nan - d[:,0] = 1. 
# ensure at least one good value - # use normal median without nans to compare - tgt = [] - for x in d: - nonan = np.compress(~np.isnan(x), x) - tgt.append(np.median(nonan, overwrite_input=True)) - - assert_array_equal(np.nanmedian(d, axis=-1), tgt) - - def test_result_values(self): - tgt = [np.median(d) for d in _rdat] - res = np.nanmedian(_ndat, axis=1) - assert_almost_equal(res, tgt) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) - if axis is None: - assert_(len(w) == 1) - else: - assert_(len(w) == 3) - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalar - assert_(np.isnan(np.nanmedian(np.nan))) - if axis is None: - assert_(len(w) == 2) - else: - assert_(len(w) == 4) - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_empty(self): - mat = np.zeros((0, 3)) - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) - assert_(len(w) == 0) - - def test_scalar(self): - assert_(np.nanmedian(0.) == 0.) - - def test_extended_axis_invalid(self): - d = np.ones((3, 5, 7, 11)) - assert_raises(IndexError, np.nanmedian, d, axis=-5) - assert_raises(IndexError, np.nanmedian, d, axis=(0, -5)) - assert_raises(IndexError, np.nanmedian, d, axis=4) - assert_raises(IndexError, np.nanmedian, d, axis=(0, 4)) - assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) - - -class TestNanFunctions_Percentile(TestCase): - - def test_mutation(self): - # Check that passed array is not modified. 
- ndat = _ndat.copy() - np.nanpercentile(ndat, 30) - assert_equal(ndat, _ndat) - - def test_keepdims(self): - mat = np.eye(3) - for axis in [None, 0, 1]: - tgt = np.percentile(mat, 70, axis=axis, out=None, - overwrite_input=False) - res = np.nanpercentile(mat, 70, axis=axis, out=None, - overwrite_input=False) - assert_(res.ndim == tgt.ndim) - - d = np.ones((3, 5, 7, 11)) - # Randomly set some elements to NaN: - w = np.random.random((4, 200)) * np.array(d.shape)[:, None] - w = w.astype(np.intp) - d[tuple(w)] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always', RuntimeWarning) - res = np.nanpercentile(d, 90, axis=None, keepdims=True) - assert_equal(res.shape, (1, 1, 1, 1)) - res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) - assert_equal(res.shape, (1, 1, 7, 11)) - res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) - assert_equal(res.shape, (1, 5, 7, 1)) - res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) - assert_equal(res.shape, (3, 1, 7, 11)) - res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) - assert_equal(res.shape, (1, 1, 1, 1)) - res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) - assert_equal(res.shape, (1, 1, 7, 1)) - - def test_out(self): - mat = np.random.rand(3, 3) - nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) - resout = np.zeros(3) - tgt = np.percentile(mat, 42, axis=1) - res = np.nanpercentile(nan_mat, 42, axis=1, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - # 0-d output: - resout = np.zeros(()) - tgt = np.percentile(mat, 42, axis=None) - res = np.nanpercentile(nan_mat, 42, axis=None, out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout) - assert_almost_equal(res, resout) - assert_almost_equal(res, tgt) - - def test_result_values(self): - tgt = [np.percentile(d, 28) for d in _rdat] - res = np.nanpercentile(_ndat, 28, axis=1) - assert_almost_equal(res, tgt) - tgt = [np.percentile(d, (28, 98)) for d in _rdat] - res = np.nanpercentile(_ndat, (28, 98), axis=1) - assert_almost_equal(res, tgt) - - def test_allnans(self): - mat = np.array([np.nan]*9).reshape(3, 3) - for axis in [None, 0, 1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all()) - if axis is None: - assert_(len(w) == 1) - else: - assert_(len(w) == 3) - assert_(issubclass(w[0].category, RuntimeWarning)) - # Check scalar - assert_(np.isnan(np.nanpercentile(np.nan, 60))) - if axis is None: - assert_(len(w) == 2) - else: - assert_(len(w) == 4) - assert_(issubclass(w[0].category, RuntimeWarning)) - - def test_empty(self): - mat = np.zeros((0, 3)) - for axis in [0, None]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) - assert_(len(w) == 1) - assert_(issubclass(w[0].category, RuntimeWarning)) - for axis in [1]: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) - assert_(len(w) == 0) - - def test_scalar(self): - assert_(np.nanpercentile(0., 100) == 0.) 
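- # (Editor's illustrative note, not original test code: for 0-d input
- # the percentile of a single value is that value for every q, e.g.
- # np.nanpercentile(3.5, 42) == 3.5 under any interpolation mode, which
- # is the identity the scalar test above relies on.)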
- - def test_extended_axis_invalid(self): - d = np.ones((3, 5, 7, 11)) - assert_raises(IndexError, np.nanpercentile, d, q=5, axis=-5) - assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, -5)) - assert_raises(IndexError, np.nanpercentile, d, q=5, axis=4) - assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4)) - assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_polynomial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_polynomial.py deleted file mode 100644 index 02faa02839230..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_polynomial.py +++ /dev/null @@ -1,177 +0,0 @@ -from __future__ import division, absolute_import, print_function - -''' ->>> p = np.poly1d([1.,2,3]) ->>> p -poly1d([ 1., 2., 3.]) ->>> print(p) - 2 -1 x + 2 x + 3 ->>> q = np.poly1d([3.,2,1]) ->>> q -poly1d([ 3., 2., 1.]) ->>> print(q) - 2 -3 x + 2 x + 1 ->>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j])) - 3 2 -(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j) ->>> print(np.poly1d([-3, -2, -1])) - 2 --3 x - 2 x - 1 - ->>> p(0) -3.0 ->>> p(5) -38.0 ->>> q(0) -1.0 ->>> q(5) -86.0 - ->>> p * q -poly1d([ 3., 8., 14., 8., 3.]) ->>> p / q -(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667])) ->>> p + q -poly1d([ 4., 4., 4.]) ->>> p - q -poly1d([-2., 0., 2.]) ->>> p ** 4 -poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.]) - ->>> p(q) -poly1d([ 9., 12., 16., 8., 6.]) ->>> q(p) -poly1d([ 3., 12., 32., 40., 34.]) - ->>> np.asarray(p) -array([ 1., 2., 3.]) ->>> len(p) -2 - ->>> p[0], p[1], p[2], p[3] -(3.0, 2.0, 1.0, 0) - ->>> p.integ() -poly1d([ 0.33333333, 1. , 3. , 0. ]) ->>> p.integ(1) -poly1d([ 0.33333333, 1. , 3. , 0. ]) ->>> p.integ(5) -poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. , - 0. , 0. , 0. 
]) ->>> p.deriv() -poly1d([ 2., 2.]) ->>> p.deriv(2) -poly1d([ 2.]) - ->>> q = np.poly1d([1.,2,3], variable='y') ->>> print(q) - 2 -1 y + 2 y + 3 ->>> q = np.poly1d([1.,2,3], variable='lambda') ->>> print(q) - 2 -1 lambda + 2 lambda + 3 - ->>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1])) -(poly1d([ 1., -1.]), poly1d([ 0.])) - -''' -import numpy as np -from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, - assert_almost_equal, rundocs - ) - - -class TestDocs(TestCase): - def test_doctests(self): - return rundocs() - - def test_roots(self): - assert_array_equal(np.roots([1, 0, 0]), [0, 0]) - - def test_str_leading_zeros(self): - p = np.poly1d([4, 3, 2, 1]) - p[3] = 0 - assert_equal(str(p), - " 2\n" - "3 x + 2 x + 1") - - p = np.poly1d([1, 2]) - p[0] = 0 - p[1] = 0 - assert_equal(str(p), " \n0") - - def test_polyfit(self): - c = np.array([3., 2., 1.]) - x = np.linspace(0, 2, 7) - y = np.polyval(c, x) - err = [1, -1, 1, -1, 1, -1, 1] - weights = np.arange(8, 1, -1)**2/7.0 - - # check 1D case - m, cov = np.polyfit(x, y+err, 2, cov=True) - est = [3.8571, 0.2857, 1.619] - assert_almost_equal(est, m, decimal=4) - val0 = [[2.9388, -5.8776, 1.6327], - [-5.8776, 12.7347, -4.2449], - [1.6327, -4.2449, 2.3220]] - assert_almost_equal(val0, cov, decimal=4) - - m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) - assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) - val = [[8.7929, -10.0103, 0.9756], - [-10.0103, 13.6134, -1.8178], - [0.9756, -1.8178, 0.6674]] - assert_almost_equal(val, cov2, decimal=4) - - # check 2D (n,1) case - y = y[:, np.newaxis] - c = c[:, np.newaxis] - assert_almost_equal(c, np.polyfit(x, y, 2)) - # check 2D (n,2) case - yy = np.concatenate((y, y), axis=1) - cc = np.concatenate((c, c), axis=1) - assert_almost_equal(cc, np.polyfit(x, yy, 2)) - - m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) - assert_almost_equal(est, m[:, 0], decimal=4) - assert_almost_equal(est, m[:, 1], decimal=4) - assert_almost_equal(val0, cov[:, :, 0], decimal=4) - assert_almost_equal(val0, cov[:, :, 1], decimal=4) - - def test_objects(self): - from decimal import Decimal - p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) - p2 = p * Decimal('1.333333333333333') - assert_(p2[1] == Decimal("3.9999999999999990")) - p2 = p.deriv() - assert_(p2[1] == Decimal('8.0')) - p2 = p.integ() - assert_(p2[3] == Decimal("1.333333333333333333333333333")) - assert_(p2[2] == Decimal('1.5')) - assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) - - def test_complex(self): - p = np.poly1d([3j, 2j, 1j]) - p2 = p.integ() - assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) - p2 = p.deriv() - assert_((p2.coeffs == [6j, 2j]).all()) - - def test_integ_coeffs(self): - p = np.poly1d([3, 2, 1]) - p2 = p.integ(3, k=[9, 7, 6]) - assert_( - (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) - - def test_zero_dims(self): - try: - np.poly(np.zeros((0, 0))) - except ValueError: - pass - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_recfunctions.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_recfunctions.py deleted file mode 100644 index 51a2077eb0765..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_recfunctions.py +++ /dev/null @@ -1,705 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.ma as ma -from numpy.ma.mrecords import MaskedRecords -from 
numpy.ma.testutils import ( - run_module_suite, TestCase, assert_, assert_equal - ) -from numpy.lib.recfunctions import ( - drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, - find_duplicates, merge_arrays, append_fields, stack_arrays, join_by - ) -get_names = np.lib.recfunctions.get_names -get_names_flat = np.lib.recfunctions.get_names_flat -zip_descr = np.lib.recfunctions.zip_descr - - -class TestRecFunctions(TestCase): - # Misc tests - - def setUp(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array([('A', 1.), ('B', 2.)], - dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_zip_descr(self): - # Test zip_descr - (w, x, y, z) = self.data - - # Std array - test = zip_descr((x, x), flatten=True) - assert_equal(test, - np.dtype([('', int), ('', int)])) - test = zip_descr((x, x), flatten=False) - assert_equal(test, - np.dtype([('', int), ('', int)])) - - # Std & flexible-dtype - test = zip_descr((x, z), flatten=True) - assert_equal(test, - np.dtype([('', int), ('A', '|S3'), ('B', float)])) - test = zip_descr((x, z), flatten=False) - assert_equal(test, - np.dtype([('', int), - ('', [('A', '|S3'), ('B', float)])])) - - # Standard & nested dtype - test = zip_descr((x, w), flatten=True) - assert_equal(test, - np.dtype([('', int), - ('a', int), - ('ba', float), ('bb', int)])) - test = zip_descr((x, w), flatten=False) - assert_equal(test, - np.dtype([('', int), - ('', [('a', int), - ('b', [('ba', float), ('bb', int)])])])) - - def test_drop_fields(self): - # Test drop_fields - a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - - # A basic field - test = drop_fields(a, 'a') - control = np.array([((2, 3.0),), ((5, 6.0),)], - dtype=[('b', [('ba', float), ('bb', int)])]) - assert_equal(test, control) - - # Another basic field (but nesting two fields) - test = drop_fields(a, 'b') - control = np.array([(1,), (4,)], dtype=[('a', int)]) - assert_equal(test, control) - - # A nested sub-field - test = drop_fields(a, ['ba', ]) - control = np.array([(1, (3.0,)), (4, (6.0,))], - dtype=[('a', int), ('b', [('bb', int)])]) - assert_equal(test, control) - - # All the nested sub-field from a field: zap that field - test = drop_fields(a, ['ba', 'bb']) - control = np.array([(1,), (4,)], dtype=[('a', int)]) - assert_equal(test, control) - - test = drop_fields(a, ['a', 'b']) - assert_(test is None) - - def test_rename_fields(self): - # Test rename fields - a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], - dtype=[('a', int), - ('b', [('ba', float), ('bb', (float, 2))])]) - test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) - newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] - control = a.view(newdtype) - assert_equal(test.dtype, newdtype) - assert_equal(test, control) - - def test_get_names(self): - # Test get_names - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_names(ndtype) - assert_equal(test, ('A', 'B')) - - ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) - test = get_names(ndtype) - assert_equal(test, ('a', ('b', ('ba', 'bb')))) - - def test_get_names_flat(self): - # Test get_names_flat - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_names_flat(ndtype) - assert_equal(test, ('A', 'B')) - - ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) - test = get_names_flat(ndtype) - assert_equal(test, 
('a', 'b', 'ba', 'bb')) - - def test_get_fieldstructure(self): - # Test get_fieldstructure - - # No nested fields - ndtype = np.dtype([('A', '|S3'), ('B', float)]) - test = get_fieldstructure(ndtype) - assert_equal(test, {'A': [], 'B': []}) - - # One 1-nested field - ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - test = get_fieldstructure(ndtype) - assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) - - # One 2-nested fields - ndtype = np.dtype([('A', int), - ('B', [('BA', int), - ('BB', [('BBA', int), ('BBB', int)])])]) - test = get_fieldstructure(ndtype) - control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], - 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} - assert_equal(test, control) - - def test_find_duplicates(self): - # Test find_duplicates - a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), - (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], - mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), - (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], - dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) - - test = find_duplicates(a, ignoremask=False, return_index=True) - control = [0, 2] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, key='A', return_index=True) - control = [0, 1, 2, 3, 5] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, key='B', return_index=True) - control = [0, 1, 2, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, key='BA', return_index=True) - control = [0, 1, 2, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, key='BB', return_index=True) - control = [0, 1, 2, 3, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - def test_find_duplicates_ignoremask(self): - # Test the ignoremask option of find_duplicates - ndtype = [('a', int)] - a = ma.array([1, 1, 1, 2, 2, 3, 3], - mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - test = find_duplicates(a, ignoremask=True, return_index=True) - control = [0, 1, 3, 4] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - test = find_duplicates(a, ignoremask=False, return_index=True) - control = [0, 1, 2, 3, 4, 6] - assert_equal(sorted(test[-1]), control) - assert_equal(test[0], a[test[-1]]) - - -class TestRecursiveFillFields(TestCase): - # Test recursive_fill_fields. 
- def test_simple_flexible(self): - # Test recursive_fill_fields on flexible-array - a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) - b = np.zeros((3,), dtype=a.dtype) - test = recursive_fill_fields(a, b) - control = np.array([(1, 10.), (2, 20.), (0, 0.)], - dtype=[('A', int), ('B', float)]) - assert_equal(test, control) - - def test_masked_flexible(self): - # Test recursive_fill_fields on masked flexible-array - a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], - dtype=[('A', int), ('B', float)]) - b = ma.zeros((3,), dtype=a.dtype) - test = recursive_fill_fields(a, b) - control = ma.array([(1, 10.), (2, 20.), (0, 0.)], - mask=[(0, 1), (1, 0), (0, 0)], - dtype=[('A', int), ('B', float)]) - assert_equal(test, control) - - -class TestMergeArrays(TestCase): - # Test merge_arrays - - def setUp(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array( - [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array( - [(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_solo(self): - # Test merge_arrays on a single array. - (_, x, _, z) = self.data - - test = merge_arrays(x) - control = np.array([(1,), (2,)], dtype=[('f0', int)]) - assert_equal(test, control) - test = merge_arrays((x,)) - assert_equal(test, control) - - test = merge_arrays(z, flatten=False) - assert_equal(test, z) - test = merge_arrays(z, flatten=True) - assert_equal(test, z) - - def test_solo_w_flatten(self): - # Test merge_arrays on a single array w & w/o flattening - w = self.data[0] - test = merge_arrays(w, flatten=False) - assert_equal(test, w) - - test = merge_arrays(w, flatten=True) - control = np.array([(1, 2, 3.0), (4, 5, 6.0)], - dtype=[('a', int), ('ba', float), ('bb', int)]) - assert_equal(test, control) - - def test_standard(self): - # Test standard & standard - # Test merge arrays - (_, x, y, _) = self.data - test = merge_arrays((x, y), usemask=False) - control = np.array([(1, 10), (2, 20), (-1, 30)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - - test = merge_arrays((x, y), usemask=True) - control = ma.array([(1, 10), (2, 20), (-1, 30)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - def test_flatten(self): - # Test standard & flexible - (_, x, _, z) = self.data - test = merge_arrays((x, z), flatten=True) - control = np.array([(1, 'A', 1.), (2, 'B', 2.)], - dtype=[('f0', int), ('A', '|S3'), ('B', float)]) - assert_equal(test, control) - - test = merge_arrays((x, z), flatten=False) - control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], - dtype=[('f0', int), - ('f1', [('A', '|S3'), ('B', float)])]) - assert_equal(test, control) - - def test_flatten_wflexible(self): - # Test flatten standard & nested - (w, x, _, _) = self.data - test = merge_arrays((x, w), flatten=True) - control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], - dtype=[('f0', int), - ('a', int), ('ba', float), ('bb', int)]) - assert_equal(test, control) - - test = merge_arrays((x, w), flatten=False) - controldtype = [('f0', int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int)])])] - control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))], - dtype=controldtype) - assert_equal(test, control) - - def test_wmasked_arrays(self): - # Test merge_arrays masked arrays - (_, x, _, _) = self.data - mx = ma.array([1, 2, 3], mask=[1, 0, 0]) - test = merge_arrays((x, mx), usemask=True) - control = 
ma.array([(1, 1), (2, 2), (-1, 3)], - mask=[(0, 1), (0, 0), (1, 0)], - dtype=[('f0', int), ('f1', int)]) - assert_equal(test, control) - test = merge_arrays((x, mx), usemask=True, asrecarray=True) - assert_equal(test, control) - assert_(isinstance(test, MaskedRecords)) - - def test_w_singlefield(self): - # Test single field - test = merge_arrays((np.array([1, 2]).view([('a', int)]), - np.array([10., 20., 30.])),) - control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('a', int), ('f1', float)]) - assert_equal(test, control) - - def test_w_shorter_flex(self): - # Test merge_arrays w/ a shorter flexndarray. - z = self.data[-1] - - # Fixme, this test looks incomplete and broken - #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) - #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], - # dtype=[('A', '|S3'), ('B', float), ('C', int)]) - #assert_equal(test, control) - - # Hack to avoid pyflakes warnings about unused variables - merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) - np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], - dtype=[('A', '|S3'), ('B', float), ('C', int)]) - - def test_singlerecord(self): - (_, x, y, z) = self.data - test = merge_arrays((x[0], y[0], z[0]), usemask=False) - control = np.array([(1, 10, ('A', 1))], - dtype=[('f0', int), - ('f1', int), - ('f2', [('A', '|S3'), ('B', float)])]) - assert_equal(test, control) - - -class TestAppendFields(TestCase): - # Test append_fields - - def setUp(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array( - [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_append_single(self): - # Test simple case - (_, x, _, _) = self.data - test = append_fields(x, 'A', data=[10, 20, 30]) - control = ma.array([(1, 10), (2, 20), (-1, 30)], - mask=[(0, 0), (0, 0), (1, 0)], - dtype=[('f0', int), ('A', int)],) - assert_equal(test, control) - - def test_append_double(self): - # Test simple case - (_, x, _, _) = self.data - test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) - control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], - mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], - dtype=[('f0', int), ('A', int), ('B', int)],) - assert_equal(test, control) - - def test_append_on_flex(self): - # Test append_fields on flexible type arrays - z = self.data[-1] - test = append_fields(z, 'C', data=[10, 20, 30]) - control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], - mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('C', int)],) - assert_equal(test, control) - - def test_append_on_nested(self): - # Test append_fields on nested fields - w = self.data[0] - test = append_fields(w, 'C', data=[10, 20, 30]) - control = ma.array([(1, (2, 3.0), 10), - (4, (5, 6.0), 20), - (-1, (-1, -1.), 30)], - mask=[( - 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], - dtype=[('a', int), - ('b', [('ba', float), ('bb', int)]), - ('C', int)],) - assert_equal(test, control) - - -class TestStackArrays(TestCase): - # Test stack_arrays - def setUp(self): - x = np.array([1, 2, ]) - y = np.array([10, 20, 30]) - z = np.array( - [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) - w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_solo(self): - # Test stack_arrays on 
single arrays - (_, x, _, _) = self.data - test = stack_arrays((x,)) - assert_equal(test, x) - self.assertTrue(test is x) - - test = stack_arrays(x) - assert_equal(test, x) - self.assertTrue(test is x) - - def test_unnamed_fields(self): - # Tests combinations of arrays w/o named fields - (_, x, y, _) = self.data - - test = stack_arrays((x, x), usemask=False) - control = np.array([1, 2, 1, 2]) - assert_equal(test, control) - - test = stack_arrays((x, y), usemask=False) - control = np.array([1, 2, 10, 20, 30]) - assert_equal(test, control) - - test = stack_arrays((y, x), usemask=False) - control = np.array([10, 20, 30, 1, 2]) - assert_equal(test, control) - - def test_unnamed_and_named_fields(self): - # Test combination of arrays w/ & w/o named fields - (_, x, _, z) = self.data - - test = stack_arrays((x, z)) - control = ma.array([(1, -1, -1), (2, -1, -1), - (-1, 'A', 1), (-1, 'B', 2)], - mask=[(0, 1, 1), (0, 1, 1), - (1, 0, 0), (1, 0, 0)], - dtype=[('f0', int), ('A', '|S3'), ('B', float)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - test = stack_arrays((z, x)) - control = ma.array([('A', 1, -1), ('B', 2, -1), - (-1, -1, 1), (-1, -1, 2), ], - mask=[(0, 0, 1), (0, 0, 1), - (1, 1, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('f2', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - test = stack_arrays((z, z, x)) - control = ma.array([('A', 1, -1), ('B', 2, -1), - ('A', 1, -1), ('B', 2, -1), - (-1, -1, 1), (-1, -1, 2), ], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 1), (0, 0, 1), - (1, 1, 0), (1, 1, 0)], - dtype=[('A', '|S3'), ('B', float), ('f2', int)]) - assert_equal(test, control) - - def test_matching_named_fields(self): - # Test combination of arrays w/ matching field names - (_, x, _, z) = self.data - zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)]) - test = stack_arrays((z, zz)) - control = ma.array([('A', 1, -1), ('B', 2, -1), - ( - 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 0), (0, 0, 0), (0, 0, 0)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - test = stack_arrays((z, zz, x)) - ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] - control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), - ('a', 10., 100., -1), ('b', 20., 200., -1), - ('c', 30., 300., -1), - (-1, -1, -1, 1), (-1, -1, -1, 2)], - dtype=ndtype, - mask=[(0, 0, 1, 1), (0, 0, 1, 1), - (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), - (1, 1, 1, 0), (1, 1, 1, 0)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - def test_defaults(self): - # Test defaults: no exception raised if keys of defaults are not fields. 
- (_, _, _, z) = self.data - zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)]) - defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} - test = stack_arrays((z, zz), defaults=defaults) - control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), - ( - 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - dtype=[('A', '|S3'), ('B', float), ('C', float)], - mask=[(0, 0, 1), (0, 0, 1), - (0, 0, 0), (0, 0, 0), (0, 0, 0)]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_autoconversion(self): - # Tests autoconversion - adtype = [('A', int), ('B', bool), ('C', float)] - a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) - bdtype = [('A', int), ('B', float), ('C', float)] - b = ma.array([(4, 5, 6)], dtype=bdtype) - control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], - dtype=bdtype) - test = stack_arrays((a, b), autoconvert=True) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - try: - test = stack_arrays((a, b), autoconvert=False) - except TypeError: - pass - else: - raise AssertionError - - def test_checktitles(self): - # Test using titles in the field names - adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] - a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) - bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] - b = ma.array([(4, 5, 6)], dtype=bdtype) - test = stack_arrays((a, b)) - control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], - dtype=bdtype) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - - -class TestJoinBy(TestCase): - def setUp(self): - self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), - np.arange(100, 110))), - dtype=[('a', int), ('b', int), ('c', int)]) - self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), - np.arange(100, 110))), - dtype=[('a', int), ('b', int), ('d', int)]) - - def test_inner_join(self): - # Basic test of join_by - a, b = self.a, self.b - - test = join_by('a', a, b, jointype='inner') - control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), - (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), - (9, 59, 69, 109, 104)], - dtype=[('a', int), ('b1', int), ('b2', int), - ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_join(self): - a, b = self.a, self.b - - # Fixme, this test is broken - #test = join_by(('a', 'b'), a, b) - #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), - # (7, 57, 107, 102), (8, 58, 108, 103), - # (9, 59, 109, 104)], - # dtype=[('a', int), ('b', int), - # ('c', int), ('d', int)]) - #assert_equal(test, control) - - # Hack to avoid pyflakes unused variable warnings - join_by(('a', 'b'), a, b) - np.array([(5, 55, 105, 100), (6, 56, 106, 101), - (7, 57, 107, 102), (8, 58, 108, 103), - (9, 59, 109, 104)], - dtype=[('a', int), ('b', int), - ('c', int), ('d', int)]) - - def test_outer_join(self): - a, b = self.a, self.b - - test = join_by(('a', 'b'), a, b, 'outer') - control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), - (2, 52, 102, -1), (3, 53, 103, -1), - (4, 54, 104, -1), (5, 55, 105, -1), - (5, 65, -1, 100), (6, 56, 106, -1), - (6, 66, -1, 101), (7, 57, 107, -1), - (7, 67, -1, 102), (8, 58, 108, -1), - (8, 68, -1, 103), (9, 59, 109, -1), - (9, 69, -1, 104), (10, 70, -1, 105), - (11, 71, -1, 106), (12, 72, -1, 107), - (13, 73, -1, 108), (14, 74, -1, 109)], - mask=[(0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 
1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 0, 1), - (0, 0, 1, 0), (0, 0, 1, 0), - (0, 0, 1, 0), (0, 0, 1, 0), - (0, 0, 1, 0), (0, 0, 1, 0)], - dtype=[('a', int), ('b', int), - ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_leftouter_join(self): - a, b = self.a, self.b - - test = join_by(('a', 'b'), a, b, 'leftouter') - control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), - (2, 52, 102, -1), (3, 53, 103, -1), - (4, 54, 104, -1), (5, 55, 105, -1), - (6, 56, 106, -1), (7, 57, 107, -1), - (8, 58, 108, -1), (9, 59, 109, -1)], - mask=[(0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1), - (0, 0, 0, 1), (0, 0, 0, 1)], - dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) - assert_equal(test, control) - - -class TestJoinBy2(TestCase): - @classmethod - def setUp(cls): - cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60), - np.arange(100, 110))), - dtype=[('a', int), ('b', int), ('c', int)]) - cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75), - np.arange(100, 110))), - dtype=[('a', int), ('b', int), ('d', int)]) - - def test_no_r1postfix(self): - # Basic test of join_by no_r1postfix - a, b = self.a, self.b - - test = join_by( - 'a', a, b, r1postfix='', r2postfix='2', jointype='inner') - control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101), - (2, 52, 67, 102, 102), (3, 53, 68, 103, 103), - (4, 54, 69, 104, 104), (5, 55, 70, 105, 105), - (6, 56, 71, 106, 106), (7, 57, 72, 107, 107), - (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)], - dtype=[('a', int), ('b', int), ('b2', int), - ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_no_postfix(self): - self.assertRaises(ValueError, join_by, 'a', self.a, self.b, - r1postfix='', r2postfix='') - - def test_no_r2postfix(self): - # Basic test of join_by no_r2postfix - a, b = self.a, self.b - - test = join_by( - 'a', a, b, r1postfix='1', r2postfix='', jointype='inner') - control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101), - (2, 52, 67, 102, 102), (3, 53, 68, 103, 103), - (4, 54, 69, 104, 104), (5, 55, 70, 105, 105), - (6, 56, 71, 106, 106), (7, 57, 72, 107, 107), - (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)], - dtype=[('a', int), ('b1', int), ('b', int), - ('c', int), ('d', int)]) - assert_equal(test, control) - - def test_two_keys_two_vars(self): - a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), - np.arange(50, 60), np.arange(10, 20))), - dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) - - b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), - np.arange(65, 75), np.arange(0, 10))), - dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) - - control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1), - (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3), - (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5), - (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7), - (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)], - dtype=[('k', int), ('a', int), ('b1', int), - ('b2', int), ('c1', int), ('c2', int)]) - test = join_by( - ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner') - assert_equal(test.dtype, control.dtype) - assert_equal(test, control) - - -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_regression.py 
b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_regression.py deleted file mode 100644 index 00fa3f195a5d5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_regression.py +++ /dev/null @@ -1,265 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys - -import numpy as np -from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, - assert_array_almost_equal, assert_raises - ) -from numpy.testing.utils import _assert_valid_refcount -from numpy.compat import unicode - -rlevel = 1 - - -class TestRegression(TestCase): - def test_poly1d(self, level=rlevel): - # Ticket #28 - assert_equal(np.poly1d([1]) - np.poly1d([1, 0]), - np.poly1d([-1, 1])) - - def test_cov_parameters(self, level=rlevel): - # Ticket #91 - x = np.random.random((3, 3)) - y = x.copy() - np.cov(x, rowvar=1) - np.cov(y, rowvar=0) - assert_array_equal(x, y) - - def test_mem_digitize(self, level=rlevel): - # Ticket #95 - for i in range(100): - np.digitize([1, 2, 3, 4], [1, 3]) - np.digitize([0, 1, 2, 3, 4], [1, 3]) - - def test_unique_zero_sized(self, level=rlevel): - # Ticket #205 - assert_array_equal([], np.unique(np.array([]))) - - def test_mem_vectorise(self, level=rlevel): - # Ticket #325 - vt = np.vectorize(lambda *args: args) - vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2))) - vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, - 1, 2)), np.zeros((2, 2))) - - def test_mgrid_single_element(self, level=rlevel): - # Ticket #339 - assert_array_equal(np.mgrid[0:0:1j], [0]) - assert_array_equal(np.mgrid[0:0], []) - - def test_refcount_vectorize(self, level=rlevel): - # Ticket #378 - def p(x, y): - return 123 - v = np.vectorize(p) - _assert_valid_refcount(v) - - def test_poly1d_nan_roots(self, level=rlevel): - # Ticket #396 - p = np.poly1d([np.nan, np.nan, 1], r=0) - self.assertRaises(np.linalg.LinAlgError, getattr, p, "r") - - def test_mem_polymul(self, level=rlevel): - # Ticket #448 - np.polymul([], [1.]) - - def test_mem_string_concat(self, level=rlevel): - # Ticket #469 - x = np.array([]) - np.append(x, 'asdasd\tasdasd') - - def test_poly_div(self, level=rlevel): - # Ticket #553 - u = np.poly1d([1, 2, 3]) - v = np.poly1d([1, 2, 3, 4, 5]) - q, r = np.polydiv(u, v) - assert_equal(q*v + r, u) - - def test_poly_eq(self, level=rlevel): - # Ticket #554 - x = np.poly1d([1, 2, 3]) - y = np.poly1d([3, 4]) - assert_(x != y) - assert_(x == x) - - def test_mem_insert(self, level=rlevel): - # Ticket #572 - np.lib.place(1, 1, 1) - - def test_polyfit_build(self): - # Ticket #628 - ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01, - 9.95368241e+00, -3.14526520e+02] - x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129, - 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, - 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 172, 173, 174, 175, 176] - y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0, - 6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0, - 13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0, - 7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0, - 6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0, - 6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0, - 8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 
7.0] - tested = np.polyfit(x, y, 4) - assert_array_almost_equal(ref, tested) - - def test_polydiv_type(self): - # Make polydiv work for complex types - msg = "Wrong type, should be complex" - x = np.ones(3, dtype=np.complex) - q, r = np.polydiv(x, x) - assert_(q.dtype == np.complex, msg) - msg = "Wrong type, should be float" - x = np.ones(3, dtype=np.int) - q, r = np.polydiv(x, x) - assert_(q.dtype == np.float, msg) - - def test_histogramdd_too_many_bins(self): - # Ticket 928. - assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10) - - def test_polyint_type(self): - # Ticket #944 - msg = "Wrong type, should be complex" - x = np.ones(3, dtype=np.complex) - assert_(np.polyint(x).dtype == np.complex, msg) - msg = "Wrong type, should be float" - x = np.ones(3, dtype=np.int) - assert_(np.polyint(x).dtype == np.float, msg) - - def test_ndenumerate_crash(self): - # Ticket 1140 - # Shouldn't crash: - list(np.ndenumerate(np.array([[]]))) - - def test_asfarray_none(self, level=rlevel): - # Test for changeset r5065 - assert_array_equal(np.array([np.nan]), np.asfarray([None])) - - def test_large_fancy_indexing(self, level=rlevel): - # Large enough to fail on 64-bit. - nbits = np.dtype(np.intp).itemsize * 8 - thesize = int((2**nbits)**(1.0/5.0)+1) - - def dp(): - n = 3 - a = np.ones((n,)*5) - i = np.random.randint(0, n, size=thesize) - a[np.ix_(i, i, i, i, i)] = 0 - - def dp2(): - n = 3 - a = np.ones((n,)*5) - i = np.random.randint(0, n, size=thesize) - a[np.ix_(i, i, i, i, i)] - - self.assertRaises(ValueError, dp) - self.assertRaises(ValueError, dp2) - - def test_void_coercion(self, level=rlevel): - dt = np.dtype([('a', 'f4'), ('b', 'i4')]) - x = np.zeros((1,), dt) - assert_(np.r_[x, x].dtype == dt) - - def test_who_with_0dim_array(self, level=rlevel): - # ticket #1243 - import os - import sys - - oldstdout = sys.stdout - sys.stdout = open(os.devnull, 'w') - try: - try: - np.who({'foo': np.array(1)}) - except: - raise AssertionError("ticket #1243") - finally: - sys.stdout.close() - sys.stdout = oldstdout - - def test_include_dirs(self): - # As a sanity check, just test that get_include - # includes something reasonable. Somewhat - # related to ticket #1405. 
- include_dirs = [np.get_include()] - for path in include_dirs: - assert_(isinstance(path, (str, unicode))) - assert_(path != '') - - def test_polyder_return_type(self): - # Ticket #1249 - assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d)) - assert_(isinstance(np.polyder([1], 0), np.ndarray)) - assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d)) - assert_(isinstance(np.polyder([1], 1), np.ndarray)) - - def test_append_fields_dtype_list(self): - # Ticket #1676 - from numpy.lib.recfunctions import append_fields - - base = np.array([1, 2, 3], dtype=np.int32) - names = ['a', 'b', 'c'] - data = np.eye(3).astype(np.int32) - dlist = [np.float64, np.int32, np.int32] - try: - append_fields(base, names, data, dlist) - except: - raise AssertionError() - - def test_loadtxt_fields_subarrays(self): - # For ticket #1936 - if sys.version_info[0] >= 3: - from io import StringIO - else: - from StringIO import StringIO - - dt = [("a", 'u1', 2), ("b", 'u1', 2)] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) - - dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt)) - - dt = [("a", 'u1', (2, 2))] - x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt)) - - dt = [("a", 'u1', (2, 3, 2))] - x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt) - data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)] - assert_equal(x, np.array(data, dtype=dt)) - - def test_nansum_with_boolean(self): - # gh-2978 - a = np.zeros(2, dtype=np.bool) - try: - np.nansum(a) - except: - raise AssertionError() - - def test_py3_compat(self): - # gh-2561 - # Test if the oldstyle class test is bypassed in python3 - class C(): - """Old-style class in python2, normal class in python3""" - pass - - out = open(os.devnull, 'w') - try: - np.info(C(), output=out) - except AttributeError: - raise AssertionError() - finally: - out.close() - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_shape_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_shape_base.py deleted file mode 100644 index 23f3edfbe2dca..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_shape_base.py +++ /dev/null @@ -1,368 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.lib.shape_base import ( - apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit, - vsplit, dstack, kron, tile - ) -from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, - assert_raises, assert_warns - ) - - -class TestApplyAlongAxis(TestCase): - def test_simple(self): - a = np.ones((20, 10), 'd') - assert_array_equal( - apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) - - def test_simple101(self, level=11): - a = np.ones((10, 101), 'd') - assert_array_equal( - apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) - - def test_3d(self): - a = np.arange(27).reshape((3, 3, 3)) - assert_array_equal(apply_along_axis(np.sum, 0, a), - [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) - - -class TestApplyOverAxes(TestCase): - def test_simple(self): - a = np.arange(24).reshape(2, 3, 4) - aoa_a = apply_over_axes(np.sum, a, [0, 2]) - assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) - - -class TestArraySplit(TestCase): - def 
test_integer_0_split(self): - a = np.arange(10) - assert_raises(ValueError, array_split, a, 0) - - def test_integer_split(self): - a = np.arange(10) - res = array_split(a, 1) - desired = [np.arange(10)] - compare_results(res, desired) - - res = array_split(a, 2) - desired = [np.arange(5), np.arange(5, 10)] - compare_results(res, desired) - - res = array_split(a, 3) - desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] - compare_results(res, desired) - - res = array_split(a, 4) - desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), - np.arange(8, 10)] - compare_results(res, desired) - - res = array_split(a, 5) - desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), - np.arange(6, 8), np.arange(8, 10)] - compare_results(res, desired) - - res = array_split(a, 6) - desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), - np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 7) - desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), - np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), - np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 8) - desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), - np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), - np.arange(8, 9), np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 9) - desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), - np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), - np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 10) - desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), - np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), - np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), - np.arange(9, 10)] - compare_results(res, desired) - - res = array_split(a, 11) - desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), - np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), - np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), - np.arange(9, 10), np.array([])] - compare_results(res, desired) - - def test_integer_split_2D_rows(self): - a = np.array([np.arange(10), np.arange(10)]) - res = assert_warns(FutureWarning, array_split, a, 3, axis=0) - - # After removing the FutureWarning, the last should be zeros((0, 10)) - desired = [np.array([np.arange(10)]), np.array([np.arange(10)]), - np.array([])] - compare_results(res, desired) - assert_(a.dtype.type is res[-1].dtype.type) - - def test_integer_split_2D_cols(self): - a = np.array([np.arange(10), np.arange(10)]) - res = array_split(a, 3, axis=-1) - desired = [np.array([np.arange(4), np.arange(4)]), - np.array([np.arange(4, 7), np.arange(4, 7)]), - np.array([np.arange(7, 10), np.arange(7, 10)])] - compare_results(res, desired) - - def test_integer_split_2D_default(self): - """ This will fail if we change default axis - """ - a = np.array([np.arange(10), np.arange(10)]) - res = assert_warns(FutureWarning, array_split, a, 3) - - # After removing the FutureWarning, the last should be zeros((0, 10)) - desired = [np.array([np.arange(10)]), np.array([np.arange(10)]), - np.array([])] - compare_results(res, desired) - assert_(a.dtype.type is res[-1].dtype.type) - # perhaps should check higher dimensions - - def test_index_split_simple(self): - a = np.arange(10) - indices = [1, 5, 7] - res = array_split(a, indices, axis=-1) - desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), - np.arange(7, 10)] - compare_results(res, desired) - - def test_index_split_low_bound(self): - a = np.arange(10) - 
indices = [0, 5, 7] - res = array_split(a, indices, axis=-1) - desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), - np.arange(7, 10)] - compare_results(res, desired) - - def test_index_split_high_bound(self): - a = np.arange(10) - indices = [0, 5, 7, 10, 12] - res = array_split(a, indices, axis=-1) - desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), - np.arange(7, 10), np.array([]), np.array([])] - compare_results(res, desired) - - -class TestSplit(TestCase): - # The split function is essentially the same as array_split, - # except that it test if splitting will result in an - # equal split. Only test for this case. - - def test_equal_split(self): - a = np.arange(10) - res = split(a, 2) - desired = [np.arange(5), np.arange(5, 10)] - compare_results(res, desired) - - def test_unequal_split(self): - a = np.arange(10) - assert_raises(ValueError, split, a, 3) - - -class TestDstack(TestCase): - def test_0D_array(self): - a = np.array(1) - b = np.array(2) - res = dstack([a, b]) - desired = np.array([[[1, 2]]]) - assert_array_equal(res, desired) - - def test_1D_array(self): - a = np.array([1]) - b = np.array([2]) - res = dstack([a, b]) - desired = np.array([[[1, 2]]]) - assert_array_equal(res, desired) - - def test_2D_array(self): - a = np.array([[1], [2]]) - b = np.array([[1], [2]]) - res = dstack([a, b]) - desired = np.array([[[1, 1]], [[2, 2, ]]]) - assert_array_equal(res, desired) - - def test_2D_array2(self): - a = np.array([1, 2]) - b = np.array([1, 2]) - res = dstack([a, b]) - desired = np.array([[[1, 1], [2, 2]]]) - assert_array_equal(res, desired) - - -# array_split has more comprehensive test of splitting. -# only do simple test on hsplit, vsplit, and dsplit -class TestHsplit(TestCase): - """Only testing for integer splits. - - """ - def test_0D_array(self): - a = np.array(1) - try: - hsplit(a, 2) - assert_(0) - except ValueError: - pass - - def test_1D_array(self): - a = np.array([1, 2, 3, 4]) - res = hsplit(a, 2) - desired = [np.array([1, 2]), np.array([3, 4])] - compare_results(res, desired) - - def test_2D_array(self): - a = np.array([[1, 2, 3, 4], - [1, 2, 3, 4]]) - res = hsplit(a, 2) - desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] - compare_results(res, desired) - - -class TestVsplit(TestCase): - """Only testing for integer splits. - - """ - def test_1D_array(self): - a = np.array([1, 2, 3, 4]) - try: - vsplit(a, 2) - assert_(0) - except ValueError: - pass - - def test_2D_array(self): - a = np.array([[1, 2, 3, 4], - [1, 2, 3, 4]]) - res = vsplit(a, 2) - desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] - compare_results(res, desired) - - -class TestDsplit(TestCase): - # Only testing for integer splits. 
- - def test_2D_array(self): - a = np.array([[1, 2, 3, 4], - [1, 2, 3, 4]]) - try: - dsplit(a, 2) - assert_(0) - except ValueError: - pass - - def test_3D_array(self): - a = np.array([[[1, 2, 3, 4], - [1, 2, 3, 4]], - [[1, 2, 3, 4], - [1, 2, 3, 4]]]) - res = dsplit(a, 2) - desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), - np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] - compare_results(res, desired) - - -class TestSqueeze(TestCase): - def test_basic(self): - from numpy.random import rand - - a = rand(20, 10, 10, 1, 1) - b = rand(20, 1, 10, 1, 20) - c = rand(1, 1, 20, 10) - assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) - assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) - assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) - - # Squeezing to 0-dim should still give an ndarray - a = [[[1.5]]] - res = np.squeeze(a) - assert_equal(res, 1.5) - assert_equal(res.ndim, 0) - assert_equal(type(res), np.ndarray) - - -class TestKron(TestCase): - def test_return_type(self): - a = np.ones([2, 2]) - m = np.asmatrix(a) - assert_equal(type(kron(a, a)), np.ndarray) - assert_equal(type(kron(m, m)), np.matrix) - assert_equal(type(kron(a, m)), np.matrix) - assert_equal(type(kron(m, a)), np.matrix) - - class myarray(np.ndarray): - __array_priority__ = 0.0 - - ma = myarray(a.shape, a.dtype, a.data) - assert_equal(type(kron(a, a)), np.ndarray) - assert_equal(type(kron(ma, ma)), myarray) - assert_equal(type(kron(a, ma)), np.ndarray) - assert_equal(type(kron(ma, a)), myarray) - - -class TestTile(TestCase): - def test_basic(self): - a = np.array([0, 1, 2]) - b = [[1, 2], [3, 4]] - assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) - assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) - assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) - assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) - assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) - assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], - [1, 2, 1, 2], [3, 4, 3, 4]]) - - def test_empty(self): - a = np.array([[[]]]) - d = tile(a, (3, 2, 5)).shape - assert_equal(d, (3, 2, 0)) - - def test_kroncompare(self): - from numpy.random import randint - - reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] - shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] - for s in shape: - b = randint(0, 10, size=s) - for r in reps: - a = np.ones(r, b.dtype) - large = tile(b, r) - klarge = kron(a, b) - assert_equal(large, klarge) - - -class TestMayShareMemory(TestCase): - def test_basic(self): - d = np.ones((50, 60)) - d2 = np.ones((30, 60, 6)) - self.assertTrue(np.may_share_memory(d, d)) - self.assertTrue(np.may_share_memory(d, d[::-1])) - self.assertTrue(np.may_share_memory(d, d[::2])) - self.assertTrue(np.may_share_memory(d, d[1:, ::-1])) - - self.assertFalse(np.may_share_memory(d[::-1], d2)) - self.assertFalse(np.may_share_memory(d[::2], d2)) - self.assertFalse(np.may_share_memory(d[1:, ::-1], d2)) - self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2)) - - -# Utility -def compare_results(res, desired): - for i in range(len(desired)): - assert_array_equal(res[i], desired[i]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_stride_tricks.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_stride_tricks.py deleted file mode 100644 index cd0973300052c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_stride_tricks.py +++ /dev/null @@ -1,238 +0,0 @@ -from __future__ 
import division, absolute_import, print_function - -import numpy as np -from numpy.testing import ( - run_module_suite, assert_equal, assert_array_equal, - assert_raises - ) -from numpy.lib.stride_tricks import as_strided, broadcast_arrays - - -def assert_shapes_correct(input_shapes, expected_shape): - # Broadcast a list of arrays with the given input shapes and check the - # common output shape. - - inarrays = [np.zeros(s) for s in input_shapes] - outarrays = broadcast_arrays(*inarrays) - outshapes = [a.shape for a in outarrays] - expected = [expected_shape] * len(inarrays) - assert_equal(outshapes, expected) - - -def assert_incompatible_shapes_raise(input_shapes): - # Broadcast a list of arrays with the given (incompatible) input shapes - # and check that they raise a ValueError. - - inarrays = [np.zeros(s) for s in input_shapes] - assert_raises(ValueError, broadcast_arrays, *inarrays) - - -def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): - # Broadcast two shapes against each other and check that the data layout - # is the same as if a ufunc did the broadcasting. - - x0 = np.zeros(shape0, dtype=int) - # Note that multiply.reduce's identity element is 1.0, so when shape1==(), - # this gives the desired n==1. - n = int(np.multiply.reduce(shape1)) - x1 = np.arange(n).reshape(shape1) - if transposed: - x0 = x0.T - x1 = x1.T - if flipped: - x0 = x0[::-1] - x1 = x1[::-1] - # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the - # result should be exactly the same as the broadcasted view of x1. - y = x0 + x1 - b0, b1 = broadcast_arrays(x0, x1) - assert_array_equal(y, b1) - - -def test_same(): - x = np.arange(10) - y = np.arange(10) - bx, by = broadcast_arrays(x, y) - assert_array_equal(x, bx) - assert_array_equal(y, by) - - -def test_one_off(): - x = np.array([[1, 2, 3]]) - y = np.array([[1], [2], [3]]) - bx, by = broadcast_arrays(x, y) - bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) - by0 = bx0.T - assert_array_equal(bx0, bx) - assert_array_equal(by0, by) - - -def test_same_input_shapes(): - # Check that the final shape is just the input shape. - - data = [ - (), - (1,), - (3,), - (0, 1), - (0, 3), - (1, 0), - (3, 0), - (1, 3), - (3, 1), - (3, 3), - ] - for shape in data: - input_shapes = [shape] - # Single input. - assert_shapes_correct(input_shapes, shape) - # Double input. - input_shapes2 = [shape, shape] - assert_shapes_correct(input_shapes2, shape) - # Triple input. - input_shapes3 = [shape, shape, shape] - assert_shapes_correct(input_shapes3, shape) - - -def test_two_compatible_by_ones_input_shapes(): - # Check that two different input shapes of the same length, but some have - # ones, broadcast to the correct shape. - - data = [ - [[(1,), (3,)], (3,)], - [[(1, 3), (3, 3)], (3, 3)], - [[(3, 1), (3, 3)], (3, 3)], - [[(1, 3), (3, 1)], (3, 3)], - [[(1, 1), (3, 3)], (3, 3)], - [[(1, 1), (1, 3)], (1, 3)], - [[(1, 1), (3, 1)], (3, 1)], - [[(1, 0), (0, 0)], (0, 0)], - [[(0, 1), (0, 0)], (0, 0)], - [[(1, 0), (0, 1)], (0, 0)], - [[(1, 1), (0, 0)], (0, 0)], - [[(1, 1), (1, 0)], (1, 0)], - [[(1, 1), (0, 1)], (0, 1)], - ] - for input_shapes, expected_shape in data: - assert_shapes_correct(input_shapes, expected_shape) - # Reverse the input shapes since broadcasting should be symmetric. - assert_shapes_correct(input_shapes[::-1], expected_shape) - - -def test_two_compatible_by_prepending_ones_input_shapes(): - # Check that two different input shapes (of different lengths) broadcast - # to the correct shape. 
- - data = [ - [[(), (3,)], (3,)], - [[(3,), (3, 3)], (3, 3)], - [[(3,), (3, 1)], (3, 3)], - [[(1,), (3, 3)], (3, 3)], - [[(), (3, 3)], (3, 3)], - [[(1, 1), (3,)], (1, 3)], - [[(1,), (3, 1)], (3, 1)], - [[(1,), (1, 3)], (1, 3)], - [[(), (1, 3)], (1, 3)], - [[(), (3, 1)], (3, 1)], - [[(), (0,)], (0,)], - [[(0,), (0, 0)], (0, 0)], - [[(0,), (0, 1)], (0, 0)], - [[(1,), (0, 0)], (0, 0)], - [[(), (0, 0)], (0, 0)], - [[(1, 1), (0,)], (1, 0)], - [[(1,), (0, 1)], (0, 1)], - [[(1,), (1, 0)], (1, 0)], - [[(), (1, 0)], (1, 0)], - [[(), (0, 1)], (0, 1)], - ] - for input_shapes, expected_shape in data: - assert_shapes_correct(input_shapes, expected_shape) - # Reverse the input shapes since broadcasting should be symmetric. - assert_shapes_correct(input_shapes[::-1], expected_shape) - - -def test_incompatible_shapes_raise_valueerror(): - # Check that a ValueError is raised for incompatible shapes. - - data = [ - [(3,), (4,)], - [(2, 3), (2,)], - [(3,), (3,), (4,)], - [(1, 3, 4), (2, 3, 3)], - ] - for input_shapes in data: - assert_incompatible_shapes_raise(input_shapes) - # Reverse the input shapes since broadcasting should be symmetric. - assert_incompatible_shapes_raise(input_shapes[::-1]) - - -def test_same_as_ufunc(): - # Check that the data layout is the same as if a ufunc did the operation. - - data = [ - [[(1,), (3,)], (3,)], - [[(1, 3), (3, 3)], (3, 3)], - [[(3, 1), (3, 3)], (3, 3)], - [[(1, 3), (3, 1)], (3, 3)], - [[(1, 1), (3, 3)], (3, 3)], - [[(1, 1), (1, 3)], (1, 3)], - [[(1, 1), (3, 1)], (3, 1)], - [[(1, 0), (0, 0)], (0, 0)], - [[(0, 1), (0, 0)], (0, 0)], - [[(1, 0), (0, 1)], (0, 0)], - [[(1, 1), (0, 0)], (0, 0)], - [[(1, 1), (1, 0)], (1, 0)], - [[(1, 1), (0, 1)], (0, 1)], - [[(), (3,)], (3,)], - [[(3,), (3, 3)], (3, 3)], - [[(3,), (3, 1)], (3, 3)], - [[(1,), (3, 3)], (3, 3)], - [[(), (3, 3)], (3, 3)], - [[(1, 1), (3,)], (1, 3)], - [[(1,), (3, 1)], (3, 1)], - [[(1,), (1, 3)], (1, 3)], - [[(), (1, 3)], (1, 3)], - [[(), (3, 1)], (3, 1)], - [[(), (0,)], (0,)], - [[(0,), (0, 0)], (0, 0)], - [[(0,), (0, 1)], (0, 0)], - [[(1,), (0, 0)], (0, 0)], - [[(), (0, 0)], (0, 0)], - [[(1, 1), (0,)], (1, 0)], - [[(1,), (0, 1)], (0, 1)], - [[(1,), (1, 0)], (1, 0)], - [[(), (1, 0)], (1, 0)], - [[(), (0, 1)], (0, 1)], - ] - for input_shapes, expected_shape in data: - assert_same_as_ufunc(input_shapes[0], input_shapes[1], - "Shapes: %s %s" % (input_shapes[0], input_shapes[1])) - # Reverse the input shapes since broadcasting should be symmetric. - assert_same_as_ufunc(input_shapes[1], input_shapes[0]) - # Try them transposed, too. - assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) - # ... and flipped for non-rank-0 inputs in order to test negative - # strides. 
- if () not in input_shapes: - assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) - assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) - -def test_as_strided(): - a = np.array([None]) - a_view = as_strided(a) - expected = np.array([None]) - assert_array_equal(a_view, np.array([None])) - - a = np.array([1, 2, 3, 4]) - a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) - expected = np.array([1, 3]) - assert_array_equal(a_view, expected) - - a = np.array([1, 2, 3, 4]) - a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) - expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) - assert_array_equal(a_view, expected) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_twodim_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_twodim_base.py deleted file mode 100644 index 739061a5df49d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_twodim_base.py +++ /dev/null @@ -1,504 +0,0 @@ -"""Test functions for matrix module - -""" -from __future__ import division, absolute_import, print_function - -from numpy.testing import ( - TestCase, run_module_suite, assert_equal, assert_array_equal, - assert_array_max_ulp, assert_array_almost_equal, assert_raises, rand, - ) - -from numpy import ( - arange, rot90, add, fliplr, flipud, zeros, ones, eye, array, diag, - histogram2d, tri, mask_indices, triu_indices, triu_indices_from, - tril_indices, tril_indices_from, vander, - ) - -import numpy as np -from numpy.compat import asbytes_nested - - -def get_mat(n): - data = arange(n) - data = add.outer(data, data) - return data - - -class TestEye(TestCase): - def test_basic(self): - assert_equal(eye(4), - array([[1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]])) - - assert_equal(eye(4, dtype='f'), - array([[1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]], 'f')) - - assert_equal(eye(3) == 1, - eye(3, dtype=bool)) - - def test_diag(self): - assert_equal(eye(4, k=1), - array([[0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1], - [0, 0, 0, 0]])) - - assert_equal(eye(4, k=-1), - array([[0, 0, 0, 0], - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0]])) - - def test_2d(self): - assert_equal(eye(4, 3), - array([[1, 0, 0], - [0, 1, 0], - [0, 0, 1], - [0, 0, 0]])) - - assert_equal(eye(3, 4), - array([[1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0]])) - - def test_diag2d(self): - assert_equal(eye(3, 4, k=2), - array([[0, 0, 1, 0], - [0, 0, 0, 1], - [0, 0, 0, 0]])) - - assert_equal(eye(4, 3, k=-2), - array([[0, 0, 0], - [0, 0, 0], - [1, 0, 0], - [0, 1, 0]])) - - def test_eye_bounds(self): - assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) - assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) - assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) - assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) - assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) - assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) - assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) - assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) - assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) - - def test_strings(self): - assert_equal(eye(2, 2, dtype='S3'), - asbytes_nested([['1', ''], ['', '1']])) - - def test_bool(self): - assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) - - -class TestDiag(TestCase): - def test_vector(self): - vals = (100 * arange(5)).astype('l') - b = zeros((5, 5)) - for k in range(5): - b[k, k] = vals[k] - assert_equal(diag(vals), b) - b = 
zeros((7, 7)) - c = b.copy() - for k in range(5): - b[k, k + 2] = vals[k] - c[k + 2, k] = vals[k] - assert_equal(diag(vals, k=2), b) - assert_equal(diag(vals, k=-2), c) - - def test_matrix(self, vals=None): - if vals is None: - vals = (100 * get_mat(5) + 1).astype('l') - b = zeros((5,)) - for k in range(5): - b[k] = vals[k, k] - assert_equal(diag(vals), b) - b = b * 0 - for k in range(3): - b[k] = vals[k, k + 2] - assert_equal(diag(vals, 2), b[:3]) - for k in range(3): - b[k] = vals[k + 2, k] - assert_equal(diag(vals, -2), b[:3]) - - def test_fortran_order(self): - vals = array((100 * get_mat(5) + 1), order='F', dtype='l') - self.test_matrix(vals) - - def test_diag_bounds(self): - A = [[1, 2], [3, 4], [5, 6]] - assert_equal(diag(A, k=2), []) - assert_equal(diag(A, k=1), [2]) - assert_equal(diag(A, k=0), [1, 4]) - assert_equal(diag(A, k=-1), [3, 6]) - assert_equal(diag(A, k=-2), [5]) - assert_equal(diag(A, k=-3), []) - - def test_failure(self): - self.assertRaises(ValueError, diag, [[[1]]]) - - -class TestFliplr(TestCase): - def test_basic(self): - self.assertRaises(ValueError, fliplr, ones(4)) - a = get_mat(4) - b = a[:, ::-1] - assert_equal(fliplr(a), b) - a = [[0, 1, 2], - [3, 4, 5]] - b = [[2, 1, 0], - [5, 4, 3]] - assert_equal(fliplr(a), b) - - -class TestFlipud(TestCase): - def test_basic(self): - a = get_mat(4) - b = a[::-1, :] - assert_equal(flipud(a), b) - a = [[0, 1, 2], - [3, 4, 5]] - b = [[3, 4, 5], - [0, 1, 2]] - assert_equal(flipud(a), b) - - -class TestRot90(TestCase): - def test_basic(self): - self.assertRaises(ValueError, rot90, ones(4)) - - a = [[0, 1, 2], - [3, 4, 5]] - b1 = [[2, 5], - [1, 4], - [0, 3]] - b2 = [[5, 4, 3], - [2, 1, 0]] - b3 = [[3, 0], - [4, 1], - [5, 2]] - b4 = [[0, 1, 2], - [3, 4, 5]] - - for k in range(-3, 13, 4): - assert_equal(rot90(a, k=k), b1) - for k in range(-2, 13, 4): - assert_equal(rot90(a, k=k), b2) - for k in range(-1, 13, 4): - assert_equal(rot90(a, k=k), b3) - for k in range(0, 13, 4): - assert_equal(rot90(a, k=k), b4) - - def test_axes(self): - a = ones((50, 40, 3)) - assert_equal(rot90(a).shape, (40, 50, 3)) - - -class TestHistogram2d(TestCase): - def test_simple(self): - x = array( - [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) - y = array( - [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) - xedges = np.linspace(0, 1, 10) - yedges = np.linspace(0, 1, 10) - H = histogram2d(x, y, (xedges, yedges))[0] - answer = array( - [[0, 0, 0, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 0, 1, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]]) - assert_array_equal(H.T, answer) - H = histogram2d(x, y, xedges)[0] - assert_array_equal(H.T, answer) - H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) - assert_array_equal(H, eye(10, 10)) - assert_array_equal(xedges, np.linspace(0, 9, 11)) - assert_array_equal(yedges, np.linspace(0, 9, 11)) - - def test_asym(self): - x = array([1, 1, 2, 3, 4, 4, 4, 5]) - y = array([1, 3, 2, 0, 1, 2, 3, 4]) - H, xed, yed = histogram2d( - x, y, (6, 5), range=[[0, 6], [0, 5]], normed=True) - answer = array( - [[0., 0, 0, 0, 0], - [0, 1, 0, 1, 0], - [0, 0, 1, 0, 0], - [1, 0, 0, 0, 0], - [0, 1, 1, 1, 0], - [0, 0, 0, 0, 1]]) - assert_array_almost_equal(H, answer/8., 3) - assert_array_equal(xed, np.linspace(0, 6, 7)) - assert_array_equal(yed, np.linspace(0, 5, 6)) - - def test_norm(self): - x = array([1, 2, 3, 1, 2, 3, 
1, 2, 3]) - y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) - H, xed, yed = histogram2d( - x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True) - answer = array([[1, 1, .5], - [1, 1, .5], - [.5, .5, .25]])/9. - assert_array_almost_equal(H, answer, 3) - - def test_all_outliers(self): - r = rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6 - H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) - assert_array_equal(H, 0) - - def test_empty(self): - a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) - assert_array_max_ulp(a, array([[0.]])) - - a, edge1, edge2 = histogram2d([], [], bins=4) - assert_array_max_ulp(a, np.zeros((4, 4))) - - -class TestTri(TestCase): - def test_dtype(self): - out = array([[1, 0, 0], - [1, 1, 0], - [1, 1, 1]]) - assert_array_equal(tri(3), out) - assert_array_equal(tri(3, dtype=bool), out.astype(bool)) - - -def test_tril_triu_ndim2(): - for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: - a = np.ones((2, 2), dtype=dtype) - b = np.tril(a) - c = np.triu(a) - yield assert_array_equal, b, [[1, 0], [1, 1]] - yield assert_array_equal, c, b.T - # should return the same dtype as the original array - yield assert_equal, b.dtype, a.dtype - yield assert_equal, c.dtype, a.dtype - - -def test_tril_triu_ndim3(): - for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: - a = np.array([ - [[1, 1], [1, 1]], - [[1, 1], [1, 0]], - [[1, 1], [0, 0]], - ], dtype=dtype) - a_tril_desired = np.array([ - [[1, 0], [1, 1]], - [[1, 0], [1, 0]], - [[1, 0], [0, 0]], - ], dtype=dtype) - a_triu_desired = np.array([ - [[1, 1], [0, 1]], - [[1, 1], [0, 0]], - [[1, 1], [0, 0]], - ], dtype=dtype) - a_triu_observed = np.triu(a) - a_tril_observed = np.tril(a) - yield assert_array_equal, a_triu_observed, a_triu_desired - yield assert_array_equal, a_tril_observed, a_tril_desired - yield assert_equal, a_triu_observed.dtype, a.dtype - yield assert_equal, a_tril_observed.dtype, a.dtype - -def test_tril_triu_with_inf(): - # Issue 4859 - arr = np.array([[1, 1, np.inf], - [1, 1, 1], - [np.inf, 1, 1]]) - out_tril = np.array([[1, 0, 0], - [1, 1, 0], - [np.inf, 1, 1]]) - out_triu = out_tril.T - assert_array_equal(np.triu(arr), out_triu) - assert_array_equal(np.tril(arr), out_tril) - - -def test_tril_triu_dtype(): - # Issue 4916 - # tril and triu should return the same dtype as input - for c in np.typecodes['All']: - if c == 'V': - continue - arr = np.zeros((3, 3), dtype=c) - assert_equal(np.triu(arr).dtype, arr.dtype) - assert_equal(np.tril(arr).dtype, arr.dtype) - - # check special cases - arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], - ['2004-01-01T12:00', '2003-01-03T13:45']], - dtype='datetime64') - assert_equal(np.triu(arr).dtype, arr.dtype) - assert_equal(np.tril(arr).dtype, arr.dtype) - - arr = np.zeros((3,3), dtype='f4,f4') - assert_equal(np.triu(arr).dtype, arr.dtype) - assert_equal(np.tril(arr).dtype, arr.dtype) - - -def test_mask_indices(): - # simple test without offset - iu = mask_indices(3, np.triu) - a = np.arange(9).reshape(3, 3) - yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8])) - # Now with an offset - iu1 = mask_indices(3, np.triu, 1) - yield (assert_array_equal, a[iu1], array([1, 2, 5])) - - -def test_tril_indices(): - # indices without and with offset - il1 = tril_indices(4) - il2 = tril_indices(4, k=2) - il3 = tril_indices(4, m=5) - il4 = tril_indices(4, k=2, m=5) - - a = np.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - b = np.arange(1, 21).reshape(4, 5) - - # indexing: - yield (assert_array_equal, 
a[il1], - array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) - yield (assert_array_equal, b[il3], - array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) - - # And for assigning values: - a[il1] = -1 - yield (assert_array_equal, a, - array([[-1, 2, 3, 4], - [-1, -1, 7, 8], - [-1, -1, -1, 12], - [-1, -1, -1, -1]])) - b[il3] = -1 - yield (assert_array_equal, b, - array([[-1, 2, 3, 4, 5], - [-1, -1, 8, 9, 10], - [-1, -1, -1, 14, 15], - [-1, -1, -1, -1, 20]])) - # These cover almost the whole array (two diagonals right of the main one): - a[il2] = -10 - yield (assert_array_equal, a, - array([[-10, -10, -10, 4], - [-10, -10, -10, -10], - [-10, -10, -10, -10], - [-10, -10, -10, -10]])) - b[il4] = -10 - yield (assert_array_equal, b, - array([[-10, -10, -10, 4, 5], - [-10, -10, -10, -10, 10], - [-10, -10, -10, -10, -10], - [-10, -10, -10, -10, -10]])) - - -class TestTriuIndices(object): - def test_triu_indices(self): - iu1 = triu_indices(4) - iu2 = triu_indices(4, k=2) - iu3 = triu_indices(4, m=5) - iu4 = triu_indices(4, k=2, m=5) - - a = np.array([[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12], - [13, 14, 15, 16]]) - b = np.arange(1, 21).reshape(4, 5) - - # Both for indexing: - yield (assert_array_equal, a[iu1], - array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) - yield (assert_array_equal, b[iu3], - array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20])) - - # And for assigning values: - a[iu1] = -1 - yield (assert_array_equal, a, - array([[-1, -1, -1, -1], - [5, -1, -1, -1], - [9, 10, -1, -1], - [13, 14, 15, -1]])) - b[iu3] = -1 - yield (assert_array_equal, b, - array([[-1, -1, -1, -1, -1], - [6, -1, -1, -1, -1], - [11, 12, -1, -1, -1], - [16, 17, 18, -1, -1]])) - - # These cover almost the whole array (two diagonals right of the - # main one): - a[iu2] = -10 - yield (assert_array_equal, a, - array([[-1, -1, -10, -10], - [5, -1, -1, -10], - [9, 10, -1, -1], - [13, 14, 15, -1]])) - b[iu4] = -10 - yield (assert_array_equal, b, - array([[-1, -1, -10, -10, -10], - [6, -1, -1, -10, -10], - [11, 12, -1, -1, -10], - [16, 17, 18, -1, -1]])) - - -class TestTrilIndicesFrom(object): - def test_exceptions(self): - assert_raises(ValueError, tril_indices_from, np.ones((2,))) - assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) - # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) - - -class TestTriuIndicesFrom(object): - def test_exceptions(self): - assert_raises(ValueError, triu_indices_from, np.ones((2,))) - assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) - # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) - - -class TestVander(object): - def test_basic(self): - c = np.array([0, 1, -2, 3]) - v = vander(c) - powers = np.array([[0, 0, 0, 0, 1], - [1, 1, 1, 1, 1], - [16, -8, 4, -2, 1], - [81, 27, 9, 3, 1]]) - # Check default value of N: - yield (assert_array_equal, v, powers[:, 1:]) - # Check a range of N values, including 0 and 5 (greater than default) - m = powers.shape[1] - for n in range(6): - v = vander(c, N=n) - yield (assert_array_equal, v, powers[:, m-n:m]) - - def test_dtypes(self): - c = array([11, -12, 13], dtype=np.int8) - v = vander(c) - expected = np.array([[121, 11, 1], - [144, -12, 1], - [169, 13, 1]]) - yield (assert_array_equal, v, expected) - - c = array([1.0+1j, 1.0-1j]) - v = vander(c, N=3) - expected = np.array([[2j, 1+1j, 1], - [-2j, 1-1j, 1]]) - # The data is floating point, but the values are small integers, - # so assert_array_equal *should* be safe here (rather than, say, - # assert_array_almost_equal). 
- yield (assert_array_equal, v, expected) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_type_check.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_type_check.py deleted file mode 100644 index 3931f95e5fb9d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_type_check.py +++ /dev/null @@ -1,328 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.compat import long -from numpy.testing import ( - TestCase, assert_, assert_equal, assert_array_equal, run_module_suite - ) -from numpy.lib.type_check import ( - common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, - nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close - ) - - -def assert_all(x): - assert_(np.all(x), x) - - -class TestCommonType(TestCase): - def test_basic(self): - ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) - af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) - af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) - acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) - acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) - assert_(common_type(ai32) == np.float64) - assert_(common_type(af32) == np.float32) - assert_(common_type(af64) == np.float64) - assert_(common_type(acs) == np.csingle) - assert_(common_type(acd) == np.cdouble) - - -class TestMintypecode(TestCase): - - def test_default_1(self): - for itype in '1bcsuwil': - assert_equal(mintypecode(itype), 'd') - assert_equal(mintypecode('f'), 'f') - assert_equal(mintypecode('d'), 'd') - assert_equal(mintypecode('F'), 'F') - assert_equal(mintypecode('D'), 'D') - - def test_default_2(self): - for itype in '1bcsuwil': - assert_equal(mintypecode(itype+'f'), 'f') - assert_equal(mintypecode(itype+'d'), 'd') - assert_equal(mintypecode(itype+'F'), 'F') - assert_equal(mintypecode(itype+'D'), 'D') - assert_equal(mintypecode('ff'), 'f') - assert_equal(mintypecode('fd'), 'd') - assert_equal(mintypecode('fF'), 'F') - assert_equal(mintypecode('fD'), 'D') - assert_equal(mintypecode('df'), 'd') - assert_equal(mintypecode('dd'), 'd') - #assert_equal(mintypecode('dF',savespace=1),'F') - assert_equal(mintypecode('dF'), 'D') - assert_equal(mintypecode('dD'), 'D') - assert_equal(mintypecode('Ff'), 'F') - #assert_equal(mintypecode('Fd',savespace=1),'F') - assert_equal(mintypecode('Fd'), 'D') - assert_equal(mintypecode('FF'), 'F') - assert_equal(mintypecode('FD'), 'D') - assert_equal(mintypecode('Df'), 'D') - assert_equal(mintypecode('Dd'), 'D') - assert_equal(mintypecode('DF'), 'D') - assert_equal(mintypecode('DD'), 'D') - - def test_default_3(self): - assert_equal(mintypecode('fdF'), 'D') - #assert_equal(mintypecode('fdF',savespace=1),'F') - assert_equal(mintypecode('fdD'), 'D') - assert_equal(mintypecode('fFD'), 'D') - assert_equal(mintypecode('dFD'), 'D') - - assert_equal(mintypecode('ifd'), 'd') - assert_equal(mintypecode('ifF'), 'F') - assert_equal(mintypecode('ifD'), 'D') - assert_equal(mintypecode('idF'), 'D') - #assert_equal(mintypecode('idF',savespace=1),'F') - assert_equal(mintypecode('idD'), 'D') - - -class TestIsscalar(TestCase): - - def test_basic(self): - assert_(np.isscalar(3)) - assert_(not np.isscalar([3])) - assert_(not np.isscalar((3,))) - assert_(np.isscalar(3j)) - assert_(np.isscalar(long(10))) - assert_(np.isscalar(4.0)) - - -class TestReal(TestCase): - - def test_real(self): - y = np.random.rand(10,) - assert_array_equal(y, np.real(y)) - - def test_cmplx(self): - y 
= np.random.rand(10,)+1j*np.random.rand(10,) - assert_array_equal(y.real, np.real(y)) - - -class TestImag(TestCase): - - def test_real(self): - y = np.random.rand(10,) - assert_array_equal(0, np.imag(y)) - - def test_cmplx(self): - y = np.random.rand(10,)+1j*np.random.rand(10,) - assert_array_equal(y.imag, np.imag(y)) - - -class TestIscomplex(TestCase): - - def test_fail(self): - z = np.array([-1, 0, 1]) - res = iscomplex(z) - assert_(not np.sometrue(res, axis=0)) - - def test_pass(self): - z = np.array([-1j, 1, 0]) - res = iscomplex(z) - assert_array_equal(res, [1, 0, 0]) - - -class TestIsreal(TestCase): - - def test_pass(self): - z = np.array([-1, 0, 1j]) - res = isreal(z) - assert_array_equal(res, [1, 1, 0]) - - def test_fail(self): - z = np.array([-1j, 1, 0]) - res = isreal(z) - assert_array_equal(res, [0, 1, 1]) - - -class TestIscomplexobj(TestCase): - - def test_basic(self): - z = np.array([-1, 0, 1]) - assert_(not iscomplexobj(z)) - z = np.array([-1j, 0, -1]) - assert_(iscomplexobj(z)) - - -class TestIsrealobj(TestCase): - def test_basic(self): - z = np.array([-1, 0, 1]) - assert_(isrealobj(z)) - z = np.array([-1j, 0, -1]) - assert_(not isrealobj(z)) - - -class TestIsnan(TestCase): - - def test_goodvalues(self): - z = np.array((-1., 0., 1.)) - res = np.isnan(z) == 0 - assert_all(np.all(res, axis=0)) - - def test_posinf(self): - with np.errstate(divide='ignore'): - assert_all(np.isnan(np.array((1.,))/0.) == 0) - - def test_neginf(self): - with np.errstate(divide='ignore'): - assert_all(np.isnan(np.array((-1.,))/0.) == 0) - - def test_ind(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isnan(np.array((0.,))/0.) == 1) - - def test_integer(self): - assert_all(np.isnan(1) == 0) - - def test_complex(self): - assert_all(np.isnan(1+1j) == 0) - - def test_complex1(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isnan(np.array(0+0j)/0.) == 1) - - -class TestIsfinite(TestCase): - # Fixme, wrong place, isfinite now ufunc - - def test_goodvalues(self): - z = np.array((-1., 0., 1.)) - res = np.isfinite(z) == 1 - assert_all(np.all(res, axis=0)) - - def test_posinf(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isfinite(np.array((1.,))/0.) == 0) - - def test_neginf(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isfinite(np.array((-1.,))/0.) == 0) - - def test_ind(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isfinite(np.array((0.,))/0.) == 0) - - def test_integer(self): - assert_all(np.isfinite(1) == 1) - - def test_complex(self): - assert_all(np.isfinite(1+1j) == 1) - - def test_complex1(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isfinite(np.array(1+1j)/0.) == 0) - - -class TestIsinf(TestCase): - # Fixme, wrong place, isinf now ufunc - - def test_goodvalues(self): - z = np.array((-1., 0., 1.)) - res = np.isinf(z) == 0 - assert_all(np.all(res, axis=0)) - - def test_posinf(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isinf(np.array((1.,))/0.) == 1) - - def test_posinf_scalar(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isinf(np.array(1.,)/0.) == 1) - - def test_neginf(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isinf(np.array((-1.,))/0.) == 1) - - def test_neginf_scalar(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isinf(np.array(-1.)/0.) 
== 1) - - def test_ind(self): - with np.errstate(divide='ignore', invalid='ignore'): - assert_all(np.isinf(np.array((0.,))/0.) == 0) - - -class TestIsposinf(TestCase): - - def test_generic(self): - with np.errstate(divide='ignore', invalid='ignore'): - vals = isposinf(np.array((-1., 0, 1))/0.) - assert_(vals[0] == 0) - assert_(vals[1] == 0) - assert_(vals[2] == 1) - - -class TestIsneginf(TestCase): - - def test_generic(self): - with np.errstate(divide='ignore', invalid='ignore'): - vals = isneginf(np.array((-1., 0, 1))/0.) - assert_(vals[0] == 1) - assert_(vals[1] == 0) - assert_(vals[2] == 0) - - -class TestNanToNum(TestCase): - - def test_generic(self): - with np.errstate(divide='ignore', invalid='ignore'): - vals = nan_to_num(np.array((-1., 0, 1))/0.) - assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) - assert_(vals[1] == 0) - assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) - - def test_integer(self): - vals = nan_to_num(1) - assert_all(vals == 1) - - def test_complex_good(self): - vals = nan_to_num(1+1j) - assert_all(vals == 1+1j) - - def test_complex_bad(self): - with np.errstate(divide='ignore', invalid='ignore'): - v = 1 + 1j - v += np.array(0+1.j)/0. - vals = nan_to_num(v) - # !! This is actually (unexpectedly) zero - assert_all(np.isfinite(vals)) - - def test_complex_bad2(self): - with np.errstate(divide='ignore', invalid='ignore'): - v = 1 + 1j - v += np.array(-1+1.j)/0. - vals = nan_to_num(v) - assert_all(np.isfinite(vals)) - # Fixme - #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) - # !! This is actually (unexpectedly) positive - # !! inf. Comment out for now, and see if it - # !! changes - #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) - - -class TestRealIfClose(TestCase): - - def test_basic(self): - a = np.random.rand(10) - b = real_if_close(a+1e-15j) - assert_all(isrealobj(b)) - assert_array_equal(a, b) - b = real_if_close(a+1e-7j) - assert_all(iscomplexobj(b)) - b = real_if_close(a+1e-7j, tol=1e-6) - assert_all(isrealobj(b)) - - -class TestArrayConversion(TestCase): - - def test_asfarray(self): - a = asfarray(np.array([1, 2, 3])) - assert_equal(a.__class__, np.ndarray) - assert_(np.issubdtype(a.dtype, np.float)) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_ufunclike.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_ufunclike.py deleted file mode 100644 index 97d608ecfa801..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_ufunclike.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy.core as nx -import numpy.lib.ufunclike as ufl -from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal - ) - - -class TestUfunclike(TestCase): - - def test_isposinf(self): - a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) - out = nx.zeros(a.shape, bool) - tgt = nx.array([True, False, False, False, False, False]) - - res = ufl.isposinf(a) - assert_equal(res, tgt) - res = ufl.isposinf(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - - def test_isneginf(self): - a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) - out = nx.zeros(a.shape, bool) - tgt = nx.array([False, True, False, False, False, False]) - - res = ufl.isneginf(a) - assert_equal(res, tgt) - res = ufl.isneginf(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - - def test_fix(self): - a = nx.array([[1.0, 1.1, 1.5, 1.8], 
[-1.0, -1.1, -1.5, -1.8]]) - out = nx.zeros(a.shape, float) - tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) - - res = ufl.fix(a) - assert_equal(res, tgt) - res = ufl.fix(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - assert_equal(ufl.fix(3.14), 3) - - def test_fix_with_subclass(self): - class MyArray(nx.ndarray): - def __new__(cls, data, metadata=None): - res = nx.array(data, copy=True).view(cls) - res.metadata = metadata - return res - - def __array_wrap__(self, obj, context=None): - obj.metadata = self.metadata - return obj - - a = nx.array([1.1, -1.1]) - m = MyArray(a, metadata='foo') - f = ufl.fix(m) - assert_array_equal(f, nx.array([1, -1])) - assert_(isinstance(f, MyArray)) - assert_equal(f.metadata, 'foo') - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_utils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_utils.py deleted file mode 100644 index fcb37f98a3e72..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/tests/test_utils.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from numpy.core import arange -from numpy.testing import ( - run_module_suite, assert_, assert_equal - ) -from numpy.lib import deprecate -import numpy.lib.utils as utils - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - - -def test_lookfor(): - out = StringIO() - utils.lookfor('eigenvalue', module='numpy', output=out, - import_modules=False) - out = out.getvalue() - assert_('numpy.linalg.eig' in out) - - -@deprecate -def old_func(self, x): - return x - - -@deprecate(message="Rather use new_func2") -def old_func2(self, x): - return x - - -def old_func3(self, x): - return x -new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3") - - -def test_deprecate_decorator(): - assert_('deprecated' in old_func.__doc__) - - -def test_deprecate_decorator_message(): - assert_('Rather use new_func2' in old_func2.__doc__) - - -def test_deprecate_fn(): - assert_('old_func3' in new_func3.__doc__) - assert_('new_func3' in new_func3.__doc__) - - -def test_safe_eval_nameconstant(): - # Test if safe_eval supports Python 3.4 _ast.NameConstant - utils.safe_eval('None') - - -def test_byte_bounds(): - a = arange(12).reshape(3, 4) - low, high = utils.byte_bounds(a) - assert_equal(high - low, a.size * a.itemsize) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/twodim_base.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/twodim_base.py deleted file mode 100644 index 40a140b6b09c5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/twodim_base.py +++ /dev/null @@ -1,1003 +0,0 @@ -""" Basic functions for manipulating 2d arrays - -""" -from __future__ import division, absolute_import, print_function - -from numpy.core.numeric import ( - asanyarray, arange, zeros, greater_equal, multiply, ones, asarray, - where, int8, int16, int32, int64, empty, promote_types - ) -from numpy.core import iinfo - - -__all__ = [ - 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu', - 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', - 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] - - -i1 = iinfo(int8) -i2 = iinfo(int16) -i4 = iinfo(int32) -def _min_int(low, high): - """ get small int that fits the range """ - if high <= i1.max and low >= i1.min: - return int8 - if high <= i2.max and low 
>= i2.min: - return int16 - if high <= i4.max and low >= i4.min: - return int32 - return int64 - - -def fliplr(m): - """ - Flip array in the left/right direction. - - Flip the entries in each row in the left/right direction. - Columns are preserved, but appear in a different order than before. - - Parameters - ---------- - m : array_like - Input array, must be at least 2-D. - - Returns - ------- - f : ndarray - A view of `m` with the columns reversed. Since a view - is returned, this operation is :math:`\\mathcal O(1)`. - - See Also - -------- - flipud : Flip array in the up/down direction. - rot90 : Rotate array counterclockwise. - - Notes - ----- - Equivalent to A[:,::-1]. Requires the array to be at least 2-D. - - Examples - -------- - >>> A = np.diag([1.,2.,3.]) - >>> A - array([[ 1., 0., 0.], - [ 0., 2., 0.], - [ 0., 0., 3.]]) - >>> np.fliplr(A) - array([[ 0., 0., 1.], - [ 0., 2., 0.], - [ 3., 0., 0.]]) - - >>> A = np.random.randn(2,3,5) - >>> np.all(np.fliplr(A)==A[:,::-1,...]) - True - - """ - m = asanyarray(m) - if m.ndim < 2: - raise ValueError("Input must be >= 2-d.") - return m[:, ::-1] - - -def flipud(m): - """ - Flip array in the up/down direction. - - Flip the entries in each column in the up/down direction. - Rows are preserved, but appear in a different order than before. - - Parameters - ---------- - m : array_like - Input array. - - Returns - ------- - out : array_like - A view of `m` with the rows reversed. Since a view is - returned, this operation is :math:`\\mathcal O(1)`. - - See Also - -------- - fliplr : Flip array in the left/right direction. - rot90 : Rotate array counterclockwise. - - Notes - ----- - Equivalent to ``A[::-1,...]``. - Does not require the array to be two-dimensional. - - Examples - -------- - >>> A = np.diag([1.0, 2, 3]) - >>> A - array([[ 1., 0., 0.], - [ 0., 2., 0.], - [ 0., 0., 3.]]) - >>> np.flipud(A) - array([[ 0., 0., 3.], - [ 0., 2., 0.], - [ 1., 0., 0.]]) - - >>> A = np.random.randn(2,3,5) - >>> np.all(np.flipud(A)==A[::-1,...]) - True - - >>> np.flipud([1,2]) - array([2, 1]) - - """ - m = asanyarray(m) - if m.ndim < 1: - raise ValueError("Input must be >= 1-d.") - return m[::-1, ...] - - -def rot90(m, k=1): - """ - Rotate an array by 90 degrees in the counter-clockwise direction. - - The first two dimensions are rotated; therefore, the array must be at - least 2-D. - - Parameters - ---------- - m : array_like - Array of two or more dimensions. - k : integer - Number of times the array is rotated by 90 degrees. - - Returns - ------- - y : ndarray - Rotated array. - - See Also - -------- - fliplr : Flip an array horizontally. - flipud : Flip an array vertically. - - Examples - -------- - >>> m = np.array([[1,2],[3,4]], int) - >>> m - array([[1, 2], - [3, 4]]) - >>> np.rot90(m) - array([[2, 4], - [1, 3]]) - >>> np.rot90(m, 2) - array([[4, 3], - [2, 1]]) - - """ - m = asanyarray(m) - if m.ndim < 2: - raise ValueError("Input must be >= 2-d.") - k = k % 4 - if k == 0: - return m - elif k == 1: - return fliplr(m).swapaxes(0, 1) - elif k == 2: - return fliplr(flipud(m)) - else: - # k == 3 - return fliplr(m.swapaxes(0, 1)) - - -def eye(N, M=None, k=0, dtype=float): - """ - Return a 2-D array with ones on the diagonal and zeros elsewhere. - - Parameters - ---------- - N : int - Number of rows in the output. - M : int, optional - Number of columns in the output. If None, defaults to `N`.
- k : int, optional - Index of the diagonal: 0 (the default) refers to the main diagonal, - a positive value refers to an upper diagonal, and a negative value - to a lower diagonal. - dtype : data-type, optional - Data-type of the returned array. - - Returns - ------- - I : ndarray of shape (N,M) - An array where all elements are equal to zero, except for the `k`-th - diagonal, whose values are equal to one. - - See Also - -------- - identity : (almost) equivalent function - diag : diagonal 2-D array from a 1-D array specified by the user. - - Examples - -------- - >>> np.eye(2, dtype=int) - array([[1, 0], - [0, 1]]) - >>> np.eye(3, k=1) - array([[ 0., 1., 0.], - [ 0., 0., 1.], - [ 0., 0., 0.]]) - - """ - if M is None: - M = N - m = zeros((N, M), dtype=dtype) - if k >= M: - return m - if k >= 0: - i = k - else: - i = (-k) * M - m[:M-k].flat[i::M+1] = 1 - return m - - -def diag(v, k=0): - """ - Extract a diagonal or construct a diagonal array. - - See the more detailed documentation for ``numpy.diagonal`` if you use this - function to extract a diagonal and wish to write to the resulting array; - whether it returns a copy or a view depends on what version of numpy you - are using. - - Parameters - ---------- - v : array_like - If `v` is a 2-D array, return a copy of its `k`-th diagonal. - If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th - diagonal. - k : int, optional - Diagonal in question. The default is 0. Use `k>0` for diagonals - above the main diagonal, and `k<0` for diagonals below the main - diagonal. - - Returns - ------- - out : ndarray - The extracted diagonal or constructed diagonal array. - - See Also - -------- - diagonal : Return specified diagonals. - diagflat : Create a 2-D array with the flattened input as a diagonal. - trace : Sum along diagonals. - triu : Upper triangle of an array. - tril : Lower triangle of an array. - - Examples - -------- - >>> x = np.arange(9).reshape((3,3)) - >>> x - array([[0, 1, 2], - [3, 4, 5], - [6, 7, 8]]) - - >>> np.diag(x) - array([0, 4, 8]) - >>> np.diag(x, k=1) - array([1, 5]) - >>> np.diag(x, k=-1) - array([3, 7]) - - >>> np.diag(np.diag(x)) - array([[0, 0, 0], - [0, 4, 0], - [0, 0, 8]]) - - """ - v = asarray(v) - s = v.shape - if len(s) == 1: - n = s[0]+abs(k) - res = zeros((n, n), v.dtype) - if k >= 0: - i = k - else: - i = (-k) * n - res[:n-k].flat[i::n+1] = v - return res - elif len(s) == 2: - return v.diagonal(k) - else: - raise ValueError("Input must be 1- or 2-d.") - - -def diagflat(v, k=0): - """ - Create a two-dimensional array with the flattened input as a diagonal. - - Parameters - ---------- - v : array_like - Input data, which is flattened and set as the `k`-th - diagonal of the output. - k : int, optional - Diagonal to set; 0, the default, corresponds to the "main" diagonal, - a positive (negative) `k` giving the number of the diagonal above - (below) the main. - - Returns - ------- - out : ndarray - The 2-D output array. - - See Also - -------- - diag : MATLAB work-alike for 1-D and 2-D arrays. - diagonal : Return specified diagonals. - trace : Sum along diagonals. 
- - Examples - -------- - >>> np.diagflat([[1,2], [3,4]]) - array([[1, 0, 0, 0], - [0, 2, 0, 0], - [0, 0, 3, 0], - [0, 0, 0, 4]]) - - >>> np.diagflat([1,2], 1) - array([[0, 1, 0], - [0, 0, 2], - [0, 0, 0]]) - - """ - try: - wrap = v.__array_wrap__ - except AttributeError: - wrap = None - v = asarray(v).ravel() - s = len(v) - n = s + abs(k) - res = zeros((n, n), v.dtype) - if (k >= 0): - i = arange(0, n-k) - fi = i+k+i*n - else: - i = arange(0, n+k) - fi = i+(i-k)*n - res.flat[fi] = v - if not wrap: - return res - return wrap(res) - - -def tri(N, M=None, k=0, dtype=float): - """ - An array with ones at and below the given diagonal and zeros elsewhere. - - Parameters - ---------- - N : int - Number of rows in the array. - M : int, optional - Number of columns in the array. - By default, `M` is taken equal to `N`. - k : int, optional - The sub-diagonal at and below which the array is filled. - `k` = 0 is the main diagonal, while `k` < 0 is below it, - and `k` > 0 is above. The default is 0. - dtype : dtype, optional - Data type of the returned array. The default is float. - - Returns - ------- - tri : ndarray of shape (N, M) - Array with its lower triangle filled with ones and zero elsewhere; - in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise. - - Examples - -------- - >>> np.tri(3, 5, 2, dtype=int) - array([[1, 1, 1, 0, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 1]]) - - >>> np.tri(3, 5, -1) - array([[ 0., 0., 0., 0., 0.], - [ 1., 0., 0., 0., 0.], - [ 1., 1., 0., 0., 0.]]) - - """ - if M is None: - M = N - - m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), - arange(-k, M-k, dtype=_min_int(-k, M - k))) - - # Avoid making a copy if the requested type is already bool - m = m.astype(dtype, copy=False) - - return m - - -def tril(m, k=0): - """ - Lower triangle of an array. - - Return a copy of an array with elements above the `k`-th diagonal zeroed. - - Parameters - ---------- - m : array_like, shape (M, N) - Input array. - k : int, optional - Diagonal above which to zero elements. `k = 0` (the default) is the - main diagonal, `k < 0` is below it and `k > 0` is above. - - Returns - ------- - tril : ndarray, shape (M, N) - Lower triangle of `m`, of same shape and data-type as `m`. - - See Also - -------- - triu : same thing, only for the upper triangle - - Examples - -------- - >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) - array([[ 0, 0, 0], - [ 4, 0, 0], - [ 7, 8, 0], - [10, 11, 12]]) - - """ - m = asanyarray(m) - mask = tri(*m.shape[-2:], k=k, dtype=bool) - - return where(mask, m, zeros(1, m.dtype)) - - -def triu(m, k=0): - """ - Upper triangle of an array. - - Return a copy of a matrix with the elements below the `k`-th diagonal - zeroed. - - Please refer to the documentation for `tril` for further details. - - See Also - -------- - tril : lower triangle of an array - - Examples - -------- - >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) - array([[ 1, 2, 3], - [ 4, 5, 6], - [ 0, 8, 9], - [ 0, 0, 12]]) - - """ - m = asanyarray(m) - mask = tri(*m.shape[-2:], k=k-1, dtype=bool) - - return where(mask, zeros(1, m.dtype), m) - - -# Originally borrowed from John Hunter and matplotlib -def vander(x, N=None, increasing=False): - """ - Generate a Vandermonde matrix. - - The columns of the output matrix are powers of the input vector. The - order of the powers is determined by the `increasing` boolean argument. - Specifically, when `increasing` is False, the `i`-th output column is - the input vector raised element-wise to the power of ``N - i - 1``. 
Such - a matrix with a geometric progression in each row is named for Alexandre- - Theophile Vandermonde. - - Parameters - ---------- - x : array_like - 1-D input array. - N : int, optional - Number of columns in the output. If `N` is not specified, a square - array is returned (``N = len(x)``). - increasing : bool, optional - Order of the powers of the columns. If True, the powers increase - from left to right, if False (the default) they are reversed. - - .. versionadded:: 1.9.0 - - Returns - ------- - out : ndarray - Vandermonde matrix. If `increasing` is False, the first column is - ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is - True, the columns are ``x^0, x^1, ..., x^(N-1)``. - - See Also - -------- - polynomial.polynomial.polyvander - - Examples - -------- - >>> x = np.array([1, 2, 3, 5]) - >>> N = 3 - >>> np.vander(x, N) - array([[ 1, 1, 1], - [ 4, 2, 1], - [ 9, 3, 1], - [25, 5, 1]]) - - >>> np.column_stack([x**(N-1-i) for i in range(N)]) - array([[ 1, 1, 1], - [ 4, 2, 1], - [ 9, 3, 1], - [25, 5, 1]]) - - >>> x = np.array([1, 2, 3, 5]) - >>> np.vander(x) - array([[ 1, 1, 1, 1], - [ 8, 4, 2, 1], - [ 27, 9, 3, 1], - [125, 25, 5, 1]]) - >>> np.vander(x, increasing=True) - array([[ 1, 1, 1, 1], - [ 1, 2, 4, 8], - [ 1, 3, 9, 27], - [ 1, 5, 25, 125]]) - - The determinant of a square Vandermonde matrix is the product - of the differences between the values of the input vector: - - >>> np.linalg.det(np.vander(x)) - 48.000000000000043 - >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) - 48 - - """ - x = asarray(x) - if x.ndim != 1: - raise ValueError("x must be a one-dimensional array or sequence.") - if N is None: - N = len(x) - - v = empty((len(x), N), dtype=promote_types(x.dtype, int)) - tmp = v[:, ::-1] if not increasing else v - - if N > 0: - tmp[:, 0] = 1 - if N > 1: - tmp[:, 1:] = x[:, None] - multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) - - return v - - -def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): - """ - Compute the bi-dimensional histogram of two data samples. - - Parameters - ---------- - x : array_like, shape (N,) - An array containing the x coordinates of the points to be - histogrammed. - y : array_like, shape (N,) - An array containing the y coordinates of the points to be - histogrammed. - bins : int or [int, int] or array_like or [array, array], optional - The bin specification: - - * If int, the number of bins for the two dimensions (nx=ny=bins). - * If [int, int], the number of bins in each dimension - (nx, ny = bins). - * If array_like, the bin edges for the two dimensions - (x_edges=y_edges=bins). - * If [array, array], the bin edges in each dimension - (x_edges, y_edges = bins). - - range : array_like, shape(2,2), optional - The leftmost and rightmost edges of the bins along each dimension - (if not specified explicitly in the `bins` parameters): - ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range - will be considered outliers and not tallied in the histogram. - normed : bool, optional - If False, returns the number of samples in each bin. If True, - returns the bin density ``bin_count / sample_count / bin_area``. - weights : array_like, shape(N,), optional - An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. - Weights are normalized to 1 if `normed` is True. If `normed` is - False, the values of the returned histogram are equal to the sum of - the weights belonging to the samples falling into each bin. 
- - Returns - ------- - H : ndarray, shape(nx, ny) - The bi-dimensional histogram of samples `x` and `y`. Values in `x` - are histogrammed along the first dimension and values in `y` are - histogrammed along the second dimension. - xedges : ndarray, shape(nx,) - The bin edges along the first dimension. - yedges : ndarray, shape(ny,) - The bin edges along the second dimension. - - See Also - -------- - histogram : 1D histogram - histogramdd : Multidimensional histogram - - Notes - ----- - When `normed` is True, then the returned histogram is the sample - density, defined such that the sum over bins of the product - ``bin_value * bin_area`` is 1. - - Please note that the histogram does not follow the Cartesian convention - where `x` values are on the abscissa and `y` values on the ordinate - axis. Rather, `x` is histogrammed along the first dimension of the - array (vertical), and `y` along the second dimension of the array - (horizontal). This ensures compatibility with `histogramdd`. - - Examples - -------- - >>> import matplotlib as mpl - >>> import matplotlib.pyplot as plt - - Construct a 2D-histogram with variable bin width. First define the bin - edges: - - >>> xedges = [0, 1, 1.5, 3, 5] - >>> yedges = [0, 2, 3, 4, 6] - - Next we create a histogram H with random bin content: - - >>> x = np.random.normal(3, 1, 100) - >>> y = np.random.normal(1, 1, 100) - >>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges)) - - Or we fill the histogram H with a determined bin content: - - >>> H = np.ones((4, 4)).cumsum().reshape(4, 4) - >>> print H[::-1] # This shows the bin content in the order as plotted - [[ 13. 14. 15. 16.] - [ 9. 10. 11. 12.] - [ 5. 6. 7. 8.] - [ 1. 2. 3. 4.]] - - Imshow can only do an equidistant representation of bins: - - >>> fig = plt.figure(figsize=(7, 3)) - >>> ax = fig.add_subplot(131) - >>> ax.set_title('imshow: equidistant') - >>> im = plt.imshow(H, interpolation='nearest', origin='low', - extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) - - pcolormesh can display exact bin edges: - - >>> ax = fig.add_subplot(132) - >>> ax.set_title('pcolormesh: exact bin edges') - >>> X, Y = np.meshgrid(xedges, yedges) - >>> ax.pcolormesh(X, Y, H) - >>> ax.set_aspect('equal') - - NonUniformImage displays exact bin edges with interpolation: - - >>> ax = fig.add_subplot(133) - >>> ax.set_title('NonUniformImage: interpolated') - >>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear') - >>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1]) - >>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1]) - >>> im.set_data(xcenters, ycenters, H) - >>> ax.images.append(im) - >>> ax.set_xlim(xedges[0], xedges[-1]) - >>> ax.set_ylim(yedges[0], yedges[-1]) - >>> ax.set_aspect('equal') - >>> plt.show() - - """ - from numpy import histogramdd - - try: - N = len(bins) - except TypeError: - N = 1 - - if N != 1 and N != 2: - xedges = yedges = asarray(bins, float) - bins = [xedges, yedges] - hist, edges = histogramdd([x, y], bins, range, normed, weights) - return hist, edges[0], edges[1] - - -def mask_indices(n, mask_func, k=0): - """ - Return the indices to access (n, n) arrays, given a masking function. - - Assume `mask_func` is a function that, for a square array a of size - ``(n, n)`` with a possible offset argument `k`, when called as - ``mask_func(a, k)`` returns a new array with zeros in certain locations - (functions like `triu` or `tril` do precisely this). Then this function - returns the indices where the non-zero values would be located. 
- - Parameters - ---------- - n : int - The returned indices will be valid to access arrays of shape (n, n). - mask_func : callable - A function whose call signature is similar to that of `triu`, `tril`. - That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. - `k` is an optional argument to the function. - k : scalar - An optional argument which is passed through to `mask_func`. Functions - like `triu`, `tril` take a second argument that is interpreted as an - offset. - - Returns - ------- - indices : tuple of arrays. - The `n` arrays of indices corresponding to the locations where - ``mask_func(np.ones((n, n)), k)`` is True. - - See Also - -------- - triu, tril, triu_indices, tril_indices - - Notes - ----- - .. versionadded:: 1.4.0 - - Examples - -------- - These are the indices that would allow you to access the upper triangular - part of any 3x3 array: - - >>> iu = np.mask_indices(3, np.triu) - - For example, if `a` is a 3x3 array: - - >>> a = np.arange(9).reshape(3, 3) - >>> a - array([[0, 1, 2], - [3, 4, 5], - [6, 7, 8]]) - >>> a[iu] - array([0, 1, 2, 4, 5, 8]) - - An offset can be passed also to the masking function. This gets us the - indices starting on the first diagonal right of the main one: - - >>> iu1 = np.mask_indices(3, np.triu, 1) - - with which we now extract only three elements: - - >>> a[iu1] - array([1, 2, 5]) - - """ - m = ones((n, n), int) - a = mask_func(m, k) - return where(a != 0) - - -def tril_indices(n, k=0, m=None): - """ - Return the indices for the lower-triangle of an (n, m) array. - - Parameters - ---------- - n : int - The row dimension of the arrays for which the returned - indices will be valid. - k : int, optional - Diagonal offset (see `tril` for details). - m : int, optional - .. versionadded:: 1.9.0 - - The column dimension of the arrays for which the returned - arrays will be valid. - By default `m` is taken equal to `n`. - - - Returns - ------- - inds : tuple of arrays - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. - - See also - -------- - triu_indices : similar function, for upper-triangular. - mask_indices : generic function accepting an arbitrary mask function. - tril, triu - - Notes - ----- - .. versionadded:: 1.4.0 - - Examples - -------- - Compute two different sets of indices to access 4x4 arrays, one for the - lower triangular part starting at the main diagonal, and one starting two - diagonals further right: - - >>> il1 = np.tril_indices(4) - >>> il2 = np.tril_indices(4, 2) - - Here is how they can be used with a sample array: - - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - - Both for indexing: - - >>> a[il1] - array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) - - And for assigning values: - - >>> a[il1] = -1 - >>> a - array([[-1, 1, 2, 3], - [-1, -1, 6, 7], - [-1, -1, -1, 11], - [-1, -1, -1, -1]]) - - These cover almost the whole array (two diagonals right of the main one): - - >>> a[il2] = -10 - >>> a - array([[-10, -10, -10, 3], - [-10, -10, -10, -10], - [-10, -10, -10, -10], - [-10, -10, -10, -10]]) - - """ - return where(tri(n, m, k=k, dtype=bool)) - - -def tril_indices_from(arr, k=0): - """ - Return the indices for the lower-triangle of arr. - - See `tril_indices` for full details. - - Parameters - ---------- - arr : array_like - The indices will be valid for square arrays whose dimensions are - the same as arr. 
- k : int, optional - Diagonal offset (see `tril` for details). - - See Also - -------- - tril_indices, tril - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - if arr.ndim != 2: - raise ValueError("input array must be 2-d") - return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) - - -def triu_indices(n, k=0, m=None): - """ - Return the indices for the upper-triangle of an (n, m) array. - - Parameters - ---------- - n : int - The size of the arrays for which the returned indices will - be valid. - k : int, optional - Diagonal offset (see `triu` for details). - m : int, optional - .. versionadded:: 1.9.0 - - The column dimension of the arrays for which the returned - arrays will be valid. - By default `m` is taken equal to `n`. - - - Returns - ------- - inds : tuple, shape(2) of ndarrays, shape(`n`) - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. Can be used - to slice a ndarray of shape(`n`, `n`). - - See also - -------- - tril_indices : similar function, for lower-triangular. - mask_indices : generic function accepting an arbitrary mask function. - triu, tril - - Notes - ----- - .. versionadded:: 1.4.0 - - Examples - -------- - Compute two different sets of indices to access 4x4 arrays, one for the - upper triangular part starting at the main diagonal, and one starting two - diagonals further right: - - >>> iu1 = np.triu_indices(4) - >>> iu2 = np.triu_indices(4, 2) - - Here is how they can be used with a sample array: - - >>> a = np.arange(16).reshape(4, 4) - >>> a - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]) - - Both for indexing: - - >>> a[iu1] - array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) - - And for assigning values: - - >>> a[iu1] = -1 - >>> a - array([[-1, -1, -1, -1], - [ 4, -1, -1, -1], - [ 8, 9, -1, -1], - [12, 13, 14, -1]]) - - These cover only a small part of the whole array (two diagonals right - of the main one): - - >>> a[iu2] = -10 - >>> a - array([[ -1, -1, -10, -10], - [ 4, -1, -1, -10], - [ 8, 9, -1, -1], - [ 12, 13, 14, -1]]) - - """ - return where(~tri(n, m, k=k-1, dtype=bool)) - - -def triu_indices_from(arr, k=0): - """ - Return the indices for the upper-triangle of arr. - - See `triu_indices` for full details. - - Parameters - ---------- - arr : ndarray, shape(N, N) - The indices will be valid for square arrays. - k : int, optional - Diagonal offset (see `triu` for details). - - Returns - ------- - triu_indices_from : tuple, shape(2) of ndarray, shape(N) - Indices for the upper-triangle of `arr`. - - See Also - -------- - triu_indices, triu - - Notes - ----- - .. 
versionadded:: 1.4.0 - - """ - if arr.ndim != 2: - raise ValueError("input array must be 2-d") - return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/type_check.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/type_check.py deleted file mode 100644 index a45d0bd865c30..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/type_check.py +++ /dev/null @@ -1,605 +0,0 @@ -"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', - 'isreal', 'nan_to_num', 'real', 'real_if_close', - 'typename', 'asfarray', 'mintypecode', 'asscalar', - 'common_type'] - -import numpy.core.numeric as _nx -from numpy.core.numeric import asarray, asanyarray, array, isnan, \ - obj2sctype, zeros -from .ufunclike import isneginf, isposinf - -_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' - -def mintypecode(typechars,typeset='GDFgdf',default='d'): - """ - Return the character for the minimum-size type to which given types can - be safely cast. - - The returned type character must represent the smallest size dtype such - that an array of the returned type can handle the data from an array of - all types in `typechars` (or if `typechars` is an array, then its - dtype.char). - - Parameters - ---------- - typechars : list of str or array_like - If a list of strings, each string should represent a dtype. - If array_like, the character representation of the array dtype is used. - typeset : str or list of str, optional - The set of characters that the returned character is chosen from. - The default set is 'GDFgdf'. - default : str, optional - The default character, this is returned if none of the characters in - `typechars` matches a character in `typeset`. - - Returns - ------- - typechar : str - The character representing the minimum-size type that was found. - - See Also - -------- - dtype, sctype2char, maximum_sctype - - Examples - -------- - >>> np.mintypecode(['d', 'f', 'S']) - 'd' - >>> x = np.array([1.1, 2-3.j]) - >>> np.mintypecode(x) - 'D' - - >>> np.mintypecode('abceh', default='G') - 'G' - - """ - typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char - for t in typechars] - intersection = [t for t in typecodes if t in typeset] - if not intersection: - return default - if 'F' in intersection and 'd' in intersection: - return 'D' - l = [] - for t in intersection: - i = _typecodes_by_elsize.index(t) - l.append((i, t)) - l.sort() - return l[0][1] - -def asfarray(a, dtype=_nx.float_): - """ - Return an array converted to a float type. - - Parameters - ---------- - a : array_like - The input array. - dtype : str or dtype object, optional - Float type code to coerce input array `a`. If `dtype` is one of the - 'int' dtypes, it is replaced with float64. - - Returns - ------- - out : ndarray - The input `a` as a float ndarray. - - Examples - -------- - >>> np.asfarray([2, 3]) - array([ 2., 3.]) - >>> np.asfarray([2, 3], dtype='float') - array([ 2., 3.]) - >>> np.asfarray([2, 3], dtype='int8') - array([ 2., 3.]) - - """ - dtype = _nx.obj2sctype(dtype) - if not issubclass(dtype, _nx.inexact): - dtype = _nx.float_ - return asarray(a, dtype=dtype) - -def real(val): - """ - Return the real part of the elements of the array. - - Parameters - ---------- - val : array_like - Input array. - - Returns - ------- - out : ndarray - Output array. If `val` is real, the type of `val` is used for the - output. 
If `val` has complex elements, the returned type is float. - - See Also - -------- - real_if_close, imag, angle - - Examples - -------- - >>> a = np.array([1+2j, 3+4j, 5+6j]) - >>> a.real - array([ 1., 3., 5.]) - >>> a.real = 9 - >>> a - array([ 9.+2.j, 9.+4.j, 9.+6.j]) - >>> a.real = np.array([9, 8, 7]) - >>> a - array([ 9.+2.j, 8.+4.j, 7.+6.j]) - - """ - return asanyarray(val).real - -def imag(val): - """ - Return the imaginary part of the elements of the array. - - Parameters - ---------- - val : array_like - Input array. - - Returns - ------- - out : ndarray - Output array. If `val` is real, the type of `val` is used for the - output. If `val` has complex elements, the returned type is float. - - See Also - -------- - real, angle, real_if_close - - Examples - -------- - >>> a = np.array([1+2j, 3+4j, 5+6j]) - >>> a.imag - array([ 2., 4., 6.]) - >>> a.imag = np.array([8, 10, 12]) - >>> a - array([ 1. +8.j, 3.+10.j, 5.+12.j]) - - """ - return asanyarray(val).imag - -def iscomplex(x): - """ - Returns a bool array, where True if input element is complex. - - What is tested is whether the input has a non-zero imaginary part, not if - the input type is complex. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray of bools - Output array. - - See Also - -------- - isreal - iscomplexobj : Return True if x is a complex type or an array of complex - numbers. - - Examples - -------- - >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) - array([ True, False, False, False, False, True], dtype=bool) - - """ - ax = asanyarray(x) - if issubclass(ax.dtype.type, _nx.complexfloating): - return ax.imag != 0 - res = zeros(ax.shape, bool) - return +res # convert to array-scalar if needed - -def isreal(x): - """ - Returns a bool array, where True if input element is real. - - If element has complex type with zero complex part, the return value - for that element is True. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray, bool - Boolean array of same shape as `x`. - - See Also - -------- - iscomplex - isrealobj : Return True if x is not a complex type. - - Examples - -------- - >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j]) - array([False, True, True, True, True, False], dtype=bool) - - """ - return imag(x) == 0 - -def iscomplexobj(x): - """ - Check for a complex type or an array of complex numbers. - - The type of the input is checked, not the value. Even if the input - has an imaginary part equal to zero, `iscomplexobj` evaluates to True. - - Parameters - ---------- - x : any - The input can be of any type and shape. - - Returns - ------- - iscomplexobj : bool - The return value, True if `x` is of a complex type or has at least - one complex element. - - See Also - -------- - isrealobj, iscomplex - - Examples - -------- - >>> np.iscomplexobj(1) - False - >>> np.iscomplexobj(1+0j) - True - >>> np.iscomplexobj([3, 1+0j, True]) - True - - """ - return issubclass(asarray(x).dtype.type, _nx.complexfloating) - -def isrealobj(x): - """ - Return True if x is not a complex type or an array of complex numbers. - - The type of the input is checked, not the value. So even if the input - has an imaginary part equal to zero, `isrealobj` evaluates to False - if the data type is complex. - - Parameters - ---------- - x : any - The input can be of any type and shape. - - Returns - ------- - y : bool - The return value, False if `x` is of a complex type.
- - See Also - -------- - iscomplexobj, isreal - - Examples - -------- - >>> np.isrealobj(1) - True - >>> np.isrealobj(1+0j) - False - >>> np.isrealobj([3, 1+0j, True]) - False - - """ - return not issubclass(asarray(x).dtype.type, _nx.complexfloating) - -#----------------------------------------------------------------------------- - -def _getmaxmin(t): - from numpy.core import getlimits - f = getlimits.finfo(t) - return f.max, f.min - -def nan_to_num(x): - """ - Replace nan with zero and inf with finite numbers. - - Returns an array or scalar replacing Not a Number (NaN) with zero, - (positive) infinity with a very large number and negative infinity - with a very small (or negative) number. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - out : ndarray, float - Array with the same shape as `x` and dtype of the element in `x` with - the greatest precision. NaN is replaced by zero, and infinity - (-infinity) is replaced by the largest (smallest or most negative) - floating point value that fits in the output dtype. All finite numbers - are upcast to the output dtype (default float64). - - See Also - -------- - isinf : Shows which elements are positive or negative infinity. - isneginf : Shows which elements are negative infinity. - isposinf : Shows which elements are positive infinity. - isnan : Shows which elements are Not a Number (NaN). - isfinite : Shows which elements are finite (not NaN, not infinity) - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - - - Examples - -------- - >>> np.set_printoptions(precision=8) - >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) - >>> np.nan_to_num(x) - array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, - -1.28000000e+002, 1.28000000e+002]) - - """ - try: - t = x.dtype.type - except AttributeError: - t = obj2sctype(type(x)) - if issubclass(t, _nx.complexfloating): - return nan_to_num(x.real) + 1j * nan_to_num(x.imag) - else: - try: - y = x.copy() - except AttributeError: - y = array(x) - if not issubclass(t, _nx.integer): - if not y.shape: - y = array([x]) - scalar = True - else: - scalar = False - are_inf = isposinf(y) - are_neg_inf = isneginf(y) - are_nan = isnan(y) - maxf, minf = _getmaxmin(y.dtype.type) - y[are_nan] = 0 - y[are_inf] = maxf - y[are_neg_inf] = minf - if scalar: - y = y[0] - return y - -#----------------------------------------------------------------------------- - -def real_if_close(a,tol=100): - """ - If the input is complex, return a real array if all imaginary parts are - close to zero. - - "Close to zero" is defined as `tol` * (machine epsilon of the type for - `a`). - - Parameters - ---------- - a : array_like - Input array. - tol : float - Tolerance in machine epsilons for the complex part of the elements - in the array. - - Returns - ------- - out : ndarray - If `a` is real, the type of `a` is used for the output. If `a` - has complex elements, the returned type is float. - - See Also - -------- - real, imag, angle - - Notes - ----- - Machine epsilon varies from machine to machine and between data types - but Python floats on most platforms have a machine epsilon equal to - 2.2204460492503131e-16. You can use 'np.finfo(np.float).eps' to print - out the machine epsilon for floats.
- - Examples - -------- - >>> np.finfo(np.float).eps - 2.2204460492503131e-16 - - >>> np.real_if_close([2.1 + 4e-14j], tol=1000) - array([ 2.1]) - >>> np.real_if_close([2.1 + 4e-13j], tol=1000) - array([ 2.1 +4.00000000e-13j]) - - """ - a = asanyarray(a) - if not issubclass(a.dtype.type, _nx.complexfloating): - return a - if tol > 1: - from numpy.core import getlimits - f = getlimits.finfo(a.dtype.type) - tol = f.eps * tol - if _nx.allclose(a.imag, 0, atol=tol): - a = a.real - return a - - -def asscalar(a): - """ - Convert an array of size 1 to its scalar equivalent. - - Parameters - ---------- - a : ndarray - Input array of size 1. - - Returns - ------- - out : scalar - Scalar representation of `a`. The output data type is the same type - returned by the input's `item` method. - - Examples - -------- - >>> np.asscalar(np.array([24])) - 24 - - """ - return a.item() - -#----------------------------------------------------------------------------- - -_namefromtype = {'S1': 'character', - '?': 'bool', - 'b': 'signed char', - 'B': 'unsigned char', - 'h': 'short', - 'H': 'unsigned short', - 'i': 'integer', - 'I': 'unsigned integer', - 'l': 'long integer', - 'L': 'unsigned long integer', - 'q': 'long long integer', - 'Q': 'unsigned long long integer', - 'f': 'single precision', - 'd': 'double precision', - 'g': 'long precision', - 'F': 'complex single precision', - 'D': 'complex double precision', - 'G': 'complex long double precision', - 'S': 'string', - 'U': 'unicode', - 'V': 'void', - 'O': 'object' - } - -def typename(char): - """ - Return a description for the given data type code. - - Parameters - ---------- - char : str - Data type code. - - Returns - ------- - out : str - Description of the input data type code. - - See Also - -------- - dtype, typecodes - - Examples - -------- - >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', - ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] - >>> for typechar in typechars: - ... print typechar, ' : ', np.typename(typechar) - ... - S1 : character - ? : bool - B : unsigned char - D : complex double precision - G : complex long double precision - F : complex single precision - I : unsigned integer - H : unsigned short - L : unsigned long integer - O : object - Q : unsigned long long integer - S : string - U : unicode - V : void - b : signed char - d : double precision - g : long precision - f : single precision - i : integer - h : short - l : long integer - q : long long integer - - """ - return _namefromtype[char] - -#----------------------------------------------------------------------------- - -#determine the "minimum common type" for a group of arrays. -array_type = [[_nx.single, _nx.double, _nx.longdouble], - [_nx.csingle, _nx.cdouble, _nx.clongdouble]] -array_precision = {_nx.single: 0, - _nx.double: 1, - _nx.longdouble: 2, - _nx.csingle: 0, - _nx.cdouble: 1, - _nx.clongdouble: 2} -def common_type(*arrays): - """ - Return a scalar type which is common to the input arrays. - - The return type will always be an inexact (i.e. floating point) scalar - type, even if all the arrays are integer arrays. If one of the inputs is - an integer array, the minimum precision type that is returned is a - 64-bit floating point dtype. - - All input arrays can be safely cast to the returned dtype without loss - of information. - - Parameters - ---------- - array1, array2, ... : ndarrays - Input arrays. - - Returns - ------- - out : data type code - Data type code. 
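`real_if_close` measures `tol` in multiples of machine epsilon; the implementation above rescales it via `f.eps * tol` whenever `tol > 1`. A sketch of the resulting threshold behaviour:

    import numpy as np

    eps = np.finfo(np.float64).eps            # ~2.22e-16
    z = np.array([2.0 + 1e-14j])
    print(np.real_if_close(z, tol=1000))      # |imag| < 1000*eps -> real array [ 2.]
    print(np.real_if_close(z, tol=10))        # |imag| > 10*eps   -> left complex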
- - See Also - -------- - dtype, mintypecode - - Examples - -------- - >>> np.common_type(np.arange(2, dtype=np.float32)) - - >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) - - >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) - - - """ - is_complex = False - precision = 0 - for a in arrays: - t = a.dtype.type - if iscomplexobj(a): - is_complex = True - if issubclass(t, _nx.integer): - p = 1 - else: - p = array_precision.get(t, None) - if p is None: - raise TypeError("can't get common type for non-numeric array") - precision = max(precision, p) - if is_complex: - return array_type[1][precision] - else: - return array_type[0][precision] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/ufunclike.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/ufunclike.py deleted file mode 100644 index e91f64d0ef927..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/ufunclike.py +++ /dev/null @@ -1,177 +0,0 @@ -""" -Module of functions that are like ufuncs in acting on arrays and optionally -storing results in an output array. - -""" -from __future__ import division, absolute_import, print_function - -__all__ = ['fix', 'isneginf', 'isposinf'] - -import numpy.core.numeric as nx - -def fix(x, y=None): - """ - Round to nearest integer towards zero. - - Round an array of floats element-wise to nearest integer towards zero. - The rounded values are returned as floats. - - Parameters - ---------- - x : array_like - An array of floats to be rounded - y : ndarray, optional - Output array - - Returns - ------- - out : ndarray of floats - The array of rounded numbers - - See Also - -------- - trunc, floor, ceil - around : Round to given number of decimals - - Examples - -------- - >>> np.fix(3.14) - 3.0 - >>> np.fix(3) - 3.0 - >>> np.fix([2.1, 2.9, -2.1, -2.9]) - array([ 2., 2., -2., -2.]) - - """ - x = nx.asanyarray(x) - y1 = nx.floor(x) - y2 = nx.ceil(x) - if y is None: - y = nx.asanyarray(y1) - y[...] = nx.where(x >= 0, y1, y2) - return y - -def isposinf(x, y=None): - """ - Test element-wise for positive infinity, return result as bool array. - - Parameters - ---------- - x : array_like - The input array. - y : array_like, optional - A boolean array with the same shape as `x` to store the result. - - Returns - ------- - y : ndarray - A boolean array with the same dimensions as the input. - If second argument is not supplied then a boolean array is returned - with values True where the corresponding element of the input is - positive infinity and values False where the element of the input is - not positive infinity. - - If a second argument is supplied the result is stored there. If the - type of that array is a numeric type the result is represented as zeros - and ones, if the type is boolean then as False and True. - The return value `y` is then a reference to that array. - - See Also - -------- - isinf, isneginf, isfinite, isnan - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). - - Errors result if the second argument is also supplied when `x` is a - scalar input, or if first and second arguments have different shapes. 
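`common_type` walks the precision lattice defined by `array_type` and `array_precision` above: integer inputs count as double precision, and a single complex input flips the result into the complex row. For instance:

    import numpy as np

    print(np.common_type(np.array([1.0], dtype=np.float32)))               # float32
    print(np.common_type(np.arange(4), np.array([1.0], dtype=np.float32))) # float64 (ints count as double)
    print(np.common_type(np.arange(4), np.array([1j])))                    # complex128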
- - Examples - -------- - >>> np.isposinf(np.PINF) - array(True, dtype=bool) - >>> np.isposinf(np.inf) - array(True, dtype=bool) - >>> np.isposinf(np.NINF) - array(False, dtype=bool) - >>> np.isposinf([-np.inf, 0., np.inf]) - array([False, False, True], dtype=bool) - - >>> x = np.array([-np.inf, 0., np.inf]) - >>> y = np.array([2, 2, 2]) - >>> np.isposinf(x, y) - array([0, 0, 1]) - >>> y - array([0, 0, 1]) - - """ - if y is None: - x = nx.asarray(x) - y = nx.empty(x.shape, dtype=nx.bool_) - nx.logical_and(nx.isinf(x), ~nx.signbit(x), y) - return y - -def isneginf(x, y=None): - """ - Test element-wise for negative infinity, return result as bool array. - - Parameters - ---------- - x : array_like - The input array. - y : array_like, optional - A boolean array with the same shape and type as `x` to store the - result. - - Returns - ------- - y : ndarray - A boolean array with the same dimensions as the input. - If second argument is not supplied then a numpy boolean array is - returned with values True where the corresponding element of the - input is negative infinity and values False where the element of - the input is not negative infinity. - - If a second argument is supplied the result is stored there. If the - type of that array is a numeric type the result is represented as - zeros and ones, if the type is boolean then as False and True. The - return value `y` is then a reference to that array. - - See Also - -------- - isinf, isposinf, isnan, isfinite - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). - - Errors result if the second argument is also supplied when x is a scalar - input, or if first and second arguments have different shapes. - - Examples - -------- - >>> np.isneginf(np.NINF) - array(True, dtype=bool) - >>> np.isneginf(np.inf) - array(False, dtype=bool) - >>> np.isneginf(np.PINF) - array(False, dtype=bool) - >>> np.isneginf([-np.inf, 0., np.inf]) - array([ True, False, False], dtype=bool) - - >>> x = np.array([-np.inf, 0., np.inf]) - >>> y = np.array([2, 2, 2]) - >>> np.isneginf(x, y) - array([1, 0, 0]) - >>> y - array([1, 0, 0]) - - """ - if y is None: - x = nx.asarray(x) - y = nx.empty(x.shape, dtype=nx.bool_) - nx.logical_and(nx.isinf(x), nx.signbit(x), y) - return y diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/user_array.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/user_array.py deleted file mode 100644 index bb5bec628f122..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/user_array.py +++ /dev/null @@ -1,277 +0,0 @@ -""" -Standard container-class for easy multiple-inheritance. -Try to inherit from the ndarray instead of using this class as this is not -complete. 
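`fix`, `isposinf`, and `isneginf` all accept an optional output array, and the two infinity tests are just `isinf` combined with the sign bit, as the implementations above show. A usage sketch:

    import numpy as np

    x = np.array([-np.inf, 0., np.inf])
    out = np.empty(3, dtype=bool)    # preallocated result buffer
    np.isposinf(x, out)              # result is written into `out` (and returned)
    print(out)                       # [False False  True]
    print(np.isneginf(x))            # [ True False False]
    print(np.fix([2.9, -2.9]))       # [ 2. -2.] -- rounds toward zero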
- -""" -from __future__ import division, absolute_import, print_function - -from numpy.core import ( - array, asarray, absolute, add, subtract, multiply, divide, - remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, - bitwise_xor, invert, less, less_equal, not_equal, equal, greater, - greater_equal, shape, reshape, arange, sin, sqrt, transpose -) -from numpy.compat import long - - -class container(object): - - def __init__(self, data, dtype=None, copy=True): - self.array = array(data, dtype, copy=copy) - - def __repr__(self): - if len(self.shape) > 0: - return self.__class__.__name__ + repr(self.array)[len("array"):] - else: - return self.__class__.__name__ + "(" + repr(self.array) + ")" - - def __array__(self, t=None): - if t: - return self.array.astype(t) - return self.array - - # Array as sequence - def __len__(self): - return len(self.array) - - def __getitem__(self, index): - return self._rc(self.array[index]) - - def __getslice__(self, i, j): - return self._rc(self.array[i:j]) - - def __setitem__(self, index, value): - self.array[index] = asarray(value, self.dtype) - - def __setslice__(self, i, j, value): - self.array[i:j] = asarray(value, self.dtype) - - def __abs__(self): - return self._rc(absolute(self.array)) - - def __neg__(self): - return self._rc(-self.array) - - def __add__(self, other): - return self._rc(self.array + asarray(other)) - - __radd__ = __add__ - - def __iadd__(self, other): - add(self.array, other, self.array) - return self - - def __sub__(self, other): - return self._rc(self.array - asarray(other)) - - def __rsub__(self, other): - return self._rc(asarray(other) - self.array) - - def __isub__(self, other): - subtract(self.array, other, self.array) - return self - - def __mul__(self, other): - return self._rc(multiply(self.array, asarray(other))) - - __rmul__ = __mul__ - - def __imul__(self, other): - multiply(self.array, other, self.array) - return self - - def __div__(self, other): - return self._rc(divide(self.array, asarray(other))) - - def __rdiv__(self, other): - return self._rc(divide(asarray(other), self.array)) - - def __idiv__(self, other): - divide(self.array, other, self.array) - return self - - def __mod__(self, other): - return self._rc(remainder(self.array, other)) - - def __rmod__(self, other): - return self._rc(remainder(other, self.array)) - - def __imod__(self, other): - remainder(self.array, other, self.array) - return self - - def __divmod__(self, other): - return (self._rc(divide(self.array, other)), - self._rc(remainder(self.array, other))) - - def __rdivmod__(self, other): - return (self._rc(divide(other, self.array)), - self._rc(remainder(other, self.array))) - - def __pow__(self, other): - return self._rc(power(self.array, asarray(other))) - - def __rpow__(self, other): - return self._rc(power(asarray(other), self.array)) - - def __ipow__(self, other): - power(self.array, other, self.array) - return self - - def __lshift__(self, other): - return self._rc(left_shift(self.array, other)) - - def __rshift__(self, other): - return self._rc(right_shift(self.array, other)) - - def __rlshift__(self, other): - return self._rc(left_shift(other, self.array)) - - def __rrshift__(self, other): - return self._rc(right_shift(other, self.array)) - - def __ilshift__(self, other): - left_shift(self.array, other, self.array) - return self - - def __irshift__(self, other): - right_shift(self.array, other, self.array) - return self - - def __and__(self, other): - return self._rc(bitwise_and(self.array, other)) - - def __rand__(self, other): - 
return self._rc(bitwise_and(other, self.array)) - - def __iand__(self, other): - bitwise_and(self.array, other, self.array) - return self - - def __xor__(self, other): - return self._rc(bitwise_xor(self.array, other)) - - def __rxor__(self, other): - return self._rc(bitwise_xor(other, self.array)) - - def __ixor__(self, other): - bitwise_xor(self.array, other, self.array) - return self - - def __or__(self, other): - return self._rc(bitwise_or(self.array, other)) - - def __ror__(self, other): - return self._rc(bitwise_or(other, self.array)) - - def __ior__(self, other): - bitwise_or(self.array, other, self.array) - return self - - def __pos__(self): - return self._rc(self.array) - - def __invert__(self): - return self._rc(invert(self.array)) - - def _scalarfunc(self, func): - if len(self.shape) == 0: - return func(self[0]) - else: - raise TypeError( - "only rank-0 arrays can be converted to Python scalars.") - - def __complex__(self): - return self._scalarfunc(complex) - - def __float__(self): - return self._scalarfunc(float) - - def __int__(self): - return self._scalarfunc(int) - - def __long__(self): - return self._scalarfunc(long) - - def __hex__(self): - return self._scalarfunc(hex) - - def __oct__(self): - return self._scalarfunc(oct) - - def __lt__(self, other): - return self._rc(less(self.array, other)) - - def __le__(self, other): - return self._rc(less_equal(self.array, other)) - - def __eq__(self, other): - return self._rc(equal(self.array, other)) - - def __ne__(self, other): - return self._rc(not_equal(self.array, other)) - - def __gt__(self, other): - return self._rc(greater(self.array, other)) - - def __ge__(self, other): - return self._rc(greater_equal(self.array, other)) - - def copy(self): - return self._rc(self.array.copy()) - - def tostring(self): - return self.array.tostring() - - def byteswap(self): - return self._rc(self.array.byteswap()) - - def astype(self, typecode): - return self._rc(self.array.astype(typecode)) - - def _rc(self, a): - if len(shape(a)) == 0: - return a - else: - return self.__class__(a) - - def __array_wrap__(self, *args): - return self.__class__(args[0]) - - def __setattr__(self, attr, value): - if attr == 'array': - object.__setattr__(self, attr, value) - return - try: - self.array.__setattr__(attr, value) - except AttributeError: - object.__setattr__(self, attr, value) - - # Only called after other approaches fail. - def __getattr__(self, attr): - if (attr == 'array'): - return object.__getattribute__(self, attr) - return self.array.__getattribute__(attr) - -############################################################# -# Test of class container -############################################################# -if __name__ == '__main__': - temp = reshape(arange(10000), (100, 100)) - - ua = container(temp) - # new object created begin test - print(dir(ua)) - print(shape(ua), ua.shape) # I have changed Numeric.py - - ua_small = ua[:3, :5] - print(ua_small) - # this did not change ua[0,0], which is not normal behavior - ua_small[0, 0] = 10 - print(ua_small[0, 0], ua[0, 0]) - print(sin(ua_small) / 3. * 6. 
+ sqrt(ua_small ** 2)) - print(less(ua_small, 103), type(less(ua_small, 103))) - print(type(ua_small * reshape(arange(15), shape(ua_small)))) - print(reshape(ua_small, (5, 3))) - print(transpose(ua_small)) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/utils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/utils.py deleted file mode 100644 index df0052493da3f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/lib/utils.py +++ /dev/null @@ -1,1176 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import sys -import types -import re - -from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype -from numpy.core import ndarray, ufunc, asarray - -__all__ = [ - 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate', - 'deprecate_with_doc', 'get_include', 'info', 'source', 'who', - 'lookfor', 'byte_bounds', 'safe_eval' - ] - -def get_include(): - """ - Return the directory that contains the NumPy \\*.h header files. - - Extension modules that need to compile against NumPy should use this - function to locate the appropriate include directory. - - Notes - ----- - When using ``distutils``, for example in ``setup.py``. - :: - - import numpy as np - ... - Extension('extension_name', ... - include_dirs=[np.get_include()]) - ... - - """ - import numpy - if numpy.show_config is None: - # running from numpy source directory - d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include') - else: - # using installed numpy core headers - import numpy.core as core - d = os.path.join(os.path.dirname(core.__file__), 'include') - return d - - -def _set_function_name(func, name): - func.__name__ = name - return func - - -class _Deprecate(object): - """ - Decorator class to deprecate old functions. - - Refer to `deprecate` for details. - - See Also - -------- - deprecate - - """ - - def __init__(self, old_name=None, new_name=None, message=None): - self.old_name = old_name - self.new_name = new_name - self.message = message - - def __call__(self, func, *args, **kwargs): - """ - Decorator call. Refer to ``decorate``. - - """ - old_name = self.old_name - new_name = self.new_name - message = self.message - - import warnings - if old_name is None: - try: - old_name = func.__name__ - except AttributeError: - old_name = func.__name__ - if new_name is None: - depdoc = "`%s` is deprecated!" % old_name - else: - depdoc = "`%s` is deprecated, use `%s` instead!" % \ - (old_name, new_name) - - if message is not None: - depdoc += "\n" + message - - def newfunc(*args,**kwds): - """`arrayrange` is deprecated, use `arange` instead!""" - warnings.warn(depdoc, DeprecationWarning) - return func(*args, **kwds) - - newfunc = _set_function_name(newfunc, old_name) - doc = func.__doc__ - if doc is None: - doc = depdoc - else: - doc = '\n\n'.join([depdoc, doc]) - newfunc.__doc__ = doc - try: - d = func.__dict__ - except AttributeError: - pass - else: - newfunc.__dict__.update(d) - return newfunc - -def deprecate(*args, **kwargs): - """ - Issues a DeprecationWarning, adds warning to `old_name`'s - docstring, rebinds ``old_name.__name__`` and returns the new - function object. - - This function may also be used as a decorator. - - Parameters - ---------- - func : function - The function to be deprecated. - old_name : str, optional - The name of the function to be deprecated. Default is None, in - which case the name of `func` is used. - new_name : str, optional - The new name for the function. 
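The `container` class above forwards essentially every special method to the wrapped ndarray through `_rc`, so a subclass inherits the whole arithmetic surface for free. A minimal sketch against the numpy 1.9 layout (the subclass name is ours, purely for illustration):

    import numpy as np
    from numpy.lib.user_array import container

    class MyArray(container):
        """Gets arithmetic, slicing, and attribute forwarding from container."""

    m = MyArray(np.arange(4.0))
    print(m + 1)              # binary ops return MyArray instances via _rc()
    print(m[1:3])             # indexing is forwarded and re-wrapped
    print(m.shape, m.dtype)   # unknown attributes resolve against self.array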
Default is None, in which case the - deprecation message is that `old_name` is deprecated. If given, the - deprecation message is that `old_name` is deprecated and `new_name` - should be used instead. - message : str, optional - Additional explanation of the deprecation. Displayed in the - docstring after the warning. - - Returns - ------- - old_func : function - The deprecated function. - - Examples - -------- - Note that ``olduint`` returns a value after printing Deprecation - Warning: - - >>> olduint = np.deprecate(np.uint) - >>> olduint(6) - /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114: - DeprecationWarning: uint32 is deprecated - warnings.warn(str1, DeprecationWarning) - 6 - - """ - # Deprecate may be run as a function or as a decorator - # If run as a function, we initialise the decorator class - # and execute its __call__ method. - - if args: - fn = args[0] - args = args[1:] - - # backward compatibility -- can be removed - # after next release - if 'newname' in kwargs: - kwargs['new_name'] = kwargs.pop('newname') - if 'oldname' in kwargs: - kwargs['old_name'] = kwargs.pop('oldname') - - return _Deprecate(*args, **kwargs)(fn) - else: - return _Deprecate(*args, **kwargs) - -deprecate_with_doc = lambda msg: _Deprecate(message=msg) - - -#-------------------------------------------- -# Determine if two arrays can share memory -#-------------------------------------------- - -def byte_bounds(a): - """ - Returns pointers to the end-points of an array. - - Parameters - ---------- - a : ndarray - Input array. It must conform to the Python-side of the array - interface. - - Returns - ------- - (low, high) : tuple of 2 integers - The first integer is the first byte of the array, the second - integer is just past the last byte of the array. If `a` is not - contiguous it will not use every byte between the (`low`, `high`) - values. - - Examples - -------- - >>> I = np.eye(2, dtype='f'); I.dtype - dtype('float32') - >>> low, high = np.byte_bounds(I) - >>> high - low == I.size*I.itemsize - True - >>> I = np.eye(2, dtype='G'); I.dtype - dtype('complex192') - >>> low, high = np.byte_bounds(I) - >>> high - low == I.size*I.itemsize - True - - """ - ai = a.__array_interface__ - a_data = ai['data'][0] - astrides = ai['strides'] - ashape = ai['shape'] - bytes_a = asarray(a).dtype.itemsize - - a_low = a_high = a_data - if astrides is None: - # contiguous case - a_high += a.size * bytes_a - else: - for shape, stride in zip(ashape, astrides): - if stride < 0: - a_low += (shape-1)*stride - else: - a_high += (shape-1)*stride - a_high += bytes_a - return a_low, a_high - - -#----------------------------------------------------------------------------- -# Function for output and information on the variables used. -#----------------------------------------------------------------------------- - - -def who(vardict=None): - """ - Print the Numpy arrays in the given dictionary. - - If there is no dictionary passed in or `vardict` is None then returns - Numpy arrays in the globals() dictionary (all Numpy arrays in the - namespace). - - Parameters - ---------- - vardict : dict, optional - A dictionary possibly containing ndarrays. Default is globals(). - - Returns - ------- - out : None - Returns 'None'. - - Notes - ----- - Prints out the name, shape, bytes and type of all of the ndarrays - present in `vardict`. 
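`byte_bounds` above underpins the "can two arrays share memory" question flagged in the section comment. A conservative overlap test built on it (`may_share_data` is our name, not numpy API, and interval overlap can report false positives for strided views):

    import numpy as np

    def may_share_data(a, b):
        a_lo, a_hi = np.byte_bounds(a)
        b_lo, b_hi = np.byte_bounds(b)
        return a_lo < b_hi and b_lo < a_hi   # the byte intervals intersect

    x = np.arange(10)
    print(may_share_data(x, x[5:]))      # True  -- a view into the same buffer
    print(may_share_data(x, x.copy()))   # False -- a fresh allocation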
- - Examples - -------- - >>> a = np.arange(10) - >>> b = np.ones(20) - >>> np.who() - Name Shape Bytes Type - =========================================================== - a 10 40 int32 - b 20 160 float64 - Upper bound on total bytes = 200 - - >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', - ... 'idx':5} - >>> np.who(d) - Name Shape Bytes Type - =========================================================== - y 3 24 float64 - x 2 16 float64 - Upper bound on total bytes = 40 - - """ - if vardict is None: - frame = sys._getframe().f_back - vardict = frame.f_globals - sta = [] - cache = {} - for name in vardict.keys(): - if isinstance(vardict[name], ndarray): - var = vardict[name] - idv = id(var) - if idv in cache.keys(): - namestr = name + " (%s)" % cache[idv] - original = 0 - else: - cache[idv] = name - namestr = name - original = 1 - shapestr = " x ".join(map(str, var.shape)) - bytestr = str(var.nbytes) - sta.append([namestr, shapestr, bytestr, var.dtype.name, - original]) - - maxname = 0 - maxshape = 0 - maxbyte = 0 - totalbytes = 0 - for k in range(len(sta)): - val = sta[k] - if maxname < len(val[0]): - maxname = len(val[0]) - if maxshape < len(val[1]): - maxshape = len(val[1]) - if maxbyte < len(val[2]): - maxbyte = len(val[2]) - if val[4]: - totalbytes += int(val[2]) - - if len(sta) > 0: - sp1 = max(10, maxname) - sp2 = max(10, maxshape) - sp3 = max(10, maxbyte) - prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') - print(prval + "\n" + "="*(len(prval)+5) + "\n") - - for k in range(len(sta)): - val = sta[k] - print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4), - val[1], ' '*(sp2-len(val[1])+5), - val[2], ' '*(sp3-len(val[2])+5), - val[3])) - print("\nUpper bound on total bytes = %d" % totalbytes) - return - -#----------------------------------------------------------------------------- - - -# NOTE: pydoc defines a help function which works similarly to this -# except it uses a pager to take over the screen. - -# combine name and arguments and split to multiple lines of width -# characters. End lines on a comma and begin argument list indented with -# the rest of the arguments. -def _split_line(name, arguments, width): - firstwidth = len(name) - k = firstwidth - newstr = name - sepstr = ", " - arglist = arguments.split(sepstr) - for argument in arglist: - if k == firstwidth: - addstr = "" - else: - addstr = sepstr - k = k + len(argument) + len(addstr) - if k > width: - k = firstwidth + 1 + len(argument) - newstr = newstr + ",\n" + " "*(firstwidth+2) + argument - else: - newstr = newstr + addstr + argument - return newstr - -_namedict = None -_dictlist = None - -# Traverse all module directories underneath globals -# to see if something is defined -def _makenamedict(module='numpy'): - module = __import__(module, globals(), locals(), []) - thedict = {module.__name__:module.__dict__} - dictlist = [module.__name__] - totraverse = [module.__dict__] - while True: - if len(totraverse) == 0: - break - thisdict = totraverse.pop(0) - for x in thisdict.keys(): - if isinstance(thisdict[x], types.ModuleType): - modname = thisdict[x].__name__ - if modname not in dictlist: - moddict = thisdict[x].__dict__ - dictlist.append(modname) - totraverse.append(moddict) - thedict[modname] = moddict - return thedict, dictlist - - -def _info(obj, output=sys.stdout): - """Provide information about ndarray obj. - - Parameters - ---------- - obj: ndarray - Must be ndarray, not checked. - output: - Where printed output goes.
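`_split_line` above wraps a long signature at `width` characters, breaking after commas and indenting continuation lines under the name. A quick demonstration against the numpy 1.9 layout (note `_split_line` is a private helper, not public API):

    from numpy.lib.utils import _split_line

    sig = "(" + ", ".join("arg%d=None" % i for i in range(8)) + ")"
    print(_split_line("my_function", sig, 40))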
- - Notes - ----- - Copied over from the numarray module prior to its removal. - Adapted somewhat as only numpy is an option now. - - Called by info. - - """ - extra = "" - tic = "" - bp = lambda x: x - cls = getattr(obj, '__class__', type(obj)) - nm = getattr(cls, '__name__', cls) - strides = obj.strides - endian = obj.dtype.byteorder - - print("class: ", nm, file=output) - print("shape: ", obj.shape, file=output) - print("strides: ", strides, file=output) - print("itemsize: ", obj.itemsize, file=output) - print("aligned: ", bp(obj.flags.aligned), file=output) - print("contiguous: ", bp(obj.flags.contiguous), file=output) - print("fortran: ", obj.flags.fortran, file=output) - print( - "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), - file=output - ) - print("byteorder: ", end=' ', file=output) - if endian in ['|', '=']: - print("%s%s%s" % (tic, sys.byteorder, tic), file=output) - byteswap = False - elif endian == '>': - print("%sbig%s" % (tic, tic), file=output) - byteswap = sys.byteorder != "big" - else: - print("%slittle%s" % (tic, tic), file=output) - byteswap = sys.byteorder != "little" - print("byteswap: ", bp(byteswap), file=output) - print("type: %s" % obj.dtype, file=output) - - -def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): - """ - Get help information for a function, class, or module. - - Parameters - ---------- - object : object or str, optional - Input object or name to get information about. If `object` is a - numpy object, its docstring is given. If it is a string, available - modules are searched for matching objects. If None, information - about `info` itself is returned. - maxwidth : int, optional - Printing width. - output : file like object, optional - File like object that the output is written to, default is - ``stdout``. The object has to be opened in 'w' or 'a' mode. - toplevel : str, optional - Start search at this level. - - See Also - -------- - source, lookfor - - Notes - ----- - When used interactively with an object, ``np.info(obj)`` is equivalent - to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython - prompt. - - Examples - -------- - >>> np.info(np.polyval) # doctest: +SKIP - polyval(p, x) - Evaluate the polynomial p at x. - ... - - When using a string for `object` it is possible to get multiple results. - - >>> np.info('fft') # doctest: +SKIP - *** Found in numpy *** - Core FFT routines - ... - *** Found in numpy.fft *** - fft(a, n=None, axis=-1) - ... - *** Repeat reference found in numpy.fft.fftpack *** - *** Total of 3 references found. *** - - """ - global _namedict, _dictlist - # Local import to speed up numpy's import time. - import pydoc - import inspect - - if (hasattr(object, '_ppimport_importer') or - hasattr(object, '_ppimport_module')): - object = object._ppimport_module - elif hasattr(object, '_ppimport_attr'): - object = object._ppimport_attr - - if object is None: - info(info) - elif isinstance(object, ndarray): - _info(object, output=output) - elif isinstance(object, str): - if _namedict is None: - _namedict, _dictlist = _makenamedict(toplevel) - numfound = 0 - objlist = [] - for namestr in _dictlist: - try: - obj = _namedict[namestr][object] - if id(obj) in objlist: - print("\n " - "*** Repeat reference found in %s *** " % namestr, - file=output - ) - else: - objlist.append(id(obj)) - print(" *** Found in %s ***" % namestr, file=output) - info(obj) - print("-"*maxwidth, file=output) - numfound += 1 - except KeyError: - pass - if numfound == 0: - print("Help for %s not found." 
% object, file=output) - else: - print("\n " - "*** Total of %d references found. ***" % numfound, - file=output - ) - - elif inspect.isfunction(object): - name = object.__name__ - arguments = inspect.formatargspec(*inspect.getargspec(object)) - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - print(inspect.getdoc(object), file=output) - - elif inspect.isclass(object): - name = object.__name__ - arguments = "()" - try: - if hasattr(object, '__init__'): - arguments = inspect.formatargspec( - *inspect.getargspec(object.__init__.__func__) - ) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - except: - pass - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - doc1 = inspect.getdoc(object) - if doc1 is None: - if hasattr(object, '__init__'): - print(inspect.getdoc(object.__init__), file=output) - else: - print(inspect.getdoc(object), file=output) - - methods = pydoc.allmethods(object) - if methods != []: - print("\n\nMethods:\n", file=output) - for meth in methods: - if meth[0] == '_': - continue - thisobj = getattr(object, meth, None) - if thisobj is not None: - methstr, other = pydoc.splitdoc( - inspect.getdoc(thisobj) or "None" - ) - print(" %s -- %s" % (meth, methstr), file=output) - - elif (sys.version_info[0] < 3 - and isinstance(object, types.InstanceType)): - # check for __call__ method - # types.InstanceType is the type of the instances of oldstyle classes - print("Instance of class: ", object.__class__.__name__, file=output) - print(file=output) - if hasattr(object, '__call__'): - arguments = inspect.formatargspec( - *inspect.getargspec(object.__call__.__func__) - ) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - else: - arguments = "()" - - if hasattr(object, 'name'): - name = "%s" % object.name - else: - name = "" - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - doc = inspect.getdoc(object.__call__) - if doc is not None: - print(inspect.getdoc(object.__call__), file=output) - print(inspect.getdoc(object), file=output) - - else: - print(inspect.getdoc(object), file=output) - - elif inspect.ismethod(object): - name = object.__name__ - arguments = inspect.formatargspec( - *inspect.getargspec(object.__func__) - ) - arglist = arguments.split(', ') - if len(arglist) > 1: - arglist[1] = "("+arglist[1] - arguments = ", ".join(arglist[1:]) - else: - arguments = "()" - - if len(name+arguments) > maxwidth: - argstr = _split_line(name, arguments, maxwidth) - else: - argstr = name + arguments - - print(" " + argstr + "\n", file=output) - print(inspect.getdoc(object), file=output) - - elif hasattr(object, '__doc__'): - print(inspect.getdoc(object), file=output) - - -def source(object, output=sys.stdout): - """ - Print or write to a file the source code for a Numpy object. - - The source code is only returned for objects written in Python. Many - functions and classes are defined in C and will therefore not return - useful information. - - Parameters - ---------- - object : numpy object - Input object. This can be any object (function, class, module, - ...). 
- output : file object, optional - If `output` is not supplied then the source code is printed to screen - (sys.stdout). File object must be created with either write 'w' or - append 'a' modes. - - See Also - -------- - lookfor, info - - Examples - -------- - >>> np.source(np.interp) #doctest: +SKIP - In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py - def interp(x, xp, fp, left=None, right=None): - \"\"\".... (full docstring printed)\"\"\" - if isinstance(x, (float, int, number)): - return compiled_interp([x], xp, fp, left, right).item() - else: - return compiled_interp(x, xp, fp, left, right) - - The source code is only returned for objects written in Python. - - >>> np.source(np.array) #doctest: +SKIP - Not available for this object. - - """ - # Local import to speed up numpy's import time. - import inspect - try: - print("In file: %s\n" % inspect.getsourcefile(object), file=output) - print(inspect.getsource(object), file=output) - except: - print("Not available for this object.", file=output) - - -# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} -# where kind: "func", "class", "module", "object" -# and index: index in breadth-first namespace traversal -_lookfor_caches = {} - -# regexp whose match indicates that the string may contain a function -# signature -_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I) - -def lookfor(what, module=None, import_modules=True, regenerate=False, - output=None): - """ - Do a keyword search on docstrings. - - A list of objects that matched the search is displayed, - sorted by relevance. All given keywords need to be found in the - docstring for it to be returned as a result, but the order does - not matter. - - Parameters - ---------- - what : str - String containing words to look for. - module : str or list, optional - Name of module(s) whose docstrings to go through. - import_modules : bool, optional - Whether to import sub-modules in packages. Default is True. - regenerate : bool, optional - Whether to re-generate the docstring cache. Default is False. - output : file-like, optional - File-like object to write the output to. If omitted, use a pager. - - See Also - -------- - source, info - - Notes - ----- - Relevance is determined only roughly, by checking if the keywords occur - in the function name, at the start of a docstring, etc. - - Examples - -------- - >>> np.lookfor('binary representation') - Search results for 'binary representation' - ------------------------------------------ - numpy.binary_repr - Return the binary representation of the input number as a string. - numpy.core.setup_common.long_double_representation - Given a binary dump as given by GNU od -b, look for long double - numpy.base_repr - Return a string representation of a number in the given base system. - ... - - """ - import pydoc - - # Cache - cache = _lookfor_generate_cache(module, import_modules, regenerate) - - # Search - # XXX: maybe using a real stemming search engine would be better?
- found = [] - whats = str(what).lower().split() - if not whats: - return - - for name, (docstring, kind, index) in cache.items(): - if kind in ('module', 'object'): - # don't show modules or objects - continue - ok = True - doc = docstring.lower() - for w in whats: - if w not in doc: - ok = False - break - if ok: - found.append(name) - - # Relevance sort - # XXX: this is full Harrison-Stetson heuristics now, - # XXX: it probably could be improved - - kind_relevance = {'func': 1000, 'class': 1000, - 'module': -1000, 'object': -1000} - - def relevance(name, docstr, kind, index): - r = 0 - # do the keywords occur within the start of the docstring? - first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) - r += sum([200 for w in whats if w in first_doc]) - # do the keywords occur in the function name? - r += sum([30 for w in whats if w in name]) - # is the full name long? - r += -len(name) * 5 - # is the object of bad type? - r += kind_relevance.get(kind, -1000) - # is the object deep in namespace hierarchy? - r += -name.count('.') * 10 - r += max(-index / 100, -100) - return r - - def relevance_value(a): - return relevance(a, *cache[a]) - found.sort(key=relevance_value) - - # Pretty-print - s = "Search results for '%s'" % (' '.join(whats)) - help_text = [s, "-"*len(s)] - for name in found[::-1]: - doc, kind, ix = cache[name] - - doclines = [line.strip() for line in doc.strip().split("\n") - if line.strip()] - - # find a suitable short description - try: - first_doc = doclines[0].strip() - if _function_signature_re.search(first_doc): - first_doc = doclines[1].strip() - except IndexError: - first_doc = "" - help_text.append("%s\n %s" % (name, first_doc)) - - if not found: - help_text.append("Nothing found.") - - # Output - if output is not None: - output.write("\n".join(help_text)) - elif len(help_text) > 10: - pager = pydoc.getpager() - pager("\n".join(help_text)) - else: - print("\n".join(help_text)) - -def _lookfor_generate_cache(module, import_modules, regenerate): - """ - Generate docstring cache for given module. - - Parameters - ---------- - module : str, None, module - Module for which to generate docstring cache - import_modules : bool - Whether to import sub-modules in packages. - regenerate : bool - Re-generate the docstring cache - - Returns - ------- - cache : dict {obj_full_name: (docstring, kind, index), ...} - Docstring cache for the module, either cached one (regenerate=False) - or newly generated. - - """ - global _lookfor_caches - # Local import to speed up numpy's import time. - import inspect - - if sys.version_info[0] >= 3: - # In Python3 stderr, stdout are text files. 
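The relevance scoring above is a plain additive heuristic. A standalone restatement of it (a simplified copy for illustration, not the numpy internals themselves):

    def relevance(name, docstr, kind, index, whats):
        first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
        r = sum(200 for w in whats if w in first_doc)        # keyword near the top of the docstring
        r += sum(30 for w in whats if w in name)             # keyword in the object's name
        r -= len(name) * 5                                   # shorter names rank higher
        r += {'func': 1000, 'class': 1000}.get(kind, -1000)  # callables beat modules/objects
        r -= name.count('.') * 10                            # shallow namespace paths rank higher
        r += max(-index / 100, -100)                         # earlier discovery ranks higher
        return r

    print(relevance("numpy.binary_repr",
                    "Return the binary representation of the input number as a string.",
                    "func", 12, ["binary", "representation"]))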
- from io import StringIO - else: - from StringIO import StringIO - - if module is None: - module = "numpy" - - if isinstance(module, str): - try: - __import__(module) - except ImportError: - return {} - module = sys.modules[module] - elif isinstance(module, list) or isinstance(module, tuple): - cache = {} - for mod in module: - cache.update(_lookfor_generate_cache(mod, import_modules, - regenerate)) - return cache - - if id(module) in _lookfor_caches and not regenerate: - return _lookfor_caches[id(module)] - - # walk items and collect docstrings - cache = {} - _lookfor_caches[id(module)] = cache - seen = {} - index = 0 - stack = [(module.__name__, module)] - while stack: - name, item = stack.pop(0) - if id(item) in seen: - continue - seen[id(item)] = True - - index += 1 - kind = "object" - - if inspect.ismodule(item): - kind = "module" - try: - _all = item.__all__ - except AttributeError: - _all = None - - # import sub-packages - if import_modules and hasattr(item, '__path__'): - for pth in item.__path__: - for mod_path in os.listdir(pth): - this_py = os.path.join(pth, mod_path) - init_py = os.path.join(pth, mod_path, '__init__.py') - if (os.path.isfile(this_py) and - mod_path.endswith('.py')): - to_import = mod_path[:-3] - elif os.path.isfile(init_py): - to_import = mod_path - else: - continue - if to_import == '__init__': - continue - - try: - # Catch SystemExit, too - base_exc = BaseException - except NameError: - # Python 2.4 doesn't have BaseException - base_exc = Exception - - try: - old_stdout = sys.stdout - old_stderr = sys.stderr - try: - sys.stdout = StringIO() - sys.stderr = StringIO() - __import__("%s.%s" % (name, to_import)) - finally: - sys.stdout = old_stdout - sys.stderr = old_stderr - except base_exc: - continue - - for n, v in _getmembers(item): - try: - item_name = getattr(v, '__name__', "%s.%s" % (name, n)) - mod_name = getattr(v, '__module__', None) - except NameError: - # ref. SWIG's global cvars - # NameError: Unknown C global variable - item_name = "%s.%s" % (name, n) - mod_name = None - if '.' not in item_name and mod_name: - item_name = "%s.%s" % (mod_name, item_name) - - if not item_name.startswith(name + '.'): - # don't crawl "foreign" objects - if isinstance(v, ufunc): - # ... unless they are ufuncs - pass - else: - continue - elif not (inspect.ismodule(v) or _all is None or n in _all): - continue - stack.append(("%s.%s" % (name, n), v)) - elif inspect.isclass(item): - kind = "class" - for n, v in _getmembers(item): - stack.append(("%s.%s" % (name, n), v)) - elif hasattr(item, "__call__"): - kind = "func" - - try: - doc = inspect.getdoc(item) - except NameError: - # ref SWIG's NameError: Unknown C global variable - doc = None - if doc is not None: - cache[name] = (doc, kind, index) - - return cache - -def _getmembers(item): - import inspect - try: - members = inspect.getmembers(item) - except AttributeError: - members = [(x, getattr(item, x)) for x in dir(item) - if hasattr(item, x)] - return members - -#----------------------------------------------------------------------------- - -# The following SafeEval class and company are adapted from Michael Spencer's -# ASPN Python Cookbook recipe: -# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469 -# Accordingly it is mostly Copyright 2006 by Michael Spencer. -# The recipe, like most of the other ASPN Python Cookbook recipes was made -# available under the Python license. 
-# http://www.python.org/license - -# It has been modified to: -# * handle unary -/+ -# * support True/False/None -# * raise SyntaxError instead of a custom exception. - -class SafeEval(object): - """ - Object to evaluate constant string expressions. - - This includes strings with lists, dicts and tuples using the abstract - syntax tree created by ``compiler.parse``. - - For an example of usage, see `safe_eval`. - - See Also - -------- - safe_eval - - """ - - if sys.version_info[0] < 3: - def visit(self, node, **kw): - cls = node.__class__ - meth = getattr(self, 'visit'+cls.__name__, self.default) - return meth(node, **kw) - - def default(self, node, **kw): - raise SyntaxError("Unsupported source construct: %s" - % node.__class__) - - def visitExpression(self, node, **kw): - for child in node.getChildNodes(): - return self.visit(child, **kw) - - def visitConst(self, node, **kw): - return node.value - - def visitDict(self, node,**kw): - return dict( - [(self.visit(k), self.visit(v)) for k, v in node.items] - ) - - def visitTuple(self, node, **kw): - return tuple([self.visit(i) for i in node.nodes]) - - def visitList(self, node, **kw): - return [self.visit(i) for i in node.nodes] - - def visitUnaryAdd(self, node, **kw): - return +self.visit(node.getChildNodes()[0]) - - def visitUnarySub(self, node, **kw): - return -self.visit(node.getChildNodes()[0]) - - def visitName(self, node, **kw): - if node.name == 'False': - return False - elif node.name == 'True': - return True - elif node.name == 'None': - return None - else: - raise SyntaxError("Unknown name: %s" % node.name) - else: - - def visit(self, node): - cls = node.__class__ - meth = getattr(self, 'visit' + cls.__name__, self.default) - return meth(node) - - def default(self, node): - raise SyntaxError("Unsupported source construct: %s" - % node.__class__) - - def visitExpression(self, node): - return self.visit(node.body) - - def visitNum(self, node): - return node.n - - def visitStr(self, node): - return node.s - - def visitBytes(self, node): - return node.s - - def visitDict(self, node,**kw): - return dict([(self.visit(k), self.visit(v)) - for k, v in zip(node.keys, node.values)]) - - def visitTuple(self, node): - return tuple([self.visit(i) for i in node.elts]) - - def visitList(self, node): - return [self.visit(i) for i in node.elts] - - def visitUnaryOp(self, node): - import ast - if isinstance(node.op, ast.UAdd): - return +self.visit(node.operand) - elif isinstance(node.op, ast.USub): - return -self.visit(node.operand) - else: - raise SyntaxError("Unknown unary op: %r" % node.op) - - def visitName(self, node): - if node.id == 'False': - return False - elif node.id == 'True': - return True - elif node.id == 'None': - return None - else: - raise SyntaxError("Unknown name: %s" % node.id) - - def visitNameConstant(self, node): - return node.value - -def safe_eval(source): - """ - Protected string evaluation. - - Evaluate a string containing a Python literal expression without - allowing the execution of arbitrary non-literal code. - - Parameters - ---------- - source : str - The string to evaluate. - - Returns - ------- - obj : object - The result of evaluating `source`. - - Raises - ------ - SyntaxError - If the code has invalid Python syntax, or if it contains - non-literal code. - - Examples - -------- - >>> np.safe_eval('1') - 1 - >>> np.safe_eval('[1, 2, 3]') - [1, 2, 3] - >>> np.safe_eval('{"foo": ("bar", 10.0)}') - {'foo': ('bar', 10.0)} - - >>> np.safe_eval('import os') - Traceback (most recent call last): - ... 
- SyntaxError: invalid syntax - - >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') - Traceback (most recent call last): - ... - SyntaxError: Unsupported source construct: compiler.ast.CallFunc - - """ - # Local imports to speed up numpy's import time. - import warnings - - with warnings.catch_warnings(): - # compiler package is deprecated for 3.x, which is already solved - # here - warnings.simplefilter('ignore', DeprecationWarning) - try: - import compiler - except ImportError: - import ast as compiler - - walker = SafeEval() - try: - ast = compiler.parse(source, mode="eval") - except SyntaxError: - raise - try: - return walker.visit(ast) - except SyntaxError: - raise - -#----------------------------------------------------------------------------- diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/__init__.py deleted file mode 100644 index bc2a1ff6ce9fb..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/__init__.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Core Linear Algebra Tools -========================= - -=============== ========================================================== -Linear algebra basics -========================================================================== -norm Vector or matrix norm -inv Inverse of a square matrix -solve Solve a linear system of equations -det Determinant of a square matrix -slogdet Logarithm of the determinant of a square matrix -lstsq Solve linear least-squares problem -pinv Pseudo-inverse (Moore-Penrose) calculated using a singular - value decomposition -matrix_power Integer power of a square matrix -matrix_rank Calculate matrix rank using an SVD-based method -=============== ========================================================== - -=============== ========================================================== -Eigenvalues and decompositions -========================================================================== -eig Eigenvalues and vectors of a square matrix -eigh Eigenvalues and eigenvectors of a Hermitian matrix -eigvals Eigenvalues of a square matrix -eigvalsh Eigenvalues of a Hermitian matrix -qr QR decomposition of a matrix -svd Singular value decomposition of a matrix -cholesky Cholesky decomposition of a matrix -=============== ========================================================== - -=============== ========================================================== -Tensor operations -========================================================================== -tensorsolve Solve a linear tensor equation -tensorinv Calculate an inverse of a tensor -=============== ========================================================== - -=============== ========================================================== -Exceptions -========================================================================== -LinAlgError Indicates a failed linear algebra operation -=============== ========================================================== - -""" -from __future__ import division, absolute_import, print_function - -# To get sub-modules -from .info import __doc__ - -from .linalg import * - -from numpy.testing import Tester -test = Tester().test -bench = Tester().test diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/_umath_linalg.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/_umath_linalg.py deleted file mode 100644 index 389a85fc2fa93..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/_umath_linalg.py +++ /dev/null @@ -1,7 +0,0 @@ 
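Looking back at `safe_eval` above: it accepts only literal syntax (constants, tuples, lists, dicts, unary +/-, and True/False/None) and raises SyntaxError for anything else, which is the same contract the standard library's `ast.literal_eval` offers. A usage sketch:

    import numpy as np

    print(np.safe_eval("{'shape': (3, 4), 'order': 'F'}"))   # literals are accepted
    try:
        np.safe_eval("__import__('os')")                     # function calls are rejected
    except SyntaxError as err:
        print("rejected:", err)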
-def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, '_umath_linalg.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/info.py deleted file mode 100644 index 646ecda04aa95..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/info.py +++ /dev/null @@ -1,37 +0,0 @@ -"""\ -Core Linear Algebra Tools -------------------------- -Linear algebra basics: - -- norm Vector or matrix norm -- inv Inverse of a square matrix -- solve Solve a linear system of equations -- det Determinant of a square matrix -- lstsq Solve linear least-squares problem -- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular - value decomposition -- matrix_power Integer power of a square matrix - -Eigenvalues and decompositions: - -- eig Eigenvalues and vectors of a square matrix -- eigh Eigenvalues and eigenvectors of a Hermitian matrix -- eigvals Eigenvalues of a square matrix -- eigvalsh Eigenvalues of a Hermitian matrix -- qr QR decomposition of a matrix -- svd Singular value decomposition of a matrix -- cholesky Cholesky decomposition of a matrix - -Tensor operations: - -- tensorsolve Solve a linear tensor equation -- tensorinv Calculate an inverse of a tensor - -Exceptions: - -- LinAlgError Indicates a failed linear algebra operation - -""" -from __future__ import division, absolute_import, print_function - -depends = ['core'] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/lapack_lite.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/lapack_lite.py deleted file mode 100644 index 3b8026dadd909..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/lapack_lite.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'lapack_lite.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/linalg.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/linalg.py deleted file mode 100644 index 6b2299fe7a6c0..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/linalg.py +++ /dev/null @@ -1,2136 +0,0 @@ -"""Lite version of scipy.linalg. - -Notes ------ -This module is a lite version of the linalg.py module in SciPy which -contains high-level Python interface to the LAPACK library. The lite -version only accesses the following LAPACK functions: dgesv, zgesv, -dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, -zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. 
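Failures inside those LAPACK routines surface as the `LinAlgError` exception defined just below, and catching it is the supported way to handle singular or otherwise degenerate inputs. A sketch:

    import numpy as np

    try:
        np.linalg.inv(np.zeros((2, 2)))       # a singular matrix
    except np.linalg.LinAlgError as err:
        print("linear algebra failed:", err)  # -> "Singular matrix"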
-""" -from __future__ import division, absolute_import, print_function - - -__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', - 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', - 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', - 'LinAlgError'] - -import warnings - -from numpy.core import ( - array, asarray, zeros, empty, empty_like, transpose, intc, single, double, - csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot, - add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size, - finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs, - broadcast - ) -from numpy.lib import triu, asfarray -from numpy.linalg import lapack_lite, _umath_linalg -from numpy.matrixlib.defmatrix import matrix_power -from numpy.compat import asbytes - -# For Python2/3 compatibility -_N = asbytes('N') -_V = asbytes('V') -_A = asbytes('A') -_S = asbytes('S') -_L = asbytes('L') - -fortran_int = intc - -# Error object -class LinAlgError(Exception): - """ - Generic Python-exception-derived object raised by linalg functions. - - General purpose exception class, derived from Python's exception.Exception - class, programmatically raised in linalg functions when a Linear - Algebra-related condition would prevent further correct execution of the - function. - - Parameters - ---------- - None - - Examples - -------- - >>> from numpy import linalg as LA - >>> LA.inv(np.zeros((2,2))) - Traceback (most recent call last): - File "", line 1, in - File "...linalg.py", line 350, - in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) - File "...linalg.py", line 249, - in solve - raise LinAlgError('Singular matrix') - numpy.linalg.LinAlgError: Singular matrix - - """ - pass - -# Dealing with errors in _umath_linalg - -_linalg_error_extobj = None - -def _determine_error_states(): - global _linalg_error_extobj - errobj = geterrobj() - bufsize = errobj[0] - - with errstate(invalid='call', over='ignore', - divide='ignore', under='ignore'): - invalid_call_errmask = geterrobj()[1] - - _linalg_error_extobj = [bufsize, invalid_call_errmask, None] - -_determine_error_states() - -def _raise_linalgerror_singular(err, flag): - raise LinAlgError("Singular matrix") - -def _raise_linalgerror_nonposdef(err, flag): - raise LinAlgError("Matrix is not positive definite") - -def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): - raise LinAlgError("Eigenvalues did not converge") - -def _raise_linalgerror_svd_nonconvergence(err, flag): - raise LinAlgError("SVD did not converge") - -def get_linalg_error_extobj(callback): - extobj = list(_linalg_error_extobj) - extobj[2] = callback - return extobj - -def _makearray(a): - new = asarray(a) - wrap = getattr(a, "__array_prepare__", new.__array_wrap__) - return new, wrap - -def isComplexType(t): - return issubclass(t, complexfloating) - -_real_types_map = {single : single, - double : double, - csingle : single, - cdouble : double} - -_complex_types_map = {single : csingle, - double : cdouble, - csingle : csingle, - cdouble : cdouble} - -def _realType(t, default=double): - return _real_types_map.get(t, default) - -def _complexType(t, default=cdouble): - return _complex_types_map.get(t, default) - -def _linalgRealType(t): - """Cast the type t to either double or cdouble.""" - return double - -_complex_types_map = {single : csingle, - double : cdouble, - csingle : csingle, - cdouble : cdouble} - -def _commonType(*arrays): - # in lite version, use higher precision (always double or 
cdouble) - result_type = single - is_complex = False - for a in arrays: - if issubclass(a.dtype.type, inexact): - if isComplexType(a.dtype.type): - is_complex = True - rt = _realType(a.dtype.type, default=None) - if rt is None: - # unsupported inexact scalar - raise TypeError("array type %s is unsupported in linalg" % - (a.dtype.name,)) - else: - rt = double - if rt is double: - result_type = double - if is_complex: - t = cdouble - result_type = _complex_types_map[result_type] - else: - t = double - return t, result_type - - -# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). - -_fastCT = fastCopyAndTranspose - -def _to_native_byte_order(*arrays): - ret = [] - for arr in arrays: - if arr.dtype.byteorder not in ('=', '|'): - ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) - else: - ret.append(arr) - if len(ret) == 1: - return ret[0] - else: - return ret - -def _fastCopyAndTranspose(type, *arrays): - cast_arrays = () - for a in arrays: - if a.dtype.type is type: - cast_arrays = cast_arrays + (_fastCT(a),) - else: - cast_arrays = cast_arrays + (_fastCT(a.astype(type)),) - if len(cast_arrays) == 1: - return cast_arrays[0] - else: - return cast_arrays - -def _assertRank2(*arrays): - for a in arrays: - if len(a.shape) != 2: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'two-dimensional' % len(a.shape)) - -def _assertRankAtLeast2(*arrays): - for a in arrays: - if len(a.shape) < 2: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'at least two-dimensional' % len(a.shape)) - -def _assertSquareness(*arrays): - for a in arrays: - if max(a.shape) != min(a.shape): - raise LinAlgError('Array must be square') - -def _assertNdSquareness(*arrays): - for a in arrays: - if max(a.shape[-2:]) != min(a.shape[-2:]): - raise LinAlgError('Last 2 dimensions of the array must be square') - -def _assertFinite(*arrays): - for a in arrays: - if not (isfinite(a).all()): - raise LinAlgError("Array must not contain infs or NaNs") - -def _assertNoEmpty2d(*arrays): - for a in arrays: - if a.size == 0 and product(a.shape[-2:]) == 0: - raise LinAlgError("Arrays cannot be empty") - - -# Linear equations - -def tensorsolve(a, b, axes=None): - """ - Solve the tensor equation ``a x = b`` for x. - - It is assumed that all indices of `x` are summed over in the product, - together with the rightmost indices of `a`, as is done in, for example, - ``tensordot(a, x, axes=len(b.shape))``. - - Parameters - ---------- - a : array_like - Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals - the shape of that sub-tensor of `a` consisting of the appropriate - number of its rightmost indices, and must be such that - ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be - 'square'). - b : array_like - Right-hand tensor, which can be of any shape. - axes : tuple of ints, optional - Axes in `a` to reorder to the right, before inversion. - If None (default), no reordering is done. - - Returns - ------- - x : ndarray, shape Q - - Raises - ------ - LinAlgError - If `a` is singular or not 'square' (in the above sense). 
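One practical consequence of the `_commonType` logic above: this lite build always computes in double (or cdouble) precision and then casts back to the common result type, so single-precision inputs round-trip through LAPACK's double routines:

    import numpy as np

    a = np.eye(3, dtype=np.float32)
    b = np.ones(3, dtype=np.float32)
    x = np.linalg.solve(a, b)   # computed via the double-precision LAPACK path
    print(x.dtype)              # float32 -- cast back to the common result type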
- - See Also - -------- - tensordot, tensorinv, einsum - - Examples - -------- - >>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) - >>> b = np.random.randn(2*3, 4) - >>> x = np.linalg.tensorsolve(a, b) - >>> x.shape - (2, 3, 4) - >>> np.allclose(np.tensordot(a, x, axes=3), b) - True - - """ - a, wrap = _makearray(a) - b = asarray(b) - an = a.ndim - - if axes is not None: - allaxes = list(range(0, an)) - for k in axes: - allaxes.remove(k) - allaxes.insert(an, k) - a = a.transpose(allaxes) - - oldshape = a.shape[-(an-b.ndim):] - prod = 1 - for k in oldshape: - prod *= k - - a = a.reshape(-1, prod) - b = b.ravel() - res = wrap(solve(a, b)) - res.shape = oldshape - return res - -def solve(a, b): - """ - Solve a linear matrix equation, or system of linear scalar equations. - - Computes the "exact" solution, `x`, of the well-determined, i.e., full - rank, linear matrix equation `ax = b`. - - Parameters - ---------- - a : (..., M, M) array_like - Coefficient matrix. - b : {(..., M,), (..., M, K)}, array_like - Ordinate or "dependent variable" values. - - Returns - ------- - x : {(..., M,), (..., M, K)} ndarray - Solution to the system a x = b. Returned shape is identical to `b`. - - Raises - ------ - LinAlgError - If `a` is singular or not square. - - Notes - ----- - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The solutions are computed using LAPACK routine _gesv - - `a` must be square and of full-rank, i.e., all rows (or, equivalently, - columns) must be linearly independent; if either is not true, use - `lstsq` for the least-squares best "solution" of the - system/equation. - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, - FL, Academic Press, Inc., 1980, pg. 22. - - Examples - -------- - Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``: - - >>> a = np.array([[3,1], [1,2]]) - >>> b = np.array([9,8]) - >>> x = np.linalg.solve(a, b) - >>> x - array([ 2., 3.]) - - Check that the solution is correct: - - >>> np.allclose(np.dot(a, x), b) - True - - """ - a, _ = _makearray(a) - _assertRankAtLeast2(a) - _assertNdSquareness(a) - b, wrap = _makearray(b) - t, result_t = _commonType(a, b) - - # We use the b = (..., M,) logic, only if the number of extra dimensions - # match exactly - if b.ndim == a.ndim - 1: - if a.shape[-1] == 0 and b.shape[-1] == 0: - # Legal, but the ufunc cannot handle the 0-sized inner dims - # let the ufunc handle all wrong cases. - a = a.reshape(a.shape[:-1]) - bc = broadcast(a, b) - return wrap(empty(bc.shape, dtype=result_t)) - - gufunc = _umath_linalg.solve1 - else: - if b.size == 0: - if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0: - a = a[:,:1].reshape(a.shape[:-1] + (1,)) - bc = broadcast(a, b) - return wrap(empty(bc.shape, dtype=result_t)) - - gufunc = _umath_linalg.solve - - signature = 'DD->D' if isComplexType(t) else 'dd->d' - extobj = get_linalg_error_extobj(_raise_linalgerror_singular) - r = gufunc(a, b, signature=signature, extobj=extobj) - - return wrap(r.astype(result_t)) - - -def tensorinv(a, ind=2): - """ - Compute the 'inverse' of an N-dimensional array. - - The result is an inverse for `a` relative to the tensordot operation - ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, - ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the - tensordot operation. - - Parameters - ---------- - a : array_like - Tensor to 'invert'. Its shape must be 'square', i. 
e., - ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. - ind : int, optional - Number of first indices that are involved in the inverse sum. - Must be a positive integer, default is 2. - - Returns - ------- - b : ndarray - `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. - - Raises - ------ - LinAlgError - If `a` is singular or not 'square' (in the above sense). - - See Also - -------- - tensordot, tensorsolve - - Examples - -------- - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) - >>> ainv = np.linalg.tensorinv(a, ind=2) - >>> ainv.shape - (8, 3, 4, 6) - >>> b = np.random.randn(4, 6) - >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) - True - - >>> a = np.eye(4*6) - >>> a.shape = (24, 8, 3) - >>> ainv = np.linalg.tensorinv(a, ind=1) - >>> ainv.shape - (8, 3, 24) - >>> b = np.random.randn(24) - >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) - True - - """ - a = asarray(a) - oldshape = a.shape - prod = 1 - if ind > 0: - invshape = oldshape[ind:] + oldshape[:ind] - for k in oldshape[ind:]: - prod *= k - else: - raise ValueError("Invalid ind argument.") - a = a.reshape(prod, -1) - ia = inv(a) - return ia.reshape(*invshape) - - -# Matrix inversion - -def inv(a): - """ - Compute the (multiplicative) inverse of a matrix. - - Given a square matrix `a`, return the matrix `ainv` satisfying - ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. - - Parameters - ---------- - a : (..., M, M) array_like - Matrix to be inverted. - - Returns - ------- - ainv : (..., M, M) ndarray or matrix - (Multiplicative) inverse of the matrix `a`. - - Raises - ------ - LinAlgError - If `a` is not square or inversion fails. - - Notes - ----- - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - Examples - -------- - >>> from numpy.linalg import inv - >>> a = np.array([[1., 2.], [3., 4.]]) - >>> ainv = inv(a) - >>> np.allclose(np.dot(a, ainv), np.eye(2)) - True - >>> np.allclose(np.dot(ainv, a), np.eye(2)) - True - - If a is a matrix object, then the return value is a matrix as well: - - >>> ainv = inv(np.matrix(a)) - >>> ainv - matrix([[-2. , 1. ], - [ 1.5, -0.5]]) - - Inverses of several matrices can be computed at once: - - >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) - >>> inv(a) - array([[[-2. , 1. ], - [ 1.5, -0.5]], - [[-5. , 2. ], - [ 3. , -1. ]]]) - - """ - a, wrap = _makearray(a) - _assertRankAtLeast2(a) - _assertNdSquareness(a) - t, result_t = _commonType(a) - - if a.shape[-1] == 0: - # The inner array is 0x0, the ufunc cannot handle this case - return wrap(empty_like(a, dtype=result_t)) - - signature = 'D->D' if isComplexType(t) else 'd->d' - extobj = get_linalg_error_extobj(_raise_linalgerror_singular) - ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) - return wrap(ainv.astype(result_t)) - - -# Cholesky decomposition - -def cholesky(a): - """ - Cholesky decomposition. - - Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, - where `L` is lower-triangular and .H is the conjugate transpose operator - (which is the ordinary transpose if `a` is real-valued). `a` must be - Hermitian (symmetric if real-valued) and positive-definite. Only `L` is - actually returned. - - Parameters - ---------- - a : (..., M, M) array_like - Hermitian (symmetric if all elements are real), positive-definite - input matrix. - - Returns - ------- - L : (..., M, M) array_like - Upper or lower-triangular Cholesky factor of `a`. Returns a - matrix object if `a` is a matrix object. 
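Since the `inv` implementation above routes everything through a gufunc, whole stacks of matrices are inverted in one call; a quick self-check (a sketch, assuming NumPy >= 1.8 broadcasting semantics):

    import numpy as np

    stack = np.array([[[1., 2.], [3., 4.]],
                      [[2., 0.], [0., 2.]]])
    invs = np.linalg.inv(stack)            # inverts along the last two axes
    for m, mi in zip(stack, invs):
        assert np.allclose(np.dot(m, mi), np.eye(2))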
- - Raises - ------ - LinAlgError - If the decomposition fails, for example, if `a` is not - positive-definite. - - Notes - ----- - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The Cholesky decomposition is often used as a fast way of solving - - .. math:: A \\mathbf{x} = \\mathbf{b} - - (when `A` is both Hermitian/symmetric and positive-definite). - - First, we solve for :math:`\\mathbf{y}` in - - .. math:: L \\mathbf{y} = \\mathbf{b}, - - and then for :math:`\\mathbf{x}` in - - .. math:: L.H \\mathbf{x} = \\mathbf{y}. - - Examples - -------- - >>> A = np.array([[1,-2j],[2j,5]]) - >>> A - array([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> L = np.linalg.cholesky(A) - >>> L - array([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) - >>> np.dot(L, L.T.conj()) # verify that L * L.H = A - array([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? - >>> np.linalg.cholesky(A) # an ndarray object is returned - array([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) - >>> # But a matrix object is returned if A is a matrix object - >>> LA.cholesky(np.matrix(A)) - matrix([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) - - """ - extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef) - gufunc = _umath_linalg.cholesky_lo - a, wrap = _makearray(a) - _assertRankAtLeast2(a) - _assertNdSquareness(a) - t, result_t = _commonType(a) - signature = 'D->D' if isComplexType(t) else 'd->d' - return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t)) - -# QR decompostion - -def qr(a, mode='reduced'): - """ - Compute the qr factorization of a matrix. - - Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is - upper-triangular. - - Parameters - ---------- - a : array_like, shape (M, N) - Matrix to be factored. - mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional - If K = min(M, N), then - - 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) - 'complete' : returns q, r with dimensions (M, M), (M, N) - 'r' : returns r only with dimensions (K, N) - 'raw' : returns h, tau with dimensions (N, M), (K,) - 'full' : alias of 'reduced', deprecated - 'economic' : returns h from 'raw', deprecated. - - The options 'reduced', 'complete, and 'raw' are new in numpy 1.8, - see the notes for more information. The default is 'reduced' and to - maintain backward compatibility with earlier versions of numpy both - it and the old default 'full' can be omitted. Note that array h - returned in 'raw' mode is transposed for calling Fortran. The - 'economic' mode is deprecated. The modes 'full' and 'economic' may - be passed using only the first letter for backwards compatibility, - but all others must be spelled out. See the Notes for more - explanation. - - - Returns - ------- - q : ndarray of float or complex, optional - A matrix with orthonormal columns. When mode = 'complete' the - result is an orthogonal/unitary matrix depending on whether or not - a is real/complex. The determinant may be either +/- 1 in that - case. - r : ndarray of float or complex, optional - The upper-triangular matrix. - (h, tau) : ndarrays of np.double or np.cdouble, optional - The array h contains the Householder reflectors that generate q - along with r. The tau array contains scaling factors for the - reflectors. In the deprecated 'economic' mode only h is returned. - - Raises - ------ - LinAlgError - If factoring fails. 
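A quick shape check of the documented modes (a sketch; 'raw' returns a double-precision h transposed for LAPACK, as the Notes below explain):

    import numpy as np

    a = np.random.randn(5, 3)
    q, r = np.linalg.qr(a)                     # 'reduced' is the default
    print(q.shape, r.shape)                    # (5, 3) (3, 3)
    q2, r2 = np.linalg.qr(a, mode='complete')
    print(q2.shape, r2.shape)                  # (5, 5) (5, 3)
    print(np.allclose(r, np.linalg.qr(a, mode='r')))   # True: same r
    h, tau = np.linalg.qr(a, mode='raw')
    print(h.shape, tau.shape)                  # (3, 5) (3,)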
- - Notes - ----- - This is an interface to the LAPACK routines dgeqrf, zgeqrf, - dorgqr, and zungqr. - - For more information on the qr factorization, see for example: - http://en.wikipedia.org/wiki/QR_factorization - - Subclasses of `ndarray` are preserved except for the 'raw' mode. So if - `a` is of type `matrix`, all the return values will be matrices too. - - New 'reduced', 'complete', and 'raw' options for mode were added in - Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In - addition the options 'full' and 'economic' were deprecated. Because - 'full' was the previous default and 'reduced' is the new default, - backward compatibility can be maintained by letting `mode` default. - The 'raw' option was added so that LAPACK routines that can multiply - arrays by q using the Householder reflectors can be used. Note that in - this case the returned arrays are of type np.double or np.cdouble and - the h array is transposed to be FORTRAN compatible. No routines using - the 'raw' return are currently exposed by numpy, but some are available - in lapack_lite and just await the necessary work. - - Examples - -------- - >>> a = np.random.randn(9, 6) - >>> q, r = np.linalg.qr(a) - >>> np.allclose(a, np.dot(q, r)) # a does equal qr - True - >>> r2 = np.linalg.qr(a, mode='r') - >>> r3 = np.linalg.qr(a, mode='economic') - >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' - True - >>> # But only triu parts are guaranteed equal when mode='economic' - >>> np.allclose(r, np.triu(r3[:6,:6], k=0)) - True - - Example illustrating a common use of `qr`: solving of least squares - problems - - What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for - the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points - and you'll see that it should be y0 = 0, m = 1.) The answer is provided - by solving the over-determined matrix equation ``Ax = b``, where:: - - A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) - x = array([[y0], [m]]) - b = array([[1], [0], [2], [1]]) - - If A = qr such that q is orthonormal (which is always possible via - Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, - however, we simply use `lstsq`.) 
- - >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) - >>> A - array([[0, 1], - [1, 1], - [1, 1], - [2, 1]]) - >>> b = np.array([1, 0, 2, 1]) - >>> q, r = LA.qr(A) - >>> p = np.dot(q.T, b) - >>> np.dot(LA.inv(r), p) - array([ 1.1e-16, 1.0e+00]) - - """ - if mode not in ('reduced', 'complete', 'r', 'raw'): - if mode in ('f', 'full'): - msg = "".join(( - "The 'full' option is deprecated in favor of 'reduced'.\n", - "For backward compatibility let mode default.")) - warnings.warn(msg, DeprecationWarning) - mode = 'reduced' - elif mode in ('e', 'economic'): - msg = "The 'economic' option is deprecated.", - warnings.warn(msg, DeprecationWarning) - mode = 'economic' - else: - raise ValueError("Unrecognized mode '%s'" % mode) - - a, wrap = _makearray(a) - _assertRank2(a) - _assertNoEmpty2d(a) - m, n = a.shape - t, result_t = _commonType(a) - a = _fastCopyAndTranspose(t, a) - a = _to_native_byte_order(a) - mn = min(m, n) - tau = zeros((mn,), t) - if isComplexType(t): - lapack_routine = lapack_lite.zgeqrf - routine_name = 'zgeqrf' - else: - lapack_routine = lapack_lite.dgeqrf - routine_name = 'dgeqrf' - - # calculate optimal size of work data 'work' - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(m, n, a, m, tau, work, -1, 0) - if results['info'] != 0: - raise LinAlgError('%s returns %d' % (routine_name, results['info'])) - - # do qr decomposition - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - results = lapack_routine(m, n, a, m, tau, work, lwork, 0) - if results['info'] != 0: - raise LinAlgError('%s returns %d' % (routine_name, results['info'])) - - # handle modes that don't return q - if mode == 'r': - r = _fastCopyAndTranspose(result_t, a[:, :mn]) - return wrap(triu(r)) - - if mode == 'raw': - return a, tau - - if mode == 'economic': - if t != result_t : - a = a.astype(result_t) - return wrap(a.T) - - # generate q from a - if mode == 'complete' and m > n: - mc = m - q = empty((m, m), t) - else: - mc = mn - q = empty((n, m), t) - q[:n] = a - - if isComplexType(t): - lapack_routine = lapack_lite.zungqr - routine_name = 'zungqr' - else: - lapack_routine = lapack_lite.dorgqr - routine_name = 'dorgqr' - - # determine optimal lwork - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0) - if results['info'] != 0: - raise LinAlgError('%s returns %d' % (routine_name, results['info'])) - - # compute q - lwork = int(abs(work[0])) - work = zeros((lwork,), t) - results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0) - if results['info'] != 0: - raise LinAlgError('%s returns %d' % (routine_name, results['info'])) - - q = _fastCopyAndTranspose(result_t, q[:mc]) - r = _fastCopyAndTranspose(result_t, a[:, :mc]) - - return wrap(q), wrap(triu(r)) - - -# Eigenvalues - - -def eigvals(a): - """ - Compute the eigenvalues of a general matrix. - - Main difference between `eigvals` and `eig`: the eigenvectors aren't - returned. - - Parameters - ---------- - a : (..., M, M) array_like - A complex- or real-valued matrix whose eigenvalues will be computed. - - Returns - ------- - w : (..., M,) ndarray - The eigenvalues, each repeated according to its multiplicity. - They are not necessarily ordered, nor are they necessarily - real for real matrices. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eig : eigenvalues and right eigenvectors of general arrays - eigvalsh : eigenvalues of symmetric or Hermitian arrays. 
- eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. - - Notes - ----- - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - This is implemented using the _geev LAPACK routines which compute - the eigenvalues and eigenvectors of general square arrays. - - Examples - -------- - Illustration, using the fact that the eigenvalues of a diagonal matrix - are its diagonal elements, that multiplying a matrix on the left - by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose - of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, - if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as - ``A``: - - >>> from numpy import linalg as LA - >>> x = np.random.random() - >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) - >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) - (1.0, 1.0, 0.0) - - Now multiply a diagonal matrix by Q on one side and by Q.T on the other: - - >>> D = np.diag((-1,1)) - >>> LA.eigvals(D) - array([-1., 1.]) - >>> A = np.dot(Q, D) - >>> A = np.dot(A, Q.T) - >>> LA.eigvals(A) - array([ 1., -1.]) - - """ - a, wrap = _makearray(a) - _assertNoEmpty2d(a) - _assertRankAtLeast2(a) - _assertNdSquareness(a) - _assertFinite(a) - t, result_t = _commonType(a) - - extobj = get_linalg_error_extobj( - _raise_linalgerror_eigenvalues_nonconvergence) - signature = 'D->D' if isComplexType(t) else 'd->D' - w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) - - if not isComplexType(t): - if all(w.imag == 0): - w = w.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - return w.astype(result_t) - -def eigvalsh(a, UPLO='L'): - """ - Compute the eigenvalues of a Hermitian or real symmetric matrix. - - Main difference from eigh: the eigenvectors are not computed. - - Parameters - ---------- - a : (..., M, M) array_like - A complex- or real-valued matrix whose eigenvalues are to be - computed. - UPLO : {'L', 'U'}, optional - Same as `lower`, with 'L' for lower and 'U' for upper triangular. - Deprecated. - - Returns - ------- - w : (..., M,) ndarray - The eigenvalues, not necessarily ordered, each repeated according to - its multiplicity. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. - eigvals : eigenvalues of general real or complex arrays. - eig : eigenvalues and right eigenvectors of general real or complex - arrays. - - Notes - ----- - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. 
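The `eigvals` implementation above only drops back to a real dtype when every eigenvalue has zero imaginary part; a small illustration:

    import numpy as np

    print(np.linalg.eigvals(np.diag([1., 2.])).dtype)   # float64: all real
    rot = np.array([[0., -1.],
                    [1.,  0.]])                         # 90-degree rotation
    print(np.linalg.eigvals(rot).dtype)                 # complex128: +/-1j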
- - The eigenvalues are computed using LAPACK routines _ssyevd, _heevd - - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, -2j], [2j, 5]]) - >>> LA.eigvalsh(a) - array([ 0.17157288+0.j, 5.82842712+0.j]) - - """ - UPLO = UPLO.upper() - if UPLO not in ('L', 'U'): - raise ValueError("UPLO argument must be 'L' or 'U'") - - extobj = get_linalg_error_extobj( - _raise_linalgerror_eigenvalues_nonconvergence) - if UPLO == 'L': - gufunc = _umath_linalg.eigvalsh_lo - else: - gufunc = _umath_linalg.eigvalsh_up - - a, wrap = _makearray(a) - _assertNoEmpty2d(a) - _assertRankAtLeast2(a) - _assertNdSquareness(a) - t, result_t = _commonType(a) - signature = 'D->d' if isComplexType(t) else 'd->d' - w = gufunc(a, signature=signature, extobj=extobj) - return w.astype(_realType(result_t)) - -def _convertarray(a): - t, result_t = _commonType(a) - a = _fastCT(a.astype(t)) - return a, t, result_t - - -# Eigenvectors - - -def eig(a): - """ - Compute the eigenvalues and right eigenvectors of a square array. - - Parameters - ---------- - a : (..., M, M) array - Matrices for which the eigenvalues and right eigenvectors will - be computed - - Returns - ------- - w : (..., M) array - The eigenvalues, each repeated according to its multiplicity. - The eigenvalues are not necessarily ordered. The resulting - array will be always be of complex type. When `a` is real - the resulting eigenvalues will be real (0 imaginary part) or - occur in conjugate pairs - - v : (..., M, M) array - The normalized (unit "length") eigenvectors, such that the - column ``v[:,i]`` is the eigenvector corresponding to the - eigenvalue ``w[i]``. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric) - array. - - eigvals : eigenvalues of a non-symmetric array. - - Notes - ----- - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - This is implemented using the _geev LAPACK routines which compute - the eigenvalues and eigenvectors of general square arrays. - - The number `w` is an eigenvalue of `a` if there exists a vector - `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and - `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]`` - for :math:`i \\in \\{0,...,M-1\\}`. - - The array `v` of eigenvectors may not be of maximum rank, that is, some - of the columns may be linearly dependent, although round-off error may - obscure that fact. If the eigenvalues are all different, then theoretically - the eigenvectors are linearly independent. Likewise, the (complex-valued) - matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e., - if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate - transpose of `a`. - - Finally, it is emphasized that `v` consists of the *right* (as in - right-hand side) eigenvectors of `a`. A vector `y` satisfying - ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left* - eigenvector of `a`, and, in general, the left and right eigenvectors - of a matrix are not necessarily the (perhaps conjugate) transposes - of each other. - - References - ---------- - G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, - Academic Press, Inc., 1980, Various pp. - - Examples - -------- - >>> from numpy import linalg as LA - - (Almost) trivial example with real e-values and e-vectors. 
- - >>> w, v = LA.eig(np.diag((1, 2, 3))) - >>> w; v - array([ 1., 2., 3.]) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - - Real matrix possessing complex e-values and e-vectors; note that the - e-values are complex conjugates of each other. - - >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) - >>> w; v - array([ 1. + 1.j, 1. - 1.j]) - array([[ 0.70710678+0.j , 0.70710678+0.j ], - [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]) - - Complex-valued matrix with real e-values (but complex-valued e-vectors); - note that a.conj().T = a, i.e., a is Hermitian. - - >>> a = np.array([[1, 1j], [-1j, 1]]) - >>> w, v = LA.eig(a) - >>> w; v - array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0} - array([[ 0.00000000+0.70710678j, 0.70710678+0.j ], - [ 0.70710678+0.j , 0.00000000+0.70710678j]]) - - Be careful about round-off error! - - >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) - >>> # Theor. e-values are 1 +/- 1e-9 - >>> w, v = LA.eig(a) - >>> w; v - array([ 1., 1.]) - array([[ 1., 0.], - [ 0., 1.]]) - - """ - a, wrap = _makearray(a) - _assertRankAtLeast2(a) - _assertNdSquareness(a) - _assertFinite(a) - t, result_t = _commonType(a) - - extobj = get_linalg_error_extobj( - _raise_linalgerror_eigenvalues_nonconvergence) - signature = 'D->DD' if isComplexType(t) else 'd->DD' - w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) - - if not isComplexType(t) and all(w.imag == 0.0): - w = w.real - vt = vt.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - vt = vt.astype(result_t) - return w.astype(result_t), wrap(vt) - - -def eigh(a, UPLO='L'): - """ - Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix. - - Returns two objects, a 1-D array containing the eigenvalues of `a`, and - a 2-D square array or matrix (depending on the input type) of the - corresponding eigenvectors (in columns). - - Parameters - ---------- - A : (..., M, M) array - Hermitian/Symmetric matrices whose eigenvalues and - eigenvectors are to be computed. - UPLO : {'L', 'U'}, optional - Specifies whether the calculation is done with the lower triangular - part of `a` ('L', default) or the upper triangular part ('U'). - - Returns - ------- - w : (..., M) ndarray - The eigenvalues, not necessarily ordered. - v : {(..., M, M) ndarray, (..., M, M) matrix} - The column ``v[:, i]`` is the normalized eigenvector corresponding - to the eigenvalue ``w[i]``. Will return a matrix object if `a` is - a matrix object. - - Raises - ------ - LinAlgError - If the eigenvalue computation does not converge. - - See Also - -------- - eigvalsh : eigenvalues of symmetric or Hermitian arrays. - eig : eigenvalues and right eigenvectors for non-symmetric arrays. - eigvals : eigenvalues of non-symmetric arrays. - - Notes - ----- - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd, - _heevd - - The eigenvalues of real symmetric or complex Hermitian matrices are - always real. [1]_ The array `v` of (column) eigenvectors is unitary - and `a`, `w`, and `v` satisfy the equations - ``dot(a, v[:, i]) = w[i] * v[:, i]``. - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, - FL, Academic Press, Inc., 1980, pg. 222. 
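Because only the triangle selected by UPLO is referenced, the other half of the input may contain anything; an illustrative check using `eigvalsh`:

    import numpy as np

    full = np.array([[2., 1.],
                     [1., 3.]])
    upper_only = np.array([[2.,  1.],
                           [99., 3.]])   # lower triangle deliberately wrong
    print(np.allclose(np.linalg.eigvalsh(full),
                      np.linalg.eigvalsh(upper_only, UPLO='U')))   # True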
- - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, -2j], [2j, 5]]) - >>> a - array([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> w, v = LA.eigh(a) - >>> w; v - array([ 0.17157288, 5.82842712]) - array([[-0.92387953+0.j , -0.38268343+0.j ], - [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) - - >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair - array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j]) - >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair - array([ 0.+0.j, 0.+0.j]) - - >>> A = np.matrix(a) # what happens if input is a matrix object - >>> A - matrix([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) - >>> w, v = LA.eigh(A) - >>> w; v - array([ 0.17157288, 5.82842712]) - matrix([[-0.92387953+0.j , -0.38268343+0.j ], - [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) - - """ - UPLO = UPLO.upper() - if UPLO not in ('L', 'U'): - raise ValueError("UPLO argument must be 'L' or 'U'") - - a, wrap = _makearray(a) - _assertRankAtLeast2(a) - _assertNdSquareness(a) - t, result_t = _commonType(a) - - extobj = get_linalg_error_extobj( - _raise_linalgerror_eigenvalues_nonconvergence) - if UPLO == 'L': - gufunc = _umath_linalg.eigh_lo - else: - gufunc = _umath_linalg.eigh_up - - signature = 'D->dD' if isComplexType(t) else 'd->dd' - w, vt = gufunc(a, signature=signature, extobj=extobj) - w = w.astype(_realType(result_t)) - vt = vt.astype(result_t) - return w, wrap(vt) - - -# Singular value decomposition - -def svd(a, full_matrices=1, compute_uv=1): - """ - Singular Value Decomposition. - - Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v` - are unitary and `s` is a 1-d array of `a`'s singular values. - - Parameters - ---------- - a : (..., M, N) array_like - A real or complex matrix of shape (`M`, `N`) . - full_matrices : bool, optional - If True (default), `u` and `v` have the shapes (`M`, `M`) and - (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`) - and (`K`, `N`), respectively, where `K` = min(`M`, `N`). - compute_uv : bool, optional - Whether or not to compute `u` and `v` in addition to `s`. True - by default. - - Returns - ------- - u : { (..., M, M), (..., M, K) } array - Unitary matrices. The actual shape depends on the value of - ``full_matrices``. Only returned when ``compute_uv`` is True. - s : (..., K) array - The singular values for every matrix, sorted in descending order. - v : { (..., N, N), (..., K, N) } array - Unitary matrices. The actual shape depends on the value of - ``full_matrices``. Only returned when ``compute_uv`` is True. - - Raises - ------ - LinAlgError - If SVD computation does not converge. - - Notes - ----- - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. - - The decomposition is performed using LAPACK routine _gesdd - - The SVD is commonly written as ``a = U S V.H``. The `v` returned - by this function is ``V.H`` and ``u = U``. - - If ``U`` is a unitary matrix, it means that it - satisfies ``U.H = inv(U)``. - - The rows of `v` are the eigenvectors of ``a.H a``. The columns - of `u` are the eigenvectors of ``a a.H``. For row ``i`` in - `v` and column ``i`` in `u`, the corresponding eigenvalue is - ``s[i]**2``. - - If `a` is a `matrix` object (as opposed to an `ndarray`), then so - are all the return values. 
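As a supplement to the reconstruction examples below, the `compute_uv` flag skips the expensive computation of U and V while returning identical singular values (a sketch):

    import numpy as np

    a = np.random.randn(4, 3)
    s_only = np.linalg.svd(a, compute_uv=False)       # singular values only
    u, s, vt = np.linalg.svd(a, full_matrices=False)
    print(np.allclose(s_only, s))                     # True
    print(np.allclose(a, np.dot(u * s, vt)))          # True: u*s scales columns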
- - Examples - -------- - >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) - - Reconstruction based on full SVD: - - >>> U, s, V = np.linalg.svd(a, full_matrices=True) - >>> U.shape, V.shape, s.shape - ((9, 9), (6, 6), (6,)) - >>> S = np.zeros((9, 6), dtype=complex) - >>> S[:6, :6] = np.diag(s) - >>> np.allclose(a, np.dot(U, np.dot(S, V))) - True - - Reconstruction based on reduced SVD: - - >>> U, s, V = np.linalg.svd(a, full_matrices=False) - >>> U.shape, V.shape, s.shape - ((9, 6), (6, 6), (6,)) - >>> S = np.diag(s) - >>> np.allclose(a, np.dot(U, np.dot(S, V))) - True - - """ - a, wrap = _makearray(a) - _assertNoEmpty2d(a) - _assertRankAtLeast2(a) - t, result_t = _commonType(a) - - extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) - - m = a.shape[-2] - n = a.shape[-1] - if compute_uv: - if full_matrices: - if m < n: - gufunc = _umath_linalg.svd_m_f - else: - gufunc = _umath_linalg.svd_n_f - else: - if m < n: - gufunc = _umath_linalg.svd_m_s - else: - gufunc = _umath_linalg.svd_n_s - - signature = 'D->DdD' if isComplexType(t) else 'd->ddd' - u, s, vt = gufunc(a, signature=signature, extobj=extobj) - u = u.astype(result_t) - s = s.astype(_realType(result_t)) - vt = vt.astype(result_t) - return wrap(u), s, wrap(vt) - else: - if m < n: - gufunc = _umath_linalg.svd_m - else: - gufunc = _umath_linalg.svd_n - - signature = 'D->d' if isComplexType(t) else 'd->d' - s = gufunc(a, signature=signature, extobj=extobj) - s = s.astype(_realType(result_t)) - return s - -def cond(x, p=None): - """ - Compute the condition number of a matrix. - - This function is capable of returning the condition number using - one of seven different norms, depending on the value of `p` (see - Parameters below). - - Parameters - ---------- - x : (M, N) array_like - The matrix whose condition number is sought. - p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional - Order of the norm: - - ===== ============================ - p norm for matrices - ===== ============================ - None 2-norm, computed directly using the ``SVD`` - 'fro' Frobenius norm - inf max(sum(abs(x), axis=1)) - -inf min(sum(abs(x), axis=1)) - 1 max(sum(abs(x), axis=0)) - -1 min(sum(abs(x), axis=0)) - 2 2-norm (largest sing. value) - -2 smallest singular value - ===== ============================ - - inf means the numpy.inf object, and the Frobenius norm is - the root-of-sum-of-squares norm. - - Returns - ------- - c : {float, inf} - The condition number of the matrix. May be infinite. - - See Also - -------- - numpy.linalg.norm - - Notes - ----- - The condition number of `x` is defined as the norm of `x` times the - norm of the inverse of `x` [1]_; the norm can be the usual L2-norm - (root-of-sum-of-squares) or one of a number of other matrix norms. - - References - ---------- - .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, - Academic Press, Inc., 1980, pg. 285. 
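The default ``p=None`` branch in the implementation below is just the ratio of the extreme singular values; a one-line cross-check (illustrative):

    import numpy as np

    a = np.array([[1., 2.],
                  [3., 4.]])
    s = np.linalg.svd(a, compute_uv=False)
    print(np.isclose(np.linalg.cond(a), s[0] / s[-1]))   # True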
- - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) - >>> a - array([[ 1, 0, -1], - [ 0, 1, 0], - [ 1, 0, 1]]) - >>> LA.cond(a) - 1.4142135623730951 - >>> LA.cond(a, 'fro') - 3.1622776601683795 - >>> LA.cond(a, np.inf) - 2.0 - >>> LA.cond(a, -np.inf) - 1.0 - >>> LA.cond(a, 1) - 2.0 - >>> LA.cond(a, -1) - 1.0 - >>> LA.cond(a, 2) - 1.4142135623730951 - >>> LA.cond(a, -2) - 0.70710678118654746 - >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0)) - 0.70710678118654746 - - """ - x = asarray(x) # in case we have a matrix - if p is None: - s = svd(x, compute_uv=False) - return s[0]/s[-1] - else: - return norm(x, p)*norm(inv(x), p) - - -def matrix_rank(M, tol=None): - """ - Return matrix rank of array using SVD method - - Rank of the array is the number of SVD singular values of the array that are - greater than `tol`. - - Parameters - ---------- - M : {(M,), (M, N)} array_like - array of <=2 dimensions - tol : {None, float}, optional - threshold below which SVD values are considered zero. If `tol` is - None, and ``S`` is an array with singular values for `M`, and - ``eps`` is the epsilon value for datatype of ``S``, then `tol` is - set to ``S.max() * max(M.shape) * eps``. - - Notes - ----- - The default threshold to detect rank deficiency is a test on the magnitude - of the singular values of `M`. By default, we identify singular values less - than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with - the symbols defined above). This is the algorithm MATLAB uses [1]. It also - appears in *Numerical recipes* in the discussion of SVD solutions for linear - least squares [2]. - - This default threshold is designed to detect rank deficiency accounting for - the numerical errors of the SVD computation. Imagine that there is a column - in `M` that is an exact (in floating point) linear combination of other - columns in `M`. Computing the SVD on `M` will not produce a singular value - exactly equal to 0 in general: any difference of the smallest SVD value from - 0 will be caused by numerical imprecision in the calculation of the SVD. - Our threshold for small SVD values takes this numerical imprecision into - account, and the default threshold will detect such numerical rank - deficiency. The threshold may declare a matrix `M` rank deficient even if - the linear combination of some columns of `M` is not exactly equal to - another column of `M` but only numerically very close to another column of - `M`. - - We chose our default threshold because it is in wide use. Other thresholds - are possible. For example, elsewhere in the 2007 edition of *Numerical - recipes* there is an alternative threshold of ``S.max() * - np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe - this threshold as being based on "expected roundoff error" (p 71). - - The thresholds above deal with floating point roundoff error in the - calculation of the SVD. However, you may have more information about the - sources of error in `M` that would make you consider other tolerance values - to detect *effective* rank deficiency. The most useful measure of the - tolerance depends on the operations you intend to use on your matrix. For - example, if your data come from uncertain measurements with uncertainties - greater than floating point epsilon, choosing a tolerance near that - uncertainty may be preferable. The tolerance may be absolute if the - uncertainties are absolute rather than relative. 
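The effect of the default tolerance discussed above, in two lines (illustrative values):

    import numpy as np

    M = np.diag([1., 1e-20])
    print(np.linalg.matrix_rank(M))              # 1: 1e-20 is below S.max()*max(M.shape)*eps
    print(np.linalg.matrix_rank(M, tol=1e-30))   # 2: an explicit, looser tolerance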
-
-    References
-    ----------
-    .. [1] MATLAB reference documentation, "Rank"
-           http://www.mathworks.com/help/techdoc/ref/rank.html
-    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
-           "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
-           page 795.
-
-    Examples
-    --------
-    >>> from numpy.linalg import matrix_rank
-    >>> matrix_rank(np.eye(4)) # Full rank matrix
-    4
-    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
-    >>> matrix_rank(I)
-    3
-    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
-    1
-    >>> matrix_rank(np.zeros((4,)))
-    0
-    """
-    M = asarray(M)
-    if M.ndim > 2:
-        raise TypeError('array should have 2 or fewer dimensions')
-    if M.ndim < 2:
-        return int(not all(M==0))
-    S = svd(M, compute_uv=False)
-    if tol is None:
-        tol = S.max() * max(M.shape) * finfo(S.dtype).eps
-    return sum(S > tol)
-
-
-# Generalized inverse
-
-def pinv(a, rcond=1e-15):
-    """
-    Compute the (Moore-Penrose) pseudo-inverse of a matrix.
-
-    Calculate the generalized inverse of a matrix using its
-    singular-value decomposition (SVD) and including all
-    *large* singular values.
-
-    Parameters
-    ----------
-    a : (M, N) array_like
-        Matrix to be pseudo-inverted.
-    rcond : float
-        Cutoff for small singular values.
-        Singular values smaller (in modulus) than
-        `rcond` * largest_singular_value (again, in modulus)
-        are set to zero.
-
-    Returns
-    -------
-    B : (N, M) ndarray
-        The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
-        is `B`.
-
-    Raises
-    ------
-    LinAlgError
-        If the SVD computation does not converge.
-
-    Notes
-    -----
-    The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
-    defined as: "the matrix that 'solves' [the least-squares problem]
-    :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
-    :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
-
-    It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
-    value decomposition of A, then
-    :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
-    orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
-    of A's so-called singular values, (followed, typically, by
-    zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
-    consisting of the reciprocals of A's singular values
-    (again, followed by zeros). [1]_
-
-    References
-    ----------
-    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
-           FL, Academic Press, Inc., 1980, pp. 139-142.
-
-    Examples
-    --------
-    The following example checks that ``a * a+ * a == a`` and
-    ``a+ * a * a+ == a+``:
-
-    >>> a = np.random.randn(9, 6)
-    >>> B = np.linalg.pinv(a)
-    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
-    True
-    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
-    True
-
-    """
-    a, wrap = _makearray(a)
-    _assertNoEmpty2d(a)
-    a = a.conjugate()
-    u, s, vt = svd(a, 0)
-    m = u.shape[0]
-    n = vt.shape[1]
-    cutoff = rcond*maximum.reduce(s)
-    for i in range(min(n, m)):
-        if s[i] > cutoff:
-            s[i] = 1./s[i]
-        else:
-            s[i] = 0.
-    res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
-    return wrap(res)
-
-# Determinant
-
-def slogdet(a):
-    """
-    Compute the sign and (natural) logarithm of the determinant of an array.
-
-    If an array has a very small or very large determinant, then a call to
-    `det` may overflow or underflow. This routine is more robust against such
-    issues, because it computes the logarithm of the determinant rather than
-    the determinant itself.
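A complementary illustration of the overflow direction (the docstring's own example below covers underflow):

    import numpy as np

    big = np.eye(200) * 1e3
    print(np.linalg.det(big))       # inf: (1e3)**200 overflows a double
    print(np.linalg.slogdet(big))   # (1.0, 1381.55...): sign and log stay finite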
-
-    Parameters
-    ----------
-    a : (..., M, M) array_like
-        Input array, has to be a square 2-D array.
-
-    Returns
-    -------
-    sign : (...) array_like
-        A number representing the sign of the determinant. For a real matrix,
-        this is 1, 0, or -1. For a complex matrix, this is a complex number
-        with absolute value 1 (i.e., it is on the unit circle), or else 0.
-    logdet : (...) array_like
-        The natural log of the absolute value of the determinant.
-
-    If the determinant is zero, then `sign` will be 0 and `logdet` will be
-    -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
-
-    See Also
-    --------
-    det
-
-    Notes
-    -----
-    Broadcasting rules apply, see the `numpy.linalg` documentation for
-    details.
-
-    The determinant is computed via LU factorization using the LAPACK
-    routine z/dgetrf.
-
-    .. versionadded:: 1.6.0.
-
-    Examples
-    --------
-    The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
-
-    >>> a = np.array([[1, 2], [3, 4]])
-    >>> (sign, logdet) = np.linalg.slogdet(a)
-    >>> (sign, logdet)
-    (-1, 0.69314718055994529)
-    >>> sign * np.exp(logdet)
-    -2.0
-
-    Computing log-determinants for a stack of matrices:
-
-    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
-    >>> a.shape
-    (3, 2, 2)
-    >>> sign, logdet = np.linalg.slogdet(a)
-    >>> (sign, logdet)
-    (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
-    >>> sign * np.exp(logdet)
-    array([-2., -3., -8.])
-
-    This routine succeeds where ordinary `det` does not:
-
-    >>> np.linalg.det(np.eye(500) * 0.1)
-    0.0
-    >>> np.linalg.slogdet(np.eye(500) * 0.1)
-    (1, -1151.2925464970228)
-
-    """
-    a = asarray(a)
-    _assertNoEmpty2d(a)
-    _assertRankAtLeast2(a)
-    _assertNdSquareness(a)
-    t, result_t = _commonType(a)
-    real_t = _realType(result_t)
-    signature = 'D->Dd' if isComplexType(t) else 'd->dd'
-    sign, logdet = _umath_linalg.slogdet(a, signature=signature)
-    return sign.astype(result_t), logdet.astype(real_t)
-
-def det(a):
-    """
-    Compute the determinant of an array.
-
-    Parameters
-    ----------
-    a : (..., M, M) array_like
-        Input array to compute determinants for.
-
-    Returns
-    -------
-    det : (...) array_like
-        Determinant of `a`.
-
-    See Also
-    --------
-    slogdet : Another way of representing the determinant, more suitable
-              for large matrices where underflow/overflow may occur.
-
-    Notes
-    -----
-    Broadcasting rules apply, see the `numpy.linalg` documentation for
-    details.
-
-    The determinant is computed via LU factorization using the LAPACK
-    routine z/dgetrf.
-
-    Examples
-    --------
-    The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
-
-    >>> a = np.array([[1, 2], [3, 4]])
-    >>> np.linalg.det(a)
-    -2.0
-
-    Computing determinants for a stack of matrices:
-
-    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
-    >>> a.shape
-    (3, 2, 2)
-    >>> np.linalg.det(a)
-    array([-2., -3., -8.])
-
-    """
-    a = asarray(a)
-    _assertNoEmpty2d(a)
-    _assertRankAtLeast2(a)
-    _assertNdSquareness(a)
-    t, result_t = _commonType(a)
-    signature = 'D->D' if isComplexType(t) else 'd->d'
-    return _umath_linalg.det(a, signature=signature).astype(result_t)
-
-# Linear Least Squares
-
-def lstsq(a, b, rcond=-1):
-    """
-    Return the least-squares solution to a linear matrix equation.
-
-    Solves the equation `a x = b` by computing a vector `x` that
-    minimizes the Euclidean 2-norm `|| b - a x ||^2`.
The equation may - be under-, well-, or over- determined (i.e., the number of - linearly independent rows of `a` can be less than, equal to, or - greater than its number of linearly independent columns). If `a` - is square and of full rank, then `x` (but for round-off error) is - the "exact" solution of the equation. - - Parameters - ---------- - a : (M, N) array_like - "Coefficient" matrix. - b : {(M,), (M, K)} array_like - Ordinate or "dependent variable" values. If `b` is two-dimensional, - the least-squares solution is calculated for each of the `K` columns - of `b`. - rcond : float, optional - Cut-off ratio for small singular values of `a`. - Singular values are set to zero if they are smaller than `rcond` - times the largest singular value of `a`. - - Returns - ------- - x : {(N,), (N, K)} ndarray - Least-squares solution. If `b` is two-dimensional, - the solutions are in the `K` columns of `x`. - residuals : {(), (1,), (K,)} ndarray - Sums of residuals; squared Euclidean 2-norm for each column in - ``b - a*x``. - If the rank of `a` is < N or M <= N, this is an empty array. - If `b` is 1-dimensional, this is a (1,) shape array. - Otherwise the shape is (K,). - rank : int - Rank of matrix `a`. - s : (min(M, N),) ndarray - Singular values of `a`. - - Raises - ------ - LinAlgError - If computation does not converge. - - Notes - ----- - If `b` is a matrix, then all array results are returned as matrices. - - Examples - -------- - Fit a line, ``y = mx + c``, through some noisy data-points: - - >>> x = np.array([0, 1, 2, 3]) - >>> y = np.array([-1, 0.2, 0.9, 2.1]) - - By examining the coefficients, we see that the line should have a - gradient of roughly 1 and cut the y-axis at, more or less, -1. - - We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` - and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`: - - >>> A = np.vstack([x, np.ones(len(x))]).T - >>> A - array([[ 0., 1.], - [ 1., 1.], - [ 2., 1.], - [ 3., 1.]]) - - >>> m, c = np.linalg.lstsq(A, y)[0] - >>> print m, c - 1.0 -0.95 - - Plot the data along with the fitted line: - - >>> import matplotlib.pyplot as plt - >>> plt.plot(x, y, 'o', label='Original data', markersize=10) - >>> plt.plot(x, m*x + c, 'r', label='Fitted line') - >>> plt.legend() - >>> plt.show() - - """ - import math - a, _ = _makearray(a) - b, wrap = _makearray(b) - is_1d = len(b.shape) == 1 - if is_1d: - b = b[:, newaxis] - _assertRank2(a, b) - m = a.shape[0] - n = a.shape[1] - n_rhs = b.shape[1] - ldb = max(n, m) - if m != b.shape[0]: - raise LinAlgError('Incompatible dimensions') - t, result_t = _commonType(a, b) - result_real_t = _realType(result_t) - real_t = _linalgRealType(t) - bstar = zeros((ldb, n_rhs), t) - bstar[:b.shape[0], :n_rhs] = b.copy() - a, bstar = _fastCopyAndTranspose(t, a, bstar) - a, bstar = _to_native_byte_order(a, bstar) - s = zeros((min(m, n),), real_t) - nlvl = max( 0, int( math.log( float(min(m, n))/2. 
) ) + 1 ) - iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int) - if isComplexType(t): - lapack_routine = lapack_lite.zgelsd - lwork = 1 - rwork = zeros((lwork,), real_t) - work = zeros((lwork,), t) - results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, - 0, work, -1, rwork, iwork, 0) - lwork = int(abs(work[0])) - rwork = zeros((lwork,), real_t) - a_real = zeros((m, n), real_t) - bstar_real = zeros((ldb, n_rhs,), real_t) - results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m, - bstar_real, ldb, s, rcond, - 0, rwork, -1, iwork, 0) - lrwork = int(rwork[0]) - work = zeros((lwork,), t) - rwork = zeros((lrwork,), real_t) - results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, - 0, work, lwork, rwork, iwork, 0) - else: - lapack_routine = lapack_lite.dgelsd - lwork = 1 - work = zeros((lwork,), t) - results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, - 0, work, -1, iwork, 0) - lwork = int(work[0]) - work = zeros((lwork,), t) - results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, - 0, work, lwork, iwork, 0) - if results['info'] > 0: - raise LinAlgError('SVD did not converge in Linear Least Squares') - resids = array([], result_real_t) - if is_1d: - x = array(ravel(bstar)[:n], dtype=result_t, copy=True) - if results['rank'] == n and m > n: - if isComplexType(t): - resids = array([sum(abs(ravel(bstar)[n:])**2)], - dtype=result_real_t) - else: - resids = array([sum((ravel(bstar)[n:])**2)], - dtype=result_real_t) - else: - x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True) - if results['rank'] == n and m > n: - if isComplexType(t): - resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype( - result_real_t) - else: - resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype( - result_real_t) - - st = s[:min(n, m)].copy().astype(result_real_t) - return wrap(x), wrap(resids), results['rank'], st - - -def _multi_svd_norm(x, row_axis, col_axis, op): - """Compute the extreme singular values of the 2-D matrices in `x`. - - This is a private utility function used by numpy.linalg.norm(). - - Parameters - ---------- - x : ndarray - row_axis, col_axis : int - The axes of `x` that hold the 2-D matrices. - op : callable - This should be either numpy.amin or numpy.amax. - - Returns - ------- - result : float or ndarray - If `x` is 2-D, the return values is a float. - Otherwise, it is an array with ``x.ndim - 2`` dimensions. - The return values are either the minimum or maximum of the - singular values of the matrices, depending on whether `op` - is `numpy.amin` or `numpy.amax`. - - """ - if row_axis > col_axis: - row_axis -= 1 - y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1) - result = op(svd(y, compute_uv=0), axis=-1) - return result - - -def norm(x, ord=None, axis=None): - """ - Matrix or vector norm. - - This function is able to return one of seven different matrix norms, - or one of an infinite number of vector norms (described below), depending - on the value of the ``ord`` parameter. - - Parameters - ---------- - x : array_like - Input array. If `axis` is None, `x` must be 1-D or 2-D. - ord : {non-zero int, inf, -inf, 'fro'}, optional - Order of the norm (see table under ``Notes``). inf means numpy's - `inf` object. - axis : {int, 2-tuple of ints, None}, optional - If `axis` is an integer, it specifies the axis of `x` along which to - compute the vector norms. If `axis` is a 2-tuple, it specifies the - axes that hold 2-D matrices, and the matrix norms of these matrices - are computed. 
If `axis` is None then either a vector norm (when `x` - is 1-D) or a matrix norm (when `x` is 2-D) is returned. - - Returns - ------- - n : float or ndarray - Norm of the matrix or vector(s). - - Notes - ----- - For values of ``ord <= 0``, the result is, strictly speaking, not a - mathematical 'norm', but it may still be useful for various numerical - purposes. - - The following norms can be calculated: - - ===== ============================ ========================== - ord norm for matrices norm for vectors - ===== ============================ ========================== - None Frobenius norm 2-norm - 'fro' Frobenius norm -- - inf max(sum(abs(x), axis=1)) max(abs(x)) - -inf min(sum(abs(x), axis=1)) min(abs(x)) - 0 -- sum(x != 0) - 1 max(sum(abs(x), axis=0)) as below - -1 min(sum(abs(x), axis=0)) as below - 2 2-norm (largest sing. value) as below - -2 smallest singular value as below - other -- sum(abs(x)**ord)**(1./ord) - ===== ============================ ========================== - - The Frobenius norm is given by [1]_: - - :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` - - References - ---------- - .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, - Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 - - Examples - -------- - >>> from numpy import linalg as LA - >>> a = np.arange(9) - 4 - >>> a - array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) - >>> b = a.reshape((3, 3)) - >>> b - array([[-4, -3, -2], - [-1, 0, 1], - [ 2, 3, 4]]) - - >>> LA.norm(a) - 7.745966692414834 - >>> LA.norm(b) - 7.745966692414834 - >>> LA.norm(b, 'fro') - 7.745966692414834 - >>> LA.norm(a, np.inf) - 4 - >>> LA.norm(b, np.inf) - 9 - >>> LA.norm(a, -np.inf) - 0 - >>> LA.norm(b, -np.inf) - 2 - - >>> LA.norm(a, 1) - 20 - >>> LA.norm(b, 1) - 7 - >>> LA.norm(a, -1) - -4.6566128774142013e-010 - >>> LA.norm(b, -1) - 6 - >>> LA.norm(a, 2) - 7.745966692414834 - >>> LA.norm(b, 2) - 7.3484692283495345 - - >>> LA.norm(a, -2) - nan - >>> LA.norm(b, -2) - 1.8570331885190563e-016 - >>> LA.norm(a, 3) - 5.8480354764257312 - >>> LA.norm(a, -3) - nan - - Using the `axis` argument to compute vector norms: - - >>> c = np.array([[ 1, 2, 3], - ... [-1, 1, 4]]) - >>> LA.norm(c, axis=0) - array([ 1.41421356, 2.23606798, 5. ]) - >>> LA.norm(c, axis=1) - array([ 3.74165739, 4.24264069]) - >>> LA.norm(c, ord=1, axis=1) - array([6, 6]) - - Using the `axis` argument to compute matrix norms: - - >>> m = np.arange(8).reshape(2,2,2) - >>> LA.norm(m, axis=(1,2)) - array([ 3.74165739, 11.22497216]) - >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) - (3.7416573867739413, 11.224972160321824) - - """ - x = asarray(x) - - # Check the default case first and handle it immediately. - if ord is None and axis is None: - x = x.ravel(order='K') - if isComplexType(x.dtype.type): - sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) - else: - sqnorm = dot(x, x) - return sqrt(sqnorm) - - # Normalize the `axis` argument to a tuple. 
- nd = x.ndim - if axis is None: - axis = tuple(range(nd)) - elif not isinstance(axis, tuple): - axis = (axis,) - - if len(axis) == 1: - if ord == Inf: - return abs(x).max(axis=axis) - elif ord == -Inf: - return abs(x).min(axis=axis) - elif ord == 0: - # Zero norm - return (x != 0).sum(axis=axis) - elif ord == 1: - # special case for speedup - return add.reduce(abs(x), axis=axis) - elif ord is None or ord == 2: - # special case for speedup - s = (x.conj() * x).real - return sqrt(add.reduce(s, axis=axis)) - else: - try: - ord + 1 - except TypeError: - raise ValueError("Invalid norm order for vectors.") - if x.dtype.type is longdouble: - # Convert to a float type, so integer arrays give - # float results. Don't apply asfarray to longdouble arrays, - # because it will downcast to float64. - absx = abs(x) - else: - absx = x if isComplexType(x.dtype.type) else asfarray(x) - if absx.dtype is x.dtype: - absx = abs(absx) - else: - # if the type changed, we can safely overwrite absx - abs(absx, out=absx) - absx **= ord - return add.reduce(absx, axis=axis) ** (1.0 / ord) - elif len(axis) == 2: - row_axis, col_axis = axis - if not (-nd <= row_axis < nd and -nd <= col_axis < nd): - raise ValueError('Invalid axis %r for an array with shape %r' % - (axis, x.shape)) - if row_axis % nd == col_axis % nd: - raise ValueError('Duplicate axes given.') - if ord == 2: - return _multi_svd_norm(x, row_axis, col_axis, amax) - elif ord == -2: - return _multi_svd_norm(x, row_axis, col_axis, amin) - elif ord == 1: - if col_axis > row_axis: - col_axis -= 1 - return add.reduce(abs(x), axis=row_axis).max(axis=col_axis) - elif ord == Inf: - if row_axis > col_axis: - row_axis -= 1 - return add.reduce(abs(x), axis=col_axis).max(axis=row_axis) - elif ord == -1: - if col_axis > row_axis: - col_axis -= 1 - return add.reduce(abs(x), axis=row_axis).min(axis=col_axis) - elif ord == -Inf: - if row_axis > col_axis: - row_axis -= 1 - return add.reduce(abs(x), axis=col_axis).min(axis=row_axis) - elif ord in [None, 'fro', 'f']: - return sqrt(add.reduce((x.conj() * x).real, axis=axis)) - else: - raise ValueError("Invalid norm order for matrices.") - else: - raise ValueError("Improper number of dimensions to norm.") diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/setup.py deleted file mode 100644 index 282c3423c93c5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/setup.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import division, print_function - -import os -import sys - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - from numpy.distutils.system_info import get_info - config = Configuration('linalg', parent_package, top_path) - - config.add_data_dir('tests') - - # Configure lapack_lite - - src_dir = 'lapack_lite' - lapack_lite_src = [ - os.path.join(src_dir, 'python_xerbla.c'), - os.path.join(src_dir, 'zlapack_lite.c'), - os.path.join(src_dir, 'dlapack_lite.c'), - os.path.join(src_dir, 'blas_lite.c'), - os.path.join(src_dir, 'dlamch.c'), - os.path.join(src_dir, 'f2c_lite.c'), - os.path.join(src_dir, 'f2c.h'), - ] - - lapack_info = get_info('lapack_opt', 0) # and {} - def get_lapack_lite_sources(ext, build_dir): - if not lapack_info: - print("### Warning: Using unoptimized lapack ###") - return ext.depends[:-1] - else: - if sys.platform=='win32': - print("### Warning: python_xerbla.c is disabled ###") - return ext.depends[:1] - return ext.depends[:2] - - 
config.add_extension('lapack_lite', - sources = [get_lapack_lite_sources], - depends = ['lapack_litemodule.c'] + lapack_lite_src, - extra_info = lapack_info - ) - - # umath_linalg module - - config.add_extension('_umath_linalg', - sources = [get_lapack_lite_sources], - depends = ['umath_linalg.c.src'] + lapack_lite_src, - extra_info = lapack_info, - libraries = ['npymath'], - ) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_build.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_build.py deleted file mode 100644 index 0d237c81cb866..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_build.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from subprocess import call, PIPE, Popen -import sys -import re - -import numpy as np -from numpy.linalg import lapack_lite -from numpy.testing import TestCase, dec - -from numpy.compat import asbytes_nested - -class FindDependenciesLdd(object): - def __init__(self): - self.cmd = ['ldd'] - - try: - p = Popen(self.cmd, stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - except OSError: - raise RuntimeError("command %s cannot be run" % self.cmd) - - def get_dependencies(self, lfile): - p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - if not (p.returncode == 0): - raise RuntimeError("failed dependencies check for %s" % lfile) - - return stdout - - def grep_dependencies(self, lfile, deps): - stdout = self.get_dependencies(lfile) - - rdeps = dict([(dep, re.compile(dep)) for dep in deps]) - founds = [] - for l in stdout.splitlines(): - for k, v in rdeps.items(): - if v.search(l): - founds.append(k) - - return founds - -class TestF77Mismatch(TestCase): - @dec.skipif(not(sys.platform[:5] == 'linux'), - "Skipping fortran compiler mismatch on non Linux platform") - def test_lapack(self): - f = FindDependenciesLdd() - deps = f.grep_dependencies(lapack_lite.__file__, - asbytes_nested(['libg2c', 'libgfortran'])) - self.assertFalse(len(deps) > 1, -"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to -cause random crashes and wrong results. See numpy INSTALL.txt for more -information.""") diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_deprecations.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_deprecations.py deleted file mode 100644 index 13d244199733e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_deprecations.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Test deprecation and future warnings. - -""" -import numpy as np -from numpy.testing import assert_warns, run_module_suite - - -def test_qr_mode_full_future_warning(): - """Check mode='full' FutureWarning. - - In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were - deprecated. The release date will probably be sometime in the summer - of 2013. 
- - """ - a = np.eye(2) - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_linalg.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_linalg.py deleted file mode 100644 index 8edf36aa67e79..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_linalg.py +++ /dev/null @@ -1,1153 +0,0 @@ -""" Test functions for linalg module - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import itertools -import traceback - -import numpy as np -from numpy import array, single, double, csingle, cdouble, dot, identity -from numpy import multiply, atleast_2d, inf, asarray, matrix -from numpy import linalg -from numpy.linalg import matrix_power, norm, matrix_rank -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_allclose, run_module_suite, - dec -) - - -def ifthen(a, b): - return not a or b - - -def imply(a, b): - return not a or b - - -old_assert_almost_equal = assert_almost_equal - - -def assert_almost_equal(a, b, **kw): - if asarray(a).dtype.type in (single, csingle): - decimal = 6 - else: - decimal = 12 - old_assert_almost_equal(a, b, decimal=decimal, **kw) - - -def get_real_dtype(dtype): - return {single: single, double: double, - csingle: single, cdouble: double}[dtype] - - -def get_complex_dtype(dtype): - return {single: csingle, double: cdouble, - csingle: csingle, cdouble: cdouble}[dtype] - -def get_rtol(dtype): - # Choose a safe rtol - if dtype in (single, csingle): - return 1e-5 - else: - return 1e-11 - -class LinalgCase(object): - def __init__(self, name, a, b, exception_cls=None): - assert isinstance(name, str) - self.name = name - self.a = a - self.b = b - self.exception_cls = exception_cls - - def check(self, do): - if self.exception_cls is None: - do(self.a, self.b) - else: - assert_raises(self.exception_cls, do, self.a, self.b) - - def __repr__(self): - return "" % (self.name,) - - -# -# Base test cases -# - -np.random.seed(1234) - -SQUARE_CASES = [ - LinalgCase("single", - array([[1., 2.], [3., 4.]], dtype=single), - array([2., 1.], dtype=single)), - LinalgCase("double", - array([[1., 2.], [3., 4.]], dtype=double), - array([2., 1.], dtype=double)), - LinalgCase("double_2", - array([[1., 2.], [3., 4.]], dtype=double), - array([[2., 1., 4.], [3., 4., 6.]], dtype=double)), - LinalgCase("csingle", - array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=csingle), - array([2.+1j, 1.+2j], dtype=csingle)), - LinalgCase("cdouble", - array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble), - array([2.+1j, 1.+2j], dtype=cdouble)), - LinalgCase("cdouble_2", - array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble), - array([[2.+1j, 1.+2j, 1+3j], [1-2j, 1-3j, 1-6j]], dtype=cdouble)), - LinalgCase("empty", - atleast_2d(array([], dtype = double)), - atleast_2d(array([], dtype = double)), - linalg.LinAlgError), - LinalgCase("8x8", - np.random.rand(8, 8), - np.random.rand(8)), - LinalgCase("1x1", - np.random.rand(1, 1), - np.random.rand(1)), - LinalgCase("nonarray", - [[1, 2], [3, 4]], - [2, 1]), - LinalgCase("matrix_b_only", - array([[1., 2.], [3., 4.]]), - matrix([2., 1.]).T), - LinalgCase("matrix_a_and_b", - matrix([[1., 2.], [3., 4.]]), - 
matrix([2., 1.]).T), -] - -NONSQUARE_CASES = [ - LinalgCase("single_nsq_1", - array([[1., 2., 3.], [3., 4., 6.]], dtype=single), - array([2., 1.], dtype=single)), - LinalgCase("single_nsq_2", - array([[1., 2.], [3., 4.], [5., 6.]], dtype=single), - array([2., 1., 3.], dtype=single)), - LinalgCase("double_nsq_1", - array([[1., 2., 3.], [3., 4., 6.]], dtype=double), - array([2., 1.], dtype=double)), - LinalgCase("double_nsq_2", - array([[1., 2.], [3., 4.], [5., 6.]], dtype=double), - array([2., 1., 3.], dtype=double)), - LinalgCase("csingle_nsq_1", - array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=csingle), - array([2.+1j, 1.+2j], dtype=csingle)), - LinalgCase("csingle_nsq_2", - array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=csingle), - array([2.+1j, 1.+2j, 3.-3j], dtype=csingle)), - LinalgCase("cdouble_nsq_1", - array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble), - array([2.+1j, 1.+2j], dtype=cdouble)), - LinalgCase("cdouble_nsq_2", - array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble), - array([2.+1j, 1.+2j, 3.-3j], dtype=cdouble)), - LinalgCase("cdouble_nsq_1_2", - array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble), - array([[2.+1j, 1.+2j], [1-1j, 2-2j]], dtype=cdouble)), - LinalgCase("cdouble_nsq_2_2", - array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble), - array([[2.+1j, 1.+2j], [1-1j, 2-2j], [1-1j, 2-2j]], dtype=cdouble)), - LinalgCase("8x11", - np.random.rand(8, 11), - np.random.rand(11)), - LinalgCase("1x5", - np.random.rand(1, 5), - np.random.rand(5)), - LinalgCase("5x1", - np.random.rand(5, 1), - np.random.rand(1)), -] - -HERMITIAN_CASES = [ - LinalgCase("hsingle", - array([[1., 2.], [2., 1.]], dtype=single), - None), - LinalgCase("hdouble", - array([[1., 2.], [2., 1.]], dtype=double), - None), - LinalgCase("hcsingle", - array([[1., 2+3j], [2-3j, 1]], dtype=csingle), - None), - LinalgCase("hcdouble", - array([[1., 2+3j], [2-3j, 1]], dtype=cdouble), - None), - LinalgCase("hempty", - atleast_2d(array([], dtype = double)), - None, - linalg.LinAlgError), - LinalgCase("hnonarray", - [[1, 2], [2, 1]], - None), - LinalgCase("matrix_b_only", - array([[1., 2.], [2., 1.]]), - None), - LinalgCase("hmatrix_a_and_b", - matrix([[1., 2.], [2., 1.]]), - None), - LinalgCase("hmatrix_1x1", - np.random.rand(1, 1), - None), -] - - -# -# Gufunc test cases -# - -GENERALIZED_SQUARE_CASES = [] -GENERALIZED_NONSQUARE_CASES = [] -GENERALIZED_HERMITIAN_CASES = [] - -for tgt, src in ((GENERALIZED_SQUARE_CASES, SQUARE_CASES), - (GENERALIZED_NONSQUARE_CASES, NONSQUARE_CASES), - (GENERALIZED_HERMITIAN_CASES, HERMITIAN_CASES)): - for case in src: - if not isinstance(case.a, np.ndarray): - continue - - a = np.array([case.a, 2*case.a, 3*case.a]) - if case.b is None: - b = None - else: - b = np.array([case.b, 7*case.b, 6*case.b]) - new_case = LinalgCase(case.name + "_tile3", a, b, - case.exception_cls) - tgt.append(new_case) - - a = np.array([case.a]*2*3).reshape((3, 2) + case.a.shape) - if case.b is None: - b = None - else: - b = np.array([case.b]*2*3).reshape((3, 2) + case.b.shape) - new_case = LinalgCase(case.name + "_tile213", a, b, - case.exception_cls) - tgt.append(new_case) - -# -# Generate stride combination variations of the above -# - -def _stride_comb_iter(x): - """ - Generate cartesian product of strides for all axes - """ - - if not isinstance(x, np.ndarray): - yield x, "nop" - return - - stride_set = [(1,)]*x.ndim - stride_set[-1] = (1, 3, -4) - if x.ndim > 1: - stride_set[-2] = (1, 3, -4) - if x.ndim 
> 2: - stride_set[-3] = (1, -4) - - for repeats in itertools.product(*tuple(stride_set)): - new_shape = [abs(a*b) for a, b in zip(x.shape, repeats)] - slices = tuple([slice(None, None, repeat) for repeat in repeats]) - - # new array with different strides, but same data - xi = np.empty(new_shape, dtype=x.dtype) - xi.view(np.uint32).fill(0xdeadbeef) - xi = xi[slices] - xi[...] = x - xi = xi.view(x.__class__) - assert np.all(xi == x) - yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) - - # generate also zero strides if possible - if x.ndim >= 1 and x.shape[-1] == 1: - s = list(x.strides) - s[-1] = 0 - xi = np.lib.stride_tricks.as_strided(x, strides=s) - yield xi, "stride_xxx_0" - if x.ndim >= 2 and x.shape[-2] == 1: - s = list(x.strides) - s[-2] = 0 - xi = np.lib.stride_tricks.as_strided(x, strides=s) - yield xi, "stride_xxx_0_x" - if x.ndim >= 2 and x.shape[:-2] == (1, 1): - s = list(x.strides) - s[-1] = 0 - s[-2] = 0 - xi = np.lib.stride_tricks.as_strided(x, strides=s) - yield xi, "stride_xxx_0_0" - -for src in (SQUARE_CASES, - NONSQUARE_CASES, - HERMITIAN_CASES, - GENERALIZED_SQUARE_CASES, - GENERALIZED_NONSQUARE_CASES, - GENERALIZED_HERMITIAN_CASES): - - new_cases = [] - for case in src: - for a, a_tag in _stride_comb_iter(case.a): - for b, b_tag in _stride_comb_iter(case.b): - new_case = LinalgCase(case.name + "_" + a_tag + "_" + b_tag, a, b, - exception_cls=case.exception_cls) - new_cases.append(new_case) - src.extend(new_cases) - - -# -# Test different routines against the above cases -# - -def _check_cases(func, cases): - for case in cases: - try: - case.check(func) - except Exception: - msg = "In test case: %r\n\n" % case - msg += traceback.format_exc() - raise AssertionError(msg) - -class LinalgTestCase(object): - def test_sq_cases(self): - _check_cases(self.do, SQUARE_CASES) - - -class LinalgNonsquareTestCase(object): - def test_sq_cases(self): - _check_cases(self.do, NONSQUARE_CASES) - - -class LinalgGeneralizedTestCase(object): - @dec.slow - def test_generalized_sq_cases(self): - _check_cases(self.do, GENERALIZED_SQUARE_CASES) - - -class LinalgGeneralizedNonsquareTestCase(object): - @dec.slow - def test_generalized_nonsq_cases(self): - _check_cases(self.do, GENERALIZED_NONSQUARE_CASES) - - -class HermitianTestCase(object): - def test_herm_cases(self): - _check_cases(self.do, HERMITIAN_CASES) - - -class HermitianGeneralizedTestCase(object): - @dec.slow - def test_generalized_herm_cases(self): - _check_cases(self.do, GENERALIZED_HERMITIAN_CASES) - - -def dot_generalized(a, b): - a = asarray(a) - if a.ndim >= 3: - if a.ndim == b.ndim: - # matrix x matrix - new_shape = a.shape[:-1] + b.shape[-1:] - elif a.ndim == b.ndim + 1: - # matrix x vector - new_shape = a.shape[:-1] - else: - raise ValueError("Not implemented...") - r = np.empty(new_shape, dtype=np.common_type(a, b)) - for c in itertools.product(*map(range, a.shape[:-2])): - r[c] = dot(a[c], b[c]) - return r - else: - return dot(a, b) - - -def identity_like_generalized(a): - a = asarray(a) - if a.ndim >= 3: - r = np.empty(a.shape, dtype=a.dtype) - for c in itertools.product(*map(range, a.shape[:-2])): - r[c] = identity(a.shape[-2]) - return r - else: - return identity(a.shape[0]) - - -class TestSolve(LinalgTestCase, LinalgGeneralizedTestCase): - def do(self, a, b): - x = linalg.solve(a, b) - assert_almost_equal(b, dot_generalized(a, x)) - assert_(imply(isinstance(b, matrix), isinstance(x, matrix))) - - def test_types(self): - def check(dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - 
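# annotation (not in the original file): solve(x, x) with this well-conditioned
# symmetric matrix must return the identity in the same dtype that went in; the
# generator below repeats that check for all four LAPACK float types.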
assert_equal(linalg.solve(x, x).dtype, dtype) - for dtype in [single, double, csingle, cdouble]: - yield check, dtype - - def test_0_size(self): - class ArraySubclass(np.ndarray): - pass - # Test system of 0x0 matrices - a = np.arange(8).reshape(2, 2, 2) - b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) - - expected = linalg.solve(a, b)[:, 0:0,:] - result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0,:]) - assert_array_equal(result, expected) - assert_(isinstance(result, ArraySubclass)) - - # Test errors for non-square and only b's dimension being 0 - assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) - assert_raises(ValueError, linalg.solve, a, b[:, 0:0,:]) - - # Test broadcasting error - b = np.arange(6).reshape(1, 3, 2) # broadcasting error - assert_raises(ValueError, linalg.solve, a, b) - assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) - - # Test zero "single equations" with 0x0 matrices. - b = np.arange(2).reshape(1, 2).view(ArraySubclass) - expected = linalg.solve(a, b)[:, 0:0] - result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0]) - assert_array_equal(result, expected) - assert_(isinstance(result, ArraySubclass)) - - b = np.arange(3).reshape(1, 3) - assert_raises(ValueError, linalg.solve, a, b) - assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) - assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) - - def test_0_size_k(self): - # test zero multiple equation (K=0) case. - class ArraySubclass(np.ndarray): - pass - a = np.arange(4).reshape(1, 2, 2) - b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) - - expected = linalg.solve(a, b)[:,:, 0:0] - result = linalg.solve(a, b[:,:, 0:0]) - assert_array_equal(result, expected) - assert_(isinstance(result, ArraySubclass)) - - # test both zero. - expected = linalg.solve(a, b)[:, 0:0, 0:0] - result = linalg.solve(a[:, 0:0, 0:0], b[:,0:0, 0:0]) - assert_array_equal(result, expected) - assert_(isinstance(result, ArraySubclass)) - - -class TestInv(LinalgTestCase, LinalgGeneralizedTestCase): - def do(self, a, b): - a_inv = linalg.inv(a) - assert_almost_equal(dot_generalized(a, a_inv), - identity_like_generalized(a)) - assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix))) - - def test_types(self): - def check(dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(linalg.inv(x).dtype, dtype) - for dtype in [single, double, csingle, cdouble]: - yield check, dtype - - def test_0_size(self): - # Check that all kinds of 0-sized arrays work - class ArraySubclass(np.ndarray): - pass - a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) - res = linalg.inv(a) - assert_(res.dtype.type is np.float64) - assert_equal(a.shape, res.shape) - assert_(isinstance(a, ArraySubclass)) - - a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) - res = linalg.inv(a) - assert_(res.dtype.type is np.complex64) - assert_equal(a.shape, res.shape) - - -class TestEigvals(LinalgTestCase, LinalgGeneralizedTestCase): - def do(self, a, b): - ev = linalg.eigvals(a) - evalues, evectors = linalg.eig(a) - assert_almost_equal(ev, evalues) - - def test_types(self): - def check(dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(linalg.eigvals(x).dtype, dtype) - x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) - assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) - for dtype in [single, double, csingle, cdouble]: - yield check, dtype - - -class TestEig(LinalgTestCase, LinalgGeneralizedTestCase): - def do(self, a, b): - evalues, evectors = linalg.eig(a) - 
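# annotation (not in the original file): the assertion below checks the
# eigen-identity a @ v == v * w column by column; w is broadcast with
# [..., None, :] so the same check also covers stacked (ndim > 2) inputs.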
assert_allclose(dot_generalized(a, evectors), - np.asarray(evectors) * np.asarray(evalues)[...,None,:], - rtol=get_rtol(evalues.dtype)) - assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix))) - - def test_types(self): - def check(dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - w, v = np.linalg.eig(x) - assert_equal(w.dtype, dtype) - assert_equal(v.dtype, dtype) - - x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) - w, v = np.linalg.eig(x) - assert_equal(w.dtype, get_complex_dtype(dtype)) - assert_equal(v.dtype, get_complex_dtype(dtype)) - - for dtype in [single, double, csingle, cdouble]: - yield check, dtype - - -class TestSVD(LinalgTestCase, LinalgGeneralizedTestCase): - def do(self, a, b): - u, s, vt = linalg.svd(a, 0) - assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[...,None,:], - np.asarray(vt)), - rtol=get_rtol(u.dtype)) - assert_(imply(isinstance(a, matrix), isinstance(u, matrix))) - assert_(imply(isinstance(a, matrix), isinstance(vt, matrix))) - - def test_types(self): - def check(dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - u, s, vh = linalg.svd(x) - assert_equal(u.dtype, dtype) - assert_equal(s.dtype, get_real_dtype(dtype)) - assert_equal(vh.dtype, dtype) - s = linalg.svd(x, compute_uv=False) - assert_equal(s.dtype, get_real_dtype(dtype)) - - for dtype in [single, double, csingle, cdouble]: - yield check, dtype - - -class TestCondSVD(LinalgTestCase, LinalgGeneralizedTestCase): - def do(self, a, b): - c = asarray(a) # a might be a matrix - s = linalg.svd(c, compute_uv=False) - old_assert_almost_equal(s[0]/s[-1], linalg.cond(a), decimal=5) - - -class TestCond2(LinalgTestCase): - def do(self, a, b): - c = asarray(a) # a might be a matrix - s = linalg.svd(c, compute_uv=False) - old_assert_almost_equal(s[0]/s[-1], linalg.cond(a, 2), decimal=5) - - -class TestCondInf(object): - def test(self): - A = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]]) - assert_almost_equal(linalg.cond(A, inf), 3.) 
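A minimal sketch (not part of the patch) of the identity behind this test: for invertible A, cond(A, p) equals norm(A, p) * norm(inv(A), p), and for the diagonal matrix above both factors can be read off directly.

    import numpy as np

    A = np.array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]])
    # max-absolute-row-sum of A is 3; inv(A) = diag(1, -0.5, 1/3) has row sum 1
    manual = np.linalg.norm(A, np.inf) * np.linalg.norm(np.linalg.inv(A), np.inf)
    assert np.isclose(manual, np.linalg.cond(A, np.inf))  # both equal 3.0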
- - -class TestPinv(LinalgTestCase): - def do(self, a, b): - a_ginv = linalg.pinv(a) - assert_almost_equal(dot(a, a_ginv), identity(asarray(a).shape[0])) - assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix))) - - -class TestDet(LinalgTestCase, LinalgGeneralizedTestCase): - def do(self, a, b): - d = linalg.det(a) - (s, ld) = linalg.slogdet(a) - if asarray(a).dtype.type in (single, double): - ad = asarray(a).astype(double) - else: - ad = asarray(a).astype(cdouble) - ev = linalg.eigvals(ad) - assert_almost_equal(d, multiply.reduce(ev, axis=-1)) - assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) - - s = np.atleast_1d(s) - ld = np.atleast_1d(ld) - m = (s != 0) - assert_almost_equal(np.abs(s[m]), 1) - assert_equal(ld[~m], -inf) - - def test_zero(self): - assert_equal(linalg.det([[0.0]]), 0.0) - assert_equal(type(linalg.det([[0.0]])), double) - assert_equal(linalg.det([[0.0j]]), 0.0) - assert_equal(type(linalg.det([[0.0j]])), cdouble) - - assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) - assert_equal(type(linalg.slogdet([[0.0]])[0]), double) - assert_equal(type(linalg.slogdet([[0.0]])[1]), double) - assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) - assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) - assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) - - def test_types(self): - def check(dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(np.linalg.det(x).dtype, dtype) - ph, s = np.linalg.slogdet(x) - assert_equal(s.dtype, get_real_dtype(dtype)) - assert_equal(ph.dtype, dtype) - for dtype in [single, double, csingle, cdouble]: - yield check, dtype - - -class TestLstsq(LinalgTestCase, LinalgNonsquareTestCase): - def do(self, a, b): - arr = np.asarray(a) - m, n = arr.shape - u, s, vt = linalg.svd(a, 0) - x, residuals, rank, sv = linalg.lstsq(a, b) - if m <= n: - assert_almost_equal(b, dot(a, x)) - assert_equal(rank, m) - else: - assert_equal(rank, n) - assert_almost_equal(sv, sv.__array_wrap__(s)) - if rank == n and m > n: - expect_resids = (np.asarray(abs(np.dot(a, x) - b))**2).sum(axis=0) - expect_resids = np.asarray(expect_resids) - if len(np.asarray(b).shape) == 1: - expect_resids.shape = (1,) - assert_equal(residuals.shape, expect_resids.shape) - else: - expect_resids = np.array([]).view(type(x)) - assert_almost_equal(residuals, expect_resids) - assert_(np.issubdtype(residuals.dtype, np.floating)) - assert_(imply(isinstance(b, matrix), isinstance(x, matrix))) - assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix))) - - -class TestMatrixPower(object): - R90 = array([[0, 1], [-1, 0]]) - Arb22 = array([[4, -7], [-2, 10]]) - noninv = array([[1, 0], [0, 0]]) - arbfloat = array([[0.1, 3.2], [1.2, 0.7]]) - - large = identity(10) - t = large[1,:].copy() - large[1,:] = large[0,:] - large[0,:] = t - - def test_large_power(self): - assert_equal(matrix_power(self.R90, 2**100+2**10+2**5+1), self.R90) - - def test_large_power_trailing_zero(self): - assert_equal(matrix_power(self.R90, 2**100+2**10+2**5), identity(2)) - - def testip_zero(self): - def tz(M): - mz = matrix_power(M, 0) - assert_equal(mz, identity(M.shape[0])) - assert_equal(mz.dtype, M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - yield tz, M - - def testip_one(self): - def tz(M): - mz = matrix_power(M, 1) - assert_equal(mz, M) - assert_equal(mz.dtype, M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - yield tz, M - - def testip_two(self): - def tz(M): - mz = matrix_power(M, 2) - assert_equal(mz, dot(M, M)) - assert_equal(mz.dtype, 
M.dtype) - for M in [self.Arb22, self.arbfloat, self.large]: - yield tz, M - - def testip_invert(self): - def tz(M): - mz = matrix_power(M, -1) - assert_almost_equal(identity(M.shape[0]), dot(mz, M)) - for M in [self.R90, self.Arb22, self.arbfloat, self.large]: - yield tz, M - - def test_invert_noninvertible(self): - import numpy.linalg - assert_raises(numpy.linalg.linalg.LinAlgError, - lambda: matrix_power(self.noninv, -1)) - - -class TestBoolPower(object): - def test_square(self): - A = array([[True, False], [True, True]]) - assert_equal(matrix_power(A, 2), A) - - -class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase): - def do(self, a, b): - # note that eigenvalue arrays must be sorted since - # their order isn't guaranteed. - ev = linalg.eigvalsh(a, 'L') - evalues, evectors = linalg.eig(a) - ev.sort(axis=-1) - evalues.sort(axis=-1) - assert_allclose(ev, evalues, - rtol=get_rtol(ev.dtype)) - - ev2 = linalg.eigvalsh(a, 'U') - ev2.sort(axis=-1) - assert_allclose(ev2, evalues, - rtol=get_rtol(ev.dtype)) - - def test_types(self): - def check(dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - w = np.linalg.eigvalsh(x) - assert_equal(w.dtype, get_real_dtype(dtype)) - for dtype in [single, double, csingle, cdouble]: - yield check, dtype - - def test_invalid(self): - x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) - assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") - assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") - assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") - - def test_UPLO(self): - Klo = np.array([[0, 0],[1, 0]], dtype=np.double) - Kup = np.array([[0, 1],[0, 0]], dtype=np.double) - tgt = np.array([-1, 1], dtype=np.double) - rtol = get_rtol(np.double) - - # Check default is 'L' - w = np.linalg.eigvalsh(Klo) - assert_allclose(np.sort(w), tgt, rtol=rtol) - # Check 'L' - w = np.linalg.eigvalsh(Klo, UPLO='L') - assert_allclose(np.sort(w), tgt, rtol=rtol) - # Check 'l' - w = np.linalg.eigvalsh(Klo, UPLO='l') - assert_allclose(np.sort(w), tgt, rtol=rtol) - # Check 'U' - w = np.linalg.eigvalsh(Kup, UPLO='U') - assert_allclose(np.sort(w), tgt, rtol=rtol) - # Check 'u' - w = np.linalg.eigvalsh(Kup, UPLO='u') - assert_allclose(np.sort(w), tgt, rtol=rtol) - - -class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase): - def do(self, a, b): - # note that eigenvalue arrays must be sorted since - # their order isn't guaranteed. 
- ev, evc = linalg.eigh(a) - evalues, evectors = linalg.eig(a) - ev.sort(axis=-1) - evalues.sort(axis=-1) - assert_almost_equal(ev, evalues) - - assert_allclose(dot_generalized(a, evc), - np.asarray(ev)[...,None,:] * np.asarray(evc), - rtol=get_rtol(ev.dtype)) - - ev2, evc2 = linalg.eigh(a, 'U') - ev2.sort(axis=-1) - assert_almost_equal(ev2, evalues) - - assert_allclose(dot_generalized(a, evc2), - np.asarray(ev2)[...,None,:] * np.asarray(evc2), - rtol=get_rtol(ev.dtype), err_msg=repr(a)) - - def test_types(self): - def check(dtype): - x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - w, v = np.linalg.eigh(x) - assert_equal(w.dtype, get_real_dtype(dtype)) - assert_equal(v.dtype, dtype) - for dtype in [single, double, csingle, cdouble]: - yield check, dtype - - def test_invalid(self): - x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) - assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") - assert_raises(ValueError, np.linalg.eigh, x, "lower") - assert_raises(ValueError, np.linalg.eigh, x, "upper") - - def test_UPLO(self): - Klo = np.array([[0, 0],[1, 0]], dtype=np.double) - Kup = np.array([[0, 1],[0, 0]], dtype=np.double) - tgt = np.array([-1, 1], dtype=np.double) - rtol = get_rtol(np.double) - - # Check default is 'L' - w, v = np.linalg.eigh(Klo) - assert_allclose(np.sort(w), tgt, rtol=rtol) - # Check 'L' - w, v = np.linalg.eigh(Klo, UPLO='L') - assert_allclose(np.sort(w), tgt, rtol=rtol) - # Check 'l' - w, v = np.linalg.eigh(Klo, UPLO='l') - assert_allclose(np.sort(w), tgt, rtol=rtol) - # Check 'U' - w, v = np.linalg.eigh(Kup, UPLO='U') - assert_allclose(np.sort(w), tgt, rtol=rtol) - # Check 'u' - w, v = np.linalg.eigh(Kup, UPLO='u') - assert_allclose(np.sort(w), tgt, rtol=rtol) - - -class _TestNorm(object): - - dt = None - dec = None - - def test_empty(self): - assert_equal(norm([]), 0.0) - assert_equal(norm(array([], dtype=self.dt)), 0.0) - assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) - - def test_vector(self): - a = [1, 2, 3, 4] - b = [-1, -2, -3, -4] - c = [-1, 2, -3, 4] - - def _test(v): - np.testing.assert_almost_equal(norm(v), 30**0.5, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, inf), 4.0, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, -inf), 1.0, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, 1), 10.0, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, -1), 12.0/25, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, 2), 30**0.5, - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, -2), ((205./144)**-0.5), - decimal=self.dec) - np.testing.assert_almost_equal(norm(v, 0), 4, - decimal=self.dec) - - for v in (a, b, c,): - _test(v) - - for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), - array(c, dtype=self.dt)): - _test(v) - - def test_matrix(self): - A = matrix([[1, 3], [5, 7]], dtype=self.dt) - assert_almost_equal(norm(A), 84**0.5) - assert_almost_equal(norm(A, 'fro'), 84**0.5) - assert_almost_equal(norm(A, inf), 12.0) - assert_almost_equal(norm(A, -inf), 4.0) - assert_almost_equal(norm(A, 1), 10.0) - assert_almost_equal(norm(A, -1), 6.0) - assert_almost_equal(norm(A, 2), 9.1231056256176615) - assert_almost_equal(norm(A, -2), 0.87689437438234041) - - assert_raises(ValueError, norm, A, 'nofro') - assert_raises(ValueError, norm, A, -3) - assert_raises(ValueError, norm, A, 0) - - def test_axis(self): - # Vector norms. - # Compare the use of `axis` with computing the norm of each row - # or column separately. 
- A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) - for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: - expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] - assert_almost_equal(norm(A, ord=order, axis=0), expected0) - expected1 = [norm(A[k,:], ord=order) for k in range(A.shape[0])] - assert_almost_equal(norm(A, ord=order, axis=1), expected1) - - # Matrix norms. - B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) - - for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']: - assert_almost_equal(norm(A, ord=order), norm(A, ord=order, - axis=(0, 1))) - - n = norm(B, ord=order, axis=(1, 2)) - expected = [norm(B[k], ord=order) for k in range(B.shape[0])] - assert_almost_equal(n, expected) - - n = norm(B, ord=order, axis=(2, 1)) - expected = [norm(B[k].T, ord=order) for k in range(B.shape[0])] - assert_almost_equal(n, expected) - - n = norm(B, ord=order, axis=(0, 2)) - expected = [norm(B[:, k,:], ord=order) for k in range(B.shape[1])] - assert_almost_equal(n, expected) - - n = norm(B, ord=order, axis=(0, 1)) - expected = [norm(B[:,:, k], ord=order) for k in range(B.shape[2])] - assert_almost_equal(n, expected) - - def test_bad_args(self): - # Check that bad arguments raise the appropriate exceptions. - - A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) - B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) - - # Using `axis=` or passing in a 1-D array implies vector - # norms are being computed, so also using `ord='fro'` raises a - # ValueError. - assert_raises(ValueError, norm, A, 'fro', 0) - assert_raises(ValueError, norm, [3, 4], 'fro', None) - - # Similarly, norm should raise an exception when ord is any finite - # number other than 1, 2, -1 or -2 when computing matrix norms. - for order in [0, 3]: - assert_raises(ValueError, norm, A, order, None) - assert_raises(ValueError, norm, A, order, (0, 1)) - assert_raises(ValueError, norm, B, order, (1, 2)) - - # Invalid axis - assert_raises(ValueError, norm, B, None, 3) - assert_raises(ValueError, norm, B, None, (2, 3)) - assert_raises(ValueError, norm, B, None, (0, 1, 2)) - - def test_longdouble_norm(self): - # Non-regression test: p-norm of longdouble would previously raise - # UnboundLocalError. - x = np.arange(10, dtype=np.longdouble) - old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) - - def test_intmin(self): - # Non-regression test: p-norm of signed integer would previously do - # float cast and abs in the wrong order. - x = np.array([-2 ** 31], dtype=np.int32) - old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) - - def test_complex_high_ord(self): - # gh-4156 - d = np.empty((2,), dtype=np.clongdouble) - d[0] = 6+7j - d[1] = -6+7j - res = 11.615898132184 - old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) - d = d.astype(np.complex128) - old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) - d = d.astype(np.complex64) - old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) - - -class TestNormDouble(_TestNorm): - dt = np.double - dec = 12 - - -class TestNormSingle(_TestNorm): - dt = np.float32 - dec = 6 - - -class TestNormInt64(_TestNorm): - dt = np.int64 - dec = 12 - - -class TestMatrixRank(object): - def test_matrix_rank(self): - # Full rank matrix - yield assert_equal, 4, matrix_rank(np.eye(4)) - # rank deficient matrix - I=np.eye(4); I[-1, -1] = 0. 
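# annotation (not in the original file): zeroing the last diagonal entry of
# np.eye(4) leaves only three linearly independent rows, hence rank 3 below.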
- yield assert_equal, matrix_rank(I), 3 - # All zeros - zero rank - yield assert_equal, matrix_rank(np.zeros((4, 4))), 0 - # 1 dimension - rank 1 unless all 0 - yield assert_equal, matrix_rank([1, 0, 0, 0]), 1 - yield assert_equal, matrix_rank(np.zeros((4,))), 0 - # accepts array-like - yield assert_equal, matrix_rank([1]), 1 - # greater than 2 dimensions raises error - yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2)) - # works on scalar - yield assert_equal, matrix_rank(1), 1 - - -def test_reduced_rank(): - # Test matrices with reduced rank - rng = np.random.RandomState(20120714) - for i in range(100): - # Make a rank deficient matrix - X = rng.normal(size=(40, 10)) - X[:, 0] = X[:, 1] + X[:, 2] - # Assert that matrix_rank detected deficiency - assert_equal(matrix_rank(X), 9) - X[:, 3] = X[:, 4] + X[:, 5] - assert_equal(matrix_rank(X), 8) - - -class TestQR(object): - - def check_qr(self, a): - # This test expects the argument `a` to be an ndarray or - # a subclass of an ndarray of inexact type. - a_type = type(a) - a_dtype = a.dtype - m, n = a.shape - k = min(m, n) - - # mode == 'complete' - q, r = linalg.qr(a, mode='complete') - assert_(q.dtype == a_dtype) - assert_(r.dtype == a_dtype) - assert_(isinstance(q, a_type)) - assert_(isinstance(r, a_type)) - assert_(q.shape == (m, m)) - assert_(r.shape == (m, n)) - assert_almost_equal(dot(q, r), a) - assert_almost_equal(dot(q.T.conj(), q), np.eye(m)) - assert_almost_equal(np.triu(r), r) - - # mode == 'reduced' - q1, r1 = linalg.qr(a, mode='reduced') - assert_(q1.dtype == a_dtype) - assert_(r1.dtype == a_dtype) - assert_(isinstance(q1, a_type)) - assert_(isinstance(r1, a_type)) - assert_(q1.shape == (m, k)) - assert_(r1.shape == (k, n)) - assert_almost_equal(dot(q1, r1), a) - assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) - assert_almost_equal(np.triu(r1), r1) - - # mode == 'r' - r2 = linalg.qr(a, mode='r') - assert_(r2.dtype == a_dtype) - assert_(isinstance(r2, a_type)) - assert_almost_equal(r2, r1) - - def test_qr_empty(self): - a = np.zeros((0, 2)) - assert_raises(linalg.LinAlgError, linalg.qr, a) - - def test_mode_raw(self): - # The factorization is not unique and varies between libraries, - # so it is not possible to check against known values. Functional - # testing is a possibility, but awaits the exposure of more - # of the functions in lapack_lite. Consequently, this test is - # very limited in scope. Note that the results are in FORTRAN - # order, hence the h arrays are transposed. 
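# annotation (not in the original file): in mode='raw' qr returns (h, tau),
# the Householder factorization from LAPACK's dgeqrf: h packs R together with
# the reflector vectors and tau holds their scalar coefficients, transposed
# here because LAPACK stores arrays in column-major (Fortran) order.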
- a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double) - b = a.astype(np.single) - - # Test double - h, tau = linalg.qr(a, mode='raw') - assert_(h.dtype == np.double) - assert_(tau.dtype == np.double) - assert_(h.shape == (2, 3)) - assert_(tau.shape == (2,)) - - h, tau = linalg.qr(a.T, mode='raw') - assert_(h.dtype == np.double) - assert_(tau.dtype == np.double) - assert_(h.shape == (3, 2)) - assert_(tau.shape == (2,)) - - def test_mode_all_but_economic(self): - a = array([[1, 2], [3, 4]]) - b = array([[1, 2], [3, 4], [5, 6]]) - for dt in "fd": - m1 = a.astype(dt) - m2 = b.astype(dt) - self.check_qr(m1) - self.check_qr(m2) - self.check_qr(m2.T) - self.check_qr(matrix(m1)) - for dt in "fd": - m1 = 1 + 1j * a.astype(dt) - m2 = 1 + 1j * b.astype(dt) - self.check_qr(m1) - self.check_qr(m2) - self.check_qr(m2.T) - self.check_qr(matrix(m1)) - - -def test_byteorder_check(): - # Byte order check should pass for native order - if sys.byteorder == 'little': - native = '<' - else: - native = '>' - - for dtt in (np.float32, np.float64): - arr = np.eye(4, dtype=dtt) - n_arr = arr.newbyteorder(native) - sw_arr = arr.newbyteorder('S').byteswap() - assert_equal(arr.dtype.byteorder, '=') - for routine in (linalg.inv, linalg.det, linalg.pinv): - # Normal call - res = routine(arr) - # Native but not '=' - assert_array_equal(res, routine(n_arr)) - # Swapped - assert_array_equal(res, routine(sw_arr)) - - -def test_generalized_raise_multiloop(): - # It should raise an error even if the error doesn't occur in the - # last iteration of the ufunc inner loop - - invertible = np.array([[1, 2], [3, 4]]) - non_invertible = np.array([[1, 1], [1, 1]]) - - x = np.zeros([4, 4, 2, 2])[1::2] - x[...] = invertible - x[0, 0] = non_invertible - - assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) - -def test_xerbla_override(): - # Check that our xerbla has been successfully linked in. If it is not, - # the default xerbla routine is called, which prints a message to stdout - # and may, or may not, abort the process depending on the LAPACK package. - from nose import SkipTest - - try: - pid = os.fork() - except (OSError, AttributeError): - # fork failed, or not running on POSIX - raise SkipTest("Not POSIX or fork failed.") - - if pid == 0: - # child; close i/o file handles - os.close(1) - os.close(0) - # Avoid producing core files. - import resource - resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) - # These calls may abort. - try: - np.linalg.lapack_lite.xerbla() - except ValueError: - pass - except: - os._exit(os.EX_CONFIG) - - try: - a = np.array([[1.]]) - np.linalg.lapack_lite.dorgqr( - 1, 1, 1, a, - 0, # <- invalid value - a, a, 0, 0) - except ValueError as e: - if "DORGQR parameter number 5" in str(e): - # success - os._exit(os.EX_OK) - - # Did not abort, but our xerbla was not linked in. 
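# annotation (not in the original file): os.EX_CONFIG tells the parent that the
# child's xerbla check failed; the parent below turns any non-EX_OK exit (or a
# signal-triggered abort) into a SkipTest rather than a hard failure.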
- os._exit(os.EX_CONFIG) - else: - # parent - pid, status = os.wait() - if os.WEXITSTATUS(status) != os.EX_OK or os.WIFSIGNALED(status): - raise SkipTest('Numpy xerbla not linked in.') - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_regression.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_regression.py deleted file mode 100644 index 18d212cdc9d2a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/linalg/tests/test_regression.py +++ /dev/null @@ -1,90 +0,0 @@ -""" Test functions for linalg module -""" -from __future__ import division, absolute_import, print_function - - -from numpy.testing import * -import numpy as np -from numpy import linalg, arange, float64, array, dot, transpose - -rlevel = 1 - -class TestRegression(TestCase): - def test_eig_build(self, level = rlevel): - """Ticket #652""" - rva = array([1.03221168e+02 +0.j, - -1.91843603e+01 +0.j, - -6.04004526e-01+15.84422474j, - -6.04004526e-01-15.84422474j, - -1.13692929e+01 +0.j, - -6.57612485e-01+10.41755503j, - -6.57612485e-01-10.41755503j, - 1.82126812e+01 +0.j, - 1.06011014e+01 +0.j, - 7.80732773e+00 +0.j, - -7.65390898e-01 +0.j, - 1.51971555e-15 +0.j, - -1.51308713e-15 +0.j]) - a = arange(13*13, dtype = float64) - a.shape = (13, 13) - a = a%17 - va, ve = linalg.eig(a) - va.sort() - rva.sort() - assert_array_almost_equal(va, rva) - - def test_eigh_build(self, level = rlevel): - """Ticket 662.""" - rvals = [68.60568999, 89.57756725, 106.67185574] - - cov = array([[ 77.70273908, 3.51489954, 15.64602427], - [3.51489954, 88.97013878, -1.07431931], - [15.64602427, -1.07431931, 98.18223512]]) - - vals, vecs = linalg.eigh(cov) - assert_array_almost_equal(vals, rvals) - - def test_svd_build(self, level = rlevel): - """Ticket 627.""" - a = array([[ 0., 1.], [ 1., 1.], [ 2., 1.], [ 3., 1.]]) - m, n = a.shape - u, s, vh = linalg.svd(a) - - b = dot(transpose(u[:, n:]), a) - - assert_array_almost_equal(b, np.zeros((2, 2))) - - def test_norm_vector_badarg(self): - """Regression for #786: Froebenius norm for vectors raises - TypeError.""" - self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') - - def test_lapack_endian(self): - # For bug #1482 - a = array([[5.7998084, -2.1825367 ], - [-2.1825367, 9.85910595]], dtype='>f8') - b = array(a, dtype='<f8') - - ap = linalg.cholesky(a) - bp = linalg.cholesky(b) - assert_array_equal(ap, bp) - - def test_large_svd_32bit(self): - # See gh-4442, 64bit would require very large/slow matrices. - x = np.eye(1000, 66) - np.linalg.svd(x) - - def test_svd_no_uv(self): - # gh-4733 - for shape in (3, 4), (4, 4), (4, 3): - for t in float, complex: - a = np.ones(shape, dtype=t) - w = linalg.svd(a, compute_uv=False) - c = np.count_nonzero(np.absolute(w) > 0.5) - assert_equal(c, 1) - assert_equal(np.linalg.matrix_rank(a), 1) - assert_array_less(1, np.linalg.norm(a, ord=2)) - - -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/__init__.py deleted file mode 100644 index 0cb92f6678a23..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -============= -Masked Arrays -============= - -Arrays sometimes contain invalid or missing data. When doing operations -on such arrays, we wish to suppress invalid values, which is the purpose masked -arrays fulfill (an example of typical use is given below). - -For example, examine the following array: - ->>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) - -When we try to calculate the mean of the data, the result is undetermined: - ->>> np.mean(x) -nan - -The mean is calculated using roughly ``np.sum(x)/len(x)``, but since -any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work.
Enter -masked arrays: - ->>> m = np.ma.masked_array(x, np.isnan(x)) ->>> m -masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], - mask = [False False False True False False False True], - fill_value=1e+20) - -Here, we construct a masked array that suppress all ``NaN`` values. We -may now proceed to calculate the mean of the other values: - ->>> np.mean(m) -2.6666666666666665 - -.. [1] Not-a-Number, a floating point value that is the result of an - invalid operation. - -""" -from __future__ import division, absolute_import, print_function - -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -from . import core -from .core import * - -from . import extras -from .extras import * - -__all__ = ['core', 'extras'] -__all__ += core.__all__ -__all__ += extras.__all__ - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/bench.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/bench.py deleted file mode 100644 index 75e6d90c8f5e4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/bench.py +++ /dev/null @@ -1,166 +0,0 @@ -#! python -# encoding: utf-8 -from __future__ import division, absolute_import, print_function - -import timeit -#import IPython.ipapi -#ip = IPython.ipapi.get() -#from IPython import ipmagic -import numpy -#from numpy import ma -#from numpy.ma import filled -#from numpy.ma.testutils import assert_equal - - -#####--------------------------------------------------------------------------- -#---- --- Global variables --- -#####--------------------------------------------------------------------------- - -# Small arrays .................................. -xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3) -ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3) -zs = xs + 1j * ys -m1 = [[True, False, False], [False, False, True]] -m2 = [[True, False, True], [False, False, True]] -nmxs = numpy.ma.array(xs, mask=m1) -nmys = numpy.ma.array(ys, mask=m2) -nmzs = numpy.ma.array(zs, mask=m1) -# Big arrays .................................... 
-xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) -yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) -zl = xl + 1j * yl -maskx = xl > 0.8 -masky = yl < -0.8 -nmxl = numpy.ma.array(xl, mask=maskx) -nmyl = numpy.ma.array(yl, mask=masky) -nmzl = numpy.ma.array(zl, mask=maskx) - -#####--------------------------------------------------------------------------- -#---- --- Functions --- -#####--------------------------------------------------------------------------- - -def timer(s, v='', nloop=500, nrep=3): - units = ["s", "ms", "µs", "ns"] - scaling = [1, 1e3, 1e6, 1e9] - print("%s : %-50s : " % (v, s), end=' ') - varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz'] - setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames) - Timer = timeit.Timer(stmt=s, setup=setup) - best = min(Timer.repeat(nrep, nloop)) / nloop - if best > 0.0: - order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3) - else: - order = 3 - print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep, - 3, - best * scaling[order], - units[order])) -# ip.magic('timeit -n%i %s' % (nloop,s)) - - - -def compare_functions_1v(func, nloop=500, - xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): - funcname = func.__name__ - print("-"*50) - print("%s on small arrays" % funcname) - module, data = "numpy.ma", "nmxs" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - # - print("%s on large arrays" % funcname) - module, data = "numpy.ma", "nmxl" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - return - -def compare_methods(methodname, args, vars='x', nloop=500, test=True, - xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl): - print("-"*50) - print("%s on small arrays" % methodname) - data, ver = "nm%ss" % vars, 'numpy.ma' - timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) - # - print("%s on large arrays" % methodname) - data, ver = "nm%sl" % vars, 'numpy.ma' - timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) - return - -def compare_functions_2v(func, nloop=500, test=True, - xs=xs, nmxs=nmxs, - ys=ys, nmys=nmys, - xl=xl, nmxl=nmxl, - yl=yl, nmyl=nmyl): - funcname = func.__name__ - print("-"*50) - print("%s on small arrays" % funcname) - module, data = "numpy.ma", "nmxs,nmys" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - # - print("%s on large arrays" % funcname) - module, data = "numpy.ma", "nmxl,nmyl" - timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) - return - - -############################################################################### - - -################################################################################ -if __name__ == '__main__': -# # Small arrays .................................. -# xs = numpy.random.uniform(-1,1,6).reshape(2,3) -# ys = numpy.random.uniform(-1,1,6).reshape(2,3) -# zs = xs + 1j * ys -# m1 = [[True, False, False], [False, False, True]] -# m2 = [[True, False, True], [False, False, True]] -# nmxs = numpy.ma.array(xs, mask=m1) -# nmys = numpy.ma.array(ys, mask=m2) -# nmzs = numpy.ma.array(zs, mask=m1) -# mmxs = maskedarray.array(xs, mask=m1) -# mmys = maskedarray.array(ys, mask=m2) -# mmzs = maskedarray.array(zs, mask=m1) -# # Big arrays .................................... 
-# xl = numpy.random.uniform(-1,1,100*100).reshape(100,100) -# yl = numpy.random.uniform(-1,1,100*100).reshape(100,100) -# zl = xl + 1j * yl -# maskx = xl > 0.8 -# masky = yl < -0.8 -# nmxl = numpy.ma.array(xl, mask=maskx) -# nmyl = numpy.ma.array(yl, mask=masky) -# nmzl = numpy.ma.array(zl, mask=maskx) -# mmxl = maskedarray.array(xl, mask=maskx, shrink=True) -# mmyl = maskedarray.array(yl, mask=masky, shrink=True) -# mmzl = maskedarray.array(zl, mask=maskx, shrink=True) -# - compare_functions_1v(numpy.sin) - compare_functions_1v(numpy.log) - compare_functions_1v(numpy.sqrt) - #.................................................................... - compare_functions_2v(numpy.multiply) - compare_functions_2v(numpy.divide) - compare_functions_2v(numpy.power) - #.................................................................... - compare_methods('ravel', '', nloop=1000) - compare_methods('conjugate', '', 'z', nloop=1000) - compare_methods('transpose', '', nloop=1000) - compare_methods('compressed', '', nloop=1000) - compare_methods('__getitem__', '0', nloop=1000) - compare_methods('__getitem__', '(0,0)', nloop=1000) - compare_methods('__getitem__', '[0,-1]', nloop=1000) - compare_methods('__setitem__', '0, 17', nloop=1000, test=False) - compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False) - #.................................................................... - print("-"*50) - print("__setitem__ on small arrays") - timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) - - print("-"*50) - print("__setitem__ on large arrays") - timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) - - #.................................................................... - print("-"*50) - print("where on small arrays") - timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000) - print("-"*50) - print("where on large arrays") - timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/core.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/core.py deleted file mode 100644 index 00164b851f95a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/core.py +++ /dev/null @@ -1,7321 +0,0 @@ -""" -numpy.ma : a package to handle missing or invalid values. - -This package was initially written for numarray by Paul F. Dubois -at Lawrence Livermore National Laboratory. -In 2006, the package was completely rewritten by Pierre Gerard-Marchant -(University of Georgia) to make the MaskedArray class a subclass of ndarray, -and to improve support of structured arrays. - - -Copyright 1999, 2000, 2001 Regents of the University of California. -Released for unlimited redistribution. - -* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. -* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant - (pgmdevlist_AT_gmail_DOT_com) -* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) - -.. 
moduleauthor:: Pierre Gerard-Marchant - -""" -# pylint: disable-msg=E1002 -from __future__ import division, absolute_import, print_function - -import sys -import warnings -from functools import reduce - -import numpy as np -import numpy.core.umath as umath -import numpy.core.numerictypes as ntypes -from numpy import ndarray, amax, amin, iscomplexobj, bool_ -from numpy import array as narray -from numpy.lib.function_base import angle -from numpy.compat import getargspec, formatargspec, long, basestring -from numpy import expand_dims as n_expand_dims - -if sys.version_info[0] >= 3: - import pickle -else: - import cPickle as pickle - -__author__ = "Pierre GF Gerard-Marchant" -__docformat__ = "restructuredtext en" - -__all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray', - 'bool_', - 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', - 'amax', 'amin', 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', - 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', - 'arctanh', 'argmax', 'argmin', 'argsort', 'around', - 'array', 'asarray', 'asanyarray', - 'bitwise_and', 'bitwise_or', 'bitwise_xor', - 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', - 'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh', - 'count', 'cumprod', 'cumsum', - 'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump', - 'dumps', - 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', - 'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide', - 'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex', - 'fromfunction', - 'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal', - 'harden_mask', 'hypot', - 'identity', 'ids', 'indices', 'inner', 'innerproduct', - 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', - 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2', - 'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', - 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', - 'masked', 'masked_array', 'masked_equal', 'masked_greater', - 'masked_greater_equal', 'masked_inside', 'masked_invalid', - 'masked_less', 'masked_less_equal', 'masked_not_equal', - 'masked_object', 'masked_outside', 'masked_print_option', - 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', - 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', - 'mod', 'multiply', 'mvoid', - 'negative', 'nomask', 'nonzero', 'not_equal', - 'ones', 'outer', 'outerproduct', - 'power', 'prod', 'product', 'ptp', 'put', 'putmask', - 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', - 'right_shift', 'round_', 'round', - 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', - 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', - 'swapaxes', - 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', - 'var', 'where', - 'zeros'] - -MaskType = np.bool_ -nomask = MaskType(0) - -def doc_note(initialdoc, note): - """ - Adds a Notes section to an existing docstring. 
- """ - if initialdoc is None: - return - if note is None: - return initialdoc - newdoc = """ - %s - - Notes - ----- - %s - """ - return newdoc % (initialdoc, note) - -def get_object_signature(obj): - """ - Get the signature from obj - """ - try: - sig = formatargspec(*getargspec(obj)) - except TypeError as errmsg: - sig = '' -# msg = "Unable to retrieve the signature of %s '%s'\n"\ -# "(Initial error message: %s)" -# warnings.warn(msg % (type(obj), -# getattr(obj, '__name__', '???'), -# errmsg)) - return sig - - -#####-------------------------------------------------------------------------- -#---- --- Exceptions --- -#####-------------------------------------------------------------------------- -class MAError(Exception): - """Class for masked array related errors.""" - pass -class MaskError(MAError): - "Class for mask related errors." - pass - - -#####-------------------------------------------------------------------------- -#---- --- Filling options --- -#####-------------------------------------------------------------------------- -# b: boolean - c: complex - f: floats - i: integer - O: object - S: string -default_filler = {'b': True, - 'c' : 1.e20 + 0.0j, - 'f' : 1.e20, - 'i' : 999999, - 'O' : '?', - 'S' : 'N/A', - 'u' : 999999, - 'V' : '???', - 'U' : 'N/A', - 'M8[D]' : np.datetime64('NaT', 'D'), - 'M8[us]' : np.datetime64('NaT', 'us') - } -max_filler = ntypes._minvals -max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]]) -min_filler = ntypes._maxvals -min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]]) -if 'float128' in ntypes.typeDict: - max_filler.update([(np.float128, -np.inf)]) - min_filler.update([(np.float128, +np.inf)]) - - -def default_fill_value(obj): - """ - Return the default fill value for the argument object. - - The default filling value depends on the datatype of the input - array or the type of the input scalar: - - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' - string 'N/A' - ======== ======== - - - Parameters - ---------- - obj : ndarray, dtype or scalar - The array data-type or scalar for which the default fill value - is returned. - - Returns - ------- - fill_value : scalar - The default fill value. 
- - Examples - -------- - >>> np.ma.default_fill_value(1) - 999999 - >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) - 1e+20 - >>> np.ma.default_fill_value(np.dtype(complex)) - (1e+20+0j) - - """ - if hasattr(obj, 'dtype'): - defval = _check_fill_value(None, obj.dtype) - elif isinstance(obj, np.dtype): - if obj.subdtype: - defval = default_filler.get(obj.subdtype[0].kind, '?') - elif obj.kind == 'M': - defval = default_filler.get(obj.str[1:], '?') - else: - defval = default_filler.get(obj.kind, '?') - elif isinstance(obj, float): - defval = default_filler['f'] - elif isinstance(obj, int) or isinstance(obj, long): - defval = default_filler['i'] - elif isinstance(obj, str): - defval = default_filler['S'] - elif isinstance(obj, unicode): - defval = default_filler['U'] - elif isinstance(obj, complex): - defval = default_filler['c'] - else: - defval = default_filler['O'] - return defval - - -def _recursive_extremum_fill_value(ndtype, extremum): - names = ndtype.names - if names: - deflist = [] - for name in names: - fval = _recursive_extremum_fill_value(ndtype[name], extremum) - deflist.append(fval) - return tuple(deflist) - return extremum[ndtype] - - -def minimum_fill_value(obj): - """ - Return the maximum value that can be represented by the dtype of an object. - - This function is useful for calculating a fill value suitable for - taking the minimum of an array with a given dtype. - - Parameters - ---------- - obj : ndarray or dtype - An object that can be queried for it's numeric type. - - Returns - ------- - val : scalar - The maximum representable value. - - Raises - ------ - TypeError - If `obj` isn't a suitable numeric type. - - See Also - -------- - maximum_fill_value : The inverse function. - set_fill_value : Set the filling value of a masked array. - MaskedArray.fill_value : Return current fill value. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.int8() - >>> ma.minimum_fill_value(a) - 127 - >>> a = np.int32() - >>> ma.minimum_fill_value(a) - 2147483647 - - An array of numeric data can also be passed. - - >>> a = np.array([1, 2, 3], dtype=np.int8) - >>> ma.minimum_fill_value(a) - 127 - >>> a = np.array([1, 2, 3], dtype=np.float32) - >>> ma.minimum_fill_value(a) - inf - - """ - errmsg = "Unsuitable type for calculating minimum." - if hasattr(obj, 'dtype'): - return _recursive_extremum_fill_value(obj.dtype, min_filler) - elif isinstance(obj, float): - return min_filler[ntypes.typeDict['float_']] - elif isinstance(obj, int): - return min_filler[ntypes.typeDict['int_']] - elif isinstance(obj, long): - return min_filler[ntypes.typeDict['uint']] - elif isinstance(obj, np.dtype): - return min_filler[obj] - else: - raise TypeError(errmsg) - - -def maximum_fill_value(obj): - """ - Return the minimum value that can be represented by the dtype of an object. - - This function is useful for calculating a fill value suitable for - taking the maximum of an array with a given dtype. - - Parameters - ---------- - obj : {ndarray, dtype} - An object that can be queried for it's numeric type. - - Returns - ------- - val : scalar - The minimum representable value. - - Raises - ------ - TypeError - If `obj` isn't a suitable numeric type. - - See Also - -------- - minimum_fill_value : The inverse function. - set_fill_value : Set the filling value of a masked array. - MaskedArray.fill_value : Return current fill value. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.int8() - >>> ma.maximum_fill_value(a) - -128 - >>> a = np.int32() - >>> ma.maximum_fill_value(a) - -2147483648 - - An array of numeric data can also be passed. - - >>> a = np.array([1, 2, 3], dtype=np.int8) - >>> ma.maximum_fill_value(a) - -128 - >>> a = np.array([1, 2, 3], dtype=np.float32) - >>> ma.maximum_fill_value(a) - -inf - - """ - errmsg = "Unsuitable type for calculating maximum." - if hasattr(obj, 'dtype'): - return _recursive_extremum_fill_value(obj.dtype, max_filler) - elif isinstance(obj, float): - return max_filler[ntypes.typeDict['float_']] - elif isinstance(obj, int): - return max_filler[ntypes.typeDict['int_']] - elif isinstance(obj, long): - return max_filler[ntypes.typeDict['uint']] - elif isinstance(obj, np.dtype): - return max_filler[obj] - else: - raise TypeError(errmsg) - - -def _recursive_set_default_fill_value(dtypedescr): - deflist = [] - for currentdescr in dtypedescr: - currenttype = currentdescr[1] - if isinstance(currenttype, list): - deflist.append(tuple(_recursive_set_default_fill_value(currenttype))) - else: - deflist.append(default_fill_value(np.dtype(currenttype))) - return tuple(deflist) - -def _recursive_set_fill_value(fillvalue, dtypedescr): - fillvalue = np.resize(fillvalue, len(dtypedescr)) - output_value = [] - for (fval, descr) in zip(fillvalue, dtypedescr): - cdtype = descr[1] - if isinstance(cdtype, list): - output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) - else: - output_value.append(np.array(fval, dtype=cdtype).item()) - return tuple(output_value) - - -def _check_fill_value(fill_value, ndtype): - """ - Private function validating the given `fill_value` for the given dtype. - - If fill_value is None, it is set to the default corresponding to the dtype - if this latter is standard (no fields). If the datatype is flexible (named - fields), fill_value is set to a tuple whose elements are the default fill - values corresponding to each field. - - If fill_value is not None, its value is forced to the given dtype. - - """ - ndtype = np.dtype(ndtype) - fields = ndtype.fields - if fill_value is None: - if fields: - descr = ndtype.descr - fill_value = np.array(_recursive_set_default_fill_value(descr), - dtype=ndtype,) - else: - fill_value = default_fill_value(ndtype) - elif fields: - fdtype = [(_[0], _[1]) for _ in ndtype.descr] - if isinstance(fill_value, (ndarray, np.void)): - try: - fill_value = np.array(fill_value, copy=False, dtype=fdtype) - except ValueError: - err_msg = "Unable to transform %s to dtype %s" - raise ValueError(err_msg % (fill_value, fdtype)) - else: - descr = ndtype.descr - fill_value = np.asarray(fill_value, dtype=object) - fill_value = np.array(_recursive_set_fill_value(fill_value, descr), - dtype=ndtype) - else: - if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'): - err_msg = "Cannot set fill value of string with array of dtype %s" - raise TypeError(err_msg % ndtype) - else: - # In case we want to convert 1e20 to int... - try: - fill_value = np.array(fill_value, copy=False, dtype=ndtype) - except OverflowError: - # Raise TypeError instead of OverflowError. OverflowError - # is seldom used, and the real problem here is that the - # passed fill_value is not compatible with the ndtype. - err_msg = "Fill value %s overflows dtype %s" - raise TypeError(err_msg % (fill_value, ndtype)) - return np.array(fill_value) - - -def set_fill_value(a, fill_value): - """ - Set the filling value of a, if a is a masked array. 
- - This function changes the fill value of the masked array `a` in place. - If `a` is not a masked array, the function returns silently, without - doing anything. - - Parameters - ---------- - a : array_like - Input array. - fill_value : dtype - Filling value. A consistency test is performed to make sure - the value is compatible with the dtype of `a`. - - Returns - ------- - None - Nothing returned by this function. - - See Also - -------- - maximum_fill_value : Return the default fill value for a dtype. - MaskedArray.fill_value : Return current fill value. - MaskedArray.set_fill_value : Equivalent method. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(5) - >>> a - array([0, 1, 2, 3, 4]) - >>> a = ma.masked_where(a < 3, a) - >>> a - masked_array(data = [-- -- -- 3 4], - mask = [ True True True False False], - fill_value=999999) - >>> ma.set_fill_value(a, -999) - >>> a - masked_array(data = [-- -- -- 3 4], - mask = [ True True True False False], - fill_value=-999) - - Nothing happens if `a` is not a masked array. - - >>> a = range(5) - >>> a - [0, 1, 2, 3, 4] - >>> ma.set_fill_value(a, 100) - >>> a - [0, 1, 2, 3, 4] - >>> a = np.arange(5) - >>> a - array([0, 1, 2, 3, 4]) - >>> ma.set_fill_value(a, 100) - >>> a - array([0, 1, 2, 3, 4]) - - """ - if isinstance(a, MaskedArray): - a.set_fill_value(fill_value) - return - -def get_fill_value(a): - """ - Return the filling value of a, if any. Otherwise, returns the - default filling value for that type. - - """ - if isinstance(a, MaskedArray): - result = a.fill_value - else: - result = default_fill_value(a) - return result - -def common_fill_value(a, b): - """ - Return the common filling value of two masked arrays, if any. - - If ``a.fill_value == b.fill_value``, return the fill value, - otherwise return None. - - Parameters - ---------- - a, b : MaskedArray - The masked arrays for which to compare fill values. - - Returns - ------- - fill_value : scalar or None - The common fill value, or None. - - Examples - -------- - >>> x = np.ma.array([0, 1.], fill_value=3) - >>> y = np.ma.array([0, 1.], fill_value=3) - >>> np.ma.common_fill_value(x, y) - 3.0 - - """ - t1 = get_fill_value(a) - t2 = get_fill_value(b) - if t1 == t2: - return t1 - return None - - -#####-------------------------------------------------------------------------- -def filled(a, fill_value=None): - """ - Return input as an array with masked data replaced by a fill value. - - If `a` is not a `MaskedArray`, `a` itself is returned. - If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to - ``a.fill_value``. - - Parameters - ---------- - a : MaskedArray or array_like - An input object. - fill_value : scalar, optional - Filling value. Default is None. - - Returns - ------- - a : ndarray - The filled array. - - See Also - -------- - compressed - - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], - ... [1, 0, 0], - ... [0, 0, 0]]) - >>> x.filled() - array([[999999, 1, 2], - [999999, 4, 5], - [ 6, 7, 8]]) - - """ - if hasattr(a, 'filled'): - return a.filled(fill_value) - elif isinstance(a, ndarray): - # Should we check for contiguity ? and a.flags['CONTIGUOUS']: - return a - elif isinstance(a, dict): - return np.array(a, 'O') - else: - return np.array(a) - -#####-------------------------------------------------------------------------- -def get_masked_subclass(*arrays): - """ - Return the youngest subclass of MaskedArray from a list of (masked) arrays. - In case of siblings, the first listed takes over. 
- - """ - if len(arrays) == 1: - arr = arrays[0] - if isinstance(arr, MaskedArray): - rcls = type(arr) - else: - rcls = MaskedArray - else: - arrcls = [type(a) for a in arrays] - rcls = arrcls[0] - if not issubclass(rcls, MaskedArray): - rcls = MaskedArray - for cls in arrcls[1:]: - if issubclass(cls, rcls): - rcls = cls - # Don't return MaskedConstant as result: revert to MaskedArray - if rcls.__name__ == 'MaskedConstant': - return MaskedArray - return rcls - -#####-------------------------------------------------------------------------- -def getdata(a, subok=True): - """ - Return the data of a masked array as an ndarray. - - Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, - else return `a` as a ndarray or subclass (depending on `subok`) if not. - - Parameters - ---------- - a : array_like - Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. - subok : bool - Whether to force the output to be a `pure` ndarray (False) or to - return a subclass of ndarray if appropriate (True, default). - - See Also - -------- - getmask : Return the mask of a masked array, or nomask. - getmaskarray : Return the mask of a masked array, or full array of False. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.masked_equal([[1,2],[3,4]], 2) - >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value=999999) - >>> ma.getdata(a) - array([[1, 2], - [3, 4]]) - - Equivalently use the ``MaskedArray`` `data` attribute. - - >>> a.data - array([[1, 2], - [3, 4]]) - - """ - try: - data = a._data - except AttributeError: - data = np.array(a, copy=False, subok=subok) - if not subok: - return data.view(ndarray) - return data -get_data = getdata - - -def fix_invalid(a, mask=nomask, copy=True, fill_value=None): - """ - Return input with invalid data masked and replaced by a fill value. - - Invalid data means values of `nan`, `inf`, etc. - - Parameters - ---------- - a : array_like - Input array, a (subclass of) ndarray. - copy : bool, optional - Whether to use a copy of `a` (True) or to fix `a` in place (False). - Default is True. - fill_value : scalar, optional - Value used for fixing invalid data. Default is None, in which case - the ``a.fill_value`` is used. - - Returns - ------- - b : MaskedArray - The input array with invalid entries fixed. - - Notes - ----- - A copy is performed by default. 
- - Examples - -------- - >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) - >>> x - masked_array(data = [-- -1.0 nan inf], - mask = [ True False False False], - fill_value = 1e+20) - >>> np.ma.fix_invalid(x) - masked_array(data = [-- -1.0 -- --], - mask = [ True False True True], - fill_value = 1e+20) - - >>> fixed = np.ma.fix_invalid(x) - >>> fixed.data - array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20, - 1.00000000e+20]) - >>> x.data - array([ 1., -1., NaN, Inf]) - - """ - a = masked_array(a, copy=copy, mask=mask, subok=True) - #invalid = (numpy.isnan(a._data) | numpy.isinf(a._data)) - invalid = np.logical_not(np.isfinite(a._data)) - if not invalid.any(): - return a - a._mask |= invalid - if fill_value is None: - fill_value = a.fill_value - a._data[invalid] = fill_value - return a - - - -#####-------------------------------------------------------------------------- -#---- --- Ufuncs --- -#####-------------------------------------------------------------------------- -ufunc_domain = {} -ufunc_fills = {} - -class _DomainCheckInterval: - """ - Define a valid interval, so that : - - ``domain_check_interval(a,b)(x) == True`` where - ``x < a`` or ``x > b``. - - """ - def __init__(self, a, b): - "domain_check_interval(a,b)(x) = true where x < a or y > b" - if (a > b): - (a, b) = (b, a) - self.a = a - self.b = b - - def __call__ (self, x): - "Execute the call behavior." - return umath.logical_or(umath.greater (x, self.b), - umath.less(x, self.a)) - - - -class _DomainTan: - """Define a valid interval for the `tan` function, so that: - - ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` - - """ - def __init__(self, eps): - "domain_tan(eps) = true where abs(cos(x)) < eps)" - self.eps = eps - - def __call__ (self, x): - "Executes the call behavior." - return umath.less(umath.absolute(umath.cos(x)), self.eps) - - - -class _DomainSafeDivide: - """Define a domain for safe division.""" - def __init__ (self, tolerance=None): - self.tolerance = tolerance - - def __call__ (self, a, b): - # Delay the selection of the tolerance to here in order to reduce numpy - # import times. The calculation of these parameters is a substantial - # component of numpy's import time. - if self.tolerance is None: - self.tolerance = np.finfo(float).tiny - return umath.absolute(a) * self.tolerance >= umath.absolute(b) - - - -class _DomainGreater: - """DomainGreater(v)(x) is True where x <= v.""" - def __init__(self, critical_value): - "DomainGreater(v)(x) = true where x <= v" - self.critical_value = critical_value - - def __call__ (self, x): - "Executes the call behavior." - return umath.less_equal(x, self.critical_value) - - - -class _DomainGreaterEqual: - """DomainGreaterEqual(v)(x) is True where x < v.""" - def __init__(self, critical_value): - "DomainGreaterEqual(v)(x) = true where x < v" - self.critical_value = critical_value - - def __call__ (self, x): - "Executes the call behavior." - return umath.less(x, self.critical_value) - -#.............................................................................. -class _MaskedUnaryOperation: - """ - Defines masked version of unary operations, where invalid values are - pre-masked. - - Parameters - ---------- - mufunc : callable - The function for which to define a masked version. Made available - as ``_MaskedUnaryOperation.f``. - fill : scalar, optional - Filling value, default is 0. - domain : class instance - Domain for the function. Should be one of the ``_Domain*`` - classes. Default is None. 
- - """ - def __init__ (self, mufunc, fill=0, domain=None): - """ _MaskedUnaryOperation(aufunc, fill=0, domain=None) - aufunc(fill) must be defined - self(x) returns aufunc(x) - with masked values where domain(x) is true or getmask(x) is true. - """ - self.f = mufunc - self.fill = fill - self.domain = domain - self.__doc__ = getattr(mufunc, "__doc__", str(mufunc)) - self.__name__ = getattr(mufunc, "__name__", str(mufunc)) - ufunc_domain[mufunc] = domain - ufunc_fills[mufunc] = fill - # - def __call__ (self, a, *args, **kwargs): - "Execute the call behavior." - d = getdata(a) - # Case 1.1. : Domained function - if self.domain is not None: - with np.errstate(divide='ignore', invalid='ignore'): - result = self.f(d, *args, **kwargs) - # Make a mask - m = ~umath.isfinite(result) - m |= self.domain(d) - m |= getmask(a) - # Case 1.2. : Function without a domain - else: - # Get the result and the mask - result = self.f(d, *args, **kwargs) - m = getmask(a) - # Case 2.1. : The result is scalarscalar - if not result.ndim: - if m: - return masked - return result - # Case 2.2. The result is an array - # We need to fill the invalid data back w/ the input - # Now, that's plain silly: in C, we would just skip the element and keep - # the original, but we do have to do it that way in Python - if m is not nomask: - # In case result has a lower dtype than the inputs (as in equal) - try: - np.copyto(result, d, where=m) - except TypeError: - pass - # Transform to - if isinstance(a, MaskedArray): - subtype = type(a) - else: - subtype = MaskedArray - result = result.view(subtype) - result._mask = m - result._update_from(a) - return result - # - def __str__ (self): - return "Masked version of %s. [Invalid values are masked]" % str(self.f) - - - -class _MaskedBinaryOperation: - """ - Define masked version of binary operations, where invalid - values are pre-masked. - - Parameters - ---------- - mbfunc : function - The function for which to define a masked version. Made available - as ``_MaskedBinaryOperation.f``. - domain : class instance - Default domain for the function. Should be one of the ``_Domain*`` - classes. Default is None. - fillx : scalar, optional - Filling value for the first argument, default is 0. - filly : scalar, optional - Filling value for the second argument, default is 0. - - """ - def __init__ (self, mbfunc, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = mbfunc - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc)) - self.__name__ = getattr(mbfunc, "__name__", str(mbfunc)) - ufunc_domain[mbfunc] = None - ufunc_fills[mbfunc] = (fillx, filly) - - def __call__ (self, a, b, *args, **kwargs): - "Execute the call behavior." - # Get the data, as ndarray - (da, db) = (getdata(a, subok=False), getdata(b, subok=False)) - # Get the mask - (ma, mb) = (getmask(a), getmask(b)) - if ma is nomask: - if mb is nomask: - m = nomask - else: - m = umath.logical_or(getmaskarray(a), mb) - elif mb is nomask: - m = umath.logical_or(ma, getmaskarray(b)) - else: - m = umath.logical_or(ma, mb) - # Get the result - with np.errstate(divide='ignore', invalid='ignore'): - result = self.f(da, db, *args, **kwargs) - # check it worked - if result is NotImplemented: - return NotImplemented - # Case 1. : scalar - if not result.ndim: - if m: - return masked - return result - # Case 2. 
: array - # Revert result to da where masked - if m is not nomask: - np.copyto(result, da, casting='unsafe', where=m) - # Transforms to a (subclass of) MaskedArray - result = result.view(get_masked_subclass(a, b)) - result._mask = m - # Update the optional info from the inputs - if isinstance(b, MaskedArray): - if isinstance(a, MaskedArray): - result._update_from(a) - else: - result._update_from(b) - elif isinstance(a, MaskedArray): - result._update_from(a) - return result - - - def reduce(self, target, axis=0, dtype=None): - """Reduce `target` along the given `axis`.""" - if isinstance(target, MaskedArray): - tclass = type(target) - else: - tclass = MaskedArray - m = getmask(target) - t = filled(target, self.filly) - if t.shape == (): - t = t.reshape(1) - if m is not nomask: - m = make_mask(m, copy=1) - m.shape = (1,) - if m is nomask: - return self.f.reduce(t, axis).view(tclass) - t = t.view(tclass) - t._mask = m - tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype) - mr = umath.logical_and.reduce(m, axis) - tr = tr.view(tclass) - if mr.ndim > 0: - tr._mask = mr - return tr - elif mr: - return masked - return tr - - def outer (self, a, b): - """Return the function applied to the outer product of a and b. - - """ - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = umath.logical_or.outer(ma, mb) - if (not m.ndim) and m: - return masked - (da, db) = (getdata(a), getdata(b)) - d = self.f.outer(da, db) - # check it worked - if d is NotImplemented: - return NotImplemented - if m is not nomask: - np.copyto(d, da, where=m) - if d.shape: - d = d.view(get_masked_subclass(a, b)) - d._mask = m - return d - - def accumulate (self, target, axis=0): - """Accumulate `target` along `axis` after filling with y fill - value. - - """ - if isinstance(target, MaskedArray): - tclass = type(target) - else: - tclass = MaskedArray - t = filled(target, self.filly) - return self.f.accumulate(t, axis).view(tclass) - - def __str__ (self): - return "Masked version of " + str(self.f) - - - -class _DomainedBinaryOperation: - """ - Define binary operations that have a domain, like divide. - - They have no reduce, outer or accumulate. - - Parameters - ---------- - mbfunc : function - The function for which to define a masked version. Made available - as ``_DomainedBinaryOperation.f``. - domain : class instance - Default domain for the function. Should be one of the ``_Domain*`` - classes. - fillx : scalar, optional - Filling value for the first argument, default is 0. - filly : scalar, optional - Filling value for the second argument, default is 0. - - """ - def __init__ (self, dbfunc, domain, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = dbfunc - self.domain = domain - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc)) - self.__name__ = getattr(dbfunc, "__name__", str(dbfunc)) - ufunc_domain[dbfunc] = domain - ufunc_fills[dbfunc] = (fillx, filly) - - def __call__(self, a, b, *args, **kwargs): - "Execute the call behavior." 
- # Get the data and the mask - (da, db) = (getdata(a, subok=False), getdata(b, subok=False)) - (ma, mb) = (getmask(a), getmask(b)) - # Get the result - with np.errstate(divide='ignore', invalid='ignore'): - result = self.f(da, db, *args, **kwargs) - # check it worked - if result is NotImplemented: - return NotImplemented - # Get the mask as a combination of ma, mb and invalid - m = ~umath.isfinite(result) - m |= ma - m |= mb - # Apply the domain - domain = ufunc_domain.get(self.f, None) - if domain is not None: - m |= filled(domain(da, db), True) - # Take care of the scalar case first - if (not m.ndim): - if m: - return masked - else: - return result - # When the mask is True, put back da - np.copyto(result, da, casting='unsafe', where=m) - result = result.view(get_masked_subclass(a, b)) - result._mask = m - if isinstance(b, MaskedArray): - if isinstance(a, MaskedArray): - result._update_from(a) - else: - result._update_from(b) - elif isinstance(a, MaskedArray): - result._update_from(a) - return result - - def __str__ (self): - return "Masked version of " + str(self.f) - -#.............................................................................. -# Unary ufuncs -exp = _MaskedUnaryOperation(umath.exp) -conjugate = _MaskedUnaryOperation(umath.conjugate) -sin = _MaskedUnaryOperation(umath.sin) -cos = _MaskedUnaryOperation(umath.cos) -tan = _MaskedUnaryOperation(umath.tan) -arctan = _MaskedUnaryOperation(umath.arctan) -arcsinh = _MaskedUnaryOperation(umath.arcsinh) -sinh = _MaskedUnaryOperation(umath.sinh) -cosh = _MaskedUnaryOperation(umath.cosh) -tanh = _MaskedUnaryOperation(umath.tanh) -abs = absolute = _MaskedUnaryOperation(umath.absolute) -angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base -fabs = _MaskedUnaryOperation(umath.fabs) -negative = _MaskedUnaryOperation(umath.negative) -floor = _MaskedUnaryOperation(umath.floor) -ceil = _MaskedUnaryOperation(umath.ceil) -around = _MaskedUnaryOperation(np.round_) -logical_not = _MaskedUnaryOperation(umath.logical_not) -# Domained unary ufuncs ....................................................... -sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, - _DomainGreaterEqual(0.0)) -log = _MaskedUnaryOperation(umath.log, 1.0, - _DomainGreater(0.0)) -log2 = _MaskedUnaryOperation(umath.log2, 1.0, - _DomainGreater(0.0)) -log10 = _MaskedUnaryOperation(umath.log10, 1.0, - _DomainGreater(0.0)) -tan = _MaskedUnaryOperation(umath.tan, 0.0, - _DomainTan(1e-35)) -arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, - _DomainCheckInterval(-1.0, 1.0)) -arccos = _MaskedUnaryOperation(umath.arccos, 0.0, - _DomainCheckInterval(-1.0, 1.0)) -arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, - _DomainGreaterEqual(1.0)) -arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, - _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) -# Binary ufuncs ............................................................... 
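-# A quick illustrative sketch (not part of the original module): every wrapper
-# defined below returns masked entries where the operation is invalid, rather
-# than raising or emitting warnings.  Assuming ``import numpy.ma as ma``:
-#     >>> ma.divide([1., 2.], [0., 2.])
-#     masked_array(data = [-- 1.0],
-#                  mask = [ True False],
-#            fill_value = 1e+20)
-# The division by zero is trapped by the _DomainSafeDivide domain and the
-# offending entry comes back masked.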
-add = _MaskedBinaryOperation(umath.add)
-subtract = _MaskedBinaryOperation(umath.subtract)
-multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
-arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
-equal = _MaskedBinaryOperation(umath.equal)
-equal.reduce = None
-not_equal = _MaskedBinaryOperation(umath.not_equal)
-not_equal.reduce = None
-less_equal = _MaskedBinaryOperation(umath.less_equal)
-less_equal.reduce = None
-greater_equal = _MaskedBinaryOperation(umath.greater_equal)
-greater_equal.reduce = None
-less = _MaskedBinaryOperation(umath.less)
-less.reduce = None
-greater = _MaskedBinaryOperation(umath.greater)
-greater.reduce = None
-logical_and = _MaskedBinaryOperation(umath.logical_and)
-alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
-logical_or = _MaskedBinaryOperation(umath.logical_or)
-sometrue = logical_or.reduce
-logical_xor = _MaskedBinaryOperation(umath.logical_xor)
-bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
-bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
-bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
-hypot = _MaskedBinaryOperation(umath.hypot)
-# Domained binary ufuncs ......................................................
-divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
-true_divide = _DomainedBinaryOperation(umath.true_divide,
-                                       _DomainSafeDivide(), 0, 1)
-floor_divide = _DomainedBinaryOperation(umath.floor_divide,
-                                        _DomainSafeDivide(), 0, 1)
-remainder = _DomainedBinaryOperation(umath.remainder,
-                                     _DomainSafeDivide(), 0, 1)
-fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
-mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
-
-
-#####--------------------------------------------------------------------------
-#---- --- Mask creation functions ---
-#####--------------------------------------------------------------------------
-
-def _recursive_make_descr(datatype, newtype=bool_):
-    "Private function allowing recursion in make_descr."
-    # Do we have some name fields ?
-    if datatype.names:
-        descr = []
-        for name in datatype.names:
-            field = datatype.fields[name]
-            if len(field) == 3:
-                # Prepend the title to the name
-                name = (field[-1], name)
-            descr.append((name, _recursive_make_descr(field[0], newtype)))
-        return descr
-    # Is this some kind of composite a la (np.float,2)
-    elif datatype.subdtype:
-        mdescr = list(datatype.subdtype)
-        mdescr[0] = newtype
-        return tuple(mdescr)
-    else:
-        return newtype
-
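-# An illustrative sketch (not part of the original module) of what the private
-# helper above produces for a nested dtype; leaf types are replaced by the
-# given `newtype` (bool_ by default) while the structure is preserved, roughly:
-#     _recursive_make_descr(np.dtype([('a', float),
-#                                     ('b', [('ba', int), ('bb', float)])]))
-#     # -> [('a', bool_), ('b', [('ba', bool_), ('bb', bool_)])]
-# make_mask_descr below simply wraps this result back into a np.dtype.
-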
-def make_mask_descr(ndtype):
-    """
-    Construct a dtype description list from a given dtype.
-
-    Returns a new dtype object, with the type of all fields in `ndtype`
-    converted to a boolean type. Field names are not altered.
-
-    Parameters
-    ----------
-    ndtype : dtype
-        The dtype to convert.
-
-    Returns
-    -------
-    result : dtype
-        A dtype that looks like `ndtype`, the type of all fields is boolean.
-
-    Examples
-    --------
-    >>> import numpy.ma as ma
-    >>> dtype = np.dtype({'names':['foo', 'bar'],
-                          'formats':[np.float32, np.int]})
-    >>> dtype
-    dtype([('foo', '<f4'), ('bar', '<i4')])
-    >>> ma.make_mask_descr(dtype)
-    dtype([('foo', '|b1'), ('bar', '|b1')])
-    >>> ma.make_mask_descr(np.float32)
-    <type 'numpy.bool_'>
-
-    """
-    # Make sure we do have a dtype
-    if not isinstance(ndtype, np.dtype):
-        ndtype = np.dtype(ndtype)
-    return np.dtype(_recursive_make_descr(ndtype, np.bool))
-
-def getmask(a):
-    """
-    Return the mask of a masked array, or nomask.
-
-    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
-    mask is not `nomask`, else return `nomask`. To guarantee a full array
-    of booleans of the same shape as `a`, use `getmaskarray`.
-
-    Parameters
-    ----------
-    a : array_like
-        Input `MaskedArray` for which the mask is required.
-
-    See Also
-    --------
-    getdata : Return the data of a masked array as an ndarray.
-    getmaskarray : Return the mask of a masked array, or full array of False.
-
-    Examples
-    --------
-    >>> import numpy.ma as ma
-    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
-    >>> a
-    masked_array(data =
-     [[1 --]
-     [3 4]],
-          mask =
-     [[False  True]
-     [False False]],
-          fill_value=999999)
-    >>> ma.getmask(a)
-    array([[False,  True],
-           [False, False]], dtype=bool)
-
-    Equivalently use the `MaskedArray` `mask` attribute.
-
-    >>> a.mask
-    array([[False,  True],
-           [False, False]], dtype=bool)
-
-    Result when mask == `nomask`
-
-    >>> b = ma.masked_array([[1,2],[3,4]])
-    >>> b
-    masked_array(data =
-     [[1 2]
-     [3 4]],
-          mask =
-     False,
-          fill_value=999999)
-    >>> ma.nomask
-    False
-    >>> ma.getmask(b) == ma.nomask
-    True
-    >>> b.mask == ma.nomask
-    True
-
-    """
-    return getattr(a, '_mask', nomask)
-get_mask = getmask
-
-def getmaskarray(arr):
-    """
-    Return the mask of a masked array, or full boolean array of False.
-
-    Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
-    the mask is not `nomask`, else return a full boolean array of False of
-    the same shape as `arr`.
-
-    Parameters
-    ----------
-    arr : array_like
-        Input `MaskedArray` for which the mask is required.
-
-    See Also
-    --------
-    getmask : Return the mask of a masked array, or nomask.
-    getdata : Return the data of a masked array as an ndarray.
-
-    Examples
-    --------
-    >>> import numpy.ma as ma
-    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
-    >>> a
-    masked_array(data =
-     [[1 --]
-     [3 4]],
-          mask =
-     [[False  True]
-     [False False]],
-          fill_value=999999)
-    >>> ma.getmaskarray(a)
-    array([[False,  True],
-           [False, False]], dtype=bool)
-
-    Result when mask == ``nomask``
-
-    >>> b = ma.masked_array([[1,2],[3,4]])
-    >>> b
-    masked_array(data =
-     [[1 2]
-     [3 4]],
-          mask =
-     False,
-          fill_value=999999)
-    >>> ma.getmaskarray(b)
-    array([[False, False],
-           [False, False]], dtype=bool)
-
-    """
-    mask = getmask(arr)
-    if mask is nomask:
-        mask = make_mask_none(np.shape(arr), getdata(arr).dtype)
-    return mask
-
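-# An illustrative contrast (not part of the original module) between the two
-# accessors above, assuming ``import numpy.ma as ma``:
-#     >>> ma.getmask(ma.array([1, 2]))        # no mask -> the nomask singleton
-#     False
-#     >>> ma.getmaskarray(ma.array([1, 2]))   # no mask -> full boolean array
-#     array([False, False], dtype=bool)
-# getmaskarray is therefore the safer choice whenever a real ndarray of
-# booleans is required.
-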
-def is_mask(m):
-    """
-    Return True if m is a valid, standard mask.
-
-    This function does not check the contents of the input, only that the
-    type is MaskType. In particular, this function returns False if the
-    mask has a flexible dtype.
-
-    Parameters
-    ----------
-    m : array_like
-        Array to test.
-
-    Returns
-    -------
-    result : bool
-        True if `m.dtype.type` is MaskType, False otherwise.
-
-    See Also
-    --------
-    isMaskedArray : Test whether input is an instance of MaskedArray.
-
-    Examples
-    --------
-    >>> import numpy.ma as ma
-    >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
-    >>> m
-    masked_array(data = [-- 1 -- 2 3],
-          mask = [ True False  True False False],
-          fill_value=999999)
-    >>> ma.is_mask(m)
-    False
-    >>> ma.is_mask(m.mask)
-    True
-
-    Input must be an ndarray (or have similar attributes)
-    for it to be considered a valid mask.
-
-    >>> m = [False, True, False]
-    >>> ma.is_mask(m)
-    False
-    >>> m = np.array([False, True, False])
-    >>> m
-    array([False,  True, False], dtype=bool)
-    >>> ma.is_mask(m)
-    True
-
-    Arrays with complex dtypes don't return True.
-
-    >>> dtype = np.dtype({'names':['monty', 'pithon'],
-                          'formats':[np.bool, np.bool]})
-    >>> dtype
-    dtype([('monty', '|b1'), ('pithon', '|b1')])
-    >>> m = np.array([(True, False), (False, True), (True, False)],
-                     dtype=dtype)
-    >>> m
-    array([(True, False), (False, True), (True, False)],
-          dtype=[('monty', '|b1'), ('pithon', '|b1')])
-    >>> ma.is_mask(m)
-    False
-
-    """
-    try:
-        return m.dtype.type is MaskType
-    except AttributeError:
-        return False
-
-def make_mask(m, copy=False, shrink=True, dtype=MaskType):
-    """
-    Create a boolean mask from an array.
-
-    Return `m` as a boolean mask, creating a copy if necessary or requested.
-    The function can accept any sequence that is convertible to integers,
-    or ``nomask``. It does not require the contents to be 0s and 1s: values
-    of 0 are interpreted as False, everything else as True.
-
-    Parameters
-    ----------
-    m : array_like
-        Potential mask.
-    copy : bool, optional
-        Whether to return a copy of `m` (True) or `m` itself (False).
-    shrink : bool, optional
-        Whether to shrink `m` to ``nomask`` if all its values are False.
-    dtype : dtype, optional
-        Data-type of the output mask. By default, the output mask has
-        a dtype of MaskType (bool). If the dtype is flexible, each field
-        has a boolean dtype.
-
-    Returns
-    -------
-    result : ndarray
-        A boolean mask derived from `m`.
-
-    Examples
-    --------
-    >>> import numpy.ma as ma
-    >>> m = [True, False, True, True]
-    >>> ma.make_mask(m)
-    array([ True, False,  True,  True], dtype=bool)
-    >>> m = [1, 0, 1, 1]
-    >>> ma.make_mask(m)
-    array([ True, False,  True,  True], dtype=bool)
-    >>> m = [1, 0, 2, -3]
-    >>> ma.make_mask(m)
-    array([ True, False,  True,  True], dtype=bool)
-
-    Effect of the `shrink` parameter.
-
-    >>> m = np.zeros(4)
-    >>> m
-    array([ 0.,  0.,  0.,  0.])
-    >>> ma.make_mask(m)
-    False
-    >>> ma.make_mask(m, shrink=False)
-    array([False, False, False, False], dtype=bool)
-
-    Using a flexible `dtype`.
-
-    >>> m = [1, 0, 1, 1]
-    >>> n = [0, 1, 0, 0]
-    >>> arr = []
-    >>> for man, mouse in zip(m, n):
-    ...     arr.append((man, mouse))
-    >>> arr
-    [(1, 0), (0, 1), (1, 0), (1, 0)]
-    >>> dtype = np.dtype({'names':['man', 'mouse'],
-                          'formats':[np.int, np.int]})
-    >>> arr = np.array(arr, dtype=dtype)
-    >>> arr
-    array([(1, 0), (0, 1), (1, 0), (1, 0)],
-          dtype=[('man', '<i4'), ('mouse', '<i4')])
-    >>> ma.make_mask(arr, dtype=dtype)
-    array([(True, False), (False, True), (True, False), (True, False)],
-          dtype=[('man', '|b1'), ('mouse', '|b1')])
-
-    """
-    if m is nomask:
-        return nomask
-    elif isinstance(m, ndarray):
-        # We won't return after this point to make sure we can shrink the mask
-        # Fill the mask in case there are missing data
-        m = filled(m, True)
-        # Make sure the input dtype is valid
-        dtype = make_mask_descr(dtype)
-        if m.dtype == dtype:
-            if copy:
-                result = m.copy()
-            else:
-                result = m
-        else:
-            result = np.array(m, dtype=dtype, copy=copy)
-    else:
-        result = np.array(filled(m, True), dtype=MaskType)
-    # Bas les masques !
-    if shrink and (not result.dtype.names) and (not result.any()):
-        return nomask
-    else:
-        return result
-
-
-def make_mask_none(newshape, dtype=None):
-    """
-    Return a boolean mask of the given shape, filled with False.
-
-    This function returns a boolean ndarray with all entries False, that can
-    be used in common mask manipulations. If a complex dtype is specified, the
-    type of each field is converted to a boolean type.
-
-    Parameters
-    ----------
-    newshape : tuple
-        A tuple indicating the shape of the mask.
-    dtype : {None, dtype}, optional
-        If None, use a MaskType instance. Otherwise, use a new datatype with
-        the same fields as `dtype`, converted to boolean types.
-
-    Returns
-    -------
-    result : ndarray
-        An ndarray of appropriate shape and dtype, filled with False.
-
-    See Also
-    --------
-    make_mask : Create a boolean mask from an array.
-    make_mask_descr : Construct a dtype description list from a given dtype.
-
-    Examples
-    --------
-    >>> import numpy.ma as ma
-    >>> ma.make_mask_none((3,))
-    array([False, False, False], dtype=bool)
-
-    Defining a more complex dtype.
-
-    >>> dtype = np.dtype({'names':['foo', 'bar'],
-                          'formats':[np.float32, np.int]})
-    >>> dtype
-    dtype([('foo', '<f4'), ('bar', '<i4')])
-    >>> ma.make_mask_none((3,), dtype=dtype)
-    array([(False, False), (False, False), (False, False)],
-          dtype=[('foo', '|b1'), ('bar', '|b1')])
-
-    """
-    if dtype is None:
-        result = np.zeros(newshape, dtype=MaskType)
-    else:
-        result = np.zeros(newshape, dtype=make_mask_descr(dtype))
-    return result
-
-def mask_or(m1, m2, copy=False, shrink=True):
-    """
-    Combine two masks with the ``logical_or`` operator.
-
-    The result may be a view on `m1` or `m2` if the other is `nomask`
-    (i.e. False).
-
-    Parameters
-    ----------
-    m1, m2 : array_like
-        Input masks.
-    copy : bool, optional
-        If copy is False and one of the inputs is `nomask`, return a view
-        of the other input mask. Defaults to False.
-    shrink : bool, optional
-        Whether to shrink the output to `nomask` if all its values are
-        False. Defaults to True.
-
-    Returns
-    -------
-    mask : output mask
-        The result masks values that are masked in either `m1` or `m2`.
-
-    Raises
-    ------
-    ValueError
-        If `m1` and `m2` have different flexible dtypes.
-
-    Examples
-    --------
-    >>> m1 = np.ma.make_mask([0, 1, 1, 0])
-    >>> m2 = np.ma.make_mask([1, 0, 0, 0])
-    >>> np.ma.mask_or(m1, m2)
-    array([ True,  True,  True, False], dtype=bool)
-
-    """
-    def _recursive_mask_or(m1, m2, newmask):
-        names = m1.dtype.names
-        for name in names:
-            current1 = m1[name]
-            if current1.dtype.names:
-                _recursive_mask_or(current1, m2[name], newmask[name])
-            else:
-                umath.logical_or(current1, m2[name], newmask[name])
-        return
-    #
-    if (m1 is nomask) or (m1 is False):
-        dtype = getattr(m2, 'dtype', MaskType)
-        return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
-    if (m2 is nomask) or (m2 is False):
-        dtype = getattr(m1, 'dtype', MaskType)
-        return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
-    if m1 is m2 and is_mask(m1):
-        return m1
-    (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
-    if (dtype1 != dtype2):
-        raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
-    if dtype1.names:
-        newmask = np.empty_like(m1)
-        _recursive_mask_or(m1, m2, newmask)
-        return newmask
-    return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
-
-
-def flatten_mask(mask):
-    """
-    Returns a completely flattened version of the mask, where nested fields
-    are collapsed.
-
-    Parameters
-    ----------
-    mask : array_like
-        Input array, which will be interpreted as booleans.
-
-    Returns
-    -------
-    flattened_mask : ndarray of bools
-        The flattened input.
- - Examples - -------- - >>> mask = np.array([0, 0, 1], dtype=np.bool) - >>> flatten_mask(mask) - array([False, False, True], dtype=bool) - - >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) - >>> flatten_mask(mask) - array([False, False, False, True], dtype=bool) - - >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] - >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) - >>> flatten_mask(mask) - array([False, False, False, False, False, True], dtype=bool) - - """ - # - def _flatmask(mask): - "Flatten the mask and returns a (maybe nested) sequence of booleans." - mnames = mask.dtype.names - if mnames: - return [flatten_mask(mask[name]) for name in mnames] - else: - return mask - # - def _flatsequence(sequence): - "Generates a flattened version of the sequence." - try: - for element in sequence: - if hasattr(element, '__iter__'): - for f in _flatsequence(element): - yield f - else: - yield element - except TypeError: - yield sequence - # - mask = np.asarray(mask) - flattened = _flatsequence(_flatmask(mask)) - return np.array([_ for _ in flattened], dtype=bool) - - -def _check_mask_axis(mask, axis): - "Check whether there are masked values along the given axis" - if mask is not nomask: - return mask.all(axis=axis) - return nomask - - -#####-------------------------------------------------------------------------- -#--- --- Masking functions --- -#####-------------------------------------------------------------------------- - -def masked_where(condition, a, copy=True): - """ - Mask an array where a condition is met. - - Return `a` as an array masked where `condition` is True. - Any masked values of `a` or `condition` are also masked in the output. - - Parameters - ---------- - condition : array_like - Masking condition. When `condition` tests floating point values for - equality, consider using ``masked_values`` instead. - a : array_like - Array to mask. - copy : bool - If True (default) make a copy of `a` in the result. If False modify - `a` in place and return a view. - - Returns - ------- - result : MaskedArray - The result of masking `a` where `condition` is True. - - See Also - -------- - masked_values : Mask using floating point equality. - masked_equal : Mask where equal to a given value. - masked_not_equal : Mask where `not` equal to a given value. - masked_less_equal : Mask where less than or equal to a given value. - masked_greater_equal : Mask where greater than or equal to a given value. - masked_less : Mask where less than a given value. - masked_greater : Mask where greater than a given value. - masked_inside : Mask inside a given interval. - masked_outside : Mask outside a given interval. - masked_invalid : Mask invalid values (NaNs or infs). - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_where(a <= 2, a) - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) - - Mask array `b` conditional on `a`. - - >>> b = ['a', 'b', 'c', 'd'] - >>> ma.masked_where(a == 2, b) - masked_array(data = [a b -- d], - mask = [False False True False], - fill_value=N/A) - - Effect of the `copy` argument. 
- - >>> c = ma.masked_where(a <= 2, a) - >>> c - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) - >>> c[0] = 99 - >>> c - masked_array(data = [99 -- -- 3], - mask = [False True True False], - fill_value=999999) - >>> a - array([0, 1, 2, 3]) - >>> c = ma.masked_where(a <= 2, a, copy=False) - >>> c[0] = 99 - >>> c - masked_array(data = [99 -- -- 3], - mask = [False True True False], - fill_value=999999) - >>> a - array([99, 1, 2, 3]) - - When `condition` or `a` contain masked values. - - >>> a = np.arange(4) - >>> a = ma.masked_where(a == 2, a) - >>> a - masked_array(data = [0 1 -- 3], - mask = [False False True False], - fill_value=999999) - >>> b = np.arange(4) - >>> b = ma.masked_where(b == 0, b) - >>> b - masked_array(data = [-- 1 2 3], - mask = [ True False False False], - fill_value=999999) - >>> ma.masked_where(a == 3, b) - masked_array(data = [-- 1 -- --], - mask = [ True False True True], - fill_value=999999) - - """ - # Make sure that condition is a valid standard-type mask. - cond = make_mask(condition) - a = np.array(a, copy=copy, subok=True) - - (cshape, ashape) = (cond.shape, a.shape) - if cshape and cshape != ashape: - raise IndexError("Inconsistant shape between the condition and the input" - " (got %s and %s)" % (cshape, ashape)) - if hasattr(a, '_mask'): - cond = mask_or(cond, a._mask) - cls = type(a) - else: - cls = MaskedArray - result = a.view(cls) - result._mask = cond - return result - - -def masked_greater(x, value, copy=True): - """ - Mask an array where greater than a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x > value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_greater(a, 2) - masked_array(data = [0 1 2 --], - mask = [False False False True], - fill_value=999999) - - """ - return masked_where(greater(x, value), x, copy=copy) - - -def masked_greater_equal(x, value, copy=True): - """ - Mask an array where greater than or equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x >= value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_greater_equal(a, 2) - masked_array(data = [0 1 -- --], - mask = [False False True True], - fill_value=999999) - - """ - return masked_where(greater_equal(x, value), x, copy=copy) - - -def masked_less(x, value, copy=True): - """ - Mask an array where less than a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x < value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_less(a, 2) - masked_array(data = [-- -- 2 3], - mask = [ True True False False], - fill_value=999999) - - """ - return masked_where(less(x, value), x, copy=copy) - - -def masked_less_equal(x, value, copy=True): - """ - Mask an array where less than or equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x <= value). - - See Also - -------- - masked_where : Mask where a condition is met. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_less_equal(a, 2) - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) - - """ - return masked_where(less_equal(x, value), x, copy=copy) - - -def masked_not_equal(x, value, copy=True): - """ - Mask an array where `not` equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x != value). - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_not_equal(a, 2) - masked_array(data = [-- -- 2 --], - mask = [ True True False True], - fill_value=999999) - - """ - return masked_where(not_equal(x, value), x, copy=copy) - - -def masked_equal(x, value, copy=True): - """ - Mask an array where equal to a given value. - - This function is a shortcut to ``masked_where``, with - `condition` = (x == value). For floating point arrays, - consider using ``masked_values(x, value)``. - - See Also - -------- - masked_where : Mask where a condition is met. - masked_values : Mask using floating point equality. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(4) - >>> a - array([0, 1, 2, 3]) - >>> ma.masked_equal(a, 2) - masked_array(data = [0 1 -- 3], - mask = [False False True False], - fill_value=999999) - - """ - # An alternative implementation relies on filling first: probably not needed. - # d = filled(x, 0) - # c = umath.equal(d, value) - # m = mask_or(c, getmask(x)) - # return array(d, mask=m, copy=copy) - output = masked_where(equal(x, value), x, copy=copy) - output.fill_value = value - return output - - -def masked_inside(x, v1, v2, copy=True): - """ - Mask an array inside a given interval. - - Shortcut to ``masked_where``, where `condition` is True for `x` inside - the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` - can be given in either order. - - See Also - -------- - masked_where : Mask where a condition is met. - - Notes - ----- - The array `x` is prefilled with its filling value. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] - >>> ma.masked_inside(x, -0.3, 0.3) - masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], - mask = [False False True True False False], - fill_value=1e+20) - - The order of `v1` and `v2` doesn't matter. - - >>> ma.masked_inside(x, 0.3, -0.3) - masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], - mask = [False False True True False False], - fill_value=1e+20) - - """ - if v2 < v1: - (v1, v2) = (v2, v1) - xf = filled(x) - condition = (xf >= v1) & (xf <= v2) - return masked_where(condition, x, copy=copy) - - -def masked_outside(x, v1, v2, copy=True): - """ - Mask an array outside a given interval. - - Shortcut to ``masked_where``, where `condition` is True for `x` outside - the interval [v1,v2] (x < v1)|(x > v2). - The boundaries `v1` and `v2` can be given in either order. - - See Also - -------- - masked_where : Mask where a condition is met. - - Notes - ----- - The array `x` is prefilled with its filling value. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] - >>> ma.masked_outside(x, -0.3, 0.3) - masked_array(data = [-- -- 0.01 0.2 -- --], - mask = [ True True False False True True], - fill_value=1e+20) - - The order of `v1` and `v2` doesn't matter. 
- - >>> ma.masked_outside(x, 0.3, -0.3) - masked_array(data = [-- -- 0.01 0.2 -- --], - mask = [ True True False False True True], - fill_value=1e+20) - - """ - if v2 < v1: - (v1, v2) = (v2, v1) - xf = filled(x) - condition = (xf < v1) | (xf > v2) - return masked_where(condition, x, copy=copy) - - -def masked_object(x, value, copy=True, shrink=True): - """ - Mask the array `x` where the data are exactly equal to value. - - This function is similar to `masked_values`, but only suitable - for object arrays: for floating point, use `masked_values` instead. - - Parameters - ---------- - x : array_like - Array to mask - value : object - Comparison value - copy : {True, False}, optional - Whether to return a copy of `x`. - shrink : {True, False}, optional - Whether to collapse a mask full of False to nomask - - Returns - ------- - result : MaskedArray - The result of masking `x` where equal to `value`. - - See Also - -------- - masked_where : Mask where a condition is met. - masked_equal : Mask where equal to a given value (integers). - masked_values : Mask using floating point equality. - - Examples - -------- - >>> import numpy.ma as ma - >>> food = np.array(['green_eggs', 'ham'], dtype=object) - >>> # don't eat spoiled food - >>> eat = ma.masked_object(food, 'green_eggs') - >>> print eat - [-- ham] - >>> # plain ol` ham is boring - >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) - >>> eat = ma.masked_object(fresh_food, 'green_eggs') - >>> print eat - [cheese ham pineapple] - - Note that `mask` is set to ``nomask`` if possible. - - >>> eat - masked_array(data = [cheese ham pineapple], - mask = False, - fill_value=?) - - """ - if isMaskedArray(x): - condition = umath.equal(x._data, value) - mask = x._mask - else: - condition = umath.equal(np.asarray(x), value) - mask = nomask - mask = mask_or(mask, make_mask(condition, shrink=shrink)) - return masked_array(x, mask=mask, copy=copy, fill_value=value) - - -def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): - """ - Mask using floating point equality. - - Return a MaskedArray, masked where the data in array `x` are approximately - equal to `value`, i.e. where the following condition is True - - (abs(x - value) <= atol+rtol*abs(value)) - - The fill_value is set to `value` and the mask is set to ``nomask`` if - possible. For integers, consider using ``masked_equal``. - - Parameters - ---------- - x : array_like - Array to mask. - value : float - Masking value. - rtol : float, optional - Tolerance parameter. - atol : float, optional - Tolerance parameter (1e-8). - copy : bool, optional - Whether to return a copy of `x`. - shrink : bool, optional - Whether to collapse a mask full of False to ``nomask``. - - Returns - ------- - result : MaskedArray - The result of masking `x` where approximately equal to `value`. - - See Also - -------- - masked_where : Mask where a condition is met. - masked_equal : Mask where equal to a given value (integers). - - Examples - -------- - >>> import numpy.ma as ma - >>> x = np.array([1, 1.1, 2, 1.1, 3]) - >>> ma.masked_values(x, 1.1) - masked_array(data = [1.0 -- 2.0 -- 3.0], - mask = [False True False True False], - fill_value=1.1) - - Note that `mask` is set to ``nomask`` if possible. - - >>> ma.masked_values(x, 1.5) - masked_array(data = [ 1. 1.1 2. 1.1 3. ], - mask = False, - fill_value=1.5) - - For integers, the fill value will be different in general to the - result of ``masked_equal``. 
- - >>> x = np.arange(5) - >>> x - array([0, 1, 2, 3, 4]) - >>> ma.masked_values(x, 2) - masked_array(data = [0 1 -- 3 4], - mask = [False False True False False], - fill_value=2) - >>> ma.masked_equal(x, 2) - masked_array(data = [0 1 -- 3 4], - mask = [False False True False False], - fill_value=999999) - - """ - mabs = umath.absolute - xnew = filled(x, value) - if issubclass(xnew.dtype.type, np.floating): - condition = umath.less_equal(mabs(xnew - value), atol + rtol * mabs(value)) - mask = getattr(x, '_mask', nomask) - else: - condition = umath.equal(xnew, value) - mask = nomask - mask = mask_or(mask, make_mask(condition, shrink=shrink)) - return masked_array(xnew, mask=mask, copy=copy, fill_value=value) - - -def masked_invalid(a, copy=True): - """ - Mask an array where invalid values occur (NaNs or infs). - - This function is a shortcut to ``masked_where``, with - `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. - Only applies to arrays with a dtype where NaNs or infs make sense - (i.e. floating point types), but accepts any array_like object. - - See Also - -------- - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=np.float) - >>> a[2] = np.NaN - >>> a[3] = np.PINF - >>> a - array([ 0., 1., NaN, Inf, 4.]) - >>> ma.masked_invalid(a) - masked_array(data = [0.0 1.0 -- -- 4.0], - mask = [False False True True False], - fill_value=1e+20) - - """ - a = np.array(a, copy=copy, subok=True) - mask = getattr(a, '_mask', None) - if mask is not None: - condition = ~(np.isfinite(getdata(a))) - if mask is not nomask: - condition |= mask - cls = type(a) - else: - condition = ~(np.isfinite(a)) - cls = MaskedArray - result = a.view(cls) - result._mask = condition - return result - - -#####-------------------------------------------------------------------------- -#---- --- Printing options --- -#####-------------------------------------------------------------------------- - -class _MaskedPrintOption: - """ - Handle the string used to represent missing data in a masked array. - - """ - def __init__ (self, display): - "Create the masked_print_option object." - self._display = display - self._enabled = True - - def display(self): - "Display the string to print for masked values." - return self._display - - def set_display (self, s): - "Set the string to print for masked values." - self._display = s - - def enabled(self): - "Is the use of the display value enabled?" - return self._enabled - - def enable(self, shrink=1): - "Set the enabling shrink to `shrink`." - self._enabled = shrink - - def __str__ (self): - return str(self._display) - - __repr__ = __str__ - -#if you single index into a masked location you get this object. -masked_print_option = _MaskedPrintOption('--') - - -def _recursive_printoption(result, mask, printopt): - """ - Puts printoptions in result where mask is True. 
- Private function allowing for recursion - """ - names = result.dtype.names - for name in names: - (curdata, curmask) = (result[name], mask[name]) - if curdata.dtype.names: - _recursive_printoption(curdata, curmask, printopt) - else: - np.copyto(curdata, printopt, where=curmask) - return - -_print_templates = dict(long_std="""\ -masked_%(name)s(data = - %(data)s, - %(nlen)s mask = - %(mask)s, - %(nlen)s fill_value = %(fill)s) -""", - short_std="""\ -masked_%(name)s(data = %(data)s, - %(nlen)s mask = %(mask)s, -%(nlen)s fill_value = %(fill)s) -""", - long_flx="""\ -masked_%(name)s(data = - %(data)s, - %(nlen)s mask = - %(mask)s, -%(nlen)s fill_value = %(fill)s, - %(nlen)s dtype = %(dtype)s) -""", - short_flx="""\ -masked_%(name)s(data = %(data)s, -%(nlen)s mask = %(mask)s, -%(nlen)s fill_value = %(fill)s, -%(nlen)s dtype = %(dtype)s) -""") - -#####-------------------------------------------------------------------------- -#---- --- MaskedArray class --- -#####-------------------------------------------------------------------------- - -def _recursive_filled(a, mask, fill_value): - """ - Recursively fill `a` with `fill_value`. - Private function - """ - names = a.dtype.names - for name in names: - current = a[name] - if current.dtype.names: - _recursive_filled(current, mask[name], fill_value[name]) - else: - np.copyto(current, fill_value[name], where=mask[name]) - - - -def flatten_structured_array(a): - """ - Flatten a structured array. - - The data type of the output is chosen such that it can represent all of the - (nested) fields. - - Parameters - ---------- - a : structured array - - Returns - ------- - output : masked array or ndarray - A flattened masked array if the input is a masked array, otherwise a - standard ndarray. - - Examples - -------- - >>> ndtype = [('a', int), ('b', float)] - >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) - >>> flatten_structured_array(a) - array([[1., 1.], - [2., 2.]]) - - """ - # - def flatten_sequence(iterable): - """Flattens a compound of nested iterables.""" - for elm in iter(iterable): - if hasattr(elm, '__iter__'): - for f in flatten_sequence(elm): - yield f - else: - yield elm - # - a = np.asanyarray(a) - inishape = a.shape - a = a.ravel() - if isinstance(a, MaskedArray): - out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) - out = out.view(MaskedArray) - out._mask = np.array([tuple(flatten_sequence(d.item())) - for d in getmaskarray(a)]) - else: - out = np.array([tuple(flatten_sequence(d.item())) for d in a]) - if len(inishape) > 1: - newshape = list(out.shape) - newshape[0] = inishape - out.shape = tuple(flatten_sequence(newshape)) - return out - - - -class _arraymethod(object): - """ - Define a wrapper for basic array methods. - - Upon call, returns a masked array, where the new ``_data`` array is - the output of the corresponding method called on the original - ``_data``. - - If `onmask` is True, the new mask is the output of the method called - on the initial mask. Otherwise, the new mask is just a reference - to the initial mask. - - Attributes - ---------- - _onmask : bool - Holds the `onmask` parameter. - obj : object - The object calling `_arraymethod`. - - Parameters - ---------- - funcname : str - Name of the function to apply on data. - onmask : bool - Whether the mask must be processed also (True) or left - alone (False). Default is True. Make available as `_onmask` - attribute. 
- - """ - def __init__(self, funcname, onmask=True): - self.__name__ = funcname - self._onmask = onmask - self.obj = None - self.__doc__ = self.getdoc() - # - def getdoc(self): - "Return the doc of the function (from the doc of the method)." - methdoc = getattr(ndarray, self.__name__, None) or \ - getattr(np, self.__name__, None) - if methdoc is not None: - return methdoc.__doc__ - # - def __get__(self, obj, objtype=None): - self.obj = obj - return self - # - def __call__(self, *args, **params): - methodname = self.__name__ - instance = self.obj - # Fallback : if the instance has not been initialized, use the first arg - if instance is None: - args = list(args) - instance = args.pop(0) - data = instance._data - mask = instance._mask - cls = type(instance) - result = getattr(data, methodname)(*args, **params).view(cls) - result._update_from(instance) - if result.ndim: - if not self._onmask: - result.__setmask__(mask) - elif mask is not nomask: - result.__setmask__(getattr(mask, methodname)(*args, **params)) - else: - if mask.ndim and (not mask.dtype.names and mask.all()): - return masked - return result - - -class MaskedIterator(object): - """ - Flat iterator object to iterate over masked arrays. - - A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array - `x`. It allows iterating over the array as if it were a 1-D array, - either in a for-loop or by calling its `next` method. - - Iteration is done in C-contiguous style, with the last index varying the - fastest. The iterator can also be indexed using basic slicing or - advanced indexing. - - See Also - -------- - MaskedArray.flat : Return a flat iterator over an array. - MaskedArray.flatten : Returns a flattened copy of an array. - - Notes - ----- - `MaskedIterator` is not exported by the `ma` module. Instead of - instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. - - Examples - -------- - >>> x = np.ma.array(arange(6).reshape(2, 3)) - >>> fl = x.flat - >>> type(fl) - - >>> for item in fl: - ... print item - ... - 0 - 1 - 2 - 3 - 4 - 5 - - Extracting more than a single element b indexing the `MaskedIterator` - returns a masked array: - - >>> fl[2:4] - masked_array(data = [2 3], - mask = False, - fill_value = 999999) - - """ - def __init__(self, ma): - self.ma = ma - self.dataiter = ma._data.flat - # - if ma._mask is nomask: - self.maskiter = None - else: - self.maskiter = ma._mask.flat - - def __iter__(self): - return self - - def __getitem__(self, indx): - result = self.dataiter.__getitem__(indx).view(type(self.ma)) - if self.maskiter is not None: - _mask = self.maskiter.__getitem__(indx) - if isinstance(_mask, ndarray): - # set shape to match that of data; this is needed for matrices - _mask.shape = result.shape - result._mask = _mask - elif isinstance(_mask, np.void): - return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) - elif _mask: # Just a scalar, masked - return masked - return result - - ### This won't work is ravel makes a copy - def __setitem__(self, index, value): - self.dataiter[index] = getdata(value) - if self.maskiter is not None: - self.maskiter[index] = getmaskarray(value) - - def __next__(self): - """ - Return the next value, or raise StopIteration. 
-
-
-class MaskedIterator(object):
-    """
-    Flat iterator object to iterate over masked arrays.
-
-    A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
-    `x`. It allows iterating over the array as if it were a 1-D array,
-    either in a for-loop or by calling its `next` method.
-
-    Iteration is done in C-contiguous style, with the last index varying the
-    fastest. The iterator can also be indexed using basic slicing or
-    advanced indexing.
-
-    See Also
-    --------
-    MaskedArray.flat : Return a flat iterator over an array.
-    MaskedArray.flatten : Returns a flattened copy of an array.
-
-    Notes
-    -----
-    `MaskedIterator` is not exported by the `ma` module. Instead of
-    instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
-
-    Examples
-    --------
-    >>> x = np.ma.array(np.arange(6).reshape(2, 3))
-    >>> fl = x.flat
-    >>> type(fl)
-    <class 'numpy.ma.core.MaskedIterator'>
-    >>> for item in fl:
-    ...     print item
-    ...
-    0
-    1
-    2
-    3
-    4
-    5
-
-    Extracting more than a single element by indexing the `MaskedIterator`
-    returns a masked array:
-
-    >>> fl[2:4]
-    masked_array(data = [2 3],
-                 mask = False,
-           fill_value = 999999)
-
-    """
-    def __init__(self, ma):
-        self.ma = ma
-        self.dataiter = ma._data.flat
-        #
-        if ma._mask is nomask:
-            self.maskiter = None
-        else:
-            self.maskiter = ma._mask.flat
-
-    def __iter__(self):
-        return self
-
-    def __getitem__(self, indx):
-        result = self.dataiter.__getitem__(indx).view(type(self.ma))
-        if self.maskiter is not None:
-            _mask = self.maskiter.__getitem__(indx)
-            if isinstance(_mask, ndarray):
-                # set shape to match that of data; this is needed for matrices
-                _mask.shape = result.shape
-                result._mask = _mask
-            elif isinstance(_mask, np.void):
-                return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
-            elif _mask:  # Just a scalar, masked
-                return masked
-        return result
-
-    ### This won't work if ravel makes a copy
-    def __setitem__(self, index, value):
-        self.dataiter[index] = getdata(value)
-        if self.maskiter is not None:
-            self.maskiter[index] = getmaskarray(value)
-
-    def __next__(self):
-        """
-        Return the next value, or raise StopIteration.
-
-        Examples
-        --------
-        >>> x = np.ma.array([3, 2], mask=[0, 1])
-        >>> fl = x.flat
-        >>> fl.next()
-        3
-        >>> fl.next()
-        masked_array(data = --,
-                     mask = True,
-               fill_value = 1e+20)
-        >>> fl.next()
-        Traceback (most recent call last):
-          File "<stdin>", line 1, in <module>
-          File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
-            d = self.dataiter.next()
-        StopIteration
-
-        """
-        d = next(self.dataiter)
-        if self.maskiter is not None:
-            m = next(self.maskiter)
-            if isinstance(m, np.void):
-                return mvoid(d, mask=m, hardmask=self.ma._hardmask)
-            elif m:  # Just a scalar, masked
-                return masked
-        return d
-
-    next = __next__
-
-
-class MaskedArray(ndarray):
-    """
-    An array class with possibly masked values.
-
-    Masked values of True exclude the corresponding element from any
-    computation.
-
-    Construction::
-
-      x = MaskedArray(data, mask=nomask, dtype=None,
-                      copy=False, subok=True, ndmin=0, fill_value=None,
-                      keep_mask=True, hard_mask=None, shrink=True)
-
-    Parameters
-    ----------
-    data : array_like
-        Input data.
-    mask : sequence, optional
-        Mask. Must be convertible to an array of booleans with the same
-        shape as `data`. True indicates a masked (i.e. invalid) data.
-    dtype : dtype, optional
-        Data type of the output.
-        If `dtype` is None, the type of the data argument (``data.dtype``)
-        is used. If `dtype` is not None and different from ``data.dtype``,
-        a copy is performed.
-    copy : bool, optional
-        Whether to copy the input data (True), or to use a reference instead.
-        Default is False.
-    subok : bool, optional
-        Whether to return a subclass of `MaskedArray` if possible (True) or a
-        plain `MaskedArray`. Default is True.
-    ndmin : int, optional
-        Minimum number of dimensions. Default is 0.
-    fill_value : scalar, optional
-        Value used to fill in the masked values when necessary.
-        If None, a default based on the data-type is used.
-    keep_mask : bool, optional
-        Whether to combine `mask` with the mask of the input data, if any
-        (True), or to use only `mask` for the output (False). Default is True.
-    hard_mask : bool, optional
-        Whether to use a hard mask or not. With a hard mask, masked values
-        cannot be unmasked. Default is False.
-    shrink : bool, optional
-        Whether to force compression of an empty mask. Default is True.
-
-    """
-
-    __array_priority__ = 15
-    _defaultmask = nomask
-    _defaulthardmask = False
-    _baseclass = ndarray
-
-    def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
-                subok=True, ndmin=0, fill_value=None,
-                keep_mask=True, hard_mask=None, shrink=True,
-                **options):
-        """
-        Create a new masked array from scratch.
-
-        Notes
-        -----
-        A masked array can also be created by taking a .view(MaskedArray).
-
-        """
-        # Process data............
-        _data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin)
-        _baseclass = getattr(data, '_baseclass', type(_data))
-        # Check that we're not erasing the mask..........
-        if isinstance(data, MaskedArray) and (data.shape != _data.shape):
-            copy = True
-        # Careful, cls might not always be MaskedArray...
-        if not isinstance(data, cls) or not subok:
-            _data = ndarray.view(_data, cls)
-        else:
-            _data = ndarray.view(_data, type(data))
-        # Backwards compatibility w/ numpy.core.ma .......
-        if hasattr(data, '_mask') and not isinstance(data, ndarray):
-            _data._mask = data._mask
-            _sharedmask = True
-        # Process mask ...............................
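-        # The logic below distinguishes two cases: no explicit mask was given
-        # (then reuse or erase any mask already carried by `data`, depending
-        # on `keep_mask` and `shrink`), or an explicit mask was given (then
-        # coerce it to the proper dtype and shape, and either combine it with,
-        # or substitute it for, the existing one).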
- # Number of named fields (or zero if none) - names_ = _data.dtype.names or () - # Type of the mask - if names_: - mdtype = make_mask_descr(_data.dtype) - else: - mdtype = MaskType - # Case 1. : no mask in input ............ - if mask is nomask: - # Erase the current mask ? - if not keep_mask: - # With a reduced version - if shrink: - _data._mask = nomask - # With full version - else: - _data._mask = np.zeros(_data.shape, dtype=mdtype) - # Check whether we missed something - elif isinstance(data, (tuple, list)): - try: - # If data is a sequence of masked array - mask = np.array([getmaskarray(m) for m in data], - dtype=mdtype) - except ValueError: - # If data is nested - mask = nomask - # Force shrinking of the mask if needed (and possible) - if (mdtype == MaskType) and mask.any(): - _data._mask = mask - _data._sharedmask = False - else: - if copy: - _data._mask = _data._mask.copy() - _data._sharedmask = False - # Reset the shape of the original mask - if getmask(data) is not nomask: - data._mask.shape = data.shape - else: - _data._sharedmask = True - # Case 2. : With a mask in input ........ - else: - # Read the mask with the current mdtype - try: - mask = np.array(mask, copy=copy, dtype=mdtype) - # Or assume it's a sequence of bool/int - except TypeError: - mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - # Make sure the mask and the data have the same shape - if mask.shape != _data.shape: - (nd, nm) = (_data.size, mask.size) - if nm == 1: - mask = np.resize(mask, _data.shape) - elif nm == nd: - mask = np.reshape(mask, _data.shape) - else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." - raise MaskError(msg % (nd, nm)) - copy = True - # Set the mask to the new value - if _data._mask is nomask: - _data._mask = mask - _data._sharedmask = not copy - else: - if not keep_mask: - _data._mask = mask - _data._sharedmask = not copy - else: - if names_: - def _recursive_or(a, b): - "do a|=b on each field of a, recursively" - for name in a.dtype.names: - (af, bf) = (a[name], b[name]) - if af.dtype.names: - _recursive_or(af, bf) - else: - af |= bf - return - _recursive_or(_data._mask, mask) - else: - _data._mask = np.logical_or(mask, _data._mask) - _data._sharedmask = False - # Update fill_value....... - if fill_value is None: - fill_value = getattr(data, '_fill_value', None) - # But don't run the check unless we have something to check.... - if fill_value is not None: - _data._fill_value = _check_fill_value(fill_value, _data.dtype) - # Process extra options .. - if hard_mask is None: - _data._hardmask = getattr(data, '_hardmask', False) - else: - _data._hardmask = hard_mask - _data._baseclass = _baseclass - return _data - # - def _update_from(self, obj): - """Copies some attributes of obj to self. 
- """ - if obj is not None and isinstance(obj, ndarray): - _baseclass = type(obj) - else: - _baseclass = ndarray - # We need to copy the _basedict to avoid backward propagation - _optinfo = {} - _optinfo.update(getattr(obj, '_optinfo', {})) - _optinfo.update(getattr(obj, '_basedict', {})) - if not isinstance(obj, MaskedArray): - _optinfo.update(getattr(obj, '__dict__', {})) - _dict = dict(_fill_value=getattr(obj, '_fill_value', None), - _hardmask=getattr(obj, '_hardmask', False), - _sharedmask=getattr(obj, '_sharedmask', False), - _isfield=getattr(obj, '_isfield', False), - _baseclass=getattr(obj, '_baseclass', _baseclass), - _optinfo=_optinfo, - _basedict=_optinfo) - self.__dict__.update(_dict) - self.__dict__.update(_optinfo) - return - - - def __array_finalize__(self, obj): - """Finalizes the masked array. - """ - # Get main attributes ......... - self._update_from(obj) - if isinstance(obj, ndarray): - odtype = obj.dtype - if odtype.names: - _mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype)) - else: - _mask = getattr(obj, '_mask', nomask) - else: - _mask = nomask - self._mask = _mask - # Finalize the mask ........... - if self._mask is not nomask: - try: - self._mask.shape = self.shape - except ValueError: - self._mask = nomask - except (TypeError, AttributeError): - # When _mask.shape is not writable (because it's a void) - pass - # Finalize the fill_value for structured arrays - if self.dtype.names: - if self._fill_value is None: - self._fill_value = _check_fill_value(None, self.dtype) - return - - - def __array_wrap__(self, obj, context=None): - """ - Special hook for ufuncs. - Wraps the numpy array and sets the mask according to context. - """ - result = obj.view(type(self)) - result._update_from(self) - #.......... - if context is not None: - result._mask = result._mask.copy() - (func, args, _) = context - m = reduce(mask_or, [getmaskarray(arg) for arg in args]) - # Get the domain mask................ - domain = ufunc_domain.get(func, None) - if domain is not None: - # Take the domain, and make sure it's a ndarray - if len(args) > 2: - d = filled(reduce(domain, args), True) - else: - d = filled(domain(*args), True) - # Fill the result where the domain is wrong - try: - # Binary domain: take the last value - fill_value = ufunc_fills[func][-1] - except TypeError: - # Unary domain: just use this one - fill_value = ufunc_fills[func] - except KeyError: - # Domain not recognized, use fill_value instead - fill_value = self.fill_value - result = result.copy() - np.copyto(result, fill_value, where=d) - # Update the mask - if m is nomask: - if d is not nomask: - m = d - else: - # Don't modify inplace, we risk back-propagation - m = (m | d) - # Make sure the mask has the proper size - if result.shape == () and m: - return masked - else: - result._mask = m - result._sharedmask = False - #.... - return result - - - def view(self, dtype=None, type=None, fill_value=None): - """ - Return a view of the MaskedArray data - - Parameters - ---------- - dtype : data-type or ndarray sub-class, optional - Data-type descriptor of the returned view, e.g., float32 or int16. - The default, None, results in the view having the same data-type - as `a`. As with ``ndarray.view``, dtype can also be specified as - an ndarray sub-class, which then specifies the type of the - returned object (this is equivalent to setting the ``type`` - parameter). - type : Python type, optional - Type of the returned view, e.g., ndarray or matrix. Again, the - default None results in type preservation. 
- - Notes - ----- - - ``a.view()`` is used two different ways: - - ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view - of the array's memory with a different data-type. This can cause a - reinterpretation of the bytes of memory. - - ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just - returns an instance of `ndarray_subclass` that looks at the same array - (same shape, dtype, etc.) This does not cause a reinterpretation of the - memory. - - If `fill_value` is not specified, but `dtype` is specified (and is not - an ndarray sub-class), the `fill_value` of the MaskedArray will be - reset. If neither `fill_value` nor `dtype` are specified (or if - `dtype` is an ndarray sub-class), then the fill value is preserved. - Finally, if `fill_value` is specified, but `dtype` is not, the fill - value is set to the specified value. - - For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of - bytes per entry than the previous dtype (for example, converting a - regular array to a structured array), then the behavior of the view - cannot be predicted just from the superficial appearance of ``a`` (shown - by ``print(a)``). It also depends on exactly how ``a`` is stored in - memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus - defined as a slice or transpose, etc., the view may give different - results. - """ - - if dtype is None: - if type is None: - output = ndarray.view(self) - else: - output = ndarray.view(self, type) - elif type is None: - try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) - dtype = None - else: - output = ndarray.view(self, dtype) - except TypeError: - output = ndarray.view(self, dtype) - else: - output = ndarray.view(self, dtype, type) - # Should we update the mask ? - if (getattr(output, '_mask', nomask) is not nomask): - if dtype is None: - dtype = output.dtype - mdtype = make_mask_descr(dtype) - output._mask = self._mask.view(mdtype, ndarray) - # Try to reset the shape of the mask (if we don't have a void) - try: - output._mask.shape = output.shape - except (AttributeError, TypeError): - pass - # Make sure to reset the _fill_value if needed - if getattr(output, '_fill_value', None) is not None: - if fill_value is None: - if dtype is None: - pass # leave _fill_value as is - else: - output._fill_value = None - else: - output.fill_value = fill_value - return output - view.__doc__ = ndarray.view.__doc__ - - - def astype(self, newtype): - """ - Returns a copy of the MaskedArray cast to given newtype. - - Returns - ------- - output : MaskedArray - A copy of self cast to input newtype. - The returned record shape matches self.shape. - - Examples - -------- - >>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1.0 -- 3.1] - [-- 5.0 --] - [7.0 -- 9.0]] - >>> print x.astype(int32) - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - - """ - newtype = np.dtype(newtype) - output = self._data.astype(newtype).view(type(self)) - output._update_from(self) - names = output.dtype.names - if names is None: - output._mask = self._mask.astype(bool) - else: - if self._mask is nomask: - output._mask = nomask - else: - output._mask = self._mask.astype([(n, bool) for n in names]) - # Don't check _fill_value if it's None, that'll speed things up - if self._fill_value is not None: - output._fill_value = _check_fill_value(self._fill_value, newtype) - return output - - - def __getitem__(self, indx): - """x.__getitem__(y) <==> x[y] - - Return the item described by i, as a masked array. 
- - """ - # This test is useful, but we should keep things light... -# if getmask(indx) is not nomask: -# msg = "Masked arrays must be filled before they can be used as indices!" -# raise IndexError(msg) - _data = ndarray.view(self, ndarray) - dout = ndarray.__getitem__(_data, indx) - # We could directly use ndarray.__getitem__ on self... - # But then we would have to modify __array_finalize__ to prevent the - # mask of being reshaped if it hasn't been set up properly yet... - # So it's easier to stick to the current version - _mask = self._mask - if not getattr(dout, 'ndim', False): - # A record ................ - if isinstance(dout, np.void): - mask = _mask[indx] - # We should always re-cast to mvoid, otherwise users can - # change masks on rows that already have masked values, but not - # on rows that have no masked values, which is inconsistent. - dout = mvoid(dout, mask=mask, hardmask=self._hardmask) - # Just a scalar............ - elif _mask is not nomask and _mask[indx]: - return masked - else: - # Force dout to MA ........ - dout = dout.view(type(self)) - # Inherit attributes from self - dout._update_from(self) - # Check the fill_value .... - if isinstance(indx, basestring): - if self._fill_value is not None: - dout._fill_value = self._fill_value[indx] - dout._isfield = True - # Update the mask if needed - if _mask is not nomask: - dout._mask = _mask[indx] - dout._sharedmask = True -# Note: Don't try to check for m.any(), that'll take too long... - return dout - - def __setitem__(self, indx, value): - """x.__setitem__(i, y) <==> x[i]=y - - Set item described by index. If value is masked, masks those - locations. - - """ - if self is masked: - raise MaskError('Cannot alter the masked element.') - # This test is useful, but we should keep things light... -# if getmask(indx) is not nomask: -# msg = "Masked arrays must be filled before they can be used as indices!" -# raise IndexError(msg) - _data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass')) - _mask = ndarray.__getattribute__(self, '_mask') - if isinstance(indx, basestring): - ndarray.__setitem__(_data, indx, value) - if _mask is nomask: - self._mask = _mask = make_mask_none(self.shape, self.dtype) - _mask[indx] = getmask(value) - return - #........................................ - _dtype = ndarray.__getattribute__(_data, 'dtype') - nbfields = len(_dtype.names or ()) - #........................................ - if value is masked: - # The mask wasn't set: create a full version... - if _mask is nomask: - _mask = self._mask = make_mask_none(self.shape, _dtype) - # Now, set the mask to its value. - if nbfields: - _mask[indx] = tuple([True] * nbfields) - else: - _mask[indx] = True - if not self._isfield: - self._sharedmask = False - return - #........................................ 
- # Get the _data part of the new value - dval = value - # Get the _mask part of the new value - mval = getattr(value, '_mask', nomask) - if nbfields and mval is nomask: - mval = tuple([False] * nbfields) - if _mask is nomask: - # Set the data, then the mask - ndarray.__setitem__(_data, indx, dval) - if mval is not nomask: - _mask = self._mask = make_mask_none(self.shape, _dtype) - ndarray.__setitem__(_mask, indx, mval) - elif not self._hardmask: - # Unshare the mask if necessary to avoid propagation - if not self._isfield: - self.unshare_mask() - _mask = ndarray.__getattribute__(self, '_mask') - # Set the data, then the mask - ndarray.__setitem__(_data, indx, dval) - ndarray.__setitem__(_mask, indx, mval) - elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): - indx = indx * umath.logical_not(_mask) - ndarray.__setitem__(_data, indx, dval) - else: - if nbfields: - err_msg = "Flexible 'hard' masks are not yet supported..." - raise NotImplementedError(err_msg) - mindx = mask_or(_mask[indx], mval, copy=True) - dindx = self._data[indx] - if dindx.size > 1: - np.copyto(dindx, dval, where=~mindx) - elif mindx is nomask: - dindx = dval - ndarray.__setitem__(_data, indx, dindx) - _mask[indx] = mindx - return - - - def __getslice__(self, i, j): - """x.__getslice__(i, j) <==> x[i:j] - - Return the slice described by (i, j). The use of negative - indices is not supported. - - """ - return self.__getitem__(slice(i, j)) - - def __setslice__(self, i, j, value): - """x.__setslice__(i, j, value) <==> x[i:j]=value - - Set the slice (i,j) of a to value. If value is masked, mask - those locations. - - """ - self.__setitem__(slice(i, j), value) - - - def __setmask__(self, mask, copy=False): - """Set the mask. - - """ - idtype = ndarray.__getattribute__(self, 'dtype') - current_mask = ndarray.__getattribute__(self, '_mask') - if mask is masked: - mask = True - # Make sure the mask is set - if (current_mask is nomask): - # Just don't do anything is there's nothing to do... - if mask is nomask: - return - current_mask = self._mask = make_mask_none(self.shape, idtype) - # No named fields......... - if idtype.names is None: - # Hardmask: don't unmask the data - if self._hardmask: - current_mask |= mask - # Softmask: set everything to False - # If it's obviously a compatible scalar, use a quick update - # method... - elif isinstance(mask, (int, float, np.bool_, np.number)): - current_mask[...] = mask - # ...otherwise fall back to the slower, general purpose way. - else: - current_mask.flat = mask - # Named fields w/ ............ - else: - mdtype = current_mask.dtype - mask = np.array(mask, copy=False) - # Mask is a singleton - if not mask.ndim: - # It's a boolean : make a record - if mask.dtype.kind == 'b': - mask = np.array(tuple([mask.item()]*len(mdtype)), - dtype=mdtype) - # It's a record: make sure the dtype is correct - else: - mask = mask.astype(mdtype) - # Mask is a sequence - else: - # Make sure the new mask is a ndarray with the proper dtype - try: - mask = np.array(mask, copy=copy, dtype=mdtype) - # Or assume it's a sequence of bool/int - except TypeError: - mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - # Hardmask: don't unmask the data - if self._hardmask: - for n in idtype.names: - current_mask[n] |= mask[n] - # Softmask: set everything to False - # If it's obviously a compatible scalar, use a quick update - # method... - elif isinstance(mask, (int, float, np.bool_, np.number)): - current_mask[...] = mask - # ...otherwise fall back to the slower, general purpose way. 
- else: - current_mask.flat = mask - # Reshape if needed - if current_mask.shape: - current_mask.shape = self.shape - return - _set_mask = __setmask__ - #.... - def _get_mask(self): - """Return the current mask. - - """ - # We could try to force a reshape, but that wouldn't work in some cases. -# return self._mask.reshape(self.shape) - return self._mask - mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") - - - def _get_recordmask(self): - """ - Return the mask of the records. - A record is masked when all the fields are masked. - - """ - _mask = ndarray.__getattribute__(self, '_mask').view(ndarray) - if _mask.dtype.names is None: - return _mask - return np.all(flatten_structured_array(_mask), axis= -1) - - - def _set_recordmask(self): - """Return the mask of the records. - A record is masked when all the fields are masked. - - """ - raise NotImplementedError("Coming soon: setting the mask per records!") - recordmask = property(fget=_get_recordmask) - - #............................................ - def harden_mask(self): - """ - Force the mask to hard. - - Whether the mask of a masked array is hard or soft is determined by - its `hardmask` property. `harden_mask` sets `hardmask` to True. - - See Also - -------- - hardmask - - """ - self._hardmask = True - return self - - def soften_mask(self): - """ - Force the mask to soft. - - Whether the mask of a masked array is hard or soft is determined by - its `hardmask` property. `soften_mask` sets `hardmask` to False. - - See Also - -------- - hardmask - - """ - self._hardmask = False - return self - - hardmask = property(fget=lambda self: self._hardmask, - doc="Hardness of the mask") - - - def unshare_mask(self): - """ - Copy the mask and set the sharedmask flag to False. - - Whether the mask is shared between masked arrays can be seen from - the `sharedmask` property. `unshare_mask` ensures the mask is not shared. - A copy of the mask is only made if it was shared. - - See Also - -------- - sharedmask - - """ - if self._sharedmask: - self._mask = self._mask.copy() - self._sharedmask = False - return self - - sharedmask = property(fget=lambda self: self._sharedmask, - doc="Share status of the mask (read-only).") - - def shrink_mask(self): - """ - Reduce a mask to nomask when possible. - - Parameters - ---------- - None - - Returns - ------- - None - - Examples - -------- - >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) - >>> x.mask - array([[False, False], - [False, False]], dtype=bool) - >>> x.shrink_mask() - >>> x.mask - False - - """ - m = self._mask - if m.ndim and not m.any(): - self._mask = nomask - return self - - #............................................ - - baseclass = property(fget=lambda self:self._baseclass, - doc="Class of the underlying data (read-only).") - - def _get_data(self): - """Return the current data, as a view of the original - underlying data. - - """ - return ndarray.view(self, self._baseclass) - _data = property(fget=_get_data) - data = property(fget=_get_data) - - def _get_flat(self): - "Return a flat iterator." - return MaskedIterator(self) - # - def _set_flat (self, value): - "Set a flattened version of self to value." - y = self.ravel() - y[:] = value - # - flat = property(fget=_get_flat, fset=_set_flat, - doc="Flat version of the array.") - - - def get_fill_value(self): - """ - Return the filling value of the masked array. - - Returns - ------- - fill_value : scalar - The filling value. - - Examples - -------- - >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: - ... 
np.ma.array([0, 1], dtype=dt).get_fill_value() - ... - 999999 - 999999 - 1e+20 - (1e+20+0j) - - >>> x = np.ma.array([0, 1.], fill_value=-np.inf) - >>> x.get_fill_value() - -inf - - """ - if self._fill_value is None: - self._fill_value = _check_fill_value(None, self.dtype) - return self._fill_value[()] - - def set_fill_value(self, value=None): - """ - Set the filling value of the masked array. - - Parameters - ---------- - value : scalar, optional - The new filling value. Default is None, in which case a default - based on the data type is used. - - See Also - -------- - ma.set_fill_value : Equivalent function. - - Examples - -------- - >>> x = np.ma.array([0, 1.], fill_value=-np.inf) - >>> x.fill_value - -inf - >>> x.set_fill_value(np.pi) - >>> x.fill_value - 3.1415926535897931 - - Reset to default: - - >>> x.set_fill_value() - >>> x.fill_value - 1e+20 - - """ - target = _check_fill_value(value, self.dtype) - _fill_value = self._fill_value - if _fill_value is None: - # Create the attribute if it was undefined - self._fill_value = target - else: - # Don't overwrite the attribute, just fill it (for propagation) - _fill_value[()] = target - - fill_value = property(fget=get_fill_value, fset=set_fill_value, - doc="Filling value.") - - - def filled(self, fill_value=None): - """ - Return a copy of self, with masked values filled with a given value. - - Parameters - ---------- - fill_value : scalar, optional - The value to use for invalid entries (None by default). - If None, the `fill_value` attribute of the array is used instead. - - Returns - ------- - filled_array : ndarray - A copy of ``self`` with invalid entries replaced by *fill_value* - (be it the function argument or the attribute of ``self``. - - Notes - ----- - The result is **not** a MaskedArray! - - Examples - -------- - >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) - >>> x.filled() - array([1, 2, -999, 4, -999]) - >>> type(x.filled()) - - - Subclassing is preserved. This means that if the data part of the masked - array is a matrix, `filled` returns a matrix: - - >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) - >>> x.filled() - matrix([[ 1, 999999], - [999999, 4]]) - - """ - m = self._mask - if m is nomask: - return self._data - # - if fill_value is None: - fill_value = self.fill_value - else: - fill_value = _check_fill_value(fill_value, self.dtype) - # - if self is masked_singleton: - return np.asanyarray(fill_value) - # - if m.dtype.names: - result = self._data.copy('K') - _recursive_filled(result, self._mask, fill_value) - elif not m.any(): - return self._data - else: - result = self._data.copy('K') - try: - np.copyto(result, fill_value, where=m) - except (TypeError, AttributeError): - fill_value = narray(fill_value, dtype=object) - d = result.astype(object) - result = np.choose(m, (d, fill_value)) - except IndexError: - #ok, if scalar - if self._data.shape: - raise - elif m: - result = np.array(fill_value, dtype=self.dtype) - else: - result = self._data - return result - - def compressed(self): - """ - Return all the non-masked data as a 1-D array. - - Returns - ------- - data : ndarray - A new `ndarray` holding the non-masked data is returned. - - Notes - ----- - The result is **not** a MaskedArray! 
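A minimal sketch of the `fill_value`/`filled` machinery described above:

    import numpy as np

    x = np.ma.array([1, 2, 3], mask=[0, 1, 0], fill_value=-999)
    print(x.fill_value)    # -999
    print(x.filled())      # array([   1, -999,    3])  (a plain ndarray)
    print(x.filled(0))     # array([1, 0, 3])  (an explicit argument wins)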
- - Examples - -------- - >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) - >>> x.compressed() - array([0, 1]) - >>> type(x.compressed()) - - - """ - data = ndarray.ravel(self._data) - if self._mask is not nomask: - data = data.compress(np.logical_not(ndarray.ravel(self._mask))) - return data - - - def compress(self, condition, axis=None, out=None): - """ - Return `a` where condition is ``True``. - - If condition is a `MaskedArray`, missing values are considered - as ``False``. - - Parameters - ---------- - condition : var - Boolean 1-d array selecting which entries to return. If len(condition) - is less than the size of a along the axis, then output is truncated - to length of condition array. - axis : {None, int}, optional - Axis along which the operation must be performed. - out : {None, ndarray}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - Returns - ------- - result : MaskedArray - A :class:`MaskedArray` object. - - Notes - ----- - Please note the difference with :meth:`compressed` ! - The output of :meth:`compress` has a mask, the output of - :meth:`compressed` does not. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> x.compress([1, 0, 1]) - masked_array(data = [1 3], - mask = [False False], - fill_value=999999) - - >>> x.compress([1, 0, 1], axis=1) - masked_array(data = - [[1 3] - [-- --] - [7 9]], - mask = - [[False False] - [ True True] - [False False]], - fill_value=999999) - - """ - # Get the basic components - (_data, _mask) = (self._data, self._mask) - # Force the condition to a regular ndarray (forget the missing values...) - condition = np.array(condition, copy=False, subok=False) - # - _new = _data.compress(condition, axis=axis, out=out).view(type(self)) - _new._update_from(self) - if _mask is not nomask: - _new._mask = _mask.compress(condition, axis=axis) - return _new - - #............................................ - def __str__(self): - """String representation. - - """ - if masked_print_option.enabled(): - f = masked_print_option - if self is masked: - return str(f) - m = self._mask - if m is nomask: - res = self._data - else: - if m.shape == (): - if m.dtype.names: - m = m.view((bool, len(m.dtype))) - if m.any(): - return str(tuple((f if _m else _d) for _d, _m in - zip(self._data.tolist(), m))) - else: - return str(self._data) - elif m: - return str(f) - else: - return str(self._data) - # convert to object array to make filled work - names = self.dtype.names - if names is None: - res = self._data.astype("O") - res.view(ndarray)[m] = f - else: - rdtype = _recursive_make_descr(self.dtype, "O") - res = self._data.astype(rdtype) - _recursive_printoption(res, m, f) - else: - res = self.filled(self.fill_value) - return str(res) - - def __repr__(self): - """Literal string representation. 
- - """ - n = len(self.shape) - if self._baseclass is np.ndarray: - name = 'array' - else: - name = self._baseclass.__name__ - - parameters = dict(name=name, nlen=" " * len(name), - data=str(self), mask=str(self._mask), - fill=str(self.fill_value), dtype=str(self.dtype)) - if self.dtype.names: - if n <= 1: - return _print_templates['short_flx'] % parameters - return _print_templates['long_flx'] % parameters - elif n <= 1: - return _print_templates['short_std'] % parameters - return _print_templates['long_std'] % parameters - - def __eq__(self, other): - "Check whether other equals self elementwise" - if self is masked: - return masked - omask = getattr(other, '_mask', nomask) - if omask is nomask: - check = ndarray.__eq__(self.filled(0), other) - try: - check = check.view(type(self)) - check._mask = self._mask - except AttributeError: - # Dang, we have a bool instead of an array: return the bool - return check - else: - odata = filled(other, 0) - check = ndarray.__eq__(self.filled(0), odata).view(type(self)) - if self._mask is nomask: - check._mask = omask - else: - mask = mask_or(self._mask, omask) - if mask.dtype.names: - if mask.size > 1: - axis = 1 - else: - axis = None - try: - mask = mask.view((bool_, len(self.dtype))).all(axis) - except ValueError: - mask = np.all([[f[n].all() for n in mask.dtype.names] - for f in mask], axis=axis) - check._mask = mask - return check - # - def __ne__(self, other): - "Check whether other doesn't equal self elementwise" - if self is masked: - return masked - omask = getattr(other, '_mask', nomask) - if omask is nomask: - check = ndarray.__ne__(self.filled(0), other) - try: - check = check.view(type(self)) - check._mask = self._mask - except AttributeError: - # In case check is a boolean (or a numpy.bool) - return check - else: - odata = filled(other, 0) - check = ndarray.__ne__(self.filled(0), odata).view(type(self)) - if self._mask is nomask: - check._mask = omask - else: - mask = mask_or(self._mask, omask) - if mask.dtype.names: - if mask.size > 1: - axis = 1 - else: - axis = None - try: - mask = mask.view((bool_, len(self.dtype))).all(axis) - except ValueError: - mask = np.all([[f[n].all() for n in mask.dtype.names] - for f in mask], axis=axis) - check._mask = mask - return check - # - def __add__(self, other): - "Add other to self, and return a new masked array." - return add(self, other) - # - def __radd__(self, other): - "Add other to self, and return a new masked array." - return add(self, other) - # - def __sub__(self, other): - "Subtract other to self, and return a new masked array." - return subtract(self, other) - # - def __rsub__(self, other): - "Subtract other to self, and return a new masked array." - return subtract(other, self) - # - def __mul__(self, other): - "Multiply other by self, and return a new masked array." - return multiply(self, other) - # - def __rmul__(self, other): - "Multiply other by self, and return a new masked array." - return multiply(self, other) - # - def __div__(self, other): - "Divide other into self, and return a new masked array." - return divide(self, other) - # - def __truediv__(self, other): - "Divide other into self, and return a new masked array." - return true_divide(self, other) - # - def __rtruediv__(self, other): - "Divide other into self, and return a new masked array." - return true_divide(other, self) - # - def __floordiv__(self, other): - "Divide other into self, and return a new masked array." 
- return floor_divide(self, other) - # - def __rfloordiv__(self, other): - "Divide other into self, and return a new masked array." - return floor_divide(other, self) - # - def __pow__(self, other): - "Raise self to the power other, masking the potential NaNs/Infs" - return power(self, other) - # - def __rpow__(self, other): - "Raise self to the power other, masking the potential NaNs/Infs" - return power(other, self) - #............................................ - def __iadd__(self, other): - "Add other to self in-place." - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - else: - if m is not nomask: - self._mask += m - ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other))) - return self - #.... - def __isub__(self, other): - "Subtract other from self in-place." - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - elif m is not nomask: - self._mask += m - ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other))) - return self - #.... - def __imul__(self, other): - "Multiply self by other in-place." - m = getmask(other) - if self._mask is nomask: - if m is not nomask and m.any(): - self._mask = make_mask_none(self.shape, self.dtype) - self._mask += m - elif m is not nomask: - self._mask += m - ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other))) - return self - #.... - def __idiv__(self, other): - "Divide self by other in-place." - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.divide] - other_data = np.where(dom_mask, fval, other_data) -# self._mask = mask_or(self._mask, new_mask) - self._mask |= new_mask - ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data)) - return self - #.... - def __ifloordiv__(self, other): - "Floor divide self by other in-place." - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.floor_divide] - other_data = np.where(dom_mask, fval, other_data) -# self._mask = mask_or(self._mask, new_mask) - self._mask |= new_mask - ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data)) - return self - #.... - def __itruediv__(self, other): - "True divide self by other in-place." - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 3 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.true_divide] - other_data = np.where(dom_mask, fval, other_data) -# self._mask = mask_or(self._mask, new_mask) - self._mask |= new_mask - ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data)) - return self - #... - def __ipow__(self, other): - "Raise self to the power other, in place." 
- other_data = getdata(other) - other_mask = getmask(other) - with np.errstate(divide='ignore', invalid='ignore'): - ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data)) - invalid = np.logical_not(np.isfinite(self._data)) - if invalid.any(): - if self._mask is not nomask: - self._mask |= invalid - else: - self._mask = invalid - np.copyto(self._data, self.fill_value, where=invalid) - new_mask = mask_or(other_mask, invalid) - self._mask = mask_or(self._mask, new_mask) - return self - #............................................ - def __float__(self): - "Convert to float." - if self.size > 1: - raise TypeError("Only length-1 arrays can be converted " - "to Python scalars") - elif self._mask: - warnings.warn("Warning: converting a masked element to nan.") - return np.nan - return float(self.item()) - - def __int__(self): - "Convert to int." - if self.size > 1: - raise TypeError("Only length-1 arrays can be converted " - "to Python scalars") - elif self._mask: - raise MaskError('Cannot convert masked element to a Python int.') - return int(self.item()) - - - def get_imag(self): - """ - Return the imaginary part of the masked array. - - The returned array is a view on the imaginary part of the `MaskedArray` - whose `get_imag` method is called. - - Parameters - ---------- - None - - Returns - ------- - result : MaskedArray - The imaginary part of the masked array. - - See Also - -------- - get_real, real, imag - - Examples - -------- - >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) - >>> x.get_imag() - masked_array(data = [1.0 -- 1.6], - mask = [False True False], - fill_value = 1e+20) - - """ - result = self._data.imag.view(type(self)) - result.__setmask__(self._mask) - return result - imag = property(fget=get_imag, doc="Imaginary part.") - - def get_real(self): - """ - Return the real part of the masked array. - - The returned array is a view on the real part of the `MaskedArray` - whose `get_real` method is called. - - Parameters - ---------- - None - - Returns - ------- - result : MaskedArray - The real part of the masked array. - - See Also - -------- - get_imag, real, imag - - Examples - -------- - >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) - >>> x.get_real() - masked_array(data = [1.0 -- 3.45], - mask = [False True False], - fill_value = 1e+20) - - """ - result = self._data.real.view(type(self)) - result.__setmask__(self._mask) - return result - real = property(fget=get_real, doc="Real part") - - - #............................................ - def count(self, axis=None): - """ - Count the non-masked elements of the array along the given axis. - - Parameters - ---------- - axis : int, optional - Axis along which to count the non-masked elements. If `axis` is - `None`, all non-masked elements are counted. - - Returns - ------- - result : int or ndarray - If `axis` is `None`, an integer count is returned. When `axis` is - not `None`, an array with shape determined by the lengths of the - remaining axes, is returned. - - See Also - -------- - count_masked : Count masked elements in array or along a given axis. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.arange(6).reshape((2, 3)) - >>> a[1, :] = ma.masked - >>> a - masked_array(data = - [[0 1 2] - [-- -- --]], - mask = - [[False False False] - [ True True True]], - fill_value = 999999) - >>> a.count() - 3 - - When the `axis` keyword is specified an array of appropriate size is - returned. 
- - >>> a.count(axis=0) - array([1, 1, 1]) - >>> a.count(axis=1) - array([3, 0]) - - """ - m = self._mask - s = self.shape - if m is nomask: - if axis is None: - return self.size - else: - n = s[axis] - t = list(s) - del t[axis] - return np.full(t, n, dtype=np.intp) - n1 = np.size(m, axis) - n2 = np.sum(m, axis=axis, dtype=np.intp) - if axis is None: - return (n1 - n2) - else: - return narray(n1 - n2) - #............................................ - flatten = _arraymethod('flatten') - # - def ravel(self): - """ - Returns a 1D version of self, as a view. - - Returns - ------- - MaskedArray - Output view is of shape ``(self.size,)`` (or - ``(np.ma.product(self.shape),)``). - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> print x.ravel() - [1 -- 3 -- 5 -- 7 -- 9] - - """ - r = ndarray.ravel(self._data).view(type(self)) - r._update_from(self) - if self._mask is not nomask: - r._mask = ndarray.ravel(self._mask).reshape(r.shape) - else: - r._mask = nomask - return r - # - repeat = _arraymethod('repeat') - # - def reshape (self, *s, **kwargs): - """ - Give a new shape to the array without changing its data. - - Returns a masked array containing the same data, but with a new shape. - The result is a view on the original array; if this is not possible, a - ValueError is raised. - - Parameters - ---------- - shape : int or tuple of ints - The new shape should be compatible with the original shape. If an - integer is supplied, then the result will be a 1-D array of that - length. - order : {'C', 'F'}, optional - Determines whether the array data should be viewed as in C - (row-major) or FORTRAN (column-major) order. - - Returns - ------- - reshaped_array : array - A new view on the array. - - See Also - -------- - reshape : Equivalent function in the masked array module. - numpy.ndarray.reshape : Equivalent method on ndarray object. - numpy.reshape : Equivalent function in the NumPy module. - - Notes - ----- - The reshaping operation cannot guarantee that a copy will not be made, - to modify the shape in place, use ``a.shape = s`` - - Examples - -------- - >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) - >>> print x - [[-- 2] - [3 --]] - >>> x = x.reshape((4,1)) - >>> print x - [[--] - [2] - [3] - [--]] - - """ - kwargs.update(order=kwargs.get('order', 'C')) - result = self._data.reshape(*s, **kwargs).view(type(self)) - result._update_from(self) - mask = self._mask - if mask is not nomask: - result._mask = mask.reshape(*s, **kwargs) - return result - # - def resize(self, newshape, refcheck=True, order=False): - """ - .. warning:: - - This method does nothing, except raise a ValueError exception. A - masked array does not own its data and therefore cannot safely be - resized in place. Use the `numpy.ma.resize` function instead. - - This method is difficult to implement safely and may be deprecated in - future releases of NumPy. - - """ - # Note : the 'order' keyword looks broken, let's just drop it -# try: -# ndarray.resize(self, newshape, refcheck=refcheck) -# if self.mask is not nomask: -# self._mask.resize(newshape, refcheck=refcheck) -# except ValueError: -# raise ValueError("Cannot resize an array that has been referenced " -# "or is referencing another array in this way.\n" -# "Use the numpy.ma.resize function.") -# return None - errmsg = "A masked array does not own its data "\ - "and therefore cannot be resized.\n" \ - "Use the numpy.ma.resize function instead." 
- raise ValueError(errmsg) - # - def put(self, indices, values, mode='raise'): - """ - Set storage-indexed locations to corresponding values. - - Sets self._data.flat[n] = values[n] for each n in indices. - If `values` is shorter than `indices` then it will repeat. - If `values` has some masked values, the initial mask is updated - in consequence, else the corresponding values are unmasked. - - Parameters - ---------- - indices : 1-D array_like - Target indices, interpreted as integers. - values : array_like - Values to place in self._data copy at target indices. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - 'raise' : raise an error. - 'wrap' : wrap around. - 'clip' : clip to the range. - - Notes - ----- - `values` can be a scalar or length 1 array. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> x.put([0,4,8],[10,20,30]) - >>> print x - [[10 -- 3] - [-- 20 --] - [7 -- 30]] - - >>> x.put(4,999) - >>> print x - [[10 -- 3] - [-- 999 --] - [7 -- 30]] - - """ - m = self._mask - # Hard mask: Get rid of the values/indices that fall on masked data - if self._hardmask and self._mask is not nomask: - mask = self._mask[indices] - indices = narray(indices, copy=False) - values = narray(values, copy=False, subok=True) - values.resize(indices.shape) - indices = indices[~mask] - values = values[~mask] - #.... - self._data.put(indices, values, mode=mode) - #.... - if m is nomask: - m = getmask(values) - else: - m = m.copy() - if getmask(values) is nomask: - m.put(indices, False, mode=mode) - else: - m.put(indices, values._mask, mode=mode) - m = make_mask(m, copy=False, shrink=True) - self._mask = m - #............................................ - def ids (self): - """ - Return the addresses of the data and mask areas. - - Parameters - ---------- - None - - Examples - -------- - >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) - >>> x.ids() - (166670640, 166659832) - - If the array has no mask, the address of `nomask` is returned. This address - is typically not close to the data in memory: - - >>> x = np.ma.array([1, 2, 3]) - >>> x.ids() - (166691080, 3083169284L) - - """ - if self._mask is nomask: - return (self.ctypes.data, id(nomask)) - return (self.ctypes.data, self._mask.ctypes.data) - - def iscontiguous(self): - """ - Return a boolean indicating whether the data is contiguous. - - Parameters - ---------- - None - - Examples - -------- - >>> x = np.ma.array([1, 2, 3]) - >>> x.iscontiguous() - True - - `iscontiguous` returns one of the flags of the masked array: - - >>> x.flags - C_CONTIGUOUS : True - F_CONTIGUOUS : True - OWNDATA : False - WRITEABLE : True - ALIGNED : True - UPDATEIFCOPY : False - - """ - return self.flags['CONTIGUOUS'] - - #............................................ - def all(self, axis=None, out=None): - """ - Check if all of the elements of `a` are true. - - Performs a :func:`logical_and` over the given axis and returns the result. - Masked values are considered as True during computation. - For convenience, the output array is masked where ALL the values along the - current axis are masked: if the output would have been a scalar and that - all the values are masked, then the output is `masked`. - - Parameters - ---------- - axis : {None, integer} - Axis to perform the operation over. - If None, perform over flattened array. - out : {None, array}, optional - Array into which the result can be placed. 
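A minimal sketch of `put` as documented above; writing unmasked values through it also clears the mask at the target indices.

    import numpy as np

    x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
    x.put([0, 2], [10, 30])
    print(x)    # [10 2 30]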
Its type is preserved - and it must be of the right shape to hold the output. - - See Also - -------- - all : equivalent function - - Examples - -------- - >>> np.ma.array([1,2,3]).all() - True - >>> a = np.ma.array([1,2,3], mask=True) - >>> (a.all() is np.ma.masked) - True - - """ - mask = _check_mask_axis(self._mask, axis) - if out is None: - d = self.filled(True).all(axis=axis).view(type(self)) - if d.ndim: - d.__setmask__(mask) - elif mask: - return masked - return d - self.filled(True).all(axis=axis, out=out) - if isinstance(out, MaskedArray): - if out.ndim or mask: - out.__setmask__(mask) - return out - - - def any(self, axis=None, out=None): - """ - Check if any of the elements of `a` are true. - - Performs a logical_or over the given axis and returns the result. - Masked values are considered as False during computation. - - Parameters - ---------- - axis : {None, integer} - Axis to perform the operation over. - If None, perform over flattened array and return a scalar. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. - - See Also - -------- - any : equivalent function - - """ - mask = _check_mask_axis(self._mask, axis) - if out is None: - d = self.filled(False).any(axis=axis).view(type(self)) - if d.ndim: - d.__setmask__(mask) - elif mask: - d = masked - return d - self.filled(False).any(axis=axis, out=out) - if isinstance(out, MaskedArray): - if out.ndim or mask: - out.__setmask__(mask) - return out - - - def nonzero(self): - """ - Return the indices of unmasked elements that are not zero. - - Returns a tuple of arrays, one for each dimension, containing the - indices of the non-zero elements in that dimension. The corresponding - non-zero values can be obtained with:: - - a[a.nonzero()] - - To group the indices by element, rather than dimension, use - instead:: - - np.transpose(a.nonzero()) - - The result of this is always a 2d array, with a row for each non-zero - element. - - Parameters - ---------- - None - - Returns - ------- - tuple_of_arrays : tuple - Indices of elements that are non-zero. - - See Also - -------- - numpy.nonzero : - Function operating on ndarrays. - flatnonzero : - Return indices that are non-zero in the flattened version of the input - array. - ndarray.nonzero : - Equivalent ndarray method. - count_nonzero : - Counts the number of non-zero elements in the input array. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.array(np.eye(3)) - >>> x - masked_array(data = - [[ 1. 0. 0.] - [ 0. 1. 0.] - [ 0. 0. 1.]], - mask = - False, - fill_value=1e+20) - >>> x.nonzero() - (array([0, 1, 2]), array([0, 1, 2])) - - Masked elements are ignored. - - >>> x[1, 1] = ma.masked - >>> x - masked_array(data = - [[1.0 0.0 0.0] - [0.0 -- 0.0] - [0.0 0.0 1.0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=1e+20) - >>> x.nonzero() - (array([0, 2]), array([0, 2])) - - Indices can also be grouped by element. - - >>> np.transpose(x.nonzero()) - array([[0, 0], - [2, 2]]) - - A common use for ``nonzero`` is to find the indices of an array, where - a condition is True. Given an array `a`, the condition `a` > 3 is a - boolean array and since False is interpreted as 0, ma.nonzero(a > 3) - yields the indices of the `a` where the condition is true. 
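A minimal sketch of `all`/`any` as specified above: masked values count as True for `all` and False for `any`, and a fully masked reduction yields the `masked` constant.

    import numpy as np

    a = np.ma.array([1, 0, 3], mask=[0, 1, 0])
    print(a.all())    # True  (the masked 0 is treated as True)
    print(a.any())    # True
    print(np.ma.array([1, 2], mask=[1, 1]).all() is np.ma.masked)    # True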
- - >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) - >>> a > 3 - masked_array(data = - [[False False False] - [ True True True] - [ True True True]], - mask = - False, - fill_value=999999) - >>> ma.nonzero(a > 3) - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - The ``nonzero`` method of the condition array can also be called. - - >>> (a > 3).nonzero() - (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) - - """ - return narray(self.filled(0), copy=False).nonzero() - - - def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """ - (this docstring should be overwritten) - """ - #!!!: implement out + test! - m = self._mask - if m is nomask: - result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, - axis2=axis2, out=out) - return result.astype(dtype) - else: - D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) - return D.astype(dtype).filled(0).sum(axis=None, out=out) - trace.__doc__ = ndarray.trace.__doc__ - - def sum(self, axis=None, dtype=None, out=None): - """ - Return the sum of the array elements over the given axis. - Masked elements are set to 0 internally. - - Parameters - ---------- - axis : {None, -1, int}, optional - Axis along which the sum is computed. The default - (`axis` = None) is to compute over the flattened array. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and - the type of a is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. - out : {None, ndarray}, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - - Returns - ------- - sum_along_axis : MaskedArray or scalar - An array with the same shape as self, with the specified - axis removed. If self is a 0-d array, or if `axis` is None, a scalar - is returned. If an output array is specified, a reference to - `out` is returned. - - Examples - -------- - >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> print x.sum() - 25 - >>> print x.sum(axis=1) - [4 5 16] - >>> print x.sum(axis=0) - [8 5 12] - >>> print type(x.sum(axis=0, dtype=np.int64)[0]) - - - """ - _mask = ndarray.__getattribute__(self, '_mask') - newmask = _check_mask_axis(_mask, axis) - # No explicit output - if out is None: - result = self.filled(0).sum(axis, dtype=dtype) - rndim = getattr(result, 'ndim', 0) - if rndim: - result = result.view(type(self)) - result.__setmask__(newmask) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(0).sum(axis, dtype=dtype, out=out) - if isinstance(out, MaskedArray): - outmask = getattr(out, '_mask', nomask) - if (outmask is nomask): - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - return out - - - def cumsum(self, axis=None, dtype=None, out=None): - """ - Return the cumulative sum of the elements along the given axis. - The cumulative sum is calculated over the flattened array by - default, otherwise over the specified axis. - - Masked values are set to 0 internally during the computation. - However, their position is saved, and the result will be masked at - the same locations. - - Parameters - ---------- - axis : {None, -1, int}, optional - Axis along which the sum is computed. 
The default (`axis` = None) is to - compute over the flattened array. `axis` may be negative, in which case - it counts from the last to the first axis. - dtype : {None, dtype}, optional - Type of the returned array and of the accumulator in which the - elements are summed. If `dtype` is not specified, it defaults - to the dtype of `a`, unless `a` has an integer dtype with a - precision less than that of the default platform integer. In - that case, the default platform integer is used. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - - Returns - ------- - cumsum : ndarray. - A new array holding the result is returned unless ``out`` is - specified, in which case a reference to ``out`` is returned. - - Notes - ----- - The mask is lost if `out` is not a valid :class:`MaskedArray` ! - - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - Examples - -------- - >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) - >>> print marr.cumsum() - [0 1 3 -- -- -- 9 16 24 33] - - """ - result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(self.mask) - return out - result = result.view(type(self)) - result.__setmask__(self._mask) - return result - - - def prod(self, axis=None, dtype=None, out=None): - """ - Return the product of the array elements over the given axis. - Masked elements are set to 1 internally for computation. - - Parameters - ---------- - axis : {None, int}, optional - Axis over which the product is taken. If None is used, then the - product is over all the array elements. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are multiplied. If ``dtype`` has the value ``None`` - and the type of a is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. - out : {None, array}, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. - - Returns - ------- - product_along_axis : {array, scalar}, see dtype parameter above. - Returns an array whose shape is the same as a with the specified - axis removed. Returns a 0d array when a is 1d or axis=None. - Returns a reference to the specified output array if specified. - - See Also - -------- - prod : equivalent function - - Notes - ----- - Arithmetic is modular when using integer types, and no error is raised - on overflow. 
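A minimal sketch of `cumsum` as described above: masked positions stay masked, but the running total accumulates past them as if they were 0.

    import numpy as np

    marr = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
    print(marr.cumsum())    # [1 -- 4 8]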
- - Examples - -------- - >>> np.prod([1.,2.]) - 2.0 - >>> np.prod([1.,2.], dtype=np.int32) - 2 - >>> np.prod([[1.,2.],[3.,4.]]) - 24.0 - >>> np.prod([[1.,2.],[3.,4.]], axis=1) - array([ 2., 12.]) - - """ - _mask = ndarray.__getattribute__(self, '_mask') - newmask = _check_mask_axis(_mask, axis) - # No explicit output - if out is None: - result = self.filled(1).prod(axis, dtype=dtype) - rndim = getattr(result, 'ndim', 0) - if rndim: - result = result.view(type(self)) - result.__setmask__(newmask) - elif newmask: - result = masked - return result - # Explicit output - result = self.filled(1).prod(axis, dtype=dtype, out=out) - if isinstance(out, MaskedArray): - outmask = getattr(out, '_mask', nomask) - if (outmask is nomask): - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = newmask - return out - - product = prod - - def cumprod(self, axis=None, dtype=None, out=None): - """ - Return the cumulative product of the elements along the given axis. - The cumulative product is taken over the flattened array by - default, otherwise over the specified axis. - - Masked values are set to 1 internally during the computation. - However, their position is saved, and the result will be masked at - the same locations. - - Parameters - ---------- - axis : {None, -1, int}, optional - Axis along which the product is computed. The default - (`axis` = None) is to compute over the flattened array. - dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are multiplied. If ``dtype`` has the value ``None`` - and the type of ``a`` is an integer type of precision less than the - default platform integer, then the default platform integer precision - is used. Otherwise, the dtype is the same as that of ``a``. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - - Returns - ------- - cumprod : ndarray - A new array holding the result is returned unless out is specified, - in which case a reference to out is returned. - - Notes - ----- - The mask is lost if `out` is not a valid MaskedArray ! - - Arithmetic is modular when using integer types, and no error is - raised on overflow. - - """ - result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(self._mask) - return out - result = result.view(type(self)) - result.__setmask__(self._mask) - return result - - - def mean(self, axis=None, dtype=None, out=None): - """ - Returns the average of the array elements. - - Masked entries are ignored. - The average is taken over the flattened array by default, otherwise over - the specified axis. Refer to `numpy.mean` for the full documentation. - - Parameters - ---------- - a : array_like - Array containing numbers whose mean is desired. If `a` is not an - array, a conversion is attempted. - axis : int, optional - Axis along which the means are computed. The default is to compute - the mean of the flattened array. - dtype : dtype, optional - Type to use in computing the mean. For integer inputs, the default - is float64; for floating point, inputs it is the same as the input - dtype. - out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape as the expected output but the type will be cast if - necessary. 
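A minimal sketch of `prod` as documented above, with masked entries treated as 1:

    import numpy as np

    a = np.ma.array([2, 3, 4], mask=[0, 1, 0])
    print(a.prod())    # 8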
- - Returns - ------- - mean : ndarray, see dtype parameter above - If `out=None`, returns a new array containing the mean values, - otherwise a reference to the output array is returned. - - See Also - -------- - numpy.ma.mean : Equivalent function. - numpy.mean : Equivalent function on non-masked arrays. - numpy.ma.average: Weighted average. - - Examples - -------- - >>> a = np.ma.array([1,2,3], mask=[False, False, True]) - >>> a - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) - >>> a.mean() - 1.5 - - """ - if self._mask is nomask: - result = super(MaskedArray, self).mean(axis=axis, dtype=dtype) - else: - dsum = self.sum(axis=axis, dtype=dtype) - cnt = self.count(axis=axis) - if cnt.shape == () and (cnt == 0): - result = masked - else: - result = dsum * 1. / cnt - if out is not None: - out.flat = result - if isinstance(out, MaskedArray): - outmask = getattr(out, '_mask', nomask) - if (outmask is nomask): - outmask = out._mask = make_mask_none(out.shape) - outmask.flat = getattr(result, '_mask', nomask) - return out - return result - - def anom(self, axis=None, dtype=None): - """ - Compute the anomalies (deviations from the arithmetic mean) - along the given axis. - - Returns an array of anomalies, with the same shape as the input and - where the arithmetic mean is computed along the given axis. - - Parameters - ---------- - axis : int, optional - Axis over which the anomalies are taken. - The default is to use the mean of the flattened array as reference. - dtype : dtype, optional - Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. - - See Also - -------- - mean : Compute the mean of the array. - - Examples - -------- - >>> a = np.ma.array([1,2,3]) - >>> a.anom() - masked_array(data = [-1. 0. 1.], - mask = False, - fill_value = 1e+20) - - """ - m = self.mean(axis, dtype) - if not axis: - return (self - m) - else: - return (self - expand_dims(m, axis)) - - def var(self, axis=None, dtype=None, out=None, ddof=0): - "" - # Easy case: nomask, business as usual - if self._mask is nomask: - return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof) - # Some data are masked, yay! - cnt = self.count(axis=axis) - ddof - danom = self.anom(axis=axis, dtype=dtype) - if iscomplexobj(self): - danom = umath.absolute(danom) ** 2 - else: - danom *= danom - dvar = divide(danom.sum(axis), cnt).view(type(self)) - # Apply the mask if it's not a scalar - if dvar.ndim: - dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0)) - dvar._update_from(self) - elif getattr(dvar, '_mask', False): - # Make sure that masked is returned when the scalar is masked. - dvar = masked - if out is not None: - if isinstance(out, MaskedArray): - out.flat = 0 - out.__setmask__(True) - elif out.dtype.kind in 'biu': - errmsg = "Masked data information would be lost in one or "\ - "more location." 
-                    raise MaskError(errmsg)
-                else:
-                    out.flat = np.nan
-                return out
-        # In case we have an explicit output
-        if out is not None:
-            # Set the data
-            out.flat = dvar
-            # Set the mask if needed
-            if isinstance(out, MaskedArray):
-                out.__setmask__(dvar.mask)
-            return out
-        return dvar
-    var.__doc__ = np.var.__doc__
-
-
-    def std(self, axis=None, dtype=None, out=None, ddof=0):
-        ""
-        dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
-        if dvar is not masked:
-            if out is not None:
-                np.power(out, 0.5, out=out, casting='unsafe')
-                return out
-            dvar = sqrt(dvar)
-        return dvar
-    std.__doc__ = np.std.__doc__
-
-    #............................................
-    def round(self, decimals=0, out=None):
-        """
-        Return each element rounded to the given number of decimals.
-
-        Refer to `numpy.around` for full documentation.
-
-        See Also
-        --------
-        numpy.around : equivalent function
-
-        """
-        result = self._data.round(decimals=decimals, out=out).view(type(self))
-        result._mask = self._mask
-        result._update_from(self)
-        # No explicit output: we're done
-        if out is None:
-            return result
-        if isinstance(out, MaskedArray):
-            out.__setmask__(self._mask)
-        return out
-    round.__doc__ = ndarray.round.__doc__
-
-    #............................................
-    def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
-        """
-        Return an ndarray of indices that sort the array along the
-        specified axis. Masked values are filled beforehand to
-        `fill_value`.
-
-        Parameters
-        ----------
-        axis : int, optional
-            Axis along which to sort. The default is -1 (last axis).
-            If None, the flattened array is used.
-        fill_value : var, optional
-            Value used to fill the array before sorting.
-            The default is the `fill_value` attribute of the input array.
-        kind : {'quicksort', 'mergesort', 'heapsort'}, optional
-            Sorting algorithm.
-        order : list, optional
-            When `a` is an array with fields defined, this argument specifies
-            which fields to compare first, second, etc. Not all fields need be
-            specified.
-
-        Returns
-        -------
-        index_array : ndarray, int
-            Array of indices that sort `a` along the specified axis.
-            In other words, ``a[index_array]`` yields a sorted `a`.
-
-        See Also
-        --------
-        sort : Describes sorting algorithms used.
-        lexsort : Indirect stable sort with multiple keys.
-        ndarray.sort : Inplace sort.
-
-        Notes
-        -----
-        See `sort` for notes on the different sorting algorithms.
-
-        Examples
-        --------
-        >>> a = np.ma.array([3,2,1], mask=[False, False, True])
-        >>> a
-        masked_array(data = [3 2 --],
-                     mask = [False False  True],
-               fill_value = 999999)
-        >>> a.argsort()
-        array([1, 0, 2])
-
-        """
-        if fill_value is None:
-            fill_value = default_fill_value(self)
-        d = self.filled(fill_value).view(ndarray)
-        return d.argsort(axis=axis, kind=kind, order=order)
-
-
-    def argmin(self, axis=None, fill_value=None, out=None):
-        """
-        Return array of indices to the minimum values along the given axis.
-
-        Parameters
-        ----------
-        axis : {None, integer}
-            If None, the index is into the flattened array, otherwise along
-            the specified axis
-        fill_value : {var}, optional
-            Value used to fill in the masked values. If None, the output of
-            minimum_fill_value(self._data) is used instead.
-        out : {None, array}, optional
-            Array into which the result can be placed. Its type is preserved
-            and it must be of the right shape to hold the output.
-
-        Returns
-        -------
-        {ndarray, scalar}
-            For multi-dimensional input, returns a new ndarray of indices to
-            the minimum values along the given axis. Otherwise, returns a scalar
-            index of the minimum value along the given axis.
-
-        Examples
-        --------
-        >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
-        >>> x.shape = (2,2)
-        >>> print x
-        [[-- --]
-         [2 3]]
-        >>> print x.argmin(axis=0, fill_value=-1)
-        [0 0]
-        >>> print x.argmin(axis=0, fill_value=9)
-        [1 1]
-
-        """
-        if fill_value is None:
-            fill_value = minimum_fill_value(self)
-        d = self.filled(fill_value).view(ndarray)
-        return d.argmin(axis, out=out)
-
-
-    def argmax(self, axis=None, fill_value=None, out=None):
-        """
-        Returns array of indices of the maximum values along the given axis.
-        Masked values are treated as if they had the value fill_value.
-
-        Parameters
-        ----------
-        axis : {None, integer}
-            If None, the index is into the flattened array, otherwise along
-            the specified axis
-        fill_value : {var}, optional
-            Value used to fill in the masked values. If None, the output of
-            maximum_fill_value(self._data) is used instead.
-        out : {None, array}, optional
-            Array into which the result can be placed. Its type is preserved
-            and it must be of the right shape to hold the output.
-
-        Returns
-        -------
-        index_array : {integer_array}
-
-        Examples
-        --------
-        >>> a = np.arange(6).reshape(2,3)
-        >>> a.argmax()
-        5
-        >>> a.argmax(0)
-        array([1, 1, 1])
-        >>> a.argmax(1)
-        array([2, 2])
-
-        """
-        if fill_value is None:
-            fill_value = maximum_fill_value(self._data)
-        d = self.filled(fill_value).view(ndarray)
-        return d.argmax(axis, out=out)
-
-
-    def sort(self, axis=-1, kind='quicksort', order=None,
-             endwith=True, fill_value=None):
-        """
-        Sort the array, in-place.
-
-        Parameters
-        ----------
-        a : array_like
-            Array to be sorted.
-        axis : int, optional
-            Axis along which to sort. If None, the array is flattened before
-            sorting. The default is -1, which sorts along the last axis.
-        kind : {'quicksort', 'mergesort', 'heapsort'}, optional
-            Sorting algorithm. Default is 'quicksort'.
-        order : list, optional
-            When `a` is a structured array, this argument specifies which
-            fields to compare first, second, and so on. This list does not
-            need to include all of the fields.
-        endwith : {True, False}, optional
-            Whether missing values (if any) should be forced to the upper
-            indices (at the end of the array, True) or to the lower indices
-            (at the beginning, False). When the array contains unmasked
-            values of the largest (or smallest if False) representable value
-            of the datatype, the ordering of these values and the masked
-            values is undefined. To enforce that the masked values end up at
-            the end (or beginning) in this case, one must sort the mask.
-        fill_value : {var}, optional
-            Value used internally for the masked values.
-            If ``fill_value`` is not None, it supersedes ``endwith``.
-
-        Returns
-        -------
-        sorted_array : ndarray
-            Array of the same type and shape as `a`.
-
-        See Also
-        --------
-        ndarray.sort : Method to sort an array in-place.
-        argsort : Indirect sort.
-        lexsort : Indirect stable sort on multiple keys.
-        searchsorted : Find elements in a sorted array.
-
-        Notes
-        -----
-        See ``sort`` for notes on the different sorting algorithms.
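# --- Illustrative aside (not from the patch): how fill_value steers argmin on
# --- a masked array, mirroring the docstring example above.
import numpy as np

x = np.ma.array(np.arange(4), mask=[1, 1, 0, 0]).reshape(2, 2)
print(x.argmin(axis=0, fill_value=-1))  # [0 0]: masked slots become -1 and win
print(x.argmin(axis=0, fill_value=9))   # [1 1]: masked slots become 9 and lose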
-
-        Examples
-        --------
-        >>> a = ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
-        >>> # Default
-        >>> a.sort()
-        >>> print a
-        [1 3 5 -- --]
-
-        >>> a = ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
-        >>> # Put missing values in the front
-        >>> a.sort(endwith=False)
-        >>> print a
-        [-- -- 1 3 5]
-
-        >>> a = ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
-        >>> # fill_value takes over endwith
-        >>> a.sort(endwith=False, fill_value=3)
-        >>> print a
-        [1 -- -- 3 5]
-
-        """
-        if self._mask is nomask:
-            ndarray.sort(self, axis=axis, kind=kind, order=order)
-        else:
-            if self is masked:
-                return self
-            if fill_value is None:
-                if endwith:
-                    filler = minimum_fill_value(self)
-                else:
-                    filler = maximum_fill_value(self)
-            else:
-                filler = fill_value
-
-            sidx = self.filled(filler).argsort(axis=axis, kind=kind,
-                                               order=order)
-            # save meshgrid memory for 1d arrays
-            if self.ndim == 1:
-                idx = sidx
-            else:
-                idx = np.meshgrid(*[np.arange(x) for x in self.shape],
-                                  sparse=True, indexing='ij')
-                idx[axis] = sidx
-            tmp_mask = self._mask[idx].flat
-            tmp_data = self._data[idx].flat
-            self._data.flat = tmp_data
-            self._mask.flat = tmp_mask
-        return
-
-    #............................................
-    def min(self, axis=None, out=None, fill_value=None):
-        """
-        Return the minimum along a given axis.
-
-        Parameters
-        ----------
-        axis : {None, int}, optional
-            Axis along which to operate. By default, ``axis`` is None and the
-            flattened input is used.
-        out : array_like, optional
-            Alternative output array in which to place the result. Must be of
-            the same shape and buffer length as the expected output.
-        fill_value : {var}, optional
-            Value used to fill in the masked values.
-            If None, use the output of `minimum_fill_value`.
-
-        Returns
-        -------
-        amin : array_like
-            New array holding the result.
-            If ``out`` was specified, ``out`` is returned.
-
-        See Also
-        --------
-        minimum_fill_value
-            Returns the minimum filling value for a given datatype.
-
-        """
-        _mask = ndarray.__getattribute__(self, '_mask')
-        newmask = _check_mask_axis(_mask, axis)
-        if fill_value is None:
-            fill_value = minimum_fill_value(self)
-        # No explicit output
-        if out is None:
-            result = self.filled(fill_value).min(axis=axis, out=out).view(type(self))
-            if result.ndim:
-                # Set the mask
-                result.__setmask__(newmask)
-                # Get rid of Infs
-                if newmask.ndim:
-                    np.copyto(result, result.fill_value, where=newmask)
-            elif newmask:
-                result = masked
-            return result
-        # Explicit output
-        result = self.filled(fill_value).min(axis=axis, out=out)
-        if isinstance(out, MaskedArray):
-            outmask = getattr(out, '_mask', nomask)
-            if (outmask is nomask):
-                outmask = out._mask = make_mask_none(out.shape)
-            outmask.flat = newmask
-        else:
-            if out.dtype.kind in 'biu':
-                errmsg = "Masked data information would be lost in one or more"\
-                         " locations."
-                raise MaskError(errmsg)
-            np.copyto(out, np.nan, where=newmask)
-        return out
-
-    def mini(self, axis=None):
-        """
-        Return the array minimum along the specified axis.
-
-        Parameters
-        ----------
-        axis : int, optional
-            The axis along which to find the minima. Default is None, in which
-            case the minimum value in the whole array is returned.
-
-        Returns
-        -------
-        min : scalar or MaskedArray
-            If `axis` is None, the result is a scalar. Otherwise, if `axis` is
-            given and the array is at least 2-D, the result is a masked array
-            with dimension one smaller than the array on which `mini` is
-            called.
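# --- Illustrative aside: the effect of `endwith` on in-place sort of a masked
# --- array, as described above.
import numpy.ma as ma

a = ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
a.sort()               # masked entries are pushed to the end by default
print(a)               # [1 3 5 -- --]
b = ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
b.sort(endwith=False)  # ...or forced to the front
print(b)               # [-- -- 1 3 5]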
-
-        Examples
-        --------
-        >>> x = np.ma.array(np.arange(6), mask=[0, 1, 0, 0, 0, 1]).reshape(3, 2)
-        >>> print x
-        [[0 --]
-         [2 3]
-         [4 --]]
-        >>> x.mini()
-        0
-        >>> x.mini(axis=0)
-        masked_array(data = [0 3],
-                     mask = [False False],
-               fill_value = 999999)
-        >>> print x.mini(axis=1)
-        [0 2 4]
-
-        """
-        if axis is None:
-            return minimum(self)
-        else:
-            return minimum.reduce(self, axis)
-
-    #........................
-    def max(self, axis=None, out=None, fill_value=None):
-        """
-        Return the maximum along a given axis.
-
-        Parameters
-        ----------
-        axis : {None, int}, optional
-            Axis along which to operate. By default, ``axis`` is None and the
-            flattened input is used.
-        out : array_like, optional
-            Alternative output array in which to place the result. Must
-            be of the same shape and buffer length as the expected output.
-        fill_value : {var}, optional
-            Value used to fill in the masked values.
-            If None, use the output of maximum_fill_value().
-
-        Returns
-        -------
-        amax : array_like
-            New array holding the result.
-            If ``out`` was specified, ``out`` is returned.
-
-        See Also
-        --------
-        maximum_fill_value
-            Returns the maximum filling value for a given datatype.
-
-        """
-        _mask = ndarray.__getattribute__(self, '_mask')
-        newmask = _check_mask_axis(_mask, axis)
-        if fill_value is None:
-            fill_value = maximum_fill_value(self)
-        # No explicit output
-        if out is None:
-            result = self.filled(fill_value).max(axis=axis, out=out).view(type(self))
-            if result.ndim:
-                # Set the mask
-                result.__setmask__(newmask)
-                # Get rid of Infs
-                if newmask.ndim:
-                    np.copyto(result, result.fill_value, where=newmask)
-            elif newmask:
-                result = masked
-            return result
-        # Explicit output
-        result = self.filled(fill_value).max(axis=axis, out=out)
-        if isinstance(out, MaskedArray):
-            outmask = getattr(out, '_mask', nomask)
-            if (outmask is nomask):
-                outmask = out._mask = make_mask_none(out.shape)
-            outmask.flat = newmask
-        else:
-            if out.dtype.kind in 'biu':
-                errmsg = "Masked data information would be lost in one or more"\
-                         " locations."
-                raise MaskError(errmsg)
-            np.copyto(out, np.nan, where=newmask)
-        return out
-
-    def ptp(self, axis=None, out=None, fill_value=None):
-        """
-        Return (maximum - minimum) along the given dimension
-        (i.e. peak-to-peak value).
-
-        Parameters
-        ----------
-        axis : {None, int}, optional
-            Axis along which to find the peaks. If None (default) the
-            flattened array is used.
-        out : {None, array_like}, optional
-            Alternative output array in which to place the result. It must
-            have the same shape and buffer length as the expected output
-            but the type will be cast if necessary.
-        fill_value : {var}, optional
-            Value used to fill in the masked values.
-
-        Returns
-        -------
-        ptp : ndarray
-            A new array holding the result, unless ``out`` was
-            specified, in which case a reference to ``out`` is returned.
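# --- Illustrative aside: max() and ptp() ignore masked entries, so the
# --- peak-to-peak value below is computed from the unmasked 1 and 3 only.
import numpy.ma as ma

a = ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
print(a.max())  # 3
print(a.min())  # 1
print(a.ptp())  # 2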
- - """ - if out is None: - result = self.max(axis=axis, fill_value=fill_value) - result -= self.min(axis=axis, fill_value=fill_value) - return result - out.flat = self.max(axis=axis, out=out, fill_value=fill_value) - min_value = self.min(axis=axis, fill_value=fill_value) - np.subtract(out, min_value, out=out, casting='unsafe') - return out - - def take(self, indices, axis=None, out=None, mode='raise'): - """ - """ - (_data, _mask) = (self._data, self._mask) - cls = type(self) - # Make sure the indices are not masked - maskindices = getattr(indices, '_mask', nomask) - if maskindices is not nomask: - indices = indices.filled(0) - # Get the data - if out is None: - out = _data.take(indices, axis=axis, mode=mode).view(cls) - else: - np.take(_data, indices, axis=axis, mode=mode, out=out) - # Get the mask - if isinstance(out, MaskedArray): - if _mask is nomask: - outmask = maskindices - else: - outmask = _mask.take(indices, axis=axis, mode=mode) - outmask |= maskindices - out.__setmask__(outmask) - return out - - - # Array methods --------------------------------------- - copy = _arraymethod('copy') - diagonal = _arraymethod('diagonal') - transpose = _arraymethod('transpose') - T = property(fget=lambda self:self.transpose()) - swapaxes = _arraymethod('swapaxes') - clip = _arraymethod('clip', onmask=False) - copy = _arraymethod('copy') - squeeze = _arraymethod('squeeze') - #-------------------------------------------- - def tolist(self, fill_value=None): - """ - Return the data portion of the masked array as a hierarchical Python list. - - Data items are converted to the nearest compatible Python type. - Masked values are converted to `fill_value`. If `fill_value` is None, - the corresponding entries in the output list will be ``None``. - - Parameters - ---------- - fill_value : scalar, optional - The value to use for invalid entries. Default is None. - - Returns - ------- - result : list - The Python list representation of the masked array. - - Examples - -------- - >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) - >>> x.tolist() - [[1, None, 3], [None, 5, None], [7, None, 9]] - >>> x.tolist(-999) - [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] - - """ - _mask = self._mask - # No mask ? Just return .data.tolist ? - if _mask is nomask: - return self._data.tolist() - # Explicit fill_value: fill the array and get the list - if fill_value is not None: - return self.filled(fill_value).tolist() - # Structured array ............. - names = self.dtype.names - if names: - result = self._data.astype([(_, object) for _ in names]) - for n in names: - result[n][_mask[n]] = None - return result.tolist() - # Standard arrays ............... - if _mask is nomask: - return [None] - # Set temps to save time when dealing w/ marrays... - inishape = self.shape - result = np.array(self._data.ravel(), dtype=object) - result[_mask.ravel()] = None - result.shape = inishape - return result.tolist() -# if fill_value is not None: -# return self.filled(fill_value).tolist() -# result = self.filled().tolist() -# # Set temps to save time when dealing w/ mrecarrays... 
-#        _mask = self._mask
-#        if _mask is nomask:
-#            return result
-#        nbdims = self.ndim
-#        dtypesize = len(self.dtype)
-#        if nbdims == 0:
-#            return tuple([None] * dtypesize)
-#        elif nbdims == 1:
-#            maskedidx = _mask.nonzero()[0].tolist()
-#            if dtypesize:
-#                nodata = tuple([None] * dtypesize)
-#            else:
-#                nodata = None
-#            [operator.setitem(result, i, nodata) for i in maskedidx]
-#        else:
-#            for idx in zip(*[i.tolist() for i in _mask.nonzero()]):
-#                tmp = result
-#                for i in idx[:-1]:
-#                    tmp = tmp[i]
-#                tmp[idx[-1]] = None
-#        return result
-    #........................
-    def tostring(self, fill_value=None, order='C'):
-        """
-        This function is a compatibility alias for tobytes. Despite its name
-        it returns bytes not strings.
-        """
-
-        return self.tobytes(fill_value, order=order)
-    #........................
-    def tobytes(self, fill_value=None, order='C'):
-        """
-        Return the array data as a string containing the raw bytes in the
-        array.
-
-        The array is filled with a fill value before the string conversion.
-
-        .. versionadded:: 1.9.0
-
-        Parameters
-        ----------
-        fill_value : scalar, optional
-            Value used to fill in the masked values. Default is None, in
-            which case `MaskedArray.fill_value` is used.
-        order : {'C','F','A'}, optional
-            Order of the data item in the copy. Default is 'C'.
-
-            - 'C'   -- C order (row major).
-            - 'F'   -- Fortran order (column major).
-            - 'A'   -- Any, current order of array.
-            - None  -- Same as 'A'.
-
-        See Also
-        --------
-        ndarray.tobytes
-        tolist, tofile
-
-        Notes
-        -----
-        As for `ndarray.tobytes`, information about the shape, dtype, etc.,
-        but also about `fill_value`, will be lost.
-
-        Examples
-        --------
-        >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
-        >>> x.tobytes()
-        '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
-
-        """
-        return self.filled(fill_value).tobytes(order=order)
-    #........................
-    def tofile(self, fid, sep="", format="%s"):
-        """
-        Save a masked array to a file in binary format.
-
-        .. warning::
-            This function is not implemented yet.
-
-        Raises
-        ------
-        NotImplementedError
-            When `tofile` is called.
-
-        """
-        raise NotImplementedError("Not implemented yet, sorry...")
-
-    def toflex(self):
-        """
-        Transforms a masked array into a flexible-type array.
-
-        The flexible type array that is returned will have two fields:
-
-        * the ``_data`` field stores the ``_data`` part of the array.
-        * the ``_mask`` field stores the ``_mask`` part of the array.
-
-        Parameters
-        ----------
-        None
-
-        Returns
-        -------
-        record : ndarray
-            A new flexible-type `ndarray` with two fields: the first element
-            containing a value, the second element containing the
-            corresponding mask boolean. The returned record shape matches
-            self.shape.
-
-        Notes
-        -----
-        A side-effect of transforming a masked array into a flexible
-        `ndarray` is that meta information (``fill_value``, ...) will be
-        lost.
-
-        Examples
-        --------
-        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
-        >>> print x
-        [[1 -- 3]
-         [-- 5 --]
-         [7 -- 9]]
-        >>> print x.toflex()
-        [[(1, False) (2, True) (3, False)]
-         [(4, True) (5, False) (6, True)]
-         [(7, False) (8, True) (9, False)]]
-
-        """
-        # Get the basic dtype ....
- ddtype = self.dtype - # Make sure we have a mask - _mask = self._mask - if _mask is None: - _mask = make_mask_none(self.shape, ddtype) - # And get its dtype - mdtype = self._mask.dtype - # - record = np.ndarray(shape=self.shape, - dtype=[('_data', ddtype), ('_mask', mdtype)]) - record['_data'] = self._data - record['_mask'] = self._mask - return record - torecords = toflex - #-------------------------------------------- - # Pickling - def __getstate__(self): - """Return the internal state of the masked array, for pickling - purposes. - - """ - cf = 'CF'[self.flags.fnc] - state = (1, - self.shape, - self.dtype, - self.flags.fnc, - self._data.tobytes(cf), - #self._data.tolist(), - getmaskarray(self).tobytes(cf), - #getmaskarray(self).tolist(), - self._fill_value, - ) - return state - # - def __setstate__(self, state): - """Restore the internal state of the masked array, for - pickling purposes. ``state`` is typically the output of the - ``__getstate__`` output, and is a 5-tuple: - - - class name - - a tuple giving the shape of the data - - a typecode for the data - - a binary string for the data - - a binary string for the mask. - - """ - (_, shp, typ, isf, raw, msk, flv) = state - ndarray.__setstate__(self, (shp, typ, isf, raw)) - self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) - self.fill_value = flv - # - def __reduce__(self): - """Return a 3-tuple for pickling a MaskedArray. - - """ - return (_mareconstruct, - (self.__class__, self._baseclass, (0,), 'b',), - self.__getstate__()) - # - def __deepcopy__(self, memo=None): - from copy import deepcopy - copied = MaskedArray.__new__(type(self), self, copy=True) - if memo is None: - memo = {} - memo[id(self)] = copied - for (k, v) in self.__dict__.items(): - copied.__dict__[k] = deepcopy(v, memo) - return copied - - -def _mareconstruct(subtype, baseclass, baseshape, basetype,): - """Internal function that builds a new MaskedArray from the - information stored in a pickle. - - """ - _data = ndarray.__new__(baseclass, baseshape, basetype) - _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) - return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) - - - - - - -class mvoid(MaskedArray): - """ - Fake a 'void' object to use for masked array with structured dtypes. - """ - # - def __new__(self, data, mask=nomask, dtype=None, fill_value=None, - hardmask=False, copy=False, subok=True): - _data = np.array(data, copy=copy, subok=subok, dtype=dtype) - _data = _data.view(self) - _data._hardmask = hardmask - if mask is not nomask: - if isinstance(mask, np.void): - _data._mask = mask - else: - try: - # Mask is already a 0D array - _data._mask = np.void(mask) - except TypeError: - # Transform the mask to a void - mdtype = make_mask_descr(dtype) - _data._mask = np.array(mask, dtype=mdtype)[()] - if fill_value is not None: - _data.fill_value = fill_value - return _data - - def _get_data(self): - # Make sure that the _data part is a np.void - return self.view(ndarray)[()] - _data = property(fget=_get_data) - - def __getitem__(self, indx): - "Get the index..." 
- m = self._mask - if m is not nomask and m[indx]: - return masked - return self._data[indx] - - def __setitem__(self, indx, value): - self._data[indx] = value - if self._hardmask: - self._mask[indx] |= getattr(value, "_mask", False) - else: - self._mask[indx] = getattr(value, "_mask", False) - - def __str__(self): - m = self._mask - if (m is nomask): - return self._data.__str__() - m = tuple(m) - if (not any(m)): - return self._data.__str__() - r = self._data.tolist() - p = masked_print_option - if not p.enabled(): - p = 'N/A' - else: - p = str(p) - r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)] - return "(%s)" % ", ".join(r) - - def __repr__(self): - m = self._mask - if (m is nomask): - return self._data.__repr__() - m = tuple(m) - if not any(m): - return self._data.__repr__() - p = masked_print_option - if not p.enabled(): - return self.filled(self.fill_value).__repr__() - p = str(p) - r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)] - return "(%s)" % ", ".join(r) - - def __iter__(self): - "Defines an iterator for mvoid" - (_data, _mask) = (self._data, self._mask) - if _mask is nomask: - for d in _data: - yield d - else: - for (d, m) in zip(_data, _mask): - if m: - yield masked - else: - yield d - - def __len__(self): - return self._data.__len__() - - def filled(self, fill_value=None): - """ - Return a copy with masked fields filled with a given value. - - Parameters - ---------- - fill_value : scalar, optional - The value to use for invalid entries (None by default). - If None, the `fill_value` attribute is used instead. - - Returns - ------- - filled_void - A `np.void` object - - See Also - -------- - MaskedArray.filled - - """ - return asarray(self).filled(fill_value)[()] - - def tolist(self): - """ - Transforms the mvoid object into a tuple. - - Masked fields are replaced by None. - - Returns - ------- - returned_tuple - Tuple of fields - """ - _mask = self._mask - if _mask is nomask: - return self._data.tolist() - result = [] - for (d, m) in zip(self._data, self._mask): - if m: - result.append(None) - else: - # .item() makes sure we return a standard Python object - result.append(d.item()) - return tuple(result) - - - -#####-------------------------------------------------------------------------- -#---- --- Shortcuts --- -#####--------------------------------------------------------------------------- -def isMaskedArray(x): - """ - Test whether input is an instance of MaskedArray. - - This function returns True if `x` is an instance of MaskedArray - and returns False otherwise. Any object is accepted as input. - - Parameters - ---------- - x : object - Object to test. - - Returns - ------- - result : bool - True if `x` is a MaskedArray. - - See Also - -------- - isMA : Alias to isMaskedArray. - isarray : Alias to isMaskedArray. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.eye(3, 3) - >>> a - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) - >>> m = ma.masked_values(a, 0) - >>> m - masked_array(data = - [[1.0 -- --] - [-- 1.0 --] - [-- -- 1.0]], - mask = - [[False True True] - [ True False True] - [ True True False]], - fill_value=0.0) - >>> ma.isMaskedArray(a) - False - >>> ma.isMaskedArray(m) - True - >>> ma.isMaskedArray([0, 1, 2]) - False - - """ - return isinstance(x, MaskedArray) -isarray = isMaskedArray -isMA = isMaskedArray #backward compatibility - -# We define the masked singleton as a float for higher precedence... 
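# --- Illustrative aside: isMaskedArray tests the type only, while the
# --- `masked` singleton defined just below is what indexing a masked slot
# --- returns.
import numpy.ma as ma

a = ma.array([1, 2, 3], mask=[0, 1, 0])
print(ma.isMaskedArray(a))          # True
print(ma.isMaskedArray([0, 1, 2]))  # False: a plain list is not a MaskedArray
print(a[1] is ma.masked)            # True: masked entries are the singleton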
-# Note that it can be tricky sometimes w/ type comparison
-
-class MaskedConstant(MaskedArray):
-    #
-    _data = data = np.array(0.)
-    _mask = mask = np.array(True)
-    _baseclass = ndarray
-    #
-    def __new__(self):
-        return self._data.view(self)
-    #
-    def __array_finalize__(self, obj):
-        return
-    #
-    def __array_wrap__(self, obj):
-        return self
-    #
-    def __str__(self):
-        return str(masked_print_option._display)
-    #
-    def __repr__(self):
-        return 'masked'
-    #
-    def flatten(self):
-        return masked_array([self._data], dtype=float, mask=[True])
-
-    def __reduce__(self):
-        """Override of MaskedArray's __reduce__.
-        """
-        return (self.__class__, ())
-
-
-masked = masked_singleton = MaskedConstant()
-
-
-
-masked_array = MaskedArray
-
-def array(data, dtype=None, copy=False, order=False,
-          mask=nomask, fill_value=None,
-          keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0,
-          ):
-    """array(data, dtype=None, copy=False, order=False, mask=nomask,
-             fill_value=None, keep_mask=True, hard_mask=False, shrink=True,
-             subok=True, ndmin=0)
-
-    Acts as a shortcut to MaskedArray, with options in a different order
-    for convenience and backwards compatibility.
-
-    """
-    #!!!: we should try to put 'order' somewhere
-    return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
-                       keep_mask=keep_mask, hard_mask=hard_mask,
-                       fill_value=fill_value, ndmin=ndmin, shrink=shrink)
-array.__doc__ = masked_array.__doc__
-
-def is_masked(x):
-    """
-    Determine whether input has masked values.
-
-    Accepts any object as input, but always returns False unless the
-    input is a MaskedArray containing masked values.
-
-    Parameters
-    ----------
-    x : array_like
-        Array to check for masked values.
-
-    Returns
-    -------
-    result : bool
-        True if `x` is a MaskedArray with masked values, False otherwise.
-
-    Examples
-    --------
-    >>> import numpy.ma as ma
-    >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
-    >>> x
-    masked_array(data = [-- 1 -- 2 3],
-                 mask = [ True False  True False False],
-           fill_value = 999999)
-    >>> ma.is_masked(x)
-    True
-    >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
-    >>> x
-    masked_array(data = [0 1 0 2 3],
-                 mask = False,
-           fill_value = 999999)
-    >>> ma.is_masked(x)
-    False
-
-    Always returns False if `x` isn't a MaskedArray.
-
-    >>> x = [False, True, False]
-    >>> ma.is_masked(x)
-    False
-    >>> x = 'a string'
-    >>> ma.is_masked(x)
-    False
-
-    """
-    m = getmask(x)
-    if m is nomask:
-        return False
-    elif m.any():
-        return True
-    return False
-
-
-#####---------------------------------------------------------------------------
-#---- --- Extrema functions ---
-#####---------------------------------------------------------------------------
-class _extrema_operation(object):
-    """
-    Generic class for maximum/minimum functions.
-
-    .. note::
-        This is the base class for `_maximum_operation` and
-        `_minimum_operation`.
-
-    """
-    def __call__(self, a, b=None):
-        "Executes the call behavior."
-        if b is None:
-            return self.reduce(a)
-        return where(self.compare(a, b), a, b)
-    #.........
-    def reduce(self, target, axis=None):
-        "Reduce target along the given axis."
- target = narray(target, copy=False, subok=True) - m = getmask(target) - if axis is not None: - kargs = { 'axis' : axis } - else: - kargs = {} - target = target.ravel() - if not (m is nomask): - m = m.ravel() - if m is nomask: - t = self.ufunc.reduce(target, **kargs) - else: - target = target.filled(self.fill_value_func(target)).view(type(target)) - t = self.ufunc.reduce(target, **kargs) - m = umath.logical_and.reduce(m, **kargs) - if hasattr(t, '_mask'): - t._mask = m - elif m: - t = masked - return t - #......... - def outer (self, a, b): - "Return the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - result = self.ufunc.outer(filled(a), filled(b)) - if not isinstance(result, MaskedArray): - result = result.view(MaskedArray) - result._mask = m - return result - -#............................ -class _minimum_operation(_extrema_operation): - "Object to calculate minima" - def __init__ (self): - """minimum(a, b) or minimum(a) -In one argument case, returns the scalar minimum. - """ - self.ufunc = umath.minimum - self.afunc = amin - self.compare = less - self.fill_value_func = minimum_fill_value - -#............................ -class _maximum_operation(_extrema_operation): - "Object to calculate maxima" - def __init__ (self): - """maximum(a, b) or maximum(a) - In one argument case returns the scalar maximum. - """ - self.ufunc = umath.maximum - self.afunc = amax - self.compare = greater - self.fill_value_func = maximum_fill_value - -#.......................................................... -def min(obj, axis=None, out=None, fill_value=None): - try: - return obj.min(axis=axis, fill_value=fill_value, out=out) - except (AttributeError, TypeError): - # If obj doesn't have a min method, - # ...or if the method doesn't accept a fill_value argument - return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out) -min.__doc__ = MaskedArray.min.__doc__ - -def max(obj, axis=None, out=None, fill_value=None): - try: - return obj.max(axis=axis, fill_value=fill_value, out=out) - except (AttributeError, TypeError): - # If obj doesn't have a max method, - # ...or if the method doesn't accept a fill_value argument - return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out) -max.__doc__ = MaskedArray.max.__doc__ - -def ptp(obj, axis=None, out=None, fill_value=None): - """a.ptp(axis=None) = a.max(axis)-a.min(axis)""" - try: - return obj.ptp(axis, out=out, fill_value=fill_value) - except (AttributeError, TypeError): - # If obj doesn't have a ptp method, - # ...or if the method doesn't accept a fill_value argument - return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out) -ptp.__doc__ = MaskedArray.ptp.__doc__ - - -#####--------------------------------------------------------------------------- -#---- --- Definition of functions from the corresponding methods --- -#####--------------------------------------------------------------------------- -class _frommethod: - """ - Define functions from existing MaskedArray methods. - - Parameters - ---------- - methodname : str - Name of the method to transform. - - """ - def __init__(self, methodname, reversed=False): - self.__name__ = methodname - self.__doc__ = self.getdoc() - self.reversed = reversed - # - def getdoc(self): - "Return the doc of the function (from the doc of the method)." 
- meth = getattr(MaskedArray, self.__name__, None) or\ - getattr(np, self.__name__, None) - signature = self.__name__ + get_object_signature(meth) - if meth is not None: - doc = """ %s\n%s""" % (signature, getattr(meth, '__doc__', None)) - return doc - # - def __call__(self, a, *args, **params): - if self.reversed: - args = list(args) - arr = args[0] - args[0] = a - a = arr - # Get the method from the array (if possible) - method_name = self.__name__ - method = getattr(a, method_name, None) - if method is not None: - return method(*args, **params) - # Still here ? Then a is not a MaskedArray - method = getattr(MaskedArray, method_name, None) - if method is not None: - return method(MaskedArray(a), *args, **params) - # Still here ? OK, let's call the corresponding np function - method = getattr(np, method_name) - return method(a, *args, **params) - -all = _frommethod('all') -anomalies = anom = _frommethod('anom') -any = _frommethod('any') -compress = _frommethod('compress', reversed=True) -cumprod = _frommethod('cumprod') -cumsum = _frommethod('cumsum') -copy = _frommethod('copy') -diagonal = _frommethod('diagonal') -harden_mask = _frommethod('harden_mask') -ids = _frommethod('ids') -maximum = _maximum_operation() -mean = _frommethod('mean') -minimum = _minimum_operation() -nonzero = _frommethod('nonzero') -prod = _frommethod('prod') -product = _frommethod('prod') -ravel = _frommethod('ravel') -repeat = _frommethod('repeat') -shrink_mask = _frommethod('shrink_mask') -soften_mask = _frommethod('soften_mask') -std = _frommethod('std') -sum = _frommethod('sum') -swapaxes = _frommethod('swapaxes') -#take = _frommethod('take') -trace = _frommethod('trace') -var = _frommethod('var') - -def take(a, indices, axis=None, out=None, mode='raise'): - """ - """ - a = masked_array(a) - return a.take(indices, axis=axis, out=out, mode=mode) - - -#.............................................................................. -def power(a, b, third=None): - """ - Returns element-wise base array raised to power from second array. - - This is the masked array version of `numpy.power`. For details see - `numpy.power`. - - See Also - -------- - numpy.power - - Notes - ----- - The *out* argument to `numpy.power` is not supported, `third` has to be - None. 
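# --- Illustrative aside: ma.power masks both the originally masked entries
# --- and any invalid results (here, the NaN produced by (-1.) ** 0.5).
import numpy.ma as ma

a = ma.array([-1., 4., 9.], mask=[0, 0, 1])
print(ma.power(a, 0.5))  # [-- 2.0 --]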
- - """ - if third is not None: - raise MaskError("3-argument power not supported.") - # Get the masks - ma = getmask(a) - mb = getmask(b) - m = mask_or(ma, mb) - # Get the rawdata - fa = getdata(a) - fb = getdata(b) - # Get the type of the result (so that we preserve subclasses) - if isinstance(a, MaskedArray): - basetype = type(a) - else: - basetype = MaskedArray - # Get the result and view it as a (subclass of) MaskedArray - with np.errstate(divide='ignore', invalid='ignore'): - result = np.where(m, fa, umath.power(fa, fb)).view(basetype) - result._update_from(a) - # Find where we're in trouble w/ NaNs and Infs - invalid = np.logical_not(np.isfinite(result.view(ndarray))) - # Add the initial mask - if m is not nomask: - if not (result.ndim): - return masked - result._mask = np.logical_or(m, invalid) - # Fix the invalid parts - if invalid.any(): - if not result.ndim: - return masked - elif result._mask is nomask: - result._mask = invalid - result._data[invalid] = result.fill_value - return result - -# if fb.dtype.char in typecodes["Integer"]: -# return masked_array(umath.power(fa, fb), m) -# m = mask_or(m, (fa < 0) & (fb != fb.astype(int))) -# if m is nomask: -# return masked_array(umath.power(fa, fb)) -# else: -# fa = fa.copy() -# if m.all(): -# fa.flat = 1 -# else: -# np.copyto(fa, 1, where=m) -# return masked_array(umath.power(fa, fb), m) - -#.............................................................................. -def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None): - "Function version of the eponymous method." - if fill_value is None: - fill_value = default_fill_value(a) - d = filled(a, fill_value) - if axis is None: - return d.argsort(kind=kind, order=order) - return d.argsort(axis, kind=kind, order=order) -argsort.__doc__ = MaskedArray.argsort.__doc__ - -def argmin(a, axis=None, fill_value=None): - "Function version of the eponymous method." - if fill_value is None: - fill_value = default_fill_value(a) - d = filled(a, fill_value) - return d.argmin(axis=axis) -argmin.__doc__ = MaskedArray.argmin.__doc__ - -def argmax(a, axis=None, fill_value=None): - "Function version of the eponymous method." - if fill_value is None: - fill_value = default_fill_value(a) - try: - fill_value = -fill_value - except: - pass - d = filled(a, fill_value) - return d.argmax(axis=axis) -argmax.__doc__ = MaskedArray.argmax.__doc__ - -def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None): - "Function version of the eponymous method." - a = narray(a, copy=True, subok=True) - if axis is None: - a = a.flatten() - axis = 0 - if fill_value is None: - if endwith: - filler = minimum_fill_value(a) - else: - filler = maximum_fill_value(a) - else: - filler = fill_value - - sindx = filled(a, filler).argsort(axis=axis, kind=kind, order=order) - - # save meshgrid memory for 1d arrays - if a.ndim == 1: - indx = sindx - else: - indx = np.meshgrid(*[np.arange(x) for x in a.shape], sparse=True, - indexing='ij') - indx[axis] = sindx - return a[indx] -sort.__doc__ = MaskedArray.sort.__doc__ - - -def compressed(x): - """ - Return all the non-masked data as a 1-D array. - - This function is equivalent to calling the "compressed" method of a - `MaskedArray`, see `MaskedArray.compressed` for details. - - See Also - -------- - MaskedArray.compressed - Equivalent method. - - """ - if not isinstance(x, MaskedArray): - x = asanyarray(x) - return x.compressed() - - -def concatenate(arrays, axis=0): - """ - Concatenate a sequence of arrays along the given axis. 
- - Parameters - ---------- - arrays : sequence of array_like - The arrays must have the same shape, except in the dimension - corresponding to `axis` (the first, by default). - axis : int, optional - The axis along which the arrays will be joined. Default is 0. - - Returns - ------- - result : MaskedArray - The concatenated array with any masked entries preserved. - - See Also - -------- - numpy.concatenate : Equivalent function in the top-level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.arange(3) - >>> a[1] = ma.masked - >>> b = ma.arange(2, 5) - >>> a - masked_array(data = [0 -- 2], - mask = [False True False], - fill_value = 999999) - >>> b - masked_array(data = [2 3 4], - mask = False, - fill_value = 999999) - >>> ma.concatenate([a, b]) - masked_array(data = [0 -- 2 2 3 4], - mask = [False True False False False False], - fill_value = 999999) - - """ - d = np.concatenate([getdata(a) for a in arrays], axis) - rcls = get_masked_subclass(*arrays) - data = d.view(rcls) - # Check whether one of the arrays has a non-empty mask... - for x in arrays: - if getmask(x) is not nomask: - break - else: - return data - # OK, so we have to concatenate the masks - dm = np.concatenate([getmaskarray(a) for a in arrays], axis) - # If we decide to keep a '_shrinkmask' option, we want to check that ... - # ... all of them are True, and then check for dm.any() -# shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays]) -# if shrink and not dm.any(): - if not dm.dtype.fields and not dm.any(): - data._mask = nomask - else: - data._mask = dm.reshape(d.shape) - return data - -def count(a, axis=None): - if isinstance(a, MaskedArray): - return a.count(axis) - return masked_array(a, copy=False).count(axis) -count.__doc__ = MaskedArray.count.__doc__ - - -def diag(v, k=0): - """ - Extract a diagonal or construct a diagonal array. - - This function is the equivalent of `numpy.diag` that takes masked - values into account, see `numpy.diag` for details. - - See Also - -------- - numpy.diag : Equivalent function for ndarrays. - - """ - output = np.diag(v, k).view(MaskedArray) - if getmask(v) is not nomask: - output._mask = np.diag(v._mask, k) - return output - - -def expand_dims(x, axis): - """ - Expand the shape of an array. - - Expands the shape of the array by including a new axis before the one - specified by the `axis` parameter. This function behaves the same as - `numpy.expand_dims` but preserves masked elements. - - See Also - -------- - numpy.expand_dims : Equivalent function in top-level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> x = ma.array([1, 2, 4]) - >>> x[1] = ma.masked - >>> x - masked_array(data = [1 -- 4], - mask = [False True False], - fill_value = 999999) - >>> np.expand_dims(x, axis=0) - array([[1, 2, 4]]) - >>> ma.expand_dims(x, axis=0) - masked_array(data = - [[1 -- 4]], - mask = - [[False True False]], - fill_value = 999999) - - The same result can be achieved using slicing syntax with `np.newaxis`. - - >>> x[np.newaxis, :] - masked_array(data = - [[1 -- 4]], - mask = - [[False True False]], - fill_value = 999999) - - """ - result = n_expand_dims(x, axis) - if isinstance(x, MaskedArray): - new_shape = result.shape - result = x.view() - result.shape = new_shape - if result._mask is not nomask: - result._mask.shape = new_shape - return result - -#...................................... -def left_shift (a, n): - """ - Shift the bits of an integer to the left. 
-
-    This is the masked array version of `numpy.left_shift`, for details
-    see that function.
-
-    See Also
-    --------
-    numpy.left_shift
-
-    """
-    m = getmask(a)
-    if m is nomask:
-        d = umath.left_shift(filled(a), n)
-        return masked_array(d)
-    else:
-        d = umath.left_shift(filled(a, 0), n)
-        return masked_array(d, mask=m)
-
-def right_shift (a, n):
-    """
-    Shift the bits of an integer to the right.
-
-    This is the masked array version of `numpy.right_shift`, for details
-    see that function.
-
-    See Also
-    --------
-    numpy.right_shift
-
-    """
-    m = getmask(a)
-    if m is nomask:
-        d = umath.right_shift(filled(a), n)
-        return masked_array(d)
-    else:
-        d = umath.right_shift(filled(a, 0), n)
-        return masked_array(d, mask=m)
-
-#......................................
-def put(a, indices, values, mode='raise'):
-    """
-    Set storage-indexed locations to corresponding values.
-
-    This function is equivalent to `MaskedArray.put`, see that method
-    for details.
-
-    See Also
-    --------
-    MaskedArray.put
-
-    """
-    # We can't use 'frommethod', the order of arguments is different
-    try:
-        return a.put(indices, values, mode=mode)
-    except AttributeError:
-        return narray(a, copy=False).put(indices, values, mode=mode)
-
-def putmask(a, mask, values):  #, mode='raise'):
-    """
-    Changes elements of an array based on conditional and input values.
-
-    This is the masked array version of `numpy.putmask`, for details see
-    `numpy.putmask`.
-
-    See Also
-    --------
-    numpy.putmask
-
-    Notes
-    -----
-    Using a masked array as `values` will **not** transform a `ndarray` into
-    a `MaskedArray`.
-
-    """
-    # We can't use 'frommethod', the order of arguments is different
-    if not isinstance(a, MaskedArray):
-        a = a.view(MaskedArray)
-    (valdata, valmask) = (getdata(values), getmask(values))
-    if getmask(a) is nomask:
-        if valmask is not nomask:
-            a._sharedmask = True
-            a._mask = make_mask_none(a.shape, a.dtype)
-            np.copyto(a._mask, valmask, where=mask)
-    elif a._hardmask:
-        if valmask is not nomask:
-            m = a._mask.copy()
-            np.copyto(m, valmask, where=mask)
-            a.mask |= m
-    else:
-        if valmask is nomask:
-            valmask = getmaskarray(values)
-        np.copyto(a._mask, valmask, where=mask)
-    np.copyto(a._data, valdata, where=mask)
-    return
-
-def transpose(a, axes=None):
-    """
-    Permute the dimensions of an array.
-
-    This function is exactly equivalent to `numpy.transpose`.
-
-    See Also
-    --------
-    numpy.transpose : Equivalent function in top-level NumPy module.
-
-    Examples
-    --------
-    >>> import numpy.ma as ma
-    >>> x = ma.arange(4).reshape((2,2))
-    >>> x[1, 1] = ma.masked
-    >>> x
-    masked_array(data =
-     [[0 1]
-     [2 --]],
-                 mask =
-     [[False False]
-     [False  True]],
-           fill_value = 999999)
-    >>> ma.transpose(x)
-    masked_array(data =
-     [[0 2]
-     [1 --]],
-                 mask =
-     [[False False]
-     [False  True]],
-           fill_value = 999999)
-
-    """
-    # We can't use 'frommethod', as 'transpose' doesn't take keywords
-    try:
-        return a.transpose(axes)
-    except AttributeError:
-        return narray(a, copy=False).transpose(axes).view(MaskedArray)
-
-def reshape(a, new_shape, order='C'):
-    """
-    Returns an array containing the same data with a new shape.
-
-    Refer to `MaskedArray.reshape` for full documentation.
-
-    See Also
-    --------
-    MaskedArray.reshape : equivalent function
-
-    """
-    # We can't use 'frommethod': it whines about some parameters.
- try: - return a.reshape(new_shape, order=order) - except AttributeError: - _tmp = narray(a, copy=False).reshape(new_shape, order=order) - return _tmp.view(MaskedArray) - -def resize(x, new_shape): - """ - Return a new masked array with the specified size and shape. - - This is the masked equivalent of the `numpy.resize` function. The new - array is filled with repeated copies of `x` (in the order that the - data are stored in memory). If `x` is masked, the new array will be - masked, and the new mask will be a repetition of the old one. - - See Also - -------- - numpy.resize : Equivalent function in the top level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.array([[1, 2] ,[3, 4]]) - >>> a[0, 1] = ma.masked - >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value = 999999) - >>> np.resize(a, (3, 3)) - array([[1, 2, 3], - [4, 1, 2], - [3, 4, 1]]) - >>> ma.resize(a, (3, 3)) - masked_array(data = - [[1 -- 3] - [4 1 --] - [3 4 1]], - mask = - [[False True False] - [False False True] - [False False False]], - fill_value = 999999) - - A MaskedArray is always returned, regardless of the input type. - - >>> a = np.array([[1, 2] ,[3, 4]]) - >>> ma.resize(a, (3, 3)) - masked_array(data = - [[1 2 3] - [4 1 2] - [3 4 1]], - mask = - False, - fill_value = 999999) - - """ - # We can't use _frommethods here, as N.resize is notoriously whiny. - m = getmask(x) - if m is not nomask: - m = np.resize(m, new_shape) - result = np.resize(x, new_shape).view(get_masked_subclass(x)) - if result.ndim: - result._mask = m - return result - - -#................................................ -def rank(obj): - "maskedarray version of the numpy function." - return np.rank(getdata(obj)) -rank.__doc__ = np.rank.__doc__ -# -def shape(obj): - "maskedarray version of the numpy function." - return np.shape(getdata(obj)) -shape.__doc__ = np.shape.__doc__ -# -def size(obj, axis=None): - "maskedarray version of the numpy function." - return np.size(getdata(obj), axis) -size.__doc__ = np.size.__doc__ -#................................................ - -#####-------------------------------------------------------------------------- -#---- --- Extra functions --- -#####-------------------------------------------------------------------------- -def where (condition, x=None, y=None): - """ - Return a masked array with elements from x or y, depending on condition. - - Returns a masked array, shaped like condition, where the elements - are from `x` when `condition` is True, and from `y` otherwise. - If neither `x` nor `y` are given, the function returns a tuple of - indices where `condition` is True (the result of - ``condition.nonzero()``). - - Parameters - ---------- - condition : array_like, bool - The condition to meet. For each True element, yield the corresponding - element from `x`, otherwise from `y`. - x, y : array_like, optional - Values from which to choose. `x` and `y` need to have the same shape - as condition, or be broadcast-able to that shape. - - Returns - ------- - out : MaskedArray or tuple of ndarrays - The resulting masked array if `x` and `y` were given, otherwise - the result of ``condition.nonzero()``. - - See Also - -------- - numpy.where : Equivalent function in the top-level NumPy module. - - Examples - -------- - >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], - ... [1, 0, 1], - ... 
[0, 1, 0]]) - >>> print x - [[0.0 -- 2.0] - [-- 4.0 --] - [6.0 -- 8.0]] - >>> np.ma.where(x > 5) # return the indices where x > 5 - (array([2, 2]), array([0, 2])) - - >>> print np.ma.where(x > 5, x, -3.1416) - [[-3.1416 -- -3.1416] - [-- -3.1416 --] - [6.0 -- 8.0]] - - """ - if x is None and y is None: - return filled(condition, 0).nonzero() - elif x is None or y is None: - raise ValueError("Either both or neither x and y should be given.") - # Get the condition ............... - fc = filled(condition, 0).astype(MaskType) - notfc = np.logical_not(fc) - # Get the data ...................................... - xv = getdata(x) - yv = getdata(y) - if x is masked: - ndtype = yv.dtype - elif y is masked: - ndtype = xv.dtype - else: - ndtype = np.find_common_type([xv.dtype, yv.dtype], []) - # Construct an empty array and fill it - d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray) - _data = d._data - np.copyto(_data, xv.astype(ndtype), where=fc) - np.copyto(_data, yv.astype(ndtype), where=notfc) - # Create an empty mask and fill it - _mask = d._mask = np.zeros(fc.shape, dtype=MaskType) - np.copyto(_mask, getmask(x), where=fc) - np.copyto(_mask, getmask(y), where=notfc) - _mask |= getmaskarray(condition) - if not _mask.any(): - d._mask = nomask - return d - -def choose (indices, choices, out=None, mode='raise'): - """ - Use an index array to construct a new array from a set of choices. - - Given an array of integers and a set of n choice arrays, this method - will create a new array that merges each of the choice arrays. Where a - value in `a` is i, the new array will have the value that choices[i] - contains in the same place. - - Parameters - ---------- - a : ndarray of ints - This array must contain integers in ``[0, n-1]``, where n is the - number of choices. - choices : sequence of arrays - Choice arrays. The index array and all of the choices should be - broadcastable to the same shape. - out : array, optional - If provided, the result will be inserted into this array. It should - be of the appropriate shape and `dtype`. - mode : {'raise', 'wrap', 'clip'}, optional - Specifies how out-of-bounds indices will behave. - - * 'raise' : raise an error - * 'wrap' : wrap around - * 'clip' : clip to the range - - Returns - ------- - merged_array : array - - See Also - -------- - choose : equivalent function - - Examples - -------- - >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) - >>> a = np.array([2, 1, 0]) - >>> np.ma.choose(a, choice) - masked_array(data = [3 2 1], - mask = False, - fill_value=999999) - - """ - def fmask (x): - "Returns the filled array, or True if masked." - if x is masked: - return True - return filled(x) - def nmask (x): - "Returns the mask, True if ``masked``, False if ``nomask``." - if x is masked: - return True - return getmask(x) - # Get the indices...... - c = filled(indices, 0) - # Get the masks........ - masks = [nmask(x) for x in choices] - data = [fmask(x) for x in choices] - # Construct the mask - outputmask = np.choose(c, masks, mode=mode) - outputmask = make_mask(mask_or(outputmask, getmask(indices)), - copy=0, shrink=True) - # Get the choices...... - d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) - if out is not None: - if isinstance(out, MaskedArray): - out.__setmask__(outputmask) - return out - d.__setmask__(outputmask) - return d - - -def round_(a, decimals=0, out=None): - """ - Return a copy of a, rounded to 'decimals' places. - - When 'decimals' is negative, it specifies the number of positions - to the left of the decimal point. 
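# --- Illustrative aside: three-argument ma.where picks from x or y and keeps
# --- every mask, matching the docstring example above.
import numpy as np

x = np.ma.array(np.arange(9.).reshape(3, 3),
                mask=[[0, 1, 0], [1, 0, 1], [0, 1, 0]])
print(np.ma.where(x > 5, x, -3.1416))
# [[-3.1416 -- -3.1416]
#  [-- -3.1416 --]
#  [6.0 -- 8.0]]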
The real and imaginary parts of - complex numbers are rounded separately. Nothing is done if the - array is not of float type and 'decimals' is greater than or equal - to 0. - - Parameters - ---------- - decimals : int - Number of decimals to round to. May be negative. - out : array_like - Existing array to use for output. - If not given, returns a default copy of a. - - Notes - ----- - If out is given and does not have a mask attribute, the mask of a - is lost! - - """ - if out is None: - return np.round_(a, decimals, out) - else: - np.round_(getdata(a), decimals, out) - if hasattr(out, '_mask'): - out._mask = getmask(a) - return out -round = round_ - -def inner(a, b): - """ - Returns the inner product of a and b for arrays of floating point types. - - Like the generic NumPy equivalent the product sum is over the last dimension - of a and b. - - Notes - ----- - The first argument is not conjugated. - - """ - fa = filled(a, 0) - fb = filled(b, 0) - if len(fa.shape) == 0: - fa.shape = (1,) - if len(fb.shape) == 0: - fb.shape = (1,) - return np.inner(fa, fb).view(MaskedArray) -inner.__doc__ = doc_note(np.inner.__doc__, - "Masked values are replaced by 0.") -innerproduct = inner - -def outer(a, b): - "maskedarray version of the numpy function." - fa = filled(a, 0).ravel() - fb = filled(b, 0).ravel() - d = np.outer(fa, fb) - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - return masked_array(d) - ma = getmaskarray(a) - mb = getmaskarray(b) - m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0) - return masked_array(d, mask=m) -outer.__doc__ = doc_note(np.outer.__doc__, - "Masked values are replaced by 0.") -outerproduct = outer - -def allequal (a, b, fill_value=True): - """ - Return True if all entries of a and b are equal, using - fill_value as a truth value where either or both are masked. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - fill_value : bool, optional - Whether masked values in a or b are considered equal (True) or not - (False). - - Returns - ------- - y : bool - Returns True if the two arrays are equal within the given - tolerance, False otherwise. If either array contains NaN, - then False is returned. - - See Also - -------- - all, any - numpy.ma.allclose - - Examples - -------- - >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) - >>> a - masked_array(data = [10000000000.0 1e-07 --], - mask = [False False True], - fill_value=1e+20) - - >>> b = array([1e10, 1e-7, -42.0]) - >>> b - array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) - >>> ma.allequal(a, b, fill_value=False) - False - >>> ma.allequal(a, b) - True - - """ - m = mask_or(getmask(a), getmask(b)) - if m is nomask: - x = getdata(a) - y = getdata(b) - d = umath.equal(x, y) - return d.all() - elif fill_value: - x = getdata(a) - y = getdata(b) - d = umath.equal(x, y) - dm = array(d, mask=m, copy=False) - return dm.filled(True).all(None) - else: - return False - -def allclose (a, b, masked_equal=True, rtol=1e-5, atol=1e-8): - """ - Returns True if two arrays are element-wise equal within a tolerance. - - This function is equivalent to `allclose` except that masked values - are treated as equal (default) or unequal, depending on the `masked_equal` - argument. - - Parameters - ---------- - a, b : array_like - Input arrays to compare. - masked_equal : bool, optional - Whether masked values in `a` and `b` are considered equal (True) or not - (False). They are considered equal by default. - rtol : float, optional - Relative tolerance. 
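# --- Illustrative aside: allequal treats masked pairs as equal unless
# --- fill_value=False, per the description above.
import numpy.ma as ma

a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
b = ma.array([1e10, 1e-7, -42.0])
print(ma.allequal(a, b))                    # True: the masked pair is skipped
print(ma.allequal(a, b, fill_value=False))  # False: masked pairs now differ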
The relative difference is equal to ``rtol * b``. - Default is 1e-5. - atol : float, optional - Absolute tolerance. The absolute difference is equal to `atol`. - Default is 1e-8. - - Returns - ------- - y : bool - Returns True if the two arrays are equal within the given - tolerance, False otherwise. If either array contains NaN, then - False is returned. - - See Also - -------- - all, any - numpy.allclose : the non-masked `allclose`. - - Notes - ----- - If the following equation is element-wise True, then `allclose` returns - True:: - - absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - - Return True if all elements of `a` and `b` are equal subject to - given tolerances. - - Examples - -------- - >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) - >>> a - masked_array(data = [10000000000.0 1e-07 --], - mask = [False False True], - fill_value = 1e+20) - >>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) - False - - >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) - >>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) - True - >>> ma.allclose(a, b, masked_equal=False) - False - - Masked values are not compared directly. - - >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) - >>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) - True - >>> ma.allclose(a, b, masked_equal=False) - False - - """ - x = masked_array(a, copy=False) - y = masked_array(b, copy=False) - - # make sure y is an inexact type to avoid abs(MIN_INT); will cause - # casting of x later. - dtype = np.result_type(y, 1.) - if y.dtype != dtype: - y = masked_array(y, dtype=dtype, copy=False) - - m = mask_or(getmask(x), getmask(y)) - xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) - # If we have some infs, they should fall at the same place. - if not np.all(xinf == filled(np.isinf(y), False)): - return False - # No infs at all - if not np.any(xinf): - d = filled(umath.less_equal(umath.absolute(x - y), - atol + rtol * umath.absolute(y)), - masked_equal) - return np.all(d) - - if not np.all(filled(x[xinf] == y[xinf], masked_equal)): - return False - x = x[~xinf] - y = y[~xinf] - - d = filled(umath.less_equal(umath.absolute(x - y), - atol + rtol * umath.absolute(y)), - masked_equal) - - return np.all(d) - -#.............................................................................. -def asarray(a, dtype=None, order=None): - """ - Convert the input to a masked array of the given data-type. - - No copy is performed if the input is already an `ndarray`. If `a` is - a subclass of `MaskedArray`, a base class `MaskedArray` is returned. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to a masked array. This - includes lists, lists of tuples, tuples, tuples of tuples, tuples - of lists, ndarrays and masked arrays. - dtype : dtype, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. - - Returns - ------- - out : MaskedArray - Masked array interpretation of `a`. - - See Also - -------- - asanyarray : Similar to `asarray`, but conserves subclasses. - - Examples - -------- - >>> x = np.arange(10.).reshape(2, 5) - >>> x - array([[ 0., 1., 2., 3., 4.], - [ 5., 6., 7., 8., 9.]]) - >>> np.ma.asarray(x) - masked_array(data = - [[ 0. 1. 2. 3. 4.] - [ 5. 6. 7. 8. 
9.]], - mask = - False, - fill_value = 1e+20) - >>> type(np.ma.asarray(x)) - <class 'numpy.ma.core.MaskedArray'> - - """ - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False) - -def asanyarray(a, dtype=None): - """ - Convert the input to a masked array, conserving subclasses. - - If `a` is a subclass of `MaskedArray`, its class is conserved. - No copy is performed if the input is already an `ndarray`. - - Parameters - ---------- - a : array_like - Input data, in any form that can be converted to an array. - dtype : dtype, optional - By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. - - Returns - ------- - out : MaskedArray - MaskedArray interpretation of `a`. - - See Also - -------- - asarray : Similar to `asanyarray`, but does not conserve subclass. - - Examples - -------- - >>> x = np.arange(10.).reshape(2, 5) - >>> x - array([[ 0., 1., 2., 3., 4.], - [ 5., 6., 7., 8., 9.]]) - >>> np.ma.asanyarray(x) - masked_array(data = - [[ 0. 1. 2. 3. 4.] - [ 5. 6. 7. 8. 9.]], - mask = - False, - fill_value = 1e+20) - >>> type(np.ma.asanyarray(x)) - <class 'numpy.ma.core.MaskedArray'> - - """ - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) - - -#####-------------------------------------------------------------------------- -#---- --- Pickling --- -#####-------------------------------------------------------------------------- -def dump(a, F): - """ - Pickle a masked array to a file. - - This is a wrapper around ``cPickle.dump``. - - Parameters - ---------- - a : MaskedArray - The array to be pickled. - F : str or file-like object - The file to pickle `a` to. If a string, the full path to the file. - - """ - if not hasattr(F, 'readline'): - F = open(F, 'w') - return pickle.dump(a, F) - -def dumps(a): - """ - Return a string corresponding to the pickling of a masked array. - - This is a wrapper around ``cPickle.dumps``. - - Parameters - ---------- - a : MaskedArray - The array for which the string representation of the pickle is - returned. - - """ - return pickle.dumps(a) - -def load(F): - """ - Wrapper around ``cPickle.load`` which accepts either a file-like object - or a filename. - - Parameters - ---------- - F : str or file - The file or file name to load. - - See Also - -------- - dump : Pickle an array - - Notes - ----- - This is different from `numpy.load`, which does not use cPickle but loads - the NumPy binary .npy format. - - """ - if not hasattr(F, 'readline'): - F = open(F, 'r') - return pickle.load(F) - -def loads(strg): - """ - Load a pickle from the current string. - - The result of ``cPickle.loads(strg)`` is returned. - - Parameters - ---------- - strg : str - The string to load. - - See Also - -------- - dumps : Return a string corresponding to the pickling of a masked array. - - """ - return pickle.loads(strg) - -################################################################################ -def fromfile(file, dtype=float, count= -1, sep=''): - raise NotImplementedError("Not yet implemented. Sorry") - - -def fromflex(fxarray): - """ - Build a masked array from a suitable flexible-type array. - - The input array has to have a data-type with ``_data`` and ``_mask`` - fields. This type of array is output by `MaskedArray.toflex`. - - Parameters - ---------- - fxarray : ndarray - The structured input array, containing ``_data`` and ``_mask`` - fields. If present, other fields are discarded.
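A minimal round-trip sketch of the pickling helpers above (variable names are illustrative; `np.ma.dumps` and `np.ma.loads` are the wrappers defined in this hunk):

    import numpy as np
    a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    s = np.ma.dumps(a)               # wraps pickle.dumps
    b = np.ma.loads(s)               # wraps pickle.loads
    assert (b.mask == a.mask).all()  # the mask survives the round trip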
- - Returns - ------- - result : MaskedArray - The constructed masked array. - - See Also - -------- - MaskedArray.toflex : Build a flexible-type array from a masked array. - - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) - >>> rec = x.toflex() - >>> rec - array([[(0, False), (1, True), (2, False)], - [(3, True), (4, False), (5, True)], - [(6, False), (7, True), (8, False)]], - dtype=[('_data', '<i4'), ('_mask', '|b1')]) - >>> x2 = np.ma.fromflex(rec) - >>> x2 - masked_array(data = - [[0 -- 2] - [-- 4 --] - [6 -- 8]], - mask = - [[False True False] - [ True False True] - [False True False]], - fill_value = 999999) - - Extra fields can be present in the structured array but are discarded: - - >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')] - >>> rec2 = np.zeros((2, 2), dtype=dt) - >>> rec2 - array([[(0, False, 0.0), (0, False, 0.0)], - [(0, False, 0.0), (0, False, 0.0)]], - dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]) - >>> y = np.ma.fromflex(rec2) - >>> y - masked_array(data = - [[0 0] - [0 0]], - mask = - [[False False] - [False False]], - fill_value = 999999) - - """ - return masked_array(fxarray['_data'], mask=fxarray['_mask']) - - - -class _convert2ma: - """ - Convert functions from numpy to numpy.ma. - - Parameters - ---------- - _methodname : string - Name of the method to transform. - - """ - __doc__ = None - # - def __init__(self, funcname, params=None): - self._func = getattr(np, funcname) - self.__doc__ = self.getdoc() - self._extras = params or {} - # - def getdoc(self): - "Return the doc of the function (from the doc of the method)." - doc = getattr(self._func, '__doc__', None) - sig = get_object_signature(self._func) - if doc: - # Add the signature of the function at the beginning of the doc - if sig: - sig = "%s%s\n" % (self._func.__name__, sig) - doc = sig + doc - return doc - # - def __call__(self, a, *args, **params): - # Find the common parameters to the call and the definition - _extras = self._extras - common_params = set(params).intersection(_extras) - # Drop the common parameters from the call - for p in common_params: - _extras[p] = params.pop(p) - # Get the result - result = self._func.__call__(a, *args, **params).view(MaskedArray) - if "fill_value" in common_params: - result.fill_value = _extras.get("fill_value", None) - if "hardmask" in common_params: - result._hardmask = bool(_extras.get("hard_mask", False)) - return result - -arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False)) -clip = np.clip -diff = np.diff -empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False)) -empty_like = _convert2ma('empty_like') -frombuffer = _convert2ma('frombuffer') -fromfunction = _convert2ma('fromfunction') -identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False)) -indices = np.indices -ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False)) -ones_like = np.ones_like -squeeze = np.squeeze -zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False)) -zeros_like = np.zeros_like - -############################################################################### -def append(a, b, axis=None): - """Append values to the end of an array. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - arr : array_like - Values are appended to a copy of this array. - values : array_like - These values are appended to a copy of `arr`. It must be of the - correct shape (the same shape as `arr`, excluding `axis`). If `axis` - is not specified, `values` can be any shape and will be flattened - before use.
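A small sketch of the `_convert2ma` wrapper defined above: the behavior is read off the wrapper code itself (the extra `fill_value` keyword is popped before the NumPy function is called and set on the resulting masked array), not a documented API, and the values are made up:

    import numpy as np
    z = np.ma.zeros((2, 3), fill_value=-1.0)  # 'fill_value' is intercepted by _convert2ma
    print(z.fill_value)                       # -1.0, instead of the default 1e+20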
- axis : int, optional - The axis along which `values` are appended. If `axis` is not given, - both `arr` and `values` are flattened before use. - - Returns - ------- - append : MaskedArray - A copy of `arr` with `values` appended to `axis`. Note that `append` - does not occur in-place: a new array is allocated and filled. If - `axis` is None, the result is a flattened array. - - See Also - -------- - numpy.append : Equivalent function in the top-level NumPy module. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = ma.masked_values([1, 2, 3], 2) - >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) - >>> print(ma.append(a, b)) - [1 -- 3 4 5 6 -- 8 9] - """ - return concatenate([a, b], axis) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/extras.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/extras.py deleted file mode 100644 index 82a61a67c3fb2..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/extras.py +++ /dev/null @@ -1,1923 +0,0 @@ -""" -Masked arrays add-ons. - -A collection of utilities for `numpy.ma`. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ - -""" -from __future__ import division, absolute_import, print_function - -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -__all__ = ['apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', - 'atleast_3d', 'average', - 'clump_masked', 'clump_unmasked', 'column_stack', 'compress_cols', - 'compress_rowcols', 'compress_rows', 'count_masked', 'corrcoef', - 'cov', - 'diagflat', 'dot', 'dstack', - 'ediff1d', - 'flatnotmasked_contiguous', 'flatnotmasked_edges', - 'hsplit', 'hstack', - 'in1d', 'intersect1d', - 'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all', - 'masked_all_like', 'median', 'mr_', - 'notmasked_contiguous', 'notmasked_edges', - 'polyfit', - 'row_stack', - 'setdiff1d', 'setxor1d', - 'unique', 'union1d', - 'vander', 'vstack', - ] - -import itertools -import warnings - -from . import core as ma -from .core import MaskedArray, MAError, add, array, asarray, concatenate, count, \ - filled, getmask, getmaskarray, make_mask_descr, masked, masked_array, \ - mask_or, nomask, ones, sort, zeros -#from core import * - -import numpy as np -from numpy import ndarray, array as nxarray -import numpy.core.umath as umath -from numpy.lib.index_tricks import AxisConcatenator -from numpy.linalg import lstsq - - -#............................................................................... -def issequence(seq): - """Is seq a sequence (ndarray, list or tuple)?""" - if isinstance(seq, (ndarray, tuple, list)): - return True - return False - -def count_masked(arr, axis=None): - """ - Count the number of masked elements along the given axis. - - Parameters - ---------- - arr : array_like - An array with (possibly) masked elements. - axis : int, optional - Axis along which to count. If None (default), a flattened - version of the array is used. - - Returns - ------- - count : int, ndarray - The total number of masked elements (axis=None) or the number - of masked elements along each slice of the given axis. - - See Also - -------- - MaskedArray.count : Count non-masked elements. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.arange(9).reshape((3,3)) - >>> a = ma.array(a) - >>> a[1, 0] = ma.masked - >>> a[1, 2] = ma.masked - >>> a[2, 1] = ma.masked - >>> a - masked_array(data = - [[0 1 2] - [-- 4 --] - [6 -- 8]], - mask = - [[False False False] - [ True False True] - [False True False]], - fill_value=999999) - >>> ma.count_masked(a) - 3 - - When the `axis` keyword is used an array is returned. - - >>> ma.count_masked(a, axis=0) - array([1, 1, 1]) - >>> ma.count_masked(a, axis=1) - array([0, 2, 1]) - - """ - m = getmaskarray(arr) - return m.sum(axis) - -def masked_all(shape, dtype=float): - """ - Empty masked array with all elements masked. - - Return an empty masked array of the given shape and dtype, where all the - data are masked. - - Parameters - ---------- - shape : tuple - Shape of the required MaskedArray. - dtype : dtype, optional - Data type of the output. - - Returns - ------- - a : MaskedArray - A masked array with all data masked. - - See Also - -------- - masked_all_like : Empty masked array modelled on an existing array. - - Examples - -------- - >>> import numpy.ma as ma - >>> ma.masked_all((3, 3)) - masked_array(data = - [[-- -- --] - [-- -- --] - [-- -- --]], - mask = - [[ True True True] - [ True True True] - [ True True True]], - fill_value=1e+20) - - The `dtype` parameter defines the underlying data type. - - >>> a = ma.masked_all((3, 3)) - >>> a.dtype - dtype('float64') - >>> a = ma.masked_all((3, 3), dtype=np.int32) - >>> a.dtype - dtype('int32') - - """ - a = masked_array(np.empty(shape, dtype), - mask=np.ones(shape, make_mask_descr(dtype))) - return a - -def masked_all_like(arr): - """ - Empty masked array with the properties of an existing array. - - Return an empty masked array of the same shape and dtype as - the array `arr`, where all the data are masked. - - Parameters - ---------- - arr : ndarray - An array describing the shape and dtype of the required MaskedArray. - - Returns - ------- - a : MaskedArray - A masked array with all data masked. - - Raises - ------ - AttributeError - If `arr` doesn't have a shape attribute (i.e. not an ndarray) - - See Also - -------- - masked_all : Empty masked array with all elements masked. - - Examples - -------- - >>> import numpy.ma as ma - >>> arr = np.zeros((2, 3), dtype=np.float32) - >>> arr - array([[ 0., 0., 0.], - [ 0., 0., 0.]], dtype=float32) - >>> ma.masked_all_like(arr) - masked_array(data = - [[-- -- --] - [-- -- --]], - mask = - [[ True True True] - [ True True True]], - fill_value=1e+20) - - The dtype of the masked array matches the dtype of `arr`. - - >>> arr.dtype - dtype('float32') - >>> ma.masked_all_like(arr).dtype - dtype('float32') - - """ - a = np.empty_like(arr).view(MaskedArray) - a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) - return a - - -#####-------------------------------------------------------------------------- -#---- --- Standard functions --- -#####-------------------------------------------------------------------------- -class _fromnxfunction: - """ - Defines a wrapper to adapt NumPy functions to masked arrays. - - - An instance of `_fromnxfunction` can be called with the same parameters - as the wrapped NumPy function. The docstring of `newfunc` is adapted from - the wrapped function as well, see `getdoc`. - - Parameters - ---------- - funcname : str - The name of the function to be adapted. The function should be - in the NumPy namespace (i.e. ``np.funcname``). 
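Concretely, the `_fromnxfunction` wrapper applies the underlying NumPy function to the ``_data`` and to the ``_mask`` separately and reassembles a masked array; a minimal sketch of the observable effect (example values are illustrative):

    import numpy as np
    x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    y = np.ma.atleast_2d(x)        # np.atleast_2d is applied to _data and _mask alike
    print(y.shape, y.mask.shape)   # (1, 3) (1, 3): the mask is reshaped with the data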
- - """ - - def __init__(self, funcname): - self.__name__ = funcname - self.__doc__ = self.getdoc() - - def getdoc(self): - """ - Retrieve the docstring and signature from the function. - - The ``__doc__`` attribute of the function is used as the docstring for - the new masked array version of the function. A note on application - of the function to the mask is appended. - - .. warning:: - If the function docstring already contained a Notes section, the - new docstring will have two Notes sections instead of appending a note - to the existing section. - - Parameters - ---------- - None - - """ - npfunc = getattr(np, self.__name__, None) - doc = getattr(npfunc, '__doc__', None) - if doc: - sig = self.__name__ + ma.get_object_signature(npfunc) - locdoc = "Notes\n-----\nThe function is applied to both the _data"\ - " and the _mask, if any." - return '\n'.join((sig, doc, locdoc)) - return - - - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - if len(args) == 1: - x = args[0] - if isinstance(x, ndarray): - _d = func(x.__array__(), **params) - _m = func(getmaskarray(x), **params) - return masked_array(_d, mask=_m) - elif isinstance(x, tuple) or isinstance(x, list): - _d = func(tuple([np.asarray(a) for a in x]), **params) - _m = func(tuple([getmaskarray(a) for a in x]), **params) - return masked_array(_d, mask=_m) - else: - arrays = [] - args = list(args) - while len(args) > 0 and issequence(args[0]): - arrays.append(args.pop(0)) - res = [] - for x in arrays: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - res.append(masked_array(_d, mask=_m)) - return res - -atleast_1d = _fromnxfunction('atleast_1d') -atleast_2d = _fromnxfunction('atleast_2d') -atleast_3d = _fromnxfunction('atleast_3d') -#atleast_1d = np.atleast_1d -#atleast_2d = np.atleast_2d -#atleast_3d = np.atleast_3d - -vstack = row_stack = _fromnxfunction('vstack') -hstack = _fromnxfunction('hstack') -column_stack = _fromnxfunction('column_stack') -dstack = _fromnxfunction('dstack') - -hsplit = _fromnxfunction('hsplit') - -diagflat = _fromnxfunction('diagflat') - - -#####-------------------------------------------------------------------------- -#---- -#####-------------------------------------------------------------------------- -def flatten_inplace(seq): - """Flatten a sequence in place.""" - k = 0 - while (k != len(seq)): - while hasattr(seq[k], '__iter__'): - seq[k:(k + 1)] = seq[k] - k += 1 - return seq - - -def apply_along_axis(func1d, axis, arr, *args, **kwargs): - """ - (This docstring should be overwritten) - """ - arr = array(arr, copy=False, subok=True) - nd = arr.ndim - if axis < 0: - axis += nd - if (axis >= nd): - raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." - % (axis, nd)) - ind = [0] * (nd - 1) - i = np.zeros(nd, 'O') - indlist = list(range(nd)) - indlist.remove(axis) - i[axis] = slice(None, None) - outshape = np.asarray(arr.shape).take(indlist) - i.put(indlist, ind) - j = i.copy() - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - # if res is a number, then we have a smaller output array - asscalar = np.isscalar(res) - if not asscalar: - try: - len(res) - except TypeError: - asscalar = True - # Note: we shouldn't set the dtype of the output from the first result... 
- #...so we force the type to object, and build a list of dtypes - #...we'll just take the largest, to avoid some downcasting - dtypes = [] - if asscalar: - dtypes.append(np.asarray(res).dtype) - outarr = zeros(outshape, object) - outarr[tuple(ind)] = res - Ntot = np.product(outshape) - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= outshape[n]) and (n > (1 - nd)): - ind[n - 1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - outarr[tuple(ind)] = res - dtypes.append(asarray(res).dtype) - k += 1 - else: - res = array(res, copy=False, subok=True) - j = i.copy() - j[axis] = ([slice(None, None)] * res.ndim) - j.put(indlist, ind) - Ntot = np.product(outshape) - holdshape = outshape - outshape = list(arr.shape) - outshape[axis] = res.shape - dtypes.append(asarray(res).dtype) - outshape = flatten_inplace(outshape) - outarr = zeros(outshape, object) - outarr[tuple(flatten_inplace(j.tolist()))] = res - k = 1 - while k < Ntot: - # increment the index - ind[-1] += 1 - n = -1 - while (ind[n] >= holdshape[n]) and (n > (1 - nd)): - ind[n - 1] += 1 - ind[n] = 0 - n -= 1 - i.put(indlist, ind) - j.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())], *args, **kwargs) - outarr[tuple(flatten_inplace(j.tolist()))] = res - dtypes.append(asarray(res).dtype) - k += 1 - max_dtypes = np.dtype(np.asarray(dtypes).max()) - if not hasattr(arr, '_mask'): - result = np.asarray(outarr, dtype=max_dtypes) - else: - result = asarray(outarr, dtype=max_dtypes) - result.fill_value = ma.default_fill_value(result) - return result -apply_along_axis.__doc__ = np.apply_along_axis.__doc__ - - -def apply_over_axes(func, a, axes): - """ - (This docstring will be overwritten) - """ - val = asarray(a) - N = a.ndim - if array(axes).ndim == 0: - axes = (axes,) - for axis in axes: - if axis < 0: axis = N + axis - args = (val, axis) - res = func(*args) - if res.ndim == val.ndim: - val = res - else: - res = ma.expand_dims(res, axis) - if res.ndim == val.ndim: - val = res - else: - raise ValueError("function is not returning " - "an array of the correct shape") - return val -apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ - :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ - """ - - Examples - -------- - >>> a = ma.arange(24).reshape(2,3,4) - >>> a[:,0,1] = ma.masked - >>> a[:,1,:] = ma.masked - >>> print a - [[[0 -- 2 3] - [-- -- -- --] - [8 9 10 11]] - - [[12 -- 14 15] - [-- -- -- --] - [20 21 22 23]]] - >>> print ma.apply_over_axes(ma.sum, a, [0,2]) - [[[46] - [--] - [124]]] - - Tuple axis arguments to ufuncs are equivalent: - - >>> print ma.sum(a, axis=(0,2)).reshape((1,-1,1)) - [[[46] - [--] - [124]]] -""" - - -def average(a, axis=None, weights=None, returned=False): - """ - Return the weighted average of array over the given axis. - - Parameters - ---------- - a : array_like - Data to be averaged. - Masked entries are not taken into account in the computation. - axis : int, optional - Axis along which the average is computed. The default is to compute - the average of the flattened array. - weights : array_like, optional - The importance that each element has in the computation of the average. - The weights array can either be 1-D (in which case its length must be - the size of `a` along the given axis) or of the same shape as `a`. - If ``weights=None``, then all data in `a` are assumed to have a - weight equal to one. If `weights` is complex, the imaginary parts - are ignored. 
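A hedged sketch of how the weights interact with the mask in `average` (made-up values; the exact output formatting may differ):

    import numpy as np
    x = np.ma.array([[1., 2.], [3., 4.]], mask=[[0, 0], [1, 0]])
    print(np.ma.average(x, axis=0, weights=[3., 1.]))
    # roughly [1.0 2.5]: the masked 3. contributes neither a value nor a weight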
- returned : bool, optional - Flag indicating whether a tuple ``(result, sum of weights)`` - should be returned as output (True), or just the result (False). - Default is False. - - Returns - ------- - average, [sum_of_weights] : (tuple of) scalar or MaskedArray - The average along the specified axis. When returned is `True`, - return a tuple with the average as the first element and the sum - of the weights as the second element. The return type is `np.float64` - if `a` is of integer type, otherwise it is of the same type as `a`. - If returned, `sum_of_weights` is of the same type as `average`. - - Examples - -------- - >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) - >>> np.ma.average(a, weights=[3, 1, 0, 0]) - 1.25 - - >>> x = np.ma.arange(6.).reshape(3, 2) - >>> print x - [[ 0. 1.] - [ 2. 3.] - [ 4. 5.]] - >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], - ... returned=True) - >>> print avg - [2.66666666667 3.66666666667] - - """ - a = asarray(a) - mask = a.mask - ash = a.shape - if ash == (): - ash = (1,) - if axis is None: - if mask is nomask: - if weights is None: - n = a.sum(axis=None) - d = float(a.size) - else: - w = filled(weights, 0.0).ravel() - n = umath.add.reduce(a._data.ravel() * w) - d = umath.add.reduce(w) - del w - else: - if weights is None: - n = a.filled(0).sum(axis=None) - d = float(umath.add.reduce((~mask).ravel())) - else: - w = array(filled(weights, 0.0), float, mask=mask).ravel() - n = add.reduce(a.ravel() * w) - d = add.reduce(w) - del w - else: - if mask is nomask: - if weights is None: - d = ash[axis] * 1.0 - n = add.reduce(a._data, axis) - else: - w = filled(weights, 0.0) - wsh = w.shape - if wsh == (): - wsh = (1,) - if wsh == ash: - w = np.array(w, float, copy=0) - n = add.reduce(a * w, axis) - d = add.reduce(w, axis) - del w - elif wsh == (ash[axis],): - ni = ash[axis] - r = [None] * len(ash) - r[axis] = slice(None, None, 1) - w = eval ("w[" + repr(tuple(r)) + "] * ones(ash, float)") - n = add.reduce(a * w, axis) - d = add.reduce(w, axis, dtype=float) - del w, r - else: - raise ValueError('average: weights wrong shape.') - else: - if weights is None: - n = add.reduce(a, axis) - d = umath.add.reduce((~mask), axis=axis, dtype=float) - else: - w = filled(weights, 0.0) - wsh = w.shape - if wsh == (): - wsh = (1,) - if wsh == ash: - w = array(w, dtype=float, mask=mask, copy=0) - n = add.reduce(a * w, axis) - d = add.reduce(w, axis, dtype=float) - elif wsh == (ash[axis],): - ni = ash[axis] - r = [None] * len(ash) - r[axis] = slice(None, None, 1) - w = eval ("w[" + repr(tuple(r)) + \ - "] * masked_array(ones(ash, float), mask)") - n = add.reduce(a * w, axis) - d = add.reduce(w, axis, dtype=float) - else: - raise ValueError('average: weights wrong shape.') - del w - if n is masked or d is masked: - return masked - result = n / d - del n - - if isinstance(result, MaskedArray): - if ((axis is None) or (axis == 0 and a.ndim == 1)) and \ - (result.mask is nomask): - result = result._data - if returned: - if not isinstance(d, MaskedArray): - d = masked_array(d) - if isinstance(d, ndarray) and (not d.shape == result.shape): - d = ones(result.shape, dtype=float) * d - if returned: - return result, d - else: - return result - - -def median(a, axis=None, out=None, overwrite_input=False): - """ - Compute the median along the specified axis. - - Returns the median of the array elements. - - Parameters - ---------- - a : array_like - Input array or object that can be converted to an array. 
- axis : int, optional - Axis along which the medians are computed. The default (None) is - to compute the median along a flattened version of the array. - out : ndarray, optional - Alternative output array in which to place the result. It must - have the same shape and buffer length as the expected output - but the type will be cast if necessary. - overwrite_input : bool, optional - If True, then allow use of memory of input array (a) for - calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve - the contents of the input array. Treat the input as undefined, - but it will probably be fully or partially sorted. Default is - False. Note that, if `overwrite_input` is True, and the input - is not already an `ndarray`, an error will be raised. - - Returns - ------- - median : ndarray - A new array holding the result is returned unless out is - specified, in which case a reference to out is returned. - Return data-type is `float64` for integers and floats smaller than - `float64`, or the input data-type, otherwise. - - See Also - -------- - mean - - Notes - ----- - Given a vector ``V`` with ``N`` non masked values, the median of ``V`` - is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. - ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` - when ``N`` is even. - - Examples - -------- - >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) - >>> np.ma.extras.median(x) - 1.5 - - >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) - >>> np.ma.extras.median(x) - 2.5 - >>> np.ma.extras.median(x, axis=-1, overwrite_input=True) - masked_array(data = [ 2. 5.], - mask = False, - fill_value = 1e+20) - - """ - if not hasattr(a, 'mask') or np.count_nonzero(a.mask) == 0: - return masked_array(np.median(a, axis=axis, out=out, - overwrite_input=overwrite_input), copy=False) - if overwrite_input: - if axis is None: - asorted = a.ravel() - asorted.sort() - else: - a.sort(axis=axis) - asorted = a - else: - asorted = sort(a, axis=axis) - if axis is None: - axis = 0 - elif axis < 0: - axis += a.ndim - - counts = asorted.shape[axis] - (asorted.mask).sum(axis=axis) - h = counts // 2 - # create indexing mesh grid for all but reduced axis - axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape) - if i != axis] - ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij') - # insert indices of low and high median - ind.insert(axis, h - 1) - low = asorted[ind] - ind[axis] = h - high = asorted[ind] - # duplicate high if odd number of elements so mean does nothing - odd = counts % 2 == 1 - if asorted.ndim == 1: - if odd: - low = high - else: - low[odd] = high[odd] - return np.ma.mean([low, high], axis=0, out=out) - - -#.............................................................................. -def compress_rowcols(x, axis=None): - """ - Suppress the rows and/or columns of a 2-D array that contain - masked values. - - The suppression behavior is selected with the `axis` parameter. - - - If axis is None, both rows and columns are suppressed. - - If axis is 0, only rows are suppressed. - - If axis is 1 or -1, only columns are suppressed. - - Parameters - ---------- - axis : int, optional - Axis along which to perform the operation. Default is None. - - Returns - ------- - compressed_array : ndarray - The compressed array. - - Examples - -------- - >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], - ... [1, 0, 0], - ... 
[0, 0, 0]]) - >>> x - masked_array(data = - [[-- 1 2] - [-- 4 5] - [6 7 8]], - mask = - [[ True False False] - [ True False False] - [False False False]], - fill_value = 999999) - - >>> np.ma.extras.compress_rowcols(x) - array([[7, 8]]) - >>> np.ma.extras.compress_rowcols(x, 0) - array([[6, 7, 8]]) - >>> np.ma.extras.compress_rowcols(x, 1) - array([[1, 2], - [4, 5], - [7, 8]]) - - """ - x = asarray(x) - if x.ndim != 2: - raise NotImplementedError("compress2d works for 2D arrays only.") - m = getmask(x) - # Nothing is masked: return x - if m is nomask or not m.any(): - return x._data - # All is masked: return empty - if m.all(): - return nxarray([]) - # Builds a list of rows/columns indices - (idxr, idxc) = (list(range(len(x))), list(range(x.shape[1]))) - masked = m.nonzero() - if not axis: - for i in np.unique(masked[0]): - idxr.remove(i) - if axis in [None, 1, -1]: - for j in np.unique(masked[1]): - idxc.remove(j) - return x._data[idxr][:, idxc] - -def compress_rows(a): - """ - Suppress whole rows of a 2-D array that contain masked values. - - This is equivalent to ``np.ma.extras.compress_rowcols(a, 0)``, see - `extras.compress_rowcols` for details. - - See Also - -------- - extras.compress_rowcols - - """ - return compress_rowcols(a, 0) - -def compress_cols(a): - """ - Suppress whole columns of a 2-D array that contain masked values. - - This is equivalent to ``np.ma.extras.compress_rowcols(a, 1)``, see - `extras.compress_rowcols` for details. - - See Also - -------- - extras.compress_rowcols - - """ - return compress_rowcols(a, 1) - -def mask_rowcols(a, axis=None): - """ - Mask rows and/or columns of a 2D array that contain masked values. - - Mask whole rows and/or columns of a 2D array that contain - masked values. The masking behavior is selected using the - `axis` parameter. - - - If `axis` is None, rows *and* columns are masked. - - If `axis` is 0, only rows are masked. - - If `axis` is 1 or -1, only columns are masked. - - Parameters - ---------- - a : array_like, MaskedArray - The array to mask. If not a MaskedArray instance (or if no array - elements are masked). The result is a MaskedArray with `mask` set - to `nomask` (False). Must be a 2D array. - axis : int, optional - Axis along which to perform the operation. If None, applies to a - flattened version of the array. - - Returns - ------- - a : MaskedArray - A modified version of the input array, masked depending on the value - of the `axis` parameter. - - Raises - ------ - NotImplementedError - If input array `a` is not 2D. - - See Also - -------- - mask_rows : Mask rows of a 2D array that contain masked values. - mask_cols : Mask cols of a 2D array that contain masked values. - masked_where : Mask where a condition is met. - - Notes - ----- - The input array's mask is modified by this function. 
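Given that in-place behavior, working on a copy preserves the original mask; a minimal sketch with illustrative values:

    import numpy as np
    a = np.ma.masked_equal(np.eye(3, dtype=int), 1)
    b = np.ma.mask_rowcols(a.copy())               # mask whole rows/columns on a copy
    assert a.mask.sum() == 3 and b.mask.sum() == 9  # the original's 3 masked cells are untouched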
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) - >>> ma.mask_rowcols(a) - masked_array(data = - [[0 -- 0] - [-- -- --] - [0 -- 0]], - mask = - [[False True False] - [ True True True] - [False True False]], - fill_value=999999) - - """ - a = array(a, subok=False) - if a.ndim != 2: - raise NotImplementedError("mask_rowcols works for 2D arrays only.") - m = getmask(a) - # Nothing is masked: return a - if m is nomask or not m.any(): - return a - maskedval = m.nonzero() - a._mask = a._mask.copy() - if not axis: - a[np.unique(maskedval[0])] = masked - if axis in [None, 1, -1]: - a[:, np.unique(maskedval[1])] = masked - return a - -def mask_rows(a, axis=None): - """ - Mask rows of a 2D array that contain masked values. - - This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. - - See Also - -------- - mask_rowcols : Mask rows and/or columns of a 2D array. - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) - >>> ma.mask_rows(a) - masked_array(data = - [[0 0 0] - [-- -- --] - [0 0 0]], - mask = - [[False False False] - [ True True True] - [False False False]], - fill_value=999999) - - """ - return mask_rowcols(a, 0) - -def mask_cols(a, axis=None): - """ - Mask columns of a 2D array that contain masked values. - - This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. - - See Also - -------- - mask_rowcols : Mask rows and/or columns of a 2D array. - masked_where : Mask where a condition is met. - - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) - >>> ma.mask_cols(a) - masked_array(data = - [[0 -- 0] - [0 -- 0] - [0 -- 0]], - mask = - [[False True False] - [False True False] - [False True False]], - fill_value=999999) - - """ - return mask_rowcols(a, 1) - - -def dot(a, b, strict=False): - """ - Return the dot product of two arrays. - - .. note:: - Works only with 2-D arrays at the moment. - - This function is the equivalent of `numpy.dot` that takes masked values - into account, see `numpy.dot` for details. - - Parameters - ---------- - a, b : ndarray - Inputs arrays. - strict : bool, optional - Whether masked data are propagated (True) or set to 0 (False) for the - computation. Default is False. - Propagating the mask means that if a masked value appears in a row or - column, the whole row or column is considered masked. - - See Also - -------- - numpy.dot : Equivalent function for ndarrays. 
- - Examples - -------- - >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) - >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) - >>> np.ma.dot(a, b) - masked_array(data = - [[21 26] - [45 64]], - mask = - [[False False] - [False False]], - fill_value = 999999) - >>> np.ma.dot(a, b, strict=True) - masked_array(data = - [[-- --] - [-- 64]], - mask = - [[ True True] - [ True False]], - fill_value = 999999) - - """ - #!!!: Works only with 2D arrays. There should be a way to get it to run with higher dimension - if strict and (a.ndim == 2) and (b.ndim == 2): - a = mask_rows(a) - b = mask_cols(b) - # - d = np.dot(filled(a, 0), filled(b, 0)) - # - am = (~getmaskarray(a)) - bm = (~getmaskarray(b)) - m = ~np.dot(am, bm) - return masked_array(d, mask=m) - -#####-------------------------------------------------------------------------- -#---- --- arraysetops --- -#####-------------------------------------------------------------------------- - -def ediff1d(arr, to_end=None, to_begin=None): - """ - Compute the differences between consecutive elements of an array. - - This function is the equivalent of `numpy.ediff1d` that takes masked - values into account, see `numpy.ediff1d` for details. - - See Also - -------- - numpy.ediff1d : Equivalent function for ndarrays. - - """ - arr = ma.asanyarray(arr).flat - ed = arr[1:] - arr[:-1] - arrays = [ed] - # - if to_begin is not None: - arrays.insert(0, to_begin) - if to_end is not None: - arrays.append(to_end) - # - if len(arrays) != 1: - # We'll save ourselves a copy of a potentially large array in the common - # case where neither to_begin or to_end was given. - ed = hstack(arrays) - # - return ed - - -def unique(ar1, return_index=False, return_inverse=False): - """ - Finds the unique elements of an array. - - Masked values are considered the same element (masked). The output array - is always a masked array. See `numpy.unique` for more details. - - See Also - -------- - numpy.unique : Equivalent function for ndarrays. - - """ - output = np.unique(ar1, - return_index=return_index, - return_inverse=return_inverse) - if isinstance(output, tuple): - output = list(output) - output[0] = output[0].view(MaskedArray) - output = tuple(output) - else: - output = output.view(MaskedArray) - return output - - -def intersect1d(ar1, ar2, assume_unique=False): - """ - Returns the unique elements common to both arrays. - - Masked values are considered equal one to the other. - The output is always a masked array. - - See `numpy.intersect1d` for more details. - - See Also - -------- - numpy.intersect1d : Equivalent function for ndarrays. - - Examples - -------- - >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) - >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - >>> intersect1d(x, y) - masked_array(data = [1 3 --], - mask = [False False True], - fill_value = 999999) - - """ - if assume_unique: - aux = ma.concatenate((ar1, ar2)) - else: - # Might be faster than unique( intersect1d( ar1, ar2 ) )? - aux = ma.concatenate((unique(ar1), unique(ar2))) - aux.sort() - return aux[:-1][aux[1:] == aux[:-1]] - - -def setxor1d(ar1, ar2, assume_unique=False): - """ - Set exclusive-or of 1-D arrays with unique elements. - - The output is always a masked array. See `numpy.setxor1d` for more details. - - See Also - -------- - numpy.setxor1d : Equivalent function for ndarrays. 
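A quick sketch of the `unique` behavior described above (illustrative values; the exact repr may differ):

    import numpy as np
    x = np.ma.array([1, 1, 2, 2], mask=[0, 0, 1, 1])
    print(np.ma.unique(x))   # roughly [1 --]: the masked entries collapse into one element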
- - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - - aux = ma.concatenate((ar1, ar2)) - if aux.size == 0: - return aux - aux.sort() - auxf = aux.filled() -# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 - flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) -# flag2 = ediff1d( flag ) == 0 - flag2 = (flag[1:] == flag[:-1]) - return aux[flag2] - -def in1d(ar1, ar2, assume_unique=False, invert=False): - """ - Test whether each element of an array is also present in a second - array. - - The output is always a masked array. See `numpy.in1d` for more details. - - See Also - -------- - numpy.in1d : Equivalent function for ndarrays. - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - if not assume_unique: - ar1, rev_idx = unique(ar1, return_inverse=True) - ar2 = unique(ar2) - - ar = ma.concatenate((ar1, ar2)) - # We need this to be a stable sort, so always use 'mergesort' - # here. The values from the first array should always come before - # the values from the second array. - order = ar.argsort(kind='mergesort') - sar = ar[order] - if invert: - bool_ar = (sar[1:] != sar[:-1]) - else: - bool_ar = (sar[1:] == sar[:-1]) - flag = ma.concatenate((bool_ar, [invert])) - indx = order.argsort(kind='mergesort')[:len(ar1)] - - if assume_unique: - return flag[indx] - else: - return flag[indx][rev_idx] - - -def union1d(ar1, ar2): - """ - Union of two arrays. - - The output is always a masked array. See `numpy.union1d` for more details. - - See also - -------- - numpy.union1d : Equivalent function for ndarrays. - - """ - return unique(ma.concatenate((ar1, ar2))) - - -def setdiff1d(ar1, ar2, assume_unique=False): - """ - Set difference of 1D arrays with unique elements. - - The output is always a masked array. See `numpy.setdiff1d` for more - details. - - See Also - -------- - numpy.setdiff1d : Equivalent function for ndarrays. - - Examples - -------- - >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) - >>> np.ma.extras.setdiff1d(x, [1, 2]) - masked_array(data = [3 --], - mask = [False True], - fill_value = 999999) - - """ - if not assume_unique: - ar1 = unique(ar1) - ar2 = unique(ar2) - aux = in1d(ar1, ar2, assume_unique=True) - if aux.size == 0: - return aux - else: - return ma.asarray(ar1)[aux == 0] - - -#####-------------------------------------------------------------------------- -#---- --- Covariance --- -#####-------------------------------------------------------------------------- - - - - -def _covhelper(x, y=None, rowvar=True, allow_masked=True): - """ - Private function for the computation of covariance and correlation - coefficients. 
- - """ - x = ma.array(x, ndmin=2, copy=True, dtype=float) - xmask = ma.getmaskarray(x) - # Quick exit if we can't process masked data - if not allow_masked and xmask.any(): - raise ValueError("Cannot process masked data...") - # - if x.shape[0] == 1: - rowvar = True - # Make sure that rowvar is either 0 or 1 - rowvar = int(bool(rowvar)) - axis = 1 - rowvar - if rowvar: - tup = (slice(None), None) - else: - tup = (None, slice(None)) - # - if y is None: - xnotmask = np.logical_not(xmask).astype(int) - else: - y = array(y, copy=False, ndmin=2, dtype=float) - ymask = ma.getmaskarray(y) - if not allow_masked and ymask.any(): - raise ValueError("Cannot process masked data...") - if xmask.any() or ymask.any(): - if y.shape == x.shape: - # Define some common mask - common_mask = np.logical_or(xmask, ymask) - if common_mask is not nomask: - x.unshare_mask() - y.unshare_mask() - xmask = x._mask = y._mask = ymask = common_mask - x = ma.concatenate((x, y), axis) - xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) - x -= x.mean(axis=rowvar)[tup] - return (x, xnotmask, rowvar) - - -def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): - """ - Estimate the covariance matrix. - - Except for the handling of missing data this function does the same as - `numpy.cov`. For more details and examples, see `numpy.cov`. - - By default, masked values are recognized as such. If `x` and `y` have the - same shape, a common mask is allocated: if ``x[i,j]`` is masked, then - ``y[i,j]`` will also be masked. - Setting `allow_masked` to False will raise an exception if values are - missing in either of the input arrays. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `x` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - form as `x`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : bool, optional - Default normalization (False) is by ``(N-1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is True, - then normalization is by ``N``. This keyword can be overridden by - the keyword ``ddof`` in numpy versions >= 1.5. - allow_masked : bool, optional - If True, masked values are propagated pair-wise: if a value is masked - in `x`, the corresponding value is masked in `y`. - If False, raises a `ValueError` exception when some values are missing. - ddof : {None, int}, optional - .. versionadded:: 1.5 - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - - Raises - ------ - ValueError - Raised if some values are missing and `allow_masked` is False. - - See Also - -------- - numpy.cov - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError("ddof must be an integer") - # Set up ddof - if ddof is None: - if bias: - ddof = 0 - else: - ddof = 1 - - (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) - if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. 
- ddof - result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() - else: - fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof - result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() - return result - - -def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): - """ - Return correlation coefficients of the input array. - - Except for the handling of missing data this function does the same as - `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. - - Parameters - ---------- - x : array_like - A 1-D or 2-D array containing multiple variables and observations. - Each row of `x` represents a variable, and each column a single - observation of all those variables. Also see `rowvar` below. - y : array_like, optional - An additional set of variables and observations. `y` has the same - shape as `x`. - rowvar : bool, optional - If `rowvar` is True (default), then each row represents a - variable, with observations in the columns. Otherwise, the relationship - is transposed: each column represents a variable, while the rows - contain observations. - bias : bool, optional - Default normalization (False) is by ``(N-1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is 1, - then normalization is by ``N``. This keyword can be overridden by - the keyword ``ddof`` in numpy versions >= 1.5. - allow_masked : bool, optional - If True, masked values are propagated pair-wise: if a value is masked - in `x`, the corresponding value is masked in `y`. - If False, raises an exception. - ddof : {None, int}, optional - .. versionadded:: 1.5 - If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is - the number of observations; this overrides the value implied by - ``bias``. The default value is ``None``. - - See Also - -------- - numpy.corrcoef : Equivalent function in top-level NumPy module. - cov : Estimate the covariance matrix. - - """ - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError("ddof must be an integer") - # Set up ddof - if ddof is None: - if bias: - ddof = 0 - else: - ddof = 1 - - # Get the data - (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) - # Compute the covariance matrix - if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof - c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() - else: - fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof - c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() - # Check whether we have a scalar - try: - diag = ma.diagonal(c) - except ValueError: - return 1 - # - if xnotmask.all(): - _denom = ma.sqrt(ma.multiply.outer(diag, diag)) - else: - _denom = diagflat(diag) - n = x.shape[1 - rowvar] - if rowvar: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols(vstack((x[i], x[j]))).var(axis=1, ddof=ddof) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - else: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols( - vstack((x[:, i], x[:, j]))).var(axis=1, ddof=ddof) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - return c / _denom - -#####-------------------------------------------------------------------------- -#---- --- Concatenation helpers --- -#####-------------------------------------------------------------------------- - -class MAxisConcatenator(AxisConcatenator): - """ - Translate slice objects to concatenation along an axis. - - For documentation on usage, see `mr_class`. 
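Stepping back to `cov` above, a sketch of the pairwise normalization on masked data (made-up values):

    import numpy as np
    x = np.ma.array([[1., 2., 3., 4.],
                     [1., 3., 2., 5.]])
    x[0, 2] = np.ma.masked
    print(np.ma.cov(x))   # pairs involving row 0 are normalized over 3 observations only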
- - See Also - -------- - mr_class - - """ - - def __init__(self, axis=0): - AxisConcatenator.__init__(self, axis, matrix=False) - - def __getitem__(self, key): - if isinstance(key, str): - raise MAError("Unavailable for masked array.") - if not isinstance(key, tuple): - key = (key,) - objs = [] - scalars = [] - final_dtypedescr = None - for k in range(len(key)): - scalar = False - if isinstance(key[k], slice): - step = key[k].step - start = key[k].start - stop = key[k].stop - if start is None: - start = 0 - if step is None: - step = 1 - if isinstance(step, complex): - size = int(abs(step)) - newobj = np.linspace(start, stop, num=size) - else: - newobj = np.arange(start, stop, step) - elif isinstance(key[k], str): - if (key[k] in 'rc'): - self.matrix = True - self.col = (key[k] == 'c') - continue - try: - self.axis = int(key[k]) - continue - except (ValueError, TypeError): - raise ValueError("Unknown special directive") - elif type(key[k]) in np.ScalarType: - newobj = asarray([key[k]]) - scalars.append(k) - scalar = True - else: - newobj = key[k] - objs.append(newobj) - if isinstance(newobj, ndarray) and not scalar: - if final_dtypedescr is None: - final_dtypedescr = newobj.dtype - elif newobj.dtype > final_dtypedescr: - final_dtypedescr = newobj.dtype - if final_dtypedescr is not None: - for k in scalars: - objs[k] = objs[k].astype(final_dtypedescr) - res = concatenate(tuple(objs), axis=self.axis) - return self._retval(res) - -class mr_class(MAxisConcatenator): - """ - Translate slice objects to concatenation along the first axis. - - This is the masked array version of `lib.index_tricks.RClass`. - - See Also - -------- - lib.index_tricks.RClass - - Examples - -------- - >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) - - """ - def __init__(self): - MAxisConcatenator.__init__(self, 0) - -mr_ = mr_class() - -#####-------------------------------------------------------------------------- -#---- Find unmasked data --- -#####-------------------------------------------------------------------------- - -def flatnotmasked_edges(a): - """ - Find the indices of the first and last unmasked values. - - Expects a 1-D `MaskedArray`, returns None if all values are masked. - - Parameters - ---------- - a : array_like - Input 1-D `MaskedArray` - - Returns - ------- - edges : ndarray or None - The indices of first and last non-masked value in the array. - Returns None if all values are masked. - - See Also - -------- - flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 1-D arrays. - - Examples - -------- - >>> a = np.ma.arange(10) - >>> flatnotmasked_edges(a) - array([0, 9]) - - >>> mask = (a < 3) | (a > 8) | (a == 5) - >>> a[mask] = np.ma.masked - >>> np.array(a[~a.mask]) - array([3, 4, 6, 7, 8]) - - >>> flatnotmasked_edges(a) - array([3, 8]) - - >>> a[:] = np.ma.masked - >>> print flatnotmasked_edges(a) - None - - """ - m = getmask(a) - if m is nomask or not np.any(m): - return np.array([0, a.size - 1]) - unmasked = np.flatnonzero(~m) - if len(unmasked) > 0: - return unmasked[[0, -1]] - else: - return None - - -def notmasked_edges(a, axis=None): - """ - Find the indices of the first and last unmasked values along an axis. - - If all values are masked, return None. Otherwise, return a list - of two tuples, corresponding to the indices of the first and last - unmasked values respectively. - - Parameters - ---------- - a : array_like - The input array.
- axis : int, optional - Axis along which to perform the operation. - If None (default), applies to a flattened version of the array. - - Returns - ------- - edges : ndarray or list - An array of start and end indexes if there are any masked data in - the array. If there are no masked data in the array, `edges` is a - list of the first and last index. - - See Also - -------- - flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous, - clump_masked, clump_unmasked - - Examples - -------- - >>> a = np.arange(9).reshape((3, 3)) - >>> m = np.zeros_like(a) - >>> m[1:, 1:] = 1 - - >>> am = np.ma.array(a, mask=m) - >>> np.array(am[~am.mask]) - array([0, 1, 2, 3, 6]) - - >>> np.ma.extras.notmasked_edges(am) - array([0, 6]) - - """ - a = asarray(a) - if axis is None or a.ndim == 1: - return flatnotmasked_edges(a) - m = getmaskarray(a) - idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) - return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), - tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] - - -def flatnotmasked_contiguous(a): - """ - Find contiguous unmasked data in a masked array. - - Parameters - ---------- - a : ndarray - The input array. - - Returns - ------- - slice_list : list - A sorted sequence of slices (start index, end index). - - See Also - -------- - flatnotmasked_edges, notmasked_contiguous, notmasked_edges, - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 2-D arrays at most. - - Examples - -------- - >>> a = np.ma.arange(10) - >>> np.ma.extras.flatnotmasked_contiguous(a) - slice(0, 10, None) - - >>> mask = (a < 3) | (a > 8) | (a == 5) - >>> a[mask] = np.ma.masked - >>> np.array(a[~a.mask]) - array([3, 4, 6, 7, 8]) - - >>> np.ma.extras.flatnotmasked_contiguous(a) - [slice(3, 5, None), slice(6, 9, None)] - >>> a[:] = np.ma.masked - >>> print np.ma.extras.flatnotmasked_contiguous(a) - None - - """ - m = getmask(a) - if m is nomask: - return slice(0, a.size, None) - i = 0 - result = [] - for (k, g) in itertools.groupby(m.ravel()): - n = len(list(g)) - if not k: - result.append(slice(i, i + n)) - i += n - return result or None - -def notmasked_contiguous(a, axis=None): - """ - Find contiguous unmasked data in a masked array along the given axis. - - Parameters - ---------- - a : array_like - The input array. - axis : int, optional - Axis along which to perform the operation. - If None (default), applies to a flattened version of the array. - - Returns - ------- - endpoints : list - A list of slices (start and end indexes) of unmasked indexes - in the array. - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, - clump_masked, clump_unmasked - - Notes - ----- - Only accepts 2-D arrays at most.
- - Examples - -------- - >>> a = np.arange(9).reshape((3, 3)) - >>> mask = np.zeros_like(a) - >>> mask[1:, 1:] = 1 - - >>> ma = np.ma.array(a, mask=mask) - >>> np.array(ma[~ma.mask]) - array([0, 1, 2, 3, 6]) - - >>> np.ma.extras.notmasked_contiguous(ma) - [slice(0, 4, None), slice(6, 7, None)] - - """ - a = asarray(a) - nd = a.ndim - if nd > 2: - raise NotImplementedError("Currently limited to atmost 2D array.") - if axis is None or nd == 1: - return flatnotmasked_contiguous(a) - # - result = [] - # - other = (axis + 1) % 2 - idx = [0, 0] - idx[axis] = slice(None, None) - # - for i in range(a.shape[other]): - idx[other] = i - result.append(flatnotmasked_contiguous(a[idx]) or None) - return result - - -def _ezclump(mask): - """ - Finds the clumps (groups of data with the same values) for a 1D bool array. - - Returns a series of slices. - """ - #def clump_masked(a): - if mask.ndim > 1: - mask = mask.ravel() - idx = (mask[1:] ^ mask[:-1]).nonzero() - idx = idx[0] + 1 - slices = [slice(left, right) - for (left, right) in zip(itertools.chain([0], idx), - itertools.chain(idx, [len(mask)]),)] - return slices - - -def clump_unmasked(a): - """ - Return list of slices corresponding to the unmasked clumps of a 1-D array. - (A "clump" is defined as a contiguous region of the array). - - Parameters - ---------- - a : ndarray - A one-dimensional masked array. - - Returns - ------- - slices : list of slice - The list of slices, one for each continuous region of unmasked - elements in `a`. - - Notes - ----- - .. versionadded:: 1.4.0 - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, - notmasked_contiguous, clump_masked - - Examples - -------- - >>> a = np.ma.masked_array(np.arange(10)) - >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked - >>> np.ma.extras.clump_unmasked(a) - [slice(3, 6, None), slice(7, 8, None)] - - """ - mask = getattr(a, '_mask', nomask) - if mask is nomask: - return [slice(0, a.size)] - slices = _ezclump(mask) - if a[0] is masked: - result = slices[1::2] - else: - result = slices[::2] - return result - - -def clump_masked(a): - """ - Returns a list of slices corresponding to the masked clumps of a 1-D array. - (A "clump" is defined as a contiguous region of the array). - - Parameters - ---------- - a : ndarray - A one-dimensional masked array. - - Returns - ------- - slices : list of slice - The list of slices, one for each continuous region of masked elements - in `a`. - - Notes - ----- - .. versionadded:: 1.4.0 - - See Also - -------- - flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges, - notmasked_contiguous, clump_unmasked - - Examples - -------- - >>> a = np.ma.masked_array(np.arange(10)) - >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked - >>> np.ma.extras.clump_masked(a) - [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)] - - """ - mask = ma.getmask(a) - if mask is nomask: - return [] - slices = _ezclump(mask) - if len(slices): - if a[0] is masked: - slices = slices[::2] - else: - slices = slices[1::2] - return slices - - - -#####-------------------------------------------------------------------------- -#---- Polynomial fit --- -#####-------------------------------------------------------------------------- - -def vander(x, n=None): - """ - Masked values in the input array result in rows of zeros. 
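For example (a minimal sketch of the `vander` behavior just stated, with illustrative values):

    import numpy as np
    x = np.ma.array([2, 3, 4], mask=[0, 1, 0])
    print(np.ma.vander(x, 3))
    # [[ 4  2  1]
    #  [ 0  0  0]    row for the masked entry is zeroed out
    #  [16  4  1]]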
- """ - _vander = np.vander(x, n) - m = getmask(x) - if m is not nomask: - _vander[m] = 0 - return _vander -vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) - - -def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - """ - Any masked values in x is propagated in y, and vice-versa. - """ - x = asarray(x) - y = asarray(y) - - m = getmask(x) - if y.ndim == 1: - m = mask_or(m, getmask(y)) - elif y.ndim == 2: - my = getmask(mask_rows(y)) - if my is not nomask: - m = mask_or(m, my[:, 0]) - else: - raise TypeError("Expected a 1D or 2D array for y!") - - if w is not None: - w = asarray(w) - if w.ndim != 1: - raise TypeError("expected a 1-d array for weights") - if w.shape[0] != y.shape[0] : - raise TypeError("expected w and y to have the same length") - m = mask_or(m, getmask(w)) - - if m is not nomask: - if w is not None: - w = ~m*w - else: - w = ~m - - return np.polyfit(x, y, deg, rcond, full, w, cov) - -polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) - -################################################################################ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/mrecords.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/mrecords.py deleted file mode 100644 index e66596509f63e..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/mrecords.py +++ /dev/null @@ -1,734 +0,0 @@ -""":mod:`numpy.ma..mrecords` - -Defines the equivalent of :class:`numpy.recarrays` for masked arrays, -where fields can be accessed as attributes. -Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes -and the masking of individual fields. - -:author: Pierre Gerard-Marchant - -""" -from __future__ import division, absolute_import, print_function - -#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask', -#!!!: or whatever restricted keywords. -#!!!: An idea would be to no bother in the first place, and then rename the -#!!!: invalid fields with a trailing underscore... -#!!!: Maybe we could just overload the parser function ? - - -__author__ = "Pierre GF Gerard-Marchant" - -import sys -import warnings - -import numpy as np -import numpy.core.numerictypes as ntypes -from numpy.compat import basestring -from numpy import ( - bool_, dtype, ndarray, recarray, array as narray - ) -from numpy.core.records import ( - fromarrays as recfromarrays, fromrecords as recfromrecords - ) - -_byteorderconv = np.core.records._byteorderconv -_typestr = ntypes._typestr - -import numpy.ma as ma -from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array, \ - getdata, getmaskarray, filled - -_check_fill_value = ma.core._check_fill_value - - -__all__ = ['MaskedRecords', 'mrecarray', - 'fromarrays', 'fromrecords', 'fromtextfile', 'addfield', - ] - -reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] - -def _getformats(data): - "Returns the formats of each array of arraylist as a comma-separated string." - if hasattr(data, 'dtype'): - return ",".join([desc[1] for desc in data.dtype.descr]) - - formats = '' - for obj in data: - obj = np.asarray(obj) - formats += _typestr[obj.dtype.type] - if issubclass(obj.dtype.type, ntypes.flexible): - formats += repr(obj.itemsize) - formats += ',' - return formats[:-1] - -def _checknames(descr, names=None): - """Checks that the field names of the descriptor ``descr`` are not some -reserved keywords. If this is the case, a default 'f%i' is substituted. -If the argument `names` is not None, updates the field names to valid names. 
- """ - ndescr = len(descr) - default_names = ['f%i' % i for i in range(ndescr)] - if names is None: - new_names = default_names - else: - if isinstance(names, (tuple, list)): - new_names = names - elif isinstance(names, str): - new_names = names.split(',') - else: - raise NameError("illegal input names %s" % repr(names)) - nnames = len(new_names) - if nnames < ndescr: - new_names += default_names[nnames:] - ndescr = [] - for (n, d, t) in zip(new_names, default_names, descr.descr): - if n in reserved_fields: - if t[0] in reserved_fields: - ndescr.append((d, t[1])) - else: - ndescr.append(t) - else: - ndescr.append((n, t[1])) - return np.dtype(ndescr) - - -def _get_fieldmask(self): - mdescr = [(n, '|b1') for n in self.dtype.names] - fdmask = np.empty(self.shape, dtype=mdescr) - fdmask.flat = tuple([False] * len(mdescr)) - return fdmask - - -class MaskedRecords(MaskedArray, object): - """ - -*IVariables*: - _data : {recarray} - Underlying data, as a record array. - _mask : {boolean array} - Mask of the records. A record is masked when all its fields are masked. - _fieldmask : {boolean recarray} - Record array of booleans, setting the mask of each individual field of each record. - _fill_value : {record} - Filling values for each field. - """ - #............................................ - def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, - formats=None, names=None, titles=None, - byteorder=None, aligned=False, - mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, - copy=False, - **options): - # - self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, - strides=strides, formats=formats, names=names, - titles=titles, byteorder=byteorder, - aligned=aligned,) - # - mdtype = ma.make_mask_descr(self.dtype) - if mask is nomask or not np.size(mask): - if not keep_mask: - self._mask = tuple([False] * len(mdtype)) - else: - mask = np.array(mask, copy=copy) - if mask.shape != self.shape: - (nd, nm) = (self.size, mask.size) - if nm == 1: - mask = np.resize(mask, self.shape) - elif nm == nd: - mask = np.reshape(mask, self.shape) - else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." - raise MAError(msg % (nd, nm)) - copy = True - if not keep_mask: - self.__setmask__(mask) - self._sharedmask = True - else: - if mask.dtype == mdtype: - _mask = mask - else: - _mask = np.array([tuple([m] * len(mdtype)) for m in mask], - dtype=mdtype) - self._mask = _mask - return self - #...................................................... - def __array_finalize__(self, obj): - # Make sure we have a _fieldmask by default .. - _mask = getattr(obj, '_mask', None) - if _mask is None: - objmask = getattr(obj, '_mask', nomask) - _dtype = ndarray.__getattribute__(self, 'dtype') - if objmask is nomask: - _mask = ma.make_mask_none(self.shape, dtype=_dtype) - else: - mdescr = ma.make_mask_descr(_dtype) - _mask = narray([tuple([m] * len(mdescr)) for m in objmask], - dtype=mdescr).view(recarray) - # Update some of the attributes - _dict = self.__dict__ - _dict.update(_mask=_mask) - self._update_from(obj) - if _dict['_baseclass'] == ndarray: - _dict['_baseclass'] = recarray - return - - - def _getdata(self): - "Returns the data as a recarray." 
- return ndarray.view(self, recarray) - _data = property(fget=_getdata) - - def _getfieldmask(self): - "Alias to mask" - return self._mask - _fieldmask = property(fget=_getfieldmask) - - def __len__(self): - "Returns the length" - # We have more than one record - if self.ndim: - return len(self._data) - # We have only one record: return the nb of fields - return len(self.dtype) - - def __getattribute__(self, attr): - try: - return object.__getattribute__(self, attr) - except AttributeError: # attr must be a fieldname - pass - fielddict = ndarray.__getattribute__(self, 'dtype').fields - try: - res = fielddict[attr][:2] - except (TypeError, KeyError): - raise AttributeError("record array has no attribute %s" % attr) - # So far, so good... - _localdict = ndarray.__getattribute__(self, '__dict__') - _data = ndarray.view(self, _localdict['_baseclass']) - obj = _data.getfield(*res) - if obj.dtype.fields: - raise NotImplementedError("MaskedRecords is currently limited to"\ - "simple records...") - # Get some special attributes - # Reset the object's mask - hasmasked = False - _mask = _localdict.get('_mask', None) - if _mask is not None: - try: - _mask = _mask[attr] - except IndexError: - # Couldn't find a mask: use the default (nomask) - pass - hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any() - if (obj.shape or hasmasked): - obj = obj.view(MaskedArray) - obj._baseclass = ndarray - obj._isfield = True - obj._mask = _mask - # Reset the field values - _fill_value = _localdict.get('_fill_value', None) - if _fill_value is not None: - try: - obj._fill_value = _fill_value[attr] - except ValueError: - obj._fill_value = None - else: - obj = obj.item() - return obj - - - def __setattr__(self, attr, val): - "Sets the attribute attr to the value val." - # Should we call __setmask__ first ? - if attr in ['mask', 'fieldmask']: - self.__setmask__(val) - return - # Create a shortcut (so that we don't have to call getattr all the time) - _localdict = object.__getattribute__(self, '__dict__') - # Check whether we're creating a new field - newattr = attr not in _localdict - try: - # Is attr a generic attribute ? - ret = object.__setattr__(self, attr, val) - except: - # Not a generic attribute: exit if it's not a valid field - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} - optinfo = ndarray.__getattribute__(self, '_optinfo') or {} - if not (attr in fielddict or attr in optinfo): - exctype, value = sys.exc_info()[:2] - raise exctype(value) - else: - # Get the list of names ...... - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} - # Check the attribute - if attr not in fielddict: - return ret - if newattr: # We just added this one - try: # or this setattr worked on an internal - # attribute. - object.__delattr__(self, attr) - except: - return ret - # Let's try to set the field - try: - res = fielddict[attr][:2] - except (TypeError, KeyError): - raise AttributeError("record array has no attribute %s" % attr) - # - if val is masked: - _fill_value = _localdict['_fill_value'] - if _fill_value is not None: - dval = _localdict['_fill_value'][attr] - else: - dval = val - mval = True - else: - dval = filled(val) - mval = getmaskarray(val) - obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) - _localdict['_mask'].__setitem__(attr, mval) - return obj - - - def __getitem__(self, indx): - """Returns all the fields sharing the same fieldname base. 
-The fieldname base is either `_data` or `_mask`.""" - _localdict = self.__dict__ - _mask = ndarray.__getattribute__(self, '_mask') - _data = ndarray.view(self, _localdict['_baseclass']) - # We want a field ........ - if isinstance(indx, basestring): - #!!!: Make sure _sharedmask is True to propagate back to _fieldmask - #!!!: Don't use _set_mask, there are some copies being made... - #!!!: ...that break propagation - #!!!: Don't force the mask to nomask, that wrecks easy masking - obj = _data[indx].view(MaskedArray) - obj._mask = _mask[indx] - obj._sharedmask = True - fval = _localdict['_fill_value'] - if fval is not None: - obj._fill_value = fval[indx] - # Force to masked if the mask is True - if not obj.ndim and obj._mask: - return masked - return obj - # We want some elements .. - # First, the data ........ - obj = np.array(_data[indx], copy=False).view(mrecarray) - obj._mask = np.array(_mask[indx], copy=False).view(recarray) - return obj - #.... - def __setitem__(self, indx, value): - "Sets the given record to value." - MaskedArray.__setitem__(self, indx, value) - if isinstance(indx, basestring): - self._mask[indx] = ma.getmaskarray(value) - - - def __str__(self): - "Calculates the string representation." - if self.size > 1: - mstr = ["(%s)" % ",".join([str(i) for i in s]) - for s in zip(*[getattr(self, f) for f in self.dtype.names])] - return "[%s]" % ", ".join(mstr) - else: - mstr = ["%s" % ",".join([str(i) for i in s]) - for s in zip([getattr(self, f) for f in self.dtype.names])] - return "(%s)" % ", ".join(mstr) - # - def __repr__(self): - "Calculates the repr representation." - _names = self.dtype.names - fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) - reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] - reprstr.insert(0, 'masked_records(') - reprstr.extend([fmt % (' fill_value', self.fill_value), - ' )']) - return str("\n".join(reprstr)) -# #...................................................... - def view(self, dtype=None, type=None): - """Returns a view of the mrecarray.""" - # OK, basic copy-paste from MaskedArray.view... - if dtype is None: - if type is None: - output = ndarray.view(self) - else: - output = ndarray.view(self, type) - # Here again... 
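Between them, `__getattribute__` and `__getitem__` above expose every field of a MaskedRecords both as an attribute and by key, each returned as a MaskedArray sharing that field's mask. A short usage sketch (assuming `numpy.ma.mrecords` as shipped with this NumPy; the field names are made up); the `view` method continues below.

    import numpy as np
    from numpy.ma import mrecords

    rec = mrecords.fromarrays(
        [np.ma.array([1, 2, 3], mask=[0, 1, 0]),
         np.ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1])],
        names='ival,fval')

    print(rec.ival)     # [1 -- 3]: attribute access, field mask applied
    print(rec['fval'])  # [1.1 2.2 --]: key access returns the same view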
- elif type is None: - try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) - dtype = None - else: - output = ndarray.view(self, dtype) - # OK, there's the change - except TypeError: - dtype = np.dtype(dtype) - # we need to revert to MaskedArray, but keeping the possibility - # ...of subclasses (eg, TimeSeriesRecords), so we'll force a type - # ...set to the first parent - if dtype.fields is None: - basetype = self.__class__.__bases__[0] - output = self.__array__().view(dtype, basetype) - output._update_from(self) - else: - output = ndarray.view(self, dtype) - output._fill_value = None - else: - output = ndarray.view(self, dtype, type) - # Update the mask, just like in MaskedArray.view - if (getattr(output, '_mask', nomask) is not nomask): - mdtype = ma.make_mask_descr(output.dtype) - output._mask = self._mask.view(mdtype, ndarray) - output._mask.shape = output.shape - return output - - def harden_mask(self): - "Forces the mask to hard" - self._hardmask = True - def soften_mask(self): - "Forces the mask to soft" - self._hardmask = False - - def copy(self): - """Returns a copy of the masked record.""" - _localdict = self.__dict__ - copied = self._data.copy().view(type(self)) - copied._mask = self._mask.copy() - return copied - - def tolist(self, fill_value=None): - """Copy the data portion of the array to a hierarchical python - list and returns that list. - - Data items are converted to the nearest compatible Python - type. Masked values are converted to fill_value. If - fill_value is None, the corresponding entries in the output - list will be ``None``. - - """ - if fill_value is not None: - return self.filled(fill_value).tolist() - result = narray(self.filled().tolist(), dtype=object) - mask = narray(self._mask.tolist()) - result[mask] = None - return result.tolist() - #-------------------------------------------- - # Pickling - def __getstate__(self): - """Return the internal state of the masked array, for pickling purposes. - - """ - state = (1, - self.shape, - self.dtype, - self.flags.fnc, - self._data.tobytes(), - self._mask.tobytes(), - self._fill_value, - ) - return state - # - def __setstate__(self, state): - """Restore the internal state of the masked array, for pickling purposes. - ``state`` is typically the output of the ``__getstate__`` output, and is a - 5-tuple: - - - class name - - a tuple giving the shape of the data - - a typecode for the data - - a binary string for the data - - a binary string for the mask. - - """ - (ver, shp, typ, isf, raw, msk, flv) = state - ndarray.__setstate__(self, (shp, typ, isf, raw)) - mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr]) - self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) - self.fill_value = flv - # - def __reduce__(self): - """Return a 3-tuple for pickling a MaskedArray. - - """ - return (_mrreconstruct, - (self.__class__, self._baseclass, (0,), 'b',), - self.__getstate__()) - -def _mrreconstruct(subtype, baseclass, baseshape, basetype,): - """Internal function that builds a new MaskedArray from the - information stored in a pickle. 
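The pickling hooks above (`__getstate__`, `__setstate__`, `__reduce__`) serialize the data buffer, the field mask, and the fill value side by side, so a round-trip preserves all three. The same contract holds for plain masked arrays, which makes it easy to check with a minimal sketch; `_mrreconstruct` continues below.

    import pickle

    import numpy as np

    a = np.ma.array([1, 2, 3], mask=[0, 1, 0], fill_value=-99)
    b = pickle.loads(pickle.dumps(a))

    assert (b.mask == a.mask).all()       # the mask survives the round-trip
    assert b.fill_value == a.fill_value   # so does the fill value
    print(b)  # [1 -- 3]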
- - """ - _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) -# _data._mask = ndarray.__new__(ndarray, baseshape, 'b1') -# return _data - _mask = ndarray.__new__(ndarray, baseshape, 'b1') - return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) - - -mrecarray = MaskedRecords - -#####--------------------------------------------------------------------------- -#---- --- Constructors --- -#####--------------------------------------------------------------------------- - -def fromarrays(arraylist, dtype=None, shape=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None, - fill_value=None): - """Creates a mrecarray from a (flat) list of masked arrays. - - Parameters - ---------- - arraylist : sequence - A list of (masked) arrays. Each element of the sequence is first converted - to a masked array if needed. If a 2D array is passed as argument, it is - processed line by line - dtype : {None, dtype}, optional - Data type descriptor. - shape : {None, integer}, optional - Number of records. If None, shape is defined from the shape of the - first array in the list. - formats : {None, sequence}, optional - Sequence of formats for each individual field. If None, the formats will - be autodetected by inspecting the fields and selecting the highest dtype - possible. - names : {None, sequence}, optional - Sequence of the names of each field. - fill_value : {None, sequence}, optional - Sequence of data to be used as filling values. - - Notes - ----- - Lists of tuples should be preferred over lists of lists for faster processing. - """ - datalist = [getdata(x) for x in arraylist] - masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] - _array = recfromarrays(datalist, - dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, aligned=aligned, - byteorder=byteorder).view(mrecarray) - _array._mask.flat = list(zip(*masklist)) - if fill_value is not None: - _array.fill_value = fill_value - return _array - - -#.............................................................................. -def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None, - fill_value=None, mask=nomask): - """Creates a MaskedRecords from a list of records. - - Parameters - ---------- - reclist : sequence - A list of records. Each element of the sequence is first converted - to a masked array if needed. If a 2D array is passed as argument, it is - processed line by line - dtype : {None, dtype}, optional - Data type descriptor. - shape : {None,int}, optional - Number of records. If None, ``shape`` is defined from the shape of the - first array in the list. - formats : {None, sequence}, optional - Sequence of formats for each individual field. If None, the formats will - be autodetected by inspecting the fields and selecting the highest dtype - possible. - names : {None, sequence}, optional - Sequence of the names of each field. - fill_value : {None, sequence}, optional - Sequence of data to be used as filling values. - mask : {nomask, sequence}, optional. - External mask to apply on the data. - - Notes - ----- - Lists of tuples should be preferred over lists of lists for faster processing. - """ - # Grab the initial _fieldmask, if needed: - _mask = getattr(reclist, '_mask', None) - # Get the list of records..... 
- try: - nfields = len(reclist[0]) - except TypeError: - nfields = len(reclist[0].dtype) - if isinstance(reclist, ndarray): - # Make sure we don't have some hidden mask - if isinstance(reclist, MaskedArray): - reclist = reclist.filled().view(ndarray) - # Grab the initial dtype, just in case - if dtype is None: - dtype = reclist.dtype - reclist = reclist.tolist() - mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, - aligned=aligned, byteorder=byteorder).view(mrecarray) - # Set the fill_value if needed - if fill_value is not None: - mrec.fill_value = fill_value - # Now, let's deal w/ the mask - if mask is not nomask: - mask = np.array(mask, copy=False) - maskrecordlength = len(mask.dtype) - if maskrecordlength: - mrec._mask.flat = mask - elif len(mask.shape) == 2: - mrec._mask.flat = [tuple(m) for m in mask] - else: - mrec.__setmask__(mask) - if _mask is not None: - mrec._mask[:] = _mask - return mrec - -def _guessvartypes(arr): - """Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise -conversion. Returns a list of dtypes. -The array is first converted to ndarray. If the array is 2D, the test is performed -on the first line. An exception is raised if the file is 3D or more. - """ - vartypes = [] - arr = np.asarray(arr) - if len(arr.shape) == 2 : - arr = arr[0] - elif len(arr.shape) > 2: - raise ValueError("The array should be 2D at most!") - # Start the conversion loop ....... - for f in arr: - try: - int(f) - except ValueError: - try: - float(f) - except ValueError: - try: - val = complex(f) - except ValueError: - vartypes.append(arr.dtype) - else: - vartypes.append(np.dtype(complex)) - else: - vartypes.append(np.dtype(float)) - else: - vartypes.append(np.dtype(int)) - return vartypes - -def openfile(fname): - "Opens the file handle of file `fname`" - # A file handle ................... - if hasattr(fname, 'readline'): - return fname - # Try to open the file and guess its type - try: - f = open(fname) - except IOError: - raise IOError("No such file: '%s'" % fname) - if f.readline()[:2] != "\\x": - f.seek(0, 0) - return f - f.close() - raise NotImplementedError("Wow, binary file") - - -def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', - varnames=None, vartypes=None): - """Creates a mrecarray from data stored in the file `filename`. - - Parameters - ---------- - filename : {file name/handle} - Handle of an opened file. - delimitor : {None, string}, optional - Alphanumeric character used to separate columns in the file. - If None, any (group of) white spacestring(s) will be used. - commentchar : {'#', string}, optional - Alphanumeric character used to mark the start of a comment. - missingchar : {'', string}, optional - String indicating missing data, and used to create the masks. - varnames : {None, sequence}, optional - Sequence of the variable names. If None, a list will be created from - the first non empty line of the file. - vartypes : {None, sequence}, optional - Sequence of the variables dtypes. If None, it will be estimated from - the first non-commented line. - - - Ultra simple: the varnames are in the header, one line""" - # Try to open the file ...................... 
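`_guessvartypes` above settles each column's dtype by attempting conversions in order int, then float, then complex, and falling back to a string dtype. The same cascade in isolation (`guess_dtype` is a made-up name, not part of the module); the body of `fromtextfile` continues below.

    import numpy as np

    def guess_dtype(token):
        # Try the narrowest conversion first, widening on each failure.
        for convert, dt in ((int, np.dtype(int)),
                            (float, np.dtype(float)),
                            (complex, np.dtype(complex))):
            try:
                convert(token)
                return dt
            except ValueError:
                continue
        return np.dtype(str)  # nothing numeric matched: keep it a string

    print([guess_dtype(t) for t in ('3', '2.5', '1+2j', 'spam')])
    # int, float, complex, then the string dtype ('spam' matched nothing numeric)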
-    f = openfile(fname)
-
-    # Get the first non-empty line as the varnames
-    while True:
-        line = f.readline()
-        firstline = line[:line.find(commentchar)].strip()
-        _varnames = firstline.split(delimitor)
-        if len(_varnames) > 1:
-            break
-    if varnames is None:
-        varnames = _varnames
-
-    # Get the data ..............................
-    _variables = masked_array([line.strip().split(delimitor) for line in f
-                               if line[0] != commentchar and len(line) > 1])
-    (_, nfields) = _variables.shape
-    f.close()
-
-    # Try to guess the dtype ....................
-    if vartypes is None:
-        vartypes = _guessvartypes(_variables[0])
-    else:
-        vartypes = [np.dtype(v) for v in vartypes]
-        if len(vartypes) != nfields:
-            msg = "Attempting to set %i dtypes for %i fields!"
-            msg += " Reverting to default."
-            warnings.warn(msg % (len(vartypes), nfields))
-            vartypes = _guessvartypes(_variables[0])
-
-    # Construct the descriptor ..................
-    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
-    mfillv = [ma.default_fill_value(f) for f in vartypes]
-
-    # Get the data and the mask .................
-    # We just need a list of masked_arrays. It's easier to create it like that:
-    _mask = (_variables.T == missingchar)
-    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
-                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
-
-    return fromarrays(_datalist, dtype=mdescr)
-
-#....................................................................
-def addfield(mrecord, newfield, newfieldname=None):
-    """Adds a new field to the masked record array, using `newfield` as data
-and `newfieldname` as name. If `newfieldname` is None, the new field name is
-set to 'fi', where `i` is the number of existing fields.
-    """
-    _data = mrecord._data
-    _mask = mrecord._mask
-    if newfieldname is None or newfieldname in reserved_fields:
-        newfieldname = 'f%i' % len(_data.dtype)
-    newfield = ma.array(newfield)
-    # Get the new data ............
-    # Create a new empty recarray
-    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
-    newdata = recarray(_data.shape, newdtype)
-    # Add the existing fields
-    [newdata.setfield(_data.getfield(*f), *f)
-     for f in _data.dtype.fields.values()]
-    # Add the new field
-    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
-    newdata = newdata.view(MaskedRecords)
-    # Get the new mask .............
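`addfield` above grows the record dtype by appending one `(name, dtype)` pair to the descriptor, copies every old field into a fresh recarray, and then sets the new one; the mask recarray is rebuilt the same way just below. The dtype-extension step on its own (the field names are illustrative):

    import numpy as np

    old = np.dtype([('a', int), ('b', float)])

    # Append one (name, dtype) pair, as addfield does for the data dtype.
    new = np.dtype(old.descr + [('c', np.dtype(bool))])
    print(new.names)  # ('a', 'b', 'c')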
- # Create a new empty recarray - newmdtype = np.dtype([(n, bool_) for n in newdtype.names]) - newmask = recarray(_data.shape, newmdtype) - # Add the old masks - [newmask.setfield(_mask.getfield(*f), *f) - for f in _mask.dtype.fields.values()] - # Add the mask of the new field - newmask.setfield(getmaskarray(newfield), - *newmask.dtype.fields[newfieldname]) - newdata._mask = newmask - return newdata diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/setup.py deleted file mode 100644 index 5486ff46a21ab..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import os - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('ma', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_core.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_core.py deleted file mode 100644 index 34951875d3747..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_core.py +++ /dev/null @@ -1,3684 +0,0 @@ -# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102 -"""Tests suite for MaskedArray & subclassing. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -""" -from __future__ import division, absolute_import, print_function - -__author__ = "Pierre GF Gerard-Marchant" - -import warnings -import sys -import pickle -from functools import reduce - -from nose.tools import assert_raises - -import numpy as np -import numpy.ma.core -import numpy.core.fromnumeric as fromnumeric -from numpy import ndarray -from numpy.ma.testutils import * -from numpy.ma.core import * -from numpy.compat import asbytes, asbytes_nested - -pi = np.pi - - -#.............................................................................. -class TestMaskedArray(TestCase): - # Base test class for MaskedArrays. - - def setUp(self): - # Base data definition. - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = masked_array(z, mask=[0, 1, 0, 0]) - xf = np.where(m1, 1e+20, x) - xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) - - def test_basicattributes(self): - # Tests some basic array attributes. 
- a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - assert_equal(a.ndim, 1) - assert_equal(b.ndim, 1) - assert_equal(a.size, 3) - assert_equal(b.size, 3) - assert_equal(a.shape, (3,)) - assert_equal(b.shape, (3,)) - - def test_basic0d(self): - # Checks masking a scalar - x = masked_array(0) - assert_equal(str(x), '0') - x = masked_array(0, mask=True) - assert_equal(str(x), str(masked_print_option)) - x = masked_array(0, mask=False) - assert_equal(str(x), '0') - x = array(0, mask=1) - self.assertTrue(x.filled().dtype is x._data.dtype) - - def test_basic1d(self): - # Test of basic array creation and properties in 1 dimension. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - self.assertTrue(not isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - self.assertTrue((xm - ym).filled(0).any()) - fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) - s = x.shape - assert_equal(np.shape(xm), s) - assert_equal(xm.shape, s) - assert_equal(xm.dtype, x.dtype) - assert_equal(zm.dtype, z.dtype) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) - assert_array_equal(xm, xf) - assert_array_equal(filled(xm, 1.e20), xf) - assert_array_equal(x, xm) - - def test_basic2d(self): - # Test of basic array creation and properties in 2 dimensions. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - # - self.assertTrue(not isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - assert_equal(shape(xm), s) - assert_equal(xm.shape, s) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) - assert_equal(xm, xf) - assert_equal(filled(xm, 1.e20), xf) - assert_equal(x, xm) - - def test_concatenate_basic(self): - # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - # basic concatenation - assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) - assert_equal(np.concatenate((x, y)), concatenate((x, y))) - assert_equal(np.concatenate((x, y)), concatenate((xm, y))) - assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) - - def test_concatenate_alongaxis(self): - # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - # Concatenation along an axis - s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s - assert_equal(xm.mask, np.reshape(m1, s)) - assert_equal(ym.mask, np.reshape(m2, s)) - xmym = concatenate((xm, ym), 1) - assert_equal(np.concatenate((x, y), 1), xmym) - assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) - # - x = zeros(2) - y = array(ones(2), mask=[False, True]) - z = concatenate((x, y)) - assert_array_equal(z, [0, 0, 1, 1]) - assert_array_equal(z.mask, [False, False, False, True]) - z = concatenate((y, x)) - assert_array_equal(z, [1, 1, 0, 0]) - assert_array_equal(z.mask, [False, True, False, False]) - - def test_concatenate_flexible(self): - # Tests the concatenation on flexible arrays. 
-        data = masked_array(list(zip(np.random.rand(10),
-                                     np.arange(10))),
-                            dtype=[('a', float), ('b', int)])
-        #
-        test = concatenate([data[:5], data[5:]])
-        assert_equal_records(test, data)
-
-    def test_creation_ndmin(self):
-        # Check the use of ndmin
-        x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
-        assert_equal(x.shape, (1, 3))
-        assert_equal(x._data, [[1, 2, 3]])
-        assert_equal(x._mask, [[1, 0, 0]])
-
-    def test_creation_ndmin_from_maskedarray(self):
-        # Make sure we're not losing the original mask w/ ndmin
-        x = array([1, 2, 3])
-        x[-1] = masked
-        xx = array(x, ndmin=2, dtype=float)
-        assert_equal(x.shape, x._mask.shape)
-        assert_equal(xx.shape, xx._mask.shape)
-
-    def test_creation_maskcreation(self):
-        # Tests how masks are initialized at the creation of MaskedArrays.
-        data = arange(24, dtype=float)
-        data[[3, 6, 15]] = masked
-        dma_1 = MaskedArray(data)
-        assert_equal(dma_1.mask, data.mask)
-        dma_2 = MaskedArray(dma_1)
-        assert_equal(dma_2.mask, dma_1.mask)
-        dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
-        fail_if_equal(dma_3.mask, dma_1.mask)
-
-    def test_creation_with_list_of_maskedarrays(self):
-        # Tests creating a masked array from a list of masked arrays.
-        x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
-        data = array((x, x[::-1]))
-        assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
-        assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
-        #
-        x.mask = nomask
-        data = array((x, x[::-1]))
-        assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
-        self.assertTrue(data.mask is nomask)
-
-    def test_asarray(self):
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
-        xm.fill_value = -9999
-        xm._hardmask = True
-        xmm = asarray(xm)
-        assert_equal(xmm._data, xm._data)
-        assert_equal(xmm._mask, xm._mask)
-        assert_equal(xmm.fill_value, xm.fill_value)
-        assert_equal(xmm._hardmask, xm._hardmask)
-
-    def test_fix_invalid(self):
-        # Checks fix_invalid.
-        with np.errstate(invalid='ignore'):
-            data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
-            data_fixed = fix_invalid(data)
-            assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
-            assert_equal(data_fixed._mask, [1., 0., 1.])
-
-    def test_maskedelement(self):
-        # Test of masked element
-        x = arange(6)
-        x[1] = masked
-        self.assertTrue(str(masked) == '--')
-        self.assertTrue(x[1] is masked)
-        assert_equal(filled(x[1], 0), 0)
-        # don't know why these should raise an exception...
- #self.assertRaises(Exception, lambda x,y: x+y, masked, masked) - #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) - #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) - #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) - - def test_set_element_as_object(self): - # Tests setting elements with object - a = empty(1, dtype=object) - x = (1, 2, 3, 4, 5) - a[0] = x - assert_equal(a[0], x) - self.assertTrue(a[0] is x) - # - import datetime - dt = datetime.datetime.now() - a[0] = dt - self.assertTrue(a[0] is dt) - - def test_indexing(self): - # Tests conversions and indexing - x1 = np.array([1, 2, 4, 3]) - x2 = array(x1, mask=[1, 0, 0, 0]) - x3 = array(x1, mask=[0, 1, 0, 1]) - x4 = array(x1) - # test conversion to strings - junk, garbage = str(x2), repr(x2) - assert_equal(np.sort(x1), sort(x2, endwith=False)) - # tests of indexing - assert_(type(x2[1]) is type(x1[1])) - assert_(x1[1] == x2[1]) - assert_(x2[0] is masked) - assert_equal(x1[2], x2[2]) - assert_equal(x1[2:5], x2[2:5]) - assert_equal(x1[:], x2[:]) - assert_equal(x1[1:], x3[1:]) - x1[2] = 9 - x2[2] = 9 - assert_equal(x1, x2) - x1[1:3] = 99 - x2[1:3] = 99 - assert_equal(x1, x2) - x2[1] = masked - assert_equal(x1, x2) - x2[1:3] = masked - assert_equal(x1, x2) - x2[:] = x1 - x2[1] = masked - assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) - x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) - x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) - assert_(allequal(x4, array([1, 2, 3, 4]))) - x1 = np.arange(5) * 1.0 - x2 = masked_values(x1, 3.0) - assert_equal(x1, x2) - assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) - assert_equal(3.0, x2.fill_value) - x1 = array([1, 'hello', 2, 3], object) - x2 = np.array([1, 'hello', 2, 3], object) - s1 = x1[1] - s2 = x2[1] - assert_equal(type(s2), str) - assert_equal(type(s1), str) - assert_equal(s1, s2) - assert_(x1[1:1].shape == (0,)) - - def test_copy(self): - # Tests of some subtle points of copying and sizing. 
- n = [0, 0, 1, 0, 0] - m = make_mask(n) - m2 = make_mask(m) - self.assertTrue(m is m2) - m3 = make_mask(m, copy=1) - self.assertTrue(m is not m3) - - x1 = np.arange(5) - y1 = array(x1, mask=m) - #self.assertTrue( y1._data is x1) - assert_equal(y1._data.__array_interface__, x1.__array_interface__) - self.assertTrue(allequal(x1, y1.data)) - #self.assertTrue( y1.mask is m) - assert_equal(y1._mask.__array_interface__, m.__array_interface__) - - y1a = array(y1) - self.assertTrue(y1a._data.__array_interface__ == - y1._data.__array_interface__) - self.assertTrue(y1a.mask is y1.mask) - - y2 = array(x1, mask=m) - self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__) - #self.assertTrue( y2.mask is m) - self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__) - self.assertTrue(y2[2] is masked) - y2[2] = 9 - self.assertTrue(y2[2] is not masked) - #self.assertTrue( y2.mask is not m) - self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__) - self.assertTrue(allequal(y2.mask, 0)) - - y3 = array(x1 * 1.0, mask=m) - self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) - - x4 = arange(4) - x4[2] = masked - y4 = resize(x4, (8,)) - assert_equal(concatenate([x4, x4]), y4) - assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) - y5 = repeat(x4, (2, 2, 2, 2), axis=0) - assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) - y6 = repeat(x4, 2, axis=0) - assert_equal(y5, y6) - y7 = x4.repeat((2, 2, 2, 2), axis=0) - assert_equal(y5, y7) - y8 = x4.repeat(2, 0) - assert_equal(y5, y8) - - y9 = x4.copy() - assert_equal(y9._data, x4._data) - assert_equal(y9._mask, x4._mask) - # - x = masked_array([1, 2, 3], mask=[0, 1, 0]) - # Copy is False by default - y = masked_array(x) - assert_equal(y._data.ctypes.data, x._data.ctypes.data) - assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) - y = masked_array(x, copy=True) - assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) - assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) - - def test_deepcopy(self): - from copy import deepcopy - a = array([0, 1, 2], mask=[False, True, False]) - copied = deepcopy(a) - assert_equal(copied.mask, a.mask) - assert_not_equal(id(a._mask), id(copied._mask)) - # - copied[1] = 1 - assert_equal(copied.mask, [0, 0, 0]) - assert_equal(a.mask, [0, 1, 0]) - # - copied = deepcopy(a) - assert_equal(copied.mask, a.mask) - copied.mask[1] = False - assert_equal(copied.mask, [0, 0, 0]) - assert_equal(a.mask, [0, 1, 0]) - - def test_str_repr(self): - a = array([0, 1, 2], mask=[False, True, False]) - assert_equal(str(a), '[0 -- 2]') - assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' - ' mask = [False True False],\n' - ' fill_value = 999999)\n') - - def test_pickling(self): - # Tests pickling - a = arange(10) - a[::3] = masked - a.fill_value = 999 - a_pickled = pickle.loads(a.dumps()) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled._data, a._data) - assert_equal(a_pickled.fill_value, 999) - - def test_pickling_subbaseclass(self): - # Test pickling w/ a subclass of ndarray - a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) - a_pickled = pickle.loads(a.dumps()) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled, a) - self.assertTrue(isinstance(a_pickled._data, np.matrix)) - - def test_pickling_maskedconstant(self): - # Test pickling MaskedConstant - mc = np.ma.masked - mc_pickled = pickle.loads(mc.dumps()) - assert_equal(mc_pickled._baseclass, mc._baseclass) - assert_equal(mc_pickled._mask, mc._mask) - assert_equal(mc_pickled._data, mc._data) - - def 
test_pickling_wstructured(self): - # Tests pickling w/ structured array - a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], - dtype=[('a', int), ('b', float)]) - a_pickled = pickle.loads(a.dumps()) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled, a) - - def test_pickling_keepalignment(self): - # Tests pickling w/ F_CONTIGUOUS arrays - a = arange(10) - a.shape = (-1, 2) - b = a.T - test = pickle.loads(pickle.dumps(b)) - assert_equal(test, b) - - def test_single_element_subscript(self): - # Tests single element subscripts of Maskedarrays. - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - assert_equal(a[0].shape, ()) - assert_equal(b[0].shape, ()) - assert_equal(b[1].shape, ()) - - def test_topython(self): - # Tests some communication issues with Python. - assert_equal(1, int(array(1))) - assert_equal(1.0, float(array(1))) - assert_equal(1, int(array([[[1]]]))) - assert_equal(1.0, float(array([[1]]))) - self.assertRaises(TypeError, float, array([1, 1])) - # - with warnings.catch_warnings(): - warnings.simplefilter('ignore', UserWarning) - assert_(np.isnan(float(array([1], mask=[1])))) - # - a = array([1, 2, 3], mask=[1, 0, 0]) - self.assertRaises(TypeError, lambda:float(a)) - assert_equal(float(a[-1]), 3.) - self.assertTrue(np.isnan(float(a[0]))) - self.assertRaises(TypeError, int, a) - assert_equal(int(a[-1]), 3) - self.assertRaises(MAError, lambda:int(a[0])) - - def test_oddfeatures_1(self): - # Test of other odd features - x = arange(20) - x = x.reshape(4, 5) - x.flat[5] = 12 - assert_(x[1, 0] == 12) - z = x + 10j * x - assert_equal(z.real, x) - assert_equal(z.imag, 10 * x) - assert_equal((z * conjugate(z)).real, 101 * x * x) - z.imag[...] = 0.0 - # - x = arange(10) - x[3] = masked - assert_(str(x[3]) == str(masked)) - c = x >= 8 - assert_(count(where(c, masked, masked)) == 0) - assert_(shape(where(c, masked, masked)) == c.shape) - # - z = masked_where(c, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - assert_equal(x, z) - - def test_oddfeatures_2(self): - # Tests some more features. 
- x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - - def test_oddfeatures_3(self): - # Tests some generic features - atest = array([10], mask=True) - btest = array([20]) - idx = atest.mask - atest[idx] = btest[idx] - assert_equal(atest, [20]) - - def test_filled_w_object_dtype(self): - a = np.ma.masked_all(1, dtype='O') - assert_equal(a.filled('x')[0], 'x') - - def test_filled_w_flexible_dtype(self): - # Test filled w/ flexible dtype - flexi = array([(1, 1, 1)], - dtype=[('i', int), ('s', '|S8'), ('f', float)]) - flexi[0] = masked - assert_equal(flexi.filled(), - np.array([(default_fill_value(0), - default_fill_value('0'), - default_fill_value(0.),)], dtype=flexi.dtype)) - flexi[0] = masked - assert_equal(flexi.filled(1), - np.array([(1, '1', 1.)], dtype=flexi.dtype)) - - def test_filled_w_mvoid(self): - # Test filled w/ mvoid - ndtype = [('a', int), ('b', float)] - a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) - # Filled using default - test = a.filled() - assert_equal(tuple(test), (1, default_fill_value(1.))) - # Explicit fill_value - test = a.filled((-1, -1)) - assert_equal(tuple(test), (1, -1)) - # Using predefined filling values - a.fill_value = (-999, -999) - assert_equal(tuple(a.filled()), (1, -999)) - - def test_filled_w_nested_dtype(self): - # Test filled w/ nested dtype - ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] - a = array([(1, (1, 1)), (2, (2, 2))], - mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) - test = a.filled(0) - control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) - assert_equal(test, control) - # - test = a['B'].filled(0) - control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) - assert_equal(test, control) - - def test_filled_w_f_order(self): - # Test filled w/ F-contiguous array - a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), - mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), - order='F') # this is currently ignored - self.assertTrue(a.flags['F_CONTIGUOUS']) - self.assertTrue(a.filled(0).flags['F_CONTIGUOUS']) - - def test_optinfo_propagation(self): - # Checks that _optinfo dictionary isn't back-propagated - x = array([1, 2, 3, ], dtype=float) - x._optinfo['info'] = '???' - y = x.copy() - assert_equal(y._optinfo['info'], '???') - y._optinfo['info'] = '!!!' - assert_equal(x._optinfo['info'], '???') - - def test_fancy_printoptions(self): - # Test printing a masked array w/ fancy dtype. 
- fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) - test = array([(1, (2, 3.0)), (4, (5, 6.0))], - mask=[(1, (0, 1)), (0, (1, 0))], - dtype=fancydtype) - control = "[(--, (2, --)) (4, (--, 6.0))]" - assert_equal(str(test), control) - - def test_flatten_structured_array(self): - # Test flatten_structured_array on arrays - # On ndarray - ndtype = [('a', int), ('b', float)] - a = np.array([(1, 1), (2, 2)], dtype=ndtype) - test = flatten_structured_array(a) - control = np.array([[1., 1.], [2., 2.]], dtype=np.float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - # On masked_array - a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) - test = flatten_structured_array(a) - control = array([[1., 1.], [2., 2.]], - mask=[[0, 1], [1, 0]], dtype=np.float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - assert_equal(test.mask, control.mask) - # On masked array with nested structure - ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] - a = array([(1, (1, 1.1)), (2, (2, 2.2))], - mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) - test = flatten_structured_array(a) - control = array([[1., 1., 1.1], [2., 2., 2.2]], - mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - assert_equal(test.mask, control.mask) - # Keeping the initial shape - ndtype = [('a', int), ('b', float)] - a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) - test = flatten_structured_array(a) - control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float) - assert_equal(test, control) - assert_equal(test.dtype, control.dtype) - - def test_void0d(self): - # Test creating a mvoid object - ndtype = [('a', int), ('b', int)] - a = np.array([(1, 2,)], dtype=ndtype)[0] - f = mvoid(a) - assert_(isinstance(f, mvoid)) - # - a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] - assert_(isinstance(a, mvoid)) - # - a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) - f = mvoid(a._data[0], a._mask[0]) - assert_(isinstance(f, mvoid)) - - def test_mvoid_getitem(self): - # Test mvoid.__getitem__ - ndtype = [('a', int), ('b', int)] - a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], - dtype=ndtype) - # w/o mask - f = a[0] - self.assertTrue(isinstance(f, mvoid)) - assert_equal((f[0], f['a']), (1, 1)) - assert_equal(f['b'], 2) - # w/ mask - f = a[1] - self.assertTrue(isinstance(f, mvoid)) - self.assertTrue(f[0] is masked) - self.assertTrue(f['a'] is masked) - assert_equal(f[1], 4) - - def test_mvoid_iter(self): - # Test iteration on __getitem__ - ndtype = [('a', int), ('b', int)] - a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], - dtype=ndtype) - # w/o mask - assert_equal(list(a[0]), [1, 2]) - # w/ mask - assert_equal(list(a[1]), [masked, 4]) - - def test_mvoid_print(self): - # Test printing a mvoid - mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) - assert_equal(str(mx[0]), "(1, 1)") - mx['b'][0] = masked - ini_display = masked_print_option._display - masked_print_option.set_display("-X-") - try: - assert_equal(str(mx[0]), "(1, -X-)") - assert_equal(repr(mx[0]), "(1, -X-)") - finally: - masked_print_option.set_display(ini_display) - - -#------------------------------------------------------------------------------ -class TestMaskedArrayArithmetic(TestCase): - # Base test class for MaskedArrays. - - def setUp(self): - # Base data definition. 
- x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = masked_array(z, mask=[0, 1, 0, 0]) - xf = np.where(m1, 1e+20, x) - xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') - - def tearDown(self): - np.seterr(**self.err_status) - - def test_basic_arithmetic(self): - # Test of basic arithmetic. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - a2d = array([[1, 2], [0, 4]]) - a2dm = masked_array(a2d, [[0, 0], [1, 0]]) - assert_equal(a2d * a2d, a2d * a2dm) - assert_equal(a2d + a2d, a2d + a2dm) - assert_equal(a2d - a2d, a2d - a2dm) - for s in [(12,), (4, 3), (2, 6)]: - x = x.reshape(s) - y = y.reshape(s) - xm = xm.reshape(s) - ym = ym.reshape(s) - xf = xf.reshape(s) - assert_equal(-x, -xm) - assert_equal(x + y, xm + ym) - assert_equal(x - y, xm - ym) - assert_equal(x * y, xm * ym) - assert_equal(x / y, xm / ym) - assert_equal(a10 + y, a10 + ym) - assert_equal(a10 - y, a10 - ym) - assert_equal(a10 * y, a10 * ym) - assert_equal(a10 / y, a10 / ym) - assert_equal(x + a10, xm + a10) - assert_equal(x - a10, xm - a10) - assert_equal(x * a10, xm * a10) - assert_equal(x / a10, xm / a10) - assert_equal(x ** 2, xm ** 2) - assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5) - assert_equal(x ** y, xm ** ym) - assert_equal(np.add(x, y), add(xm, ym)) - assert_equal(np.subtract(x, y), subtract(xm, ym)) - assert_equal(np.multiply(x, y), multiply(xm, ym)) - assert_equal(np.divide(x, y), divide(xm, ym)) - - def test_divide_on_different_shapes(self): - x = arange(6, dtype=float) - x.shape = (2, 3) - y = arange(3, dtype=float) - # - z = x / y - assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) - assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) - # - z = x / y[None,:] - assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) - assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) - # - y = arange(2, dtype=float) - z = x / y[:, None] - assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]]) - assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]]) - - def test_mixed_arithmetic(self): - # Tests mixed arithmetics. - na = np.array([1]) - ma = array([1]) - self.assertTrue(isinstance(na + ma, MaskedArray)) - self.assertTrue(isinstance(ma + na, MaskedArray)) - - def test_limits_arithmetic(self): - tiny = np.finfo(float).tiny - a = array([tiny, 1. / tiny, 0.]) - assert_equal(getmaskarray(a / 2), [0, 0, 0]) - assert_equal(getmaskarray(2 / a), [1, 0, 1]) - - def test_masked_singleton_arithmetic(self): - # Tests some scalar arithmetics on MaskedArrays. 
- # Masked singleton should remain masked no matter what - xm = array(0, mask=1) - self.assertTrue((1 / array(0)).mask) - self.assertTrue((1 + xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue(maximum(xm, xm).mask) - self.assertTrue(minimum(xm, xm).mask) - - def test_masked_singleton_equality(self): - # Tests (in)equality on masked snigleton - a = array([1, 2, 3], mask=[1, 1, 0]) - assert_((a[0] == 0) is masked) - assert_((a[0] != 0) is masked) - assert_equal((a[-1] == 0), False) - assert_equal((a[-1] != 0), True) - - def test_arithmetic_with_masked_singleton(self): - # Checks that there's no collapsing to masked - x = masked_array([1, 2]) - y = x * masked - assert_equal(y.shape, x.shape) - assert_equal(y._mask, [True, True]) - y = x[0] * masked - assert_(y is masked) - y = x + masked - assert_equal(y.shape, x.shape) - assert_equal(y._mask, [True, True]) - - def test_arithmetic_with_masked_singleton_on_1d_singleton(self): - # Check that we're not losing the shape of a singleton - x = masked_array([1, ]) - y = x + masked - assert_equal(y.shape, x.shape) - assert_equal(y.mask, [True, ]) - - def test_scalar_arithmetic(self): - x = array(0, mask=0) - assert_equal(x.filled().ctypes.data, x.ctypes.data) - # Make sure we don't lose the shape in some circumstances - xm = array((0, 0)) / 0. - assert_equal(xm.shape, (2,)) - assert_equal(xm.mask, [1, 1]) - - def test_basic_ufuncs(self): - # Test various functions such as sin, cos. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_equal(np.cos(x), cos(xm)) - assert_equal(np.cosh(x), cosh(xm)) - assert_equal(np.sin(x), sin(xm)) - assert_equal(np.sinh(x), sinh(xm)) - assert_equal(np.tan(x), tan(xm)) - assert_equal(np.tanh(x), tanh(xm)) - assert_equal(np.sqrt(abs(x)), sqrt(xm)) - assert_equal(np.log(abs(x)), log(xm)) - assert_equal(np.log10(abs(x)), log10(xm)) - assert_equal(np.exp(x), exp(xm)) - assert_equal(np.arcsin(z), arcsin(zm)) - assert_equal(np.arccos(z), arccos(zm)) - assert_equal(np.arctan(z), arctan(zm)) - assert_equal(np.arctan2(x, y), arctan2(xm, ym)) - assert_equal(np.absolute(x), absolute(xm)) - assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym)) - assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True)) - assert_equal(np.equal(x, y), equal(xm, ym)) - assert_equal(np.not_equal(x, y), not_equal(xm, ym)) - assert_equal(np.less(x, y), less(xm, ym)) - assert_equal(np.greater(x, y), greater(xm, ym)) - assert_equal(np.less_equal(x, y), less_equal(xm, ym)) - assert_equal(np.greater_equal(x, y), greater_equal(xm, ym)) - assert_equal(np.conjugate(x), conjugate(xm)) - - def test_count_func(self): - # Tests count - assert_equal(1, count(1)) - assert_equal(0, array(1, mask=[1])) - - ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - res = count(ott) - self.assertTrue(res.dtype.type is np.intp) - assert_equal(3, res) - - ott = ott.reshape((2, 2)) - res = count(ott) - assert_(res.dtype.type is np.intp) - assert_equal(3, res) - res = count(ott, 0) - assert_(isinstance(res, ndarray)) - assert_equal([1, 2], res) - assert_(getmask(res) is nomask) - - ott= array([0., 1., 2., 3.]) - res = count(ott, 0) - assert_(isinstance(res, ndarray)) - assert_(res.dtype.type is np.intp) - - assert_raises(IndexError, ott.count, 1) - - def test_minmax_func(self): - # Tests minimum and maximum. 
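The singleton tests above pin down one rule: `np.ma.masked` absorbs arithmetic, while binary operations between it and an array spread the mask over the whole array instead of collapsing the result. A small sketch of both behaviours; `test_minmax_func` continues below.

    import numpy as np

    x = np.ma.array([1, 2], mask=[True, False])

    print((x[0] + 1) is np.ma.masked)  # True: arithmetic on masked stays masked
    print(x + np.ma.masked)            # [-- --]: the mask spreads, shape is kept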
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - # max doesn't work if shaped - xr = np.ravel(x) - xmr = ravel(xm) - # following are true because of careful selection of data - assert_equal(max(xr), maximum(xmr)) - assert_equal(min(xr), minimum(xmr)) - # - assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]) - assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]) - x = arange(5) - y = arange(5) - 2 - x[3] = masked - y[0] = masked - assert_equal(minimum(x, y), where(less(x, y), x, y)) - assert_equal(maximum(x, y), where(greater(x, y), x, y)) - assert_(minimum(x) == 0) - assert_(maximum(x) == 4) - # - x = arange(4).reshape(2, 2) - x[-1, -1] = masked - assert_equal(maximum(x), 2) - - def test_minimummaximum_func(self): - a = np.ones((2, 2)) - aminimum = minimum(a, a) - self.assertTrue(isinstance(aminimum, MaskedArray)) - assert_equal(aminimum, np.minimum(a, a)) - # - aminimum = minimum.outer(a, a) - self.assertTrue(isinstance(aminimum, MaskedArray)) - assert_equal(aminimum, np.minimum.outer(a, a)) - # - amaximum = maximum(a, a) - self.assertTrue(isinstance(amaximum, MaskedArray)) - assert_equal(amaximum, np.maximum(a, a)) - # - amaximum = maximum.outer(a, a) - self.assertTrue(isinstance(amaximum, MaskedArray)) - assert_equal(amaximum, np.maximum.outer(a, a)) - - def test_minmax_reduce(self): - # Test np.min/maximum.reduce on array w/ full False mask - a = array([1, 2, 3], mask=[False, False, False]) - b = np.maximum.reduce(a) - assert_equal(b, 3) - - def test_minmax_funcs_with_output(self): - # Tests the min/max functions with explicit outputs - mask = np.random.rand(12).round() - xm = array(np.random.uniform(0, 10, 12), mask=mask) - xm.shape = (3, 4) - for funcname in ('min', 'max'): - # Initialize - npfunc = getattr(np, funcname) - mafunc = getattr(numpy.ma.core, funcname) - # Use the np version - nout = np.empty((4,), dtype=int) - try: - result = npfunc(xm, axis=0, out=nout) - except MaskError: - pass - nout = np.empty((4,), dtype=float) - result = npfunc(xm, axis=0, out=nout) - self.assertTrue(result is nout) - # Use the ma version - nout.fill(-999) - result = mafunc(xm, axis=0, out=nout) - self.assertTrue(result is nout) - - def test_minmax_methods(self): - # Additional tests on max/min - (_, _, _, _, _, xm, _, _, _, _) = self.d - xm.shape = (xm.size,) - assert_equal(xm.max(), 10) - self.assertTrue(xm[0].max() is masked) - self.assertTrue(xm[0].max(0) is masked) - self.assertTrue(xm[0].max(-1) is masked) - assert_equal(xm.min(), -10.) - self.assertTrue(xm[0].min() is masked) - self.assertTrue(xm[0].min(0) is masked) - self.assertTrue(xm[0].min(-1) is masked) - assert_equal(xm.ptp(), 20.) - self.assertTrue(xm[0].ptp() is masked) - self.assertTrue(xm[0].ptp(0) is masked) - self.assertTrue(xm[0].ptp(-1) is masked) - # - x = array([1, 2, 3], mask=True) - self.assertTrue(x.min() is masked) - self.assertTrue(x.max() is masked) - self.assertTrue(x.ptp() is masked) - - def test_addsumprod(self): - # Tests add, sum, product. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_equal(np.add.reduce(x), add.reduce(x)) - assert_equal(np.add.accumulate(x), add.accumulate(x)) - assert_equal(4, sum(array(4), axis=0)) - assert_equal(4, sum(array(4), axis=0)) - assert_equal(np.sum(x, axis=0), sum(x, axis=0)) - assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)) - assert_equal(np.sum(x, 0), sum(x, 0)) - assert_equal(np.product(x, axis=0), product(x, axis=0)) - assert_equal(np.product(x, 0), product(x, 0)) - assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0)) - s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s - if len(s) > 1: - assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) - assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) - assert_equal(np.sum(x, 1), sum(x, 1)) - assert_equal(np.product(x, 1), product(x, 1)) - - def test_binops_d2D(self): - # Test binary operations on 2D data - a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) - b = array([[2., 3.], [4., 5.], [6., 7.]]) - # - test = a * b - control = array([[2., 3.], [2., 2.], [3., 3.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = b * a - control = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - a = array([[1.], [2.], [3.]]) - b = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [0, 0], [0, 1]]) - test = a * b - control = array([[2, 3], [8, 10], [18, 3]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = b * a - control = array([[2, 3], [8, 10], [18, 7]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_domained_binops_d2D(self): - # Test domained binary operations on 2D data - a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) - b = array([[2., 3.], [4., 5.], [6., 7.]]) - # - test = a / b - control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = b / a - control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]], - mask=[[0, 0], [1, 1], [1, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - a = array([[1.], [2.], [3.]]) - b = array([[2., 3.], [4., 5.], [6., 7.]], - mask=[[0, 0], [0, 0], [0, 1]]) - test = a / b - control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = b / a - control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], - mask=[[0, 0], [0, 0], [0, 1]]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_noshrinking(self): - # Check that we don't shrink a mask when not wanted - # Binary operations - a = masked_array([1., 2., 3.], mask=[False, False, False], - shrink=False) - b = a + 1 - assert_equal(b.mask, [0, 0, 0]) - # In place binary operation - a += 1 - assert_equal(a.mask, [0, 0, 0]) - # Domained binary operation - b = a / 1. 
-        assert_equal(b.mask, [0, 0, 0])
-        # In place binary operation
-        a /= 1.
-        assert_equal(a.mask, [0, 0, 0])
-
-    def test_mod(self):
-        # Tests mod
-        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
-        assert_equal(mod(x, y), mod(xm, ym))
-        test = mod(ym, xm)
-        assert_equal(test, np.mod(ym, xm))
-        assert_equal(test.mask, mask_or(xm.mask, ym.mask))
-        test = mod(xm, ym)
-        assert_equal(test, np.mod(xm, ym))
-        assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
-
-    def test_TakeTransposeInnerOuter(self):
-        # Test of take, transpose, inner, outer products
-        x = arange(24)
-        y = np.arange(24)
-        x[5:6] = masked
-        x = x.reshape(2, 3, 4)
-        y = y.reshape(2, 3, 4)
-        assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
-        assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
-        assert_equal(np.inner(filled(x, 0), filled(y, 0)),
-                     inner(x, y))
-        assert_equal(np.outer(filled(x, 0), filled(y, 0)),
-                     outer(x, y))
-        y = array(['abc', 1, 'def', 2, 3], object)
-        y[2] = masked
-        t = take(y, [0, 3, 4])
-        assert_(t[0] == 'abc')
-        assert_(t[1] == 2)
-        assert_(t[2] == 3)
-
-    def test_imag_real(self):
-        # Check complex
-        xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
-        assert_equal(xx.imag, [10, 2])
-        assert_equal(xx.imag.filled(), [1e+20, 2])
-        assert_equal(xx.imag.dtype, xx._data.imag.dtype)
-        assert_equal(xx.real, [1, 20])
-        assert_equal(xx.real.filled(), [1e+20, 20])
-        assert_equal(xx.real.dtype, xx._data.real.dtype)
-
-    def test_methods_with_output(self):
-        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
-        xm[:, 0] = xm[0] = xm[-1, -1] = masked
-        #
-        funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
-        #
-        for funcname in funclist:
-            npfunc = getattr(np, funcname)
-            xmmeth = getattr(xm, funcname)
-            # A ndarray as explicit input
-            output = np.empty(4, dtype=float)
-            output.fill(-9999)
-            result = npfunc(xm, axis=0, out=output)
-            # ... the result should be the given output
-            assert_(result is output)
-            assert_equal(result, xmmeth(axis=0, out=output))
-            #
-            output = empty(4, dtype=int)
-            result = xmmeth(axis=0, out=output)
-            assert_(result is output)
-            assert_(output[0] is masked)
-
-    def test_eq_on_structured(self):
-        # Test the equality of structured arrays
-        ndtype = [('A', int), ('B', int)]
-        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
-        test = (a == a)
-        assert_equal(test, [True, True])
-        assert_equal(test.mask, [False, False])
-        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
-        test = (a == b)
-        assert_equal(test, [False, True])
-        assert_equal(test.mask, [True, False])
-        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
-        test = (a == b)
-        assert_equal(test, [True, False])
-        assert_equal(test.mask, [False, False])
-
-    def test_ne_on_structured(self):
-        # Test the equality of structured arrays
-        ndtype = [('A', int), ('B', int)]
-        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
-        test = (a != a)
-        assert_equal(test, [False, False])
-        assert_equal(test.mask, [False, False])
-        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
-        test = (a != b)
-        assert_equal(test, [True, False])
-        assert_equal(test.mask, [True, False])
-        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
-        test = (a != b)
-        assert_equal(test, [False, True])
-        assert_equal(test.mask, [False, False])
-
-    def test_eq_w_None(self):
-        # Really, comparisons with None should not be done, but
-        # check them anyway
-        # With partial mask
-        a = array([1, 2], mask=[0, 1])
-        assert_equal(a == None, False)
-        assert_equal(a.data == None, False)
-        assert_equal(a.mask == None, False)
-        assert_equal(a != None, True)
-        # With nomask
-        a = array([1, 2], mask=False)
-        assert_equal(a == None, False)
-        assert_equal(a != None, True)
-        # With complete mask
-        a = array([1, 2], mask=True)
-        assert_equal(a == None, False)
-        assert_equal(a != None, True)
-        # Fully masked, even comparison to None should return "masked"
-        a = masked
-        assert_equal(a == None, masked)
-
-    def test_eq_w_scalar(self):
-        a = array(1)
-        assert_equal(a == 1, True)
-        assert_equal(a == 0, False)
-        assert_equal(a != 1, False)
-        assert_equal(a != 0, True)
-
-    def test_numpyarithmetics(self):
-        # Check that the mask is not back-propagated when using numpy functions
-        a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
-        control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
-                               mask=[1, 1, 0, 0, 1])
-        #
-        test = log(a)
-        assert_equal(test, control)
-        assert_equal(test.mask, control.mask)
-        assert_equal(a.mask, [0, 0, 0, 0, 1])
-        #
-        test = np.log(a)
-        assert_equal(test, control)
-        assert_equal(test.mask, control.mask)
-        assert_equal(a.mask, [0, 0, 0, 0, 1])
-
-
-#------------------------------------------------------------------------------
-class TestMaskedArrayAttributes(TestCase):
-
-    def test_keepmask(self):
-        # Tests the keep mask flag
-        x = masked_array([1, 2, 3], mask=[1, 0, 0])
-        mx = masked_array(x)
-        assert_equal(mx.mask, x.mask)
-        mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
-        assert_equal(mx.mask, [0, 1, 0])
-        mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
-        assert_equal(mx.mask, [1, 1, 0])
-        # We default to true
-        mx = masked_array(x, mask=[0, 1, 0])
-        assert_equal(mx.mask, [1, 1, 0])
-
-    def test_hardmask(self):
-        # Test hard_mask
-        d = arange(5)
-        n = [0, 0, 0, 1, 1]
-        m = make_mask(n)
-        xh = array(d, mask=m, hard_mask=True)
-        # We need to copy, to avoid updating d in xh !
-        xs = array(d, mask=m, hard_mask=False, copy=True)
-        xh[[1, 4]] = [10, 40]
-        xs[[1, 4]] = [10, 40]
-        assert_equal(xh._data, [0, 10, 2, 3, 4])
-        assert_equal(xs._data, [0, 10, 2, 3, 40])
-        #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
-        assert_equal(xs.mask, [0, 0, 0, 1, 0])
-        self.assertTrue(xh._hardmask)
-        self.assertTrue(not xs._hardmask)
-        xh[1:4] = [10, 20, 30]
-        xs[1:4] = [10, 20, 30]
-        assert_equal(xh._data, [0, 10, 20, 3, 4])
-        assert_equal(xs._data, [0, 10, 20, 30, 40])
-        #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
-        assert_equal(xs.mask, nomask)
-        xh[0] = masked
-        xs[0] = masked
-        assert_equal(xh.mask, [1, 0, 0, 1, 1])
-        assert_equal(xs.mask, [1, 0, 0, 0, 0])
-        xh[:] = 1
-        xs[:] = 1
-        assert_equal(xh._data, [0, 1, 1, 3, 4])
-        assert_equal(xs._data, [1, 1, 1, 1, 1])
-        assert_equal(xh.mask, [1, 0, 0, 1, 1])
-        assert_equal(xs.mask, nomask)
-        # Switch to soft mask
-        xh.soften_mask()
-        xh[:] = arange(5)
-        assert_equal(xh._data, [0, 1, 2, 3, 4])
-        assert_equal(xh.mask, nomask)
-        # Switch back to hard mask
-        xh.harden_mask()
-        xh[xh < 3] = masked
-        assert_equal(xh._data, [0, 1, 2, 3, 4])
-        assert_equal(xh._mask, [1, 1, 1, 0, 0])
-        xh[filled(xh > 1, False)] = 5
-        assert_equal(xh._data, [0, 1, 2, 5, 5])
-        assert_equal(xh._mask, [1, 1, 1, 0, 0])
-        #
-        xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
-        xh[0] = 0
-        assert_equal(xh._data, [[1, 0], [3, 4]])
-        assert_equal(xh._mask, [[1, 0], [0, 0]])
-        xh[-1, -1] = 5
-        assert_equal(xh._data, [[1, 0], [3, 5]])
-        assert_equal(xh._mask, [[1, 0], [0, 0]])
-        xh[filled(xh < 5, False)] = 2
-        assert_equal(xh._data, [[1, 2], [2, 5]])
-        assert_equal(xh._mask, [[1, 0], [0, 0]])
-
-    def test_hardmask_again(self):
-        # Another test of hardmask
-        d = arange(5)
-        n = [0, 0, 0, 1, 1]
-        m = make_mask(n)
-        xh = array(d, mask=m, hard_mask=True)
-        xh[4:5] = 999
-        #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
-        xh[0:1] = 999
-        assert_equal(xh._data, [999, 1, 2, 3, 4])
-
-    def test_hardmask_oncemore_yay(self):
-        # OK, yet another test of hardmask
-        # Make sure that harden_mask/soften_mask//unshare_mask returns self
-        a = array([1, 2, 3], mask=[1, 0, 0])
-        b = a.harden_mask()
-        assert_equal(a, b)
-        b[0] = 0
-        assert_equal(a, b)
-        assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
-        a = b.soften_mask()
-        a[0] = 0
-        assert_equal(a, b)
-        assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
-
-    def test_smallmask(self):
-        # Checks the behaviour of _smallmask
-        a = arange(10)
-        a[1] = masked
-        a[1] = 1
-        assert_equal(a._mask, nomask)
-        a = arange(10)
-        a._smallmask = False
-        a[1] = masked
-        a[1] = 1
-        assert_equal(a._mask, zeros(10))
-
-    def test_shrink_mask(self):
-        # Tests .shrink_mask()
-        a = array([1, 2, 3], mask=[0, 0, 0])
-        b = a.shrink_mask()
-        assert_equal(a, b)
-        assert_equal(a.mask, nomask)
-
-    def test_flat(self):
-        # Test that flat can return all types of items [#4585, #4615]
-        # test simple access
-        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
-        assert_equal(test.flat[1], 2)
-        assert_equal(test.flat[2], masked)
-        self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
-        # Test flat on masked_matrices
-        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
-        test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
-        control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
-        assert_equal(test, control)
-        # Test setting
-        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
-        testflat = test.flat
-        testflat[:] = testflat[[2, 1, 0]]
-        assert_equal(test, control)
-        testflat[0] = 9
-        assert_equal(test[0, 0], 9)
-        # test 2-D record array
-        # ... on structured array w/ masked records
-        x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
-                   [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
-                  dtype=[('a', int), ('b', float), ('c', '|S8')])
-        x['a'][0, 1] = masked
-        x['b'][1, 0] = masked
-        x['c'][0, 2] = masked
-        x[-1, -1] = masked
-        xflat = x.flat
-        assert_equal(xflat[0], x[0, 0])
-        assert_equal(xflat[1], x[0, 1])
-        assert_equal(xflat[2], x[0, 2])
-        assert_equal(xflat[:3], x[0])
-        assert_equal(xflat[3], x[1, 0])
-        assert_equal(xflat[4], x[1, 1])
-        assert_equal(xflat[5], x[1, 2])
-        assert_equal(xflat[3:], x[1])
-        assert_equal(xflat[-1], x[-1, -1])
-        i = 0
-        j = 0
-        for xf in xflat:
-            assert_equal(xf, x[j, i])
-            i += 1
-            if i >= x.shape[-1]:
-                i = 0
-                j += 1
-        # test that matrices keep the correct shape (#4615)
-        a = masked_array(np.matrix(np.eye(2)), mask=0)
-        b = a.flat
-        b01 = b[:2]
-        assert_equal(b01.data, array([[1., 0.]]))
-        assert_equal(b01.mask, array([[False, False]]))
-
-
-#------------------------------------------------------------------------------
-class TestFillingValues(TestCase):
-
-    def test_check_on_scalar(self):
-        # Test _check_fill_value set to valid and invalid values
-        _check_fill_value = np.ma.core._check_fill_value
-        #
-        fval = _check_fill_value(0, int)
-        assert_equal(fval, 0)
-        fval = _check_fill_value(None, int)
-        assert_equal(fval, default_fill_value(0))
-        #
-        fval = _check_fill_value(0, "|S3")
-        assert_equal(fval, asbytes("0"))
-        fval = _check_fill_value(None, "|S3")
-        assert_equal(fval, default_fill_value("|S3"))
-        self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
-        self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
-
-    def test_check_on_fields(self):
-        # Tests _check_fill_value with records
-        _check_fill_value = np.ma.core._check_fill_value
-        ndtype = [('a', int), ('b', float), ('c', "|S3")]
-        # A check on a list should return a single record
-        fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
-        self.assertTrue(isinstance(fval, ndarray))
-        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
-        # A check on None should output the defaults
-        fval = _check_fill_value(None, ndtype)
-        self.assertTrue(isinstance(fval, ndarray))
-        assert_equal(fval.item(), [default_fill_value(0),
-                                   default_fill_value(0.),
-                                   asbytes(default_fill_value("0"))])
-        #.....Using a structured type as fill_value should work
-        fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
-        fval = _check_fill_value(fill_val, ndtype)
-        self.assertTrue(isinstance(fval, ndarray))
-        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
-
-        #.....Using a flexible type w/ a different type shouldn't matter
-        # BEHAVIOR in 1.5 and earlier: match structured types by position
-        #fill_val = np.array((-999, -12345678.9, "???"),
-        #                    dtype=[("A", int), ("B", float), ("C", "|S3")])
-        # BEHAVIOR in 1.6 and later: match structured types by name
-        fill_val = np.array(("???", -999, -12345678.9),
-                            dtype=[("c", "|S3"), ("a", int), ("b", float), ])
-        fval = _check_fill_value(fill_val, ndtype)
-        self.assertTrue(isinstance(fval, ndarray))
-        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
-
-        #.....Using an object-array shouldn't matter either
-        fill_val = np.ndarray(shape=(1,), dtype=object)
-        fill_val[0] = (-999, -12345678.9, asbytes("???"))
-        fval = _check_fill_value(fill_val, object)
-        self.assertTrue(isinstance(fval, ndarray))
-        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
-        # NOTE: This test was never run properly as "fill_value" rather than
-        # "fill_val" was assigned. Written properly, it fails.
-        #fill_val = np.array((-999, -12345678.9, "???"))
-        #fval = _check_fill_value(fill_val, ndtype)
-        #self.assertTrue(isinstance(fval, ndarray))
-        #assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
-        #.....One-field-only flexible type should work as well
-        ndtype = [("a", int)]
-        fval = _check_fill_value(-999999999, ndtype)
-        self.assertTrue(isinstance(fval, ndarray))
-        assert_equal(fval.item(), (-999999999,))
-
-    def test_fillvalue_conversion(self):
-        # Tests the behavior of fill_value during conversion
-        # We had a tailored comment to make sure special attributes are
-        # properly dealt with
-        a = array(asbytes_nested(['3', '4', '5']))
-        a._optinfo.update({'comment':"updated!"})
-        #
-        b = array(a, dtype=int)
-        assert_equal(b._data, [3, 4, 5])
-        assert_equal(b.fill_value, default_fill_value(0))
-        #
-        b = array(a, dtype=float)
-        assert_equal(b._data, [3, 4, 5])
-        assert_equal(b.fill_value, default_fill_value(0.))
-        #
-        b = a.astype(int)
-        assert_equal(b._data, [3, 4, 5])
-        assert_equal(b.fill_value, default_fill_value(0))
-        assert_equal(b._optinfo['comment'], "updated!")
-        #
-        b = a.astype([('a', '|S3')])
-        assert_equal(b['a']._data, a._data)
-        assert_equal(b['a'].fill_value, a.fill_value)
-
-    def test_fillvalue(self):
-        # Yet more fun with the fill_value
-        data = masked_array([1, 2, 3], fill_value=-999)
-        series = data[[0, 2, 1]]
-        assert_equal(series._fill_value, data._fill_value)
-        #
-        mtype = [('f', float), ('s', '|S3')]
-        x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
-        x.fill_value = 999
-        assert_equal(x.fill_value.item(), [999., asbytes('999')])
-        assert_equal(x['f'].fill_value, 999)
-        assert_equal(x['s'].fill_value, asbytes('999'))
-        #
-        x.fill_value = (9, '???')
-        assert_equal(x.fill_value.item(), (9, asbytes('???')))
-        assert_equal(x['f'].fill_value, 9)
-        assert_equal(x['s'].fill_value, asbytes('???'))
-        #
-        x = array([1, 2, 3.1])
-        x.fill_value = 999
-        assert_equal(np.asarray(x.fill_value).dtype, float)
-        assert_equal(x.fill_value, 999.)
-        assert_equal(x._fill_value, np.array(999.))
-
-    def test_fillvalue_exotic_dtype(self):
-        # Tests yet more exotic flexible dtypes
-        _check_fill_value = np.ma.core._check_fill_value
-        ndtype = [('i', int), ('s', '|S8'), ('f', float)]
-        control = np.array((default_fill_value(0),
-                            default_fill_value('0'),
-                            default_fill_value(0.),),
-                           dtype=ndtype)
-        assert_equal(_check_fill_value(None, ndtype), control)
-        # The shape shouldn't matter
-        ndtype = [('f0', float, (2, 2))]
-        control = np.array((default_fill_value(0.),),
-                           dtype=[('f0', float)]).astype(ndtype)
-        assert_equal(_check_fill_value(None, ndtype), control)
-        control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
-        assert_equal(_check_fill_value(0, ndtype), control)
-        #
-        ndtype = np.dtype("int, (2,3)float, float")
-        control = np.array((default_fill_value(0),
-                            default_fill_value(0.),
-                            default_fill_value(0.),),
-                           dtype="int, float, float").astype(ndtype)
-        test = _check_fill_value(None, ndtype)
-        assert_equal(test, control)
-        control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
-        assert_equal(_check_fill_value(0, ndtype), control)
-
-    def test_extremum_fill_value(self):
-        # Tests extremum fill values for flexible type.
-        a = array([(1, (2, 3)), (4, (5, 6))],
-                  dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
-        test = a.fill_value
-        assert_equal(test['A'], default_fill_value(a['A']))
-        assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
-        assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
-        #
-        test = minimum_fill_value(a)
-        assert_equal(test[0], minimum_fill_value(a['A']))
-        assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
-        assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
-        assert_equal(test[1], minimum_fill_value(a['B']))
-        #
-        test = maximum_fill_value(a)
-        assert_equal(test[0], maximum_fill_value(a['A']))
-        assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
-        assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
-        assert_equal(test[1], maximum_fill_value(a['B']))
-
-    def test_fillvalue_individual_fields(self):
-        # Test setting fill_value on individual fields
-        ndtype = [('a', int), ('b', int)]
-        # Explicit fill_value
-        a = array(list(zip([1, 2, 3], [4, 5, 6])),
-                  fill_value=(-999, -999), dtype=ndtype)
-        aa = a['a']
-        aa.set_fill_value(10)
-        assert_equal(aa._fill_value, np.array(10))
-        assert_equal(tuple(a.fill_value), (10, -999))
-        a.fill_value['b'] = -10
-        assert_equal(tuple(a.fill_value), (10, -10))
-        # Implicit fill_value
-        t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
-        tt = t['a']
-        tt.set_fill_value(10)
-        assert_equal(tt._fill_value, np.array(10))
-        assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
-
-    def test_fillvalue_implicit_structured_array(self):
-        # Check that fill_value is always defined for structured arrays
-        ndtype = ('b', float)
-        adtype = ('a', float)
-        a = array([(1.,), (2.,)], mask=[(False,), (False,)],
-                  fill_value=(np.nan,), dtype=np.dtype([adtype]))
-        b = empty(a.shape, dtype=[adtype, ndtype])
-        b['a'] = a['a']
-        b['a'].set_fill_value(a['a'].fill_value)
-        f = b._fill_value[()]
-        assert_(np.isnan(f[0]))
-        assert_equal(f[-1], default_fill_value(1.))
-
-    def test_fillvalue_as_arguments(self):
-        # Test adding a fill_value parameter to empty/ones/zeros
-        a = empty(3, fill_value=999.)
-        assert_equal(a.fill_value, 999.)
-        #
-        a = ones(3, fill_value=999., dtype=float)
-        assert_equal(a.fill_value, 999.)
-        #
-        a = zeros(3, fill_value=0., dtype=complex)
-        assert_equal(a.fill_value, 0.)
-        #
-        a = identity(3, fill_value=0., dtype=complex)
-        assert_equal(a.fill_value, 0.)
-
-    def test_fillvalue_in_view(self):
-        # Test the behavior of fill_value in view
-
-        # Create initial masked array
-        x = array([1, 2, 3], fill_value=1, dtype=np.int64)
-
-        # Check that fill_value is preserved by default
-        y = x.view()
-        assert_(y.fill_value == 1)
-
-        # Check that fill_value is preserved if dtype is specified and the
-        # dtype is an ndarray sub-class and has a _fill_value attribute
-        y = x.view(MaskedArray)
-        assert_(y.fill_value == 1)
-
-        # Check that fill_value is preserved if type is specified and the
-        # dtype is an ndarray sub-class and has a _fill_value attribute (by
-        # default, the first argument is dtype, not type)
-        y = x.view(type=MaskedArray)
-        assert_(y.fill_value == 1)
-
-        # Check that code does not crash if passed an ndarray sub-class that
-        # does not have a _fill_value attribute
-        y = x.view(np.ndarray)
-        y = x.view(type=np.ndarray)
-
-        # Check that fill_value can be overriden with view
-        y = x.view(MaskedArray, fill_value=2)
-        assert_(y.fill_value == 2)
-
-        # Check that fill_value can be overriden with view (using type=)
-        y = x.view(type=MaskedArray, fill_value=2)
-        assert_(y.fill_value == 2)
-
-        # Check that fill_value gets reset if passed a dtype but not a
-        # fill_value. This is because even though in some cases one can safely
-        # cast the fill_value, e.g. if taking an int64 view of an int32 array,
-        # in other cases, this cannot be done (e.g. int32 view of an int64
-        # array with a large fill_value).
-        y = x.view(dtype=np.int32)
-        assert_(y.fill_value == 999999)
-
-
-#------------------------------------------------------------------------------
-class TestUfuncs(TestCase):
-    # Test class for the application of ufuncs on MaskedArrays.
-
-    def setUp(self):
-        # Base data definition.
-        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
-                  array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
-        self.err_status = np.geterr()
-        np.seterr(divide='ignore', invalid='ignore')
-
-    def tearDown(self):
-        np.seterr(**self.err_status)
-
-    def test_testUfuncRegression(self):
-        # Tests new ufuncs on MaskedArrays.
-        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
-                  'sin', 'cos', 'tan',
-                  'arcsin', 'arccos', 'arctan',
-                  'sinh', 'cosh', 'tanh',
-                  'arcsinh',
-                  'arccosh',
-                  'arctanh',
-                  'absolute', 'fabs', 'negative',
-                  # 'nonzero', 'around',
-                  'floor', 'ceil',
-                  # 'sometrue', 'alltrue',
-                  'logical_not',
-                  'add', 'subtract', 'multiply',
-                  'divide', 'true_divide', 'floor_divide',
-                  'remainder', 'fmod', 'hypot', 'arctan2',
-                  'equal', 'not_equal', 'less_equal', 'greater_equal',
-                  'less', 'greater',
-                  'logical_and', 'logical_or', 'logical_xor',
-                  ]:
-            try:
-                uf = getattr(umath, f)
-            except AttributeError:
-                uf = getattr(fromnumeric, f)
-            mf = getattr(numpy.ma.core, f)
-            args = self.d[:uf.nin]
-            ur = uf(*args)
-            mr = mf(*args)
-            assert_equal(ur.filled(0), mr.filled(0), f)
-            assert_mask_equal(ur.mask, mr.mask, err_msg=f)
-
-    def test_reduce(self):
-        # Tests reduce on MaskedArrays.
-        a = self.d[0]
-        self.assertTrue(not alltrue(a, axis=0))
-        self.assertTrue(sometrue(a, axis=0))
-        assert_equal(sum(a[:3], axis=0), 0)
-        assert_equal(product(a, axis=0), 0)
-        assert_equal(add.reduce(a), pi)
-
-    def test_minmax(self):
-        # Tests extrema on MaskedArrays.
-        a = arange(1, 13).reshape(3, 4)
-        amask = masked_where(a < 5, a)
-        assert_equal(amask.max(), a.max())
-        assert_equal(amask.min(), 5)
-        assert_equal(amask.max(0), a.max(0))
-        assert_equal(amask.min(0), [5, 6, 7, 8])
-        self.assertTrue(amask.max(1)[0].mask)
-        self.assertTrue(amask.min(1)[0].mask)
-
-    def test_ndarray_mask(self):
-        # Check that the mask of the result is a ndarray (not a MaskedArray...)
-        a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
-        test = np.sqrt(a)
-        control = masked_array([-1, 0, 1, np.sqrt(2), -1],
-                               mask=[1, 0, 0, 0, 1])
-        assert_equal(test, control)
-        assert_equal(test.mask, control.mask)
-        self.assertTrue(not isinstance(test.mask, MaskedArray))
-
-    def test_treatment_of_NotImplemented(self):
-        # Check any NotImplemented returned by umath. is passed on
-        a = masked_array([1., 2.], mask=[1, 0])
-        # basic tests for _MaskedBinaryOperation
-        assert_(a.__mul__('abc') is NotImplemented)
-        assert_(multiply.outer(a, 'abc') is NotImplemented)
-        # and for _DomainedBinaryOperation
-        assert_(a.__div__('abc') is NotImplemented)
-
-        # also check explicitly that rmul of another class can be accessed
-        class MyClass(str):
-            def __mul__(self, other):
-                return "My mul"
-
-            def __rmul__(self, other):
-                return "My rmul"
-
-        me = MyClass()
-        assert_(me * a == "My mul")
-        assert_(a * me == "My rmul")
-
-
-#------------------------------------------------------------------------------
-class TestMaskedArrayInPlaceArithmetics(TestCase):
-    # Test MaskedArray Arithmetics
-
-    def setUp(self):
-        x = arange(10)
-        y = arange(10)
-        xm = arange(10)
-        xm[2] = masked
-        self.intdata = (x, y, xm)
-        self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
-
-    def test_inplace_addition_scalar(self):
-        # Test of inplace additions
-        (x, y, xm) = self.intdata
-        xm[2] = masked
-        x += 1
-        assert_equal(x, y + 1)
-        xm += 1
-        assert_equal(xm, y + 1)
-        #
-        (x, _, xm) = self.floatdata
-        id1 = x.data.ctypes._data
-        x += 1.
-        assert_(id1 == x.data.ctypes._data)
-        assert_equal(x, y + 1.)
-
-    def test_inplace_addition_array(self):
-        # Test of inplace additions
-        (x, y, xm) = self.intdata
-        m = xm.mask
-        a = arange(10, dtype=np.int16)
-        a[-1] = masked
-        x += a
-        xm += a
-        assert_equal(x, y + a)
-        assert_equal(xm, y + a)
-        assert_equal(xm.mask, mask_or(m, a.mask))
-
-    def test_inplace_subtraction_scalar(self):
-        # Test of inplace subtractions
-        (x, y, xm) = self.intdata
-        x -= 1
-        assert_equal(x, y - 1)
-        xm -= 1
-        assert_equal(xm, y - 1)
-
-    def test_inplace_subtraction_array(self):
-        # Test of inplace subtractions
-        (x, y, xm) = self.floatdata
-        m = xm.mask
-        a = arange(10, dtype=float)
-        a[-1] = masked
-        x -= a
-        xm -= a
-        assert_equal(x, y - a)
-        assert_equal(xm, y - a)
-        assert_equal(xm.mask, mask_or(m, a.mask))
-
-    def test_inplace_multiplication_scalar(self):
-        # Test of inplace multiplication
-        (x, y, xm) = self.floatdata
-        x *= 2.0
-        assert_equal(x, y * 2)
-        xm *= 2.0
-        assert_equal(xm, y * 2)
-
-    def test_inplace_multiplication_array(self):
-        # Test of inplace multiplication
-        (x, y, xm) = self.floatdata
-        m = xm.mask
-        a = arange(10, dtype=float)
-        a[-1] = masked
-        x *= a
-        xm *= a
-        assert_equal(x, y * a)
-        assert_equal(xm, y * a)
-        assert_equal(xm.mask, mask_or(m, a.mask))
-
-    def test_inplace_division_scalar_int(self):
-        # Test of inplace division
-        (x, y, xm) = self.intdata
-        x = arange(10) * 2
-        xm = arange(10) * 2
-        xm[2] = masked
-        x //= 2
-        assert_equal(x, y)
-        xm //= 2
-        assert_equal(xm, y)
-
-    def test_inplace_division_scalar_float(self):
-        # Test of inplace division
-        (x, y, xm) = self.floatdata
-        x /= 2.0
-        assert_equal(x, y / 2.0)
-        xm /= arange(10)
-        assert_equal(xm, ones((10,)))
-
-    def test_inplace_division_array_float(self):
-        # Test of inplace division
-        (x, y, xm) = self.floatdata
-        m = xm.mask
-        a = arange(10, dtype=float)
-        a[-1] = masked
-        x /= a
-        xm /= a
-        assert_equal(x, y / a)
-        assert_equal(xm, y / a)
-        assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
-
-    def test_inplace_division_misc(self):
-        #
-        x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
-        y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
-        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
-        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
-        xm = masked_array(x, mask=m1)
-        ym = masked_array(y, mask=m2)
-        #
-        z = xm / ym
-        assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
-        assert_equal(z._data,
-                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
-        #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
-        #
-        xm = xm.copy()
-        xm /= ym
-        assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
-        assert_equal(z._data,
-                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
-        #assert_equal(xm._data,
-        #             [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
-
-    def test_datafriendly_add(self):
-        # Test keeping data w/ (inplace) addition
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        # Test add w/ scalar
-        xx = x + 1
-        assert_equal(xx.data, [2, 3, 3])
-        assert_equal(xx.mask, [0, 0, 1])
-        # Test iadd w/ scalar
-        x += 1
-        assert_equal(x.data, [2, 3, 3])
-        assert_equal(x.mask, [0, 0, 1])
-        # Test add w/ array
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        xx = x + array([1, 2, 3], mask=[1, 0, 0])
-        assert_equal(xx.data, [1, 4, 3])
-        assert_equal(xx.mask, [1, 0, 1])
-        # Test iadd w/ array
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        x += array([1, 2, 3], mask=[1, 0, 0])
-        assert_equal(x.data, [1, 4, 3])
-        assert_equal(x.mask, [1, 0, 1])
-
-    def test_datafriendly_sub(self):
-        # Test keeping data w/ (inplace) subtraction
-        # Test sub w/ scalar
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        xx = x - 1
-        assert_equal(xx.data, [0, 1, 3])
-        assert_equal(xx.mask, [0, 0, 1])
-        # Test isub w/ scalar
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        x -= 1
-        assert_equal(x.data, [0, 1, 3])
-        assert_equal(x.mask, [0, 0, 1])
-        # Test sub w/ array
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        xx = x - array([1, 2, 3], mask=[1, 0, 0])
-        assert_equal(xx.data, [1, 0, 3])
-        assert_equal(xx.mask, [1, 0, 1])
-        # Test isub w/ array
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        x -= array([1, 2, 3], mask=[1, 0, 0])
-        assert_equal(x.data, [1, 0, 3])
-        assert_equal(x.mask, [1, 0, 1])
-
-    def test_datafriendly_mul(self):
-        # Test keeping data w/ (inplace) multiplication
-        # Test mul w/ scalar
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        xx = x * 2
-        assert_equal(xx.data, [2, 4, 3])
-        assert_equal(xx.mask, [0, 0, 1])
-        # Test imul w/ scalar
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        x *= 2
-        assert_equal(x.data, [2, 4, 3])
-        assert_equal(x.mask, [0, 0, 1])
-        # Test mul w/ array
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        xx = x * array([10, 20, 30], mask=[1, 0, 0])
-        assert_equal(xx.data, [1, 40, 3])
-        assert_equal(xx.mask, [1, 0, 1])
-        # Test imul w/ array
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        x *= array([10, 20, 30], mask=[1, 0, 0])
-        assert_equal(x.data, [1, 40, 3])
-        assert_equal(x.mask, [1, 0, 1])
-
-    def test_datafriendly_div(self):
-        # Test keeping data w/ (inplace) division
-        # Test div on scalar
-        x = array([1, 2, 3], mask=[0, 0, 1])
-        xx = x / 2.
-        assert_equal(xx.data, [1 / 2., 2 / 2., 3])
-        assert_equal(xx.mask, [0, 0, 1])
-        # Test idiv on scalar
-        x = array([1., 2., 3.], mask=[0, 0, 1])
-        x /= 2.
-        assert_equal(x.data, [1 / 2., 2 / 2., 3])
-        assert_equal(x.mask, [0, 0, 1])
-        # Test div on array
-        x = array([1., 2., 3.], mask=[0, 0, 1])
-        xx = x / array([10., 20., 30.], mask=[1, 0, 0])
-        assert_equal(xx.data, [1., 2. / 20., 3.])
-        assert_equal(xx.mask, [1, 0, 1])
-        # Test idiv on array
-        x = array([1., 2., 3.], mask=[0, 0, 1])
-        x /= array([10., 20., 30.], mask=[1, 0, 0])
-        assert_equal(x.data, [1., 2 / 20., 3.])
-        assert_equal(x.mask, [1, 0, 1])
-
-    def test_datafriendly_pow(self):
-        # Test keeping data w/ (inplace) power
-        # Test pow on scalar
-        x = array([1., 2., 3.], mask=[0, 0, 1])
-        xx = x ** 2.5
-        assert_equal(xx.data, [1., 2. ** 2.5, 3.])
-        assert_equal(xx.mask, [0, 0, 1])
-        # Test ipow on scalar
-        x **= 2.5
-        assert_equal(x.data, [1., 2. ** 2.5, 3])
-        assert_equal(x.mask, [0, 0, 1])
-
-    def test_datafriendly_add_arrays(self):
-        a = array([[1, 1], [3, 3]])
-        b = array([1, 1], mask=[0, 0])
-        a += b
-        assert_equal(a, [[2, 2], [4, 4]])
-        if a.mask is not nomask:
-            assert_equal(a.mask, [[0, 0], [0, 0]])
-        #
-        a = array([[1, 1], [3, 3]])
-        b = array([1, 1], mask=[0, 1])
-        a += b
-        assert_equal(a, [[2, 2], [4, 4]])
-        assert_equal(a.mask, [[0, 1], [0, 1]])
-
-    def test_datafriendly_sub_arrays(self):
-        a = array([[1, 1], [3, 3]])
-        b = array([1, 1], mask=[0, 0])
-        a -= b
-        assert_equal(a, [[0, 0], [2, 2]])
-        if a.mask is not nomask:
-            assert_equal(a.mask, [[0, 0], [0, 0]])
-        #
-        a = array([[1, 1], [3, 3]])
-        b = array([1, 1], mask=[0, 1])
-        a -= b
-        assert_equal(a, [[0, 0], [2, 2]])
-        assert_equal(a.mask, [[0, 1], [0, 1]])
-
-    def test_datafriendly_mul_arrays(self):
-        a = array([[1, 1], [3, 3]])
-        b = array([1, 1], mask=[0, 0])
-        a *= b
-        assert_equal(a, [[1, 1], [3, 3]])
-        if a.mask is not nomask:
-            assert_equal(a.mask, [[0, 0], [0, 0]])
-        #
-        a = array([[1, 1], [3, 3]])
-        b = array([1, 1], mask=[0, 1])
-        a *= b
-        assert_equal(a, [[1, 1], [3, 3]])
-        assert_equal(a.mask, [[0, 1], [0, 1]])
-
-
-#------------------------------------------------------------------------------
-class TestMaskedArrayMethods(TestCase):
-    # Test class for miscellaneous MaskedArrays methods.
-    def setUp(self):
-        # Base data definition.
-        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
-                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
-                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
-                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
-                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
-                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
-        X = x.reshape(6, 6)
-        XX = x.reshape(3, 2, 2, 3)
-
-        m = np.array([0, 1, 0, 1, 0, 0,
-                      1, 0, 1, 1, 0, 1,
-                      0, 0, 0, 1, 0, 1,
-                      0, 0, 0, 1, 1, 1,
-                      1, 0, 0, 1, 0, 0,
-                      0, 0, 1, 0, 1, 0])
-        mx = array(data=x, mask=m)
-        mX = array(data=X, mask=m.reshape(X.shape))
-        mXX = array(data=XX, mask=m.reshape(XX.shape))
-
-        m2 = np.array([1, 1, 0, 1, 0, 0,
-                       1, 1, 1, 1, 0, 1,
-                       0, 0, 1, 1, 0, 1,
-                       0, 0, 0, 1, 1, 1,
-                       1, 0, 0, 1, 1, 0,
-                       0, 0, 1, 0, 1, 1])
-        m2x = array(data=x, mask=m2)
-        m2X = array(data=X, mask=m2.reshape(X.shape))
-        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
-        self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
-
-    def test_generic_methods(self):
-        # Tests some MaskedArray methods.
-        a = array([1, 3, 2])
-        assert_equal(a.any(), a._data.any())
-        assert_equal(a.all(), a._data.all())
-        assert_equal(a.argmax(), a._data.argmax())
-        assert_equal(a.argmin(), a._data.argmin())
-        assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
-        assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
-        assert_equal(a.conj(), a._data.conj())
-        assert_equal(a.conjugate(), a._data.conjugate())
-        #
-        m = array([[1, 2], [3, 4]])
-        assert_equal(m.diagonal(), m._data.diagonal())
-        assert_equal(a.sum(), a._data.sum())
-        assert_equal(a.take([1, 2]), a._data.take([1, 2]))
-        assert_equal(m.transpose(), m._data.transpose())
-
-    def test_allclose(self):
-        # Tests allclose on arrays
-        a = np.random.rand(10)
-        b = a + np.random.rand(10) * 1e-8
-        self.assertTrue(allclose(a, b))
-        # Test allclose w/ infs
-        a[0] = np.inf
-        self.assertTrue(not allclose(a, b))
-        b[0] = np.inf
-        self.assertTrue(allclose(a, b))
-        # Test all close w/ masked
-        a = masked_array(a)
-        a[-1] = masked
-        self.assertTrue(allclose(a, b, masked_equal=True))
-        self.assertTrue(not allclose(a, b, masked_equal=False))
-        # Test comparison w/ scalar
-        a *= 1e-8
-        a[0] = 0
-        self.assertTrue(allclose(a, 0, masked_equal=True))
-
-        # Test that the function works for MIN_INT integer typed arrays
-        a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
-        self.assertTrue(allclose(a, a))
-
-    def test_allany(self):
-        # Checks the any/all methods/functions.
-        x = np.array([[0.13, 0.26, 0.90],
-                      [0.28, 0.33, 0.63],
-                      [0.31, 0.87, 0.70]])
-        m = np.array([[True, False, False],
-                      [False, False, False],
-                      [True, True, False]], dtype=np.bool_)
-        mx = masked_array(x, mask=m)
-        mxbig = (mx > 0.5)
-        mxsmall = (mx < 0.5)
-        #
-        self.assertFalse(mxbig.all())
-        self.assertTrue(mxbig.any())
-        assert_equal(mxbig.all(0), [False, False, True])
-        assert_equal(mxbig.all(1), [False, False, True])
-        assert_equal(mxbig.any(0), [False, False, True])
-        assert_equal(mxbig.any(1), [True, True, True])
-        #
-        self.assertFalse(mxsmall.all())
-        self.assertTrue(mxsmall.any())
-        assert_equal(mxsmall.all(0), [True, True, False])
-        assert_equal(mxsmall.all(1), [False, False, False])
-        assert_equal(mxsmall.any(0), [True, True, False])
-        assert_equal(mxsmall.any(1), [True, True, False])
-
-    def test_allany_onmatrices(self):
-        x = np.array([[0.13, 0.26, 0.90],
-                      [0.28, 0.33, 0.63],
-                      [0.31, 0.87, 0.70]])
-        X = np.matrix(x)
-        m = np.array([[True, False, False],
-                      [False, False, False],
-                      [True, True, False]], dtype=np.bool_)
-        mX = masked_array(X, mask=m)
-        mXbig = (mX > 0.5)
-        mXsmall = (mX < 0.5)
-        #
-        self.assertFalse(mXbig.all())
-        self.assertTrue(mXbig.any())
-        assert_equal(mXbig.all(0), np.matrix([False, False, True]))
-        assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
-        assert_equal(mXbig.any(0), np.matrix([False, False, True]))
-        assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
-        #
-        self.assertFalse(mXsmall.all())
-        self.assertTrue(mXsmall.any())
-        assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
-        assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
-        assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
-        assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
-
-    def test_allany_oddities(self):
-        # Some fun with all and any
-        store = empty((), dtype=bool)
-        full = array([1, 2, 3], mask=True)
-        #
-        self.assertTrue(full.all() is masked)
-        full.all(out=store)
-        self.assertTrue(store)
-        self.assertTrue(store._mask, True)
-        self.assertTrue(store is not masked)
-        #
-        store = empty((), dtype=bool)
-        self.assertTrue(full.any() is masked)
-        full.any(out=store)
-        self.assertTrue(not store)
-        self.assertTrue(store._mask, True)
-        self.assertTrue(store is not masked)
-
-    def test_argmax_argmin(self):
-        # Tests argmin & argmax on MaskedArrays.
-        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
-        #
-        assert_equal(mx.argmin(), 35)
-        assert_equal(mX.argmin(), 35)
-        assert_equal(m2x.argmin(), 4)
-        assert_equal(m2X.argmin(), 4)
-        assert_equal(mx.argmax(), 28)
-        assert_equal(mX.argmax(), 28)
-        assert_equal(m2x.argmax(), 31)
-        assert_equal(m2X.argmax(), 31)
-        #
-        assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
-        assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
-        assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
-        assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
-        #
-        assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
-        assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
-        assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
-        assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
-
-    def test_clip(self):
-        # Tests clip on MaskedArrays.
-        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
-                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
-                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
-                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
-                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
-                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
-        m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
-                      0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
-                      1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
-        mx = array(x, mask=m)
-        clipped = mx.clip(2, 8)
-        assert_equal(clipped.mask, mx.mask)
-        assert_equal(clipped._data, x.clip(2, 8))
-        assert_equal(clipped._data, mx._data.clip(2, 8))
-
-    def test_compress(self):
-        # test compress
-        a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
-        condition = (a > 1.5) & (a < 3.5)
-        assert_equal(a.compress(condition), [2., 3.])
-        #
-        a[[2, 3]] = masked
-        b = a.compress(condition)
-        assert_equal(b._data, [2., 3.])
-        assert_equal(b._mask, [0, 1])
-        assert_equal(b.fill_value, 9999)
-        assert_equal(b, a[condition])
-        #
-        condition = (a < 4.)
-        b = a.compress(condition)
-        assert_equal(b._data, [1., 2., 3.])
-        assert_equal(b._mask, [0, 0, 1])
-        assert_equal(b.fill_value, 9999)
-        assert_equal(b, a[condition])
-        #
-        a = masked_array([[10, 20, 30], [40, 50, 60]],
-                         mask=[[0, 0, 1], [1, 0, 0]])
-        b = a.compress(a.ravel() >= 22)
-        assert_equal(b._data, [30, 40, 50, 60])
-        assert_equal(b._mask, [1, 1, 0, 0])
-        #
-        x = np.array([3, 1, 2])
-        b = a.compress(x >= 2, axis=1)
-        assert_equal(b._data, [[10, 30], [40, 60]])
-        assert_equal(b._mask, [[0, 1], [1, 0]])
-
-    def test_compressed(self):
-        # Tests compressed
-        a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
-        b = a.compressed()
-        assert_equal(b, a)
-        a[0] = masked
-        b = a.compressed()
-        assert_equal(b, [2, 3, 4])
-        #
-        a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
-        b = a.compressed()
-        assert_equal(b, a)
-        self.assertTrue(isinstance(b, np.matrix))
-        a[0, 0] = masked
-        b = a.compressed()
-        assert_equal(b, [[2, 3, 4]])
-
-    def test_empty(self):
-        # Tests empty/like
-        datatype = [('a', int), ('b', float), ('c', '|S8')]
-        a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
-                         dtype=datatype)
-        assert_equal(len(a.fill_value.item()), len(datatype))
-        #
-        b = empty_like(a)
-        assert_equal(b.shape, a.shape)
-        assert_equal(b.fill_value, a.fill_value)
-        #
-        b = empty(len(a), dtype=datatype)
-        assert_equal(b.shape, a.shape)
-        assert_equal(b.fill_value, a.fill_value)
-
-    def test_put(self):
-        # Tests put.
-        d = arange(5)
-        n = [0, 0, 0, 1, 1]
-        m = make_mask(n)
-        x = array(d, mask=m)
-        self.assertTrue(x[3] is masked)
-        self.assertTrue(x[4] is masked)
-        x[[1, 4]] = [10, 40]
-        #self.assertTrue(x.mask is not m)
-        self.assertTrue(x[3] is masked)
-        self.assertTrue(x[4] is not masked)
-        assert_equal(x, [0, 10, 2, -1, 40])
-        #
-        x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
-        i = [0, 2, 4, 6]
-        x.put(i, [6, 4, 2, 0])
-        assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
-        assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
-        x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
-        assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
-        assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
-        #
-        x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
-        put(x, i, [6, 4, 2, 0])
-        assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
-        assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
-        put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
-        assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
-        assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
-
-    def test_put_hardmask(self):
-        # Tests put on hardmask
-        d = arange(5)
-        n = [0, 0, 0, 1, 1]
-        m = make_mask(n)
-        xh = array(d + 1, mask=m, hard_mask=True, copy=True)
-        xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
-        assert_equal(xh._data, [3, 4, 2, 4, 5])
-
-    def test_putmask(self):
-        x = arange(6) + 1
-        mx = array(x, mask=[0, 0, 0, 1, 1, 1])
-        mask = [0, 0, 1, 0, 0, 1]
-        # w/o mask, w/o masked values
-        xx = x.copy()
-        putmask(xx, mask, 99)
-        assert_equal(xx, [1, 2, 99, 4, 5, 99])
-        # w/ mask, w/o masked values
-        mxx = mx.copy()
-        putmask(mxx, mask, 99)
-        assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
-        assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
-        # w/o mask, w/ masked values
-        values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
-        xx = x.copy()
-        putmask(xx, mask, values)
-        assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
-        assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
-        # w/ mask, w/ masked values
-        mxx = mx.copy()
-        putmask(mxx, mask, values)
-        assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
-        assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
-        # w/ mask, w/ masked values + hardmask
-        mxx = mx.copy()
-        mxx.harden_mask()
-        putmask(mxx, mask, values)
-        assert_equal(mxx, [1, 2, 30, 4, 5, 60])
-
-    def test_ravel(self):
-        # Tests ravel
-        a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
-        aravel = a.ravel()
-        assert_equal(aravel._mask.shape, aravel.shape)
-        a = array([0, 0], mask=[1, 1])
-        aravel = a.ravel()
-        assert_equal(aravel._mask.shape, a.shape)
-        a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
-        aravel = a.ravel()
-        assert_equal(aravel.shape, (1, 5))
-        assert_equal(aravel._mask.shape, a.shape)
-        # Checks that small_mask is preserved
-        a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
-        assert_equal(a.ravel()._mask, [0, 0, 0, 0])
-        # Test that the fill_value is preserved
-        a.fill_value = -99
-        a.shape = (2, 2)
-        ar = a.ravel()
-        assert_equal(ar._mask, [0, 0, 0, 0])
-        assert_equal(ar._data, [1, 2, 3, 4])
-        assert_equal(ar.fill_value, -99)
-
-    def test_reshape(self):
-        # Tests reshape
-        x = arange(4)
-        x[0] = masked
-        y = x.reshape(2, 2)
-        assert_equal(y.shape, (2, 2,))
-        assert_equal(y._mask.shape, (2, 2,))
-        assert_equal(x.shape, (4,))
-        assert_equal(x._mask.shape, (4,))
-
-    def test_sort(self):
-        # Test sort
-        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
-        #
-        sortedx = sort(x)
-        assert_equal(sortedx._data, [1, 2, 3, 4])
-        assert_equal(sortedx._mask, [0, 0, 0, 1])
-        #
-        sortedx = sort(x, endwith=False)
-        assert_equal(sortedx._data, [4, 1, 2, 3])
-        assert_equal(sortedx._mask, [1, 0, 0, 0])
-        #
-        x.sort()
-        assert_equal(x._data, [1, 2, 3, 4])
-        assert_equal(x._mask, [0, 0, 0, 1])
-        #
-        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
-        x.sort(endwith=False)
-        assert_equal(x._data, [4, 1, 2, 3])
-        assert_equal(x._mask, [1, 0, 0, 0])
-        #
-        x = [1, 4, 2, 3]
-        sortedx = sort(x)
-        self.assertTrue(not isinstance(sorted, MaskedArray))
-        #
-        x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
-        sortedx = sort(x, endwith=False)
-        assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
-        x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
-        sortedx = sort(x, endwith=False)
-        assert_equal(sortedx._data, [1, 2, -2, -1, 0])
-        assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
-
-    def test_sort_2d(self):
-        # Check sort of 2D array.
-        # 2D array w/o mask
-        a = masked_array([[8, 4, 1], [2, 0, 9]])
-        a.sort(0)
-        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
-        a = masked_array([[8, 4, 1], [2, 0, 9]])
-        a.sort(1)
-        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
-        # 2D array w/mask
-        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
-        a.sort(0)
-        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
-        assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
-        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
-        a.sort(1)
-        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
-        assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
-        # 3D
-        a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
-                          [[1, 2, 3], [7, 8, 9], [4, 5, 6]],
-                          [[7, 8, 9], [1, 2, 3], [4, 5, 6]],
-                          [[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
-        a[a % 4 == 0] = masked
-        am = a.copy()
-        an = a.filled(99)
-        am.sort(0)
-        an.sort(0)
-        assert_equal(am, an)
-        am = a.copy()
-        an = a.filled(99)
-        am.sort(1)
-        an.sort(1)
-        assert_equal(am, an)
-        am = a.copy()
-        an = a.filled(99)
-        am.sort(2)
-        an.sort(2)
-        assert_equal(am, an)
-
-    def test_sort_flexible(self):
-        # Test sort on flexible dtype.
-        a = array(
-            data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
-            mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
-            dtype=[('A', int), ('B', int)])
-        #
-        test = sort(a)
-        b = array(
-            data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
-            mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
-            dtype=[('A', int), ('B', int)])
-        assert_equal(test, b)
-        assert_equal(test.mask, b.mask)
-        #
-        test = sort(a, endwith=False)
-        b = array(
-            data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
-            mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
-            dtype=[('A', int), ('B', int)])
-        assert_equal(test, b)
-        assert_equal(test.mask, b.mask)
-
-    def test_argsort(self):
-        # Test argsort
-        a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
-        assert_equal(np.argsort(a), argsort(a))
-
-    def test_squeeze(self):
-        # Check squeeze
-        data = masked_array([[1, 2, 3]])
-        assert_equal(data.squeeze(), [1, 2, 3])
-        data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
-        assert_equal(data.squeeze(), [1, 2, 3])
-        assert_equal(data.squeeze()._mask, [1, 1, 1])
-        data = masked_array([[1]], mask=True)
-        self.assertTrue(data.squeeze() is masked)
-
-    def test_swapaxes(self):
-        # Tests swapaxes on MaskedArrays.
-        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
-                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
-                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
-                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
-                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
-                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
-        m = np.array([0, 1, 0, 1, 0, 0,
-                      1, 0, 1, 1, 0, 1,
-                      0, 0, 0, 1, 0, 1,
-                      0, 0, 0, 1, 1, 1,
-                      1, 0, 0, 1, 0, 0,
-                      0, 0, 1, 0, 1, 0])
-        mX = array(x, mask=m).reshape(6, 6)
-        mXX = mX.reshape(3, 2, 2, 3)
-        #
-        mXswapped = mX.swapaxes(0, 1)
-        assert_equal(mXswapped[-1], mX[:, -1])
-
-        mXXswapped = mXX.swapaxes(0, 2)
-        assert_equal(mXXswapped.shape, (2, 2, 3, 3))
-
-    def test_take(self):
-        # Tests take
-        x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
-        assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
-        assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
-        assert_equal(x.take([[0, 1], [0, 1]]),
-                     masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
-        #
-        x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
-        assert_equal(x.take([0, 2], axis=1),
-                     array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
-        assert_equal(take(x, [0, 2], axis=1),
-                     array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
-
-    def test_take_masked_indices(self):
-        # Test take w/ masked indices
-        a = np.array((40, 18, 37, 9, 22))
-        indices = np.arange(3)[None,:] + np.arange(5)[:, None]
-        mindices = array(indices, mask=(indices >= len(a)))
-        # No mask
-        test = take(a, mindices, mode='clip')
-        ctrl = array([[40, 18, 37],
-                      [18, 37, 9],
-                      [37, 9, 22],
-                      [9, 22, 22],
-                      [22, 22, 22]])
-        assert_equal(test, ctrl)
-        # Masked indices
-        test = take(a, mindices)
-        ctrl = array([[40, 18, 37],
-                      [18, 37, 9],
-                      [37, 9, 22],
-                      [9, 22, 40],
-                      [22, 40, 40]])
-        ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
-        assert_equal(test, ctrl)
-        assert_equal(test.mask, ctrl.mask)
-        # Masked input + masked indices
-        a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
-        test = take(a, mindices)
-        ctrl[0, 1] = ctrl[1, 0] = masked
-        assert_equal(test, ctrl)
-        assert_equal(test.mask, ctrl.mask)
-
-    def test_tolist(self):
-        # Tests to list
-        # ... on 1D
-        x = array(np.arange(12))
-        x[[1, -2]] = masked
-        xlist = x.tolist()
-        self.assertTrue(xlist[1] is None)
-        self.assertTrue(xlist[-2] is None)
-        # ... on 2D
-        x.shape = (3, 4)
-        xlist = x.tolist()
-        ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
-        assert_equal(xlist[0], [0, None, 2, 3])
-        assert_equal(xlist[1], [4, 5, 6, 7])
-        assert_equal(xlist[2], [8, 9, None, 11])
-        assert_equal(xlist, ctrl)
-        # ... on structured array w/ masked records
-        x = array(list(zip([1, 2, 3],
-                           [1.1, 2.2, 3.3],
-                           ['one', 'two', 'thr'])),
-                  dtype=[('a', int), ('b', float), ('c', '|S8')])
-        x[-1] = masked
-        assert_equal(x.tolist(),
-                     [(1, 1.1, asbytes('one')),
-                      (2, 2.2, asbytes('two')),
-                      (None, None, None)])
-        # ... on structured array w/ masked fields
-        a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
-                  dtype=[('a', int), ('b', int)])
-        test = a.tolist()
-        assert_equal(test, [[1, None], [3, 4]])
-        # ... on mvoid
-        a = a[0]
-        test = a.tolist()
-        assert_equal(test, [1, None])
-
-    def test_tolist_specialcase(self):
-        # Test mvoid.tolist: make sure we return a standard Python object
-        a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
-        # w/o mask: each entry is a np.void whose elements are standard Python
-        for entry in a:
-            for item in entry.tolist():
-                assert_(not isinstance(item, np.generic))
-        # w/ mask: each entry is a ma.void whose elements should be
-        # standard Python
-        a.mask[0] = (0, 1)
-        for entry in a:
-            for item in entry.tolist():
-                assert_(not isinstance(item, np.generic))
-
-    def test_toflex(self):
-        # Test the conversion to records
-        data = arange(10)
-        record = data.toflex()
-        assert_equal(record['_data'], data._data)
-        assert_equal(record['_mask'], data._mask)
-        #
-        data[[0, 1, 2, -1]] = masked
-        record = data.toflex()
-        assert_equal(record['_data'], data._data)
-        assert_equal(record['_mask'], data._mask)
-        #
-        ndtype = [('i', int), ('s', '|S3'), ('f', float)]
-        data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
-                                                     'ABCDEFGHIJKLM',
-                                                     np.random.rand(10))],
-                     dtype=ndtype)
-        data[[0, 1, 2, -1]] = masked
-        record = data.toflex()
-        assert_equal(record['_data'], data._data)
-        assert_equal(record['_mask'], data._mask)
-        #
-        ndtype = np.dtype("int, (2,3)float, float")
-        data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
-                                                       np.random.rand(10),
-                                                       np.random.rand(10))],
-                     dtype=ndtype)
-        data[[0, 1, 2, -1]] = masked
-        record = data.toflex()
-        assert_equal_records(record['_data'], data._data)
-        assert_equal_records(record['_mask'], data._mask)
-
-    def test_fromflex(self):
-        # Test the reconstruction of a masked_array from a record
-        a = array([1, 2, 3])
-        test = fromflex(a.toflex())
-        assert_equal(test, a)
-        assert_equal(test.mask, a.mask)
-        #
-        a = array([1, 2, 3], mask=[0, 0, 1])
-        test = fromflex(a.toflex())
-        assert_equal(test, a)
-        assert_equal(test.mask, a.mask)
-        #
-        a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
-                  dtype=[('A', int), ('B', float)])
-        test = fromflex(a.toflex())
-        assert_equal(test, a)
-        assert_equal(test.data, a.data)
-
-    def test_arraymethod(self):
-        # Test a _arraymethod w/ n argument
-        marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
-        control = masked_array([[1], [2], [3], [4], [5]],
-                               mask=[0, 0, 1, 0, 0])
-        assert_equal(marray.T, control)
-        assert_equal(marray.transpose(), control)
-        #
-        assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
-
-
-#------------------------------------------------------------------------------
-class TestMaskedArrayMathMethods(TestCase):
-
-    def setUp(self):
-        # Base data definition.
-        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
-                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
-                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
-                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
-                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
-                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
-        X = x.reshape(6, 6)
-        XX = x.reshape(3, 2, 2, 3)
-
-        m = np.array([0, 1, 0, 1, 0, 0,
-                      1, 0, 1, 1, 0, 1,
-                      0, 0, 0, 1, 0, 1,
-                      0, 0, 0, 1, 1, 1,
-                      1, 0, 0, 1, 0, 0,
-                      0, 0, 1, 0, 1, 0])
-        mx = array(data=x, mask=m)
-        mX = array(data=X, mask=m.reshape(X.shape))
-        mXX = array(data=XX, mask=m.reshape(XX.shape))
-
-        m2 = np.array([1, 1, 0, 1, 0, 0,
-                       1, 1, 1, 1, 0, 1,
-                       0, 0, 1, 1, 0, 1,
-                       0, 0, 0, 1, 1, 1,
-                       1, 0, 0, 1, 1, 0,
-                       0, 0, 1, 0, 1, 1])
-        m2x = array(data=x, mask=m2)
-        m2X = array(data=X, mask=m2.reshape(X.shape))
-        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
-        self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
-
-    def test_cumsumprod(self):
-        # Tests cumsum & cumprod on MaskedArrays.
-        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
-        mXcp = mX.cumsum(0)
-        assert_equal(mXcp._data, mX.filled(0).cumsum(0))
-        mXcp = mX.cumsum(1)
-        assert_equal(mXcp._data, mX.filled(0).cumsum(1))
-        #
-        mXcp = mX.cumprod(0)
-        assert_equal(mXcp._data, mX.filled(1).cumprod(0))
-        mXcp = mX.cumprod(1)
-        assert_equal(mXcp._data, mX.filled(1).cumprod(1))
-
-    def test_cumsumprod_with_output(self):
-        # Tests cumsum/cumprod w/ output
-        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
-        xm[:, 0] = xm[0] = xm[-1, -1] = masked
-        #
-        for funcname in ('cumsum', 'cumprod'):
-            npfunc = getattr(np, funcname)
-            xmmeth = getattr(xm, funcname)
-
-            # A ndarray as explicit input
-            output = np.empty((3, 4), dtype=float)
-            output.fill(-9999)
-            result = npfunc(xm, axis=0, out=output)
-            # ... the result should be the given output
-            self.assertTrue(result is output)
-            assert_equal(result, xmmeth(axis=0, out=output))
-            #
-            output = empty((3, 4), dtype=int)
-            result = xmmeth(axis=0, out=output)
-            self.assertTrue(result is output)
-
-    def test_ptp(self):
-        # Tests ptp on MaskedArrays.
-        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
-        (n, m) = X.shape
-        assert_equal(mx.ptp(), mx.compressed().ptp())
-        rows = np.zeros(n, np.float)
-        cols = np.zeros(m, np.float)
-        for k in range(m):
-            cols[k] = mX[:, k].compressed().ptp()
-        for k in range(n):
-            rows[k] = mX[k].compressed().ptp()
-        assert_equal(mX.ptp(0), cols)
-        assert_equal(mX.ptp(1), rows)
-
-    def test_sum_object(self):
-        # Test sum on object dtype
-        a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
-        assert_equal(a.sum(), 5)
-        a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
-        assert_equal(a.sum(axis=0), [5, 7, 9])
-
-    def test_prod_object(self):
-        # Test prod on object dtype
-        a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
-        assert_equal(a.prod(), 2 * 3)
-        a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
-        assert_equal(a.prod(axis=0), [4, 10, 18])
-
-    def test_meananom_object(self):
-        # Test mean/anom on object dtype
-        a = masked_array([1, 2, 3], dtype=np.object)
-        assert_equal(a.mean(), 2)
-        assert_equal(a.anom(), [-1, 0, 1])
-
-    def test_trace(self):
-        # Tests trace on MaskedArrays.
-        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
-        mXdiag = mX.diagonal()
-        assert_equal(mX.trace(), mX.diagonal().compressed().sum())
-        assert_almost_equal(mX.trace(),
-                            X.trace() - sum(mXdiag.mask * X.diagonal(),
-                                            axis=0))
-
-    def test_varstd(self):
-        # Tests var & std on MaskedArrays.
-        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
-        assert_almost_equal(mX.var(axis=None), mX.compressed().var())
-        assert_almost_equal(mX.std(axis=None), mX.compressed().std())
-        assert_almost_equal(mX.std(axis=None, ddof=1),
-                            mX.compressed().std(ddof=1))
-        assert_almost_equal(mX.var(axis=None, ddof=1),
-                            mX.compressed().var(ddof=1))
-        assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
-        assert_equal(mX.var().shape, X.var().shape)
-        (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
-        assert_almost_equal(mX.var(axis=None, ddof=2),
-                            mX.compressed().var(ddof=2))
-        assert_almost_equal(mX.std(axis=None, ddof=2),
-                            mX.compressed().std(ddof=2))
-        for k in range(6):
-            assert_almost_equal(mXvar1[k], mX[k].compressed().var())
-            assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
-            assert_almost_equal(np.sqrt(mXvar0[k]),
-                                mX[:, k].compressed().std())
-
-    def test_varstd_specialcases(self):
-        # Test a special case for var
-        nout = np.array(-1, dtype=float)
-        mout = array(-1, dtype=float)
-        #
-        x = array(arange(10), mask=True)
-        for methodname in ('var', 'std'):
-            method = getattr(x, methodname)
-            self.assertTrue(method() is masked)
-            self.assertTrue(method(0) is masked)
-            self.assertTrue(method(-1) is masked)
-            # Using a masked array as explicit output
-            with warnings.catch_warnings():
-                warnings.simplefilter('ignore')
-                _ = method(out=mout)
-            self.assertTrue(mout is not masked)
-            assert_equal(mout.mask, True)
-            # Using a ndarray as explicit output
-            with warnings.catch_warnings():
-                warnings.simplefilter('ignore')
-                _ = method(out=nout)
-            self.assertTrue(np.isnan(nout))
-        #
-        x = array(arange(10), mask=True)
-        x[-1] = 9
-        for methodname in ('var', 'std'):
-            method = getattr(x, methodname)
-            self.assertTrue(method(ddof=1) is masked)
-            self.assertTrue(method(0, ddof=1) is masked)
-            self.assertTrue(method(-1, ddof=1) is masked)
-            # Using a masked array as explicit output
-            method(out=mout, ddof=1)
-            self.assertTrue(mout is not masked)
-            assert_equal(mout.mask, True)
-            # Using a ndarray as explicit output
-            method(out=nout, ddof=1)
-            self.assertTrue(np.isnan(nout))
-
-    def test_varstd_ddof(self):
-        a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
-        test = a.std(axis=0, ddof=0)
-        assert_equal(test.filled(0), [0, 0, 0])
-        assert_equal(test.mask, [0, 0, 1])
-        test = a.std(axis=0, ddof=1)
-        assert_equal(test.filled(0), [0, 0, 0])
-        assert_equal(test.mask, [0, 0, 1])
-        test = a.std(axis=0, ddof=2)
-        assert_equal(test.filled(0), [0, 0, 0])
-        assert_equal(test.mask, [1, 1, 1])
-
-    def test_diag(self):
-        # Test diag
-        x = arange(9).reshape((3, 3))
-        x[1, 1] = masked
-        out = np.diag(x)
-        assert_equal(out, [0, 4, 8])
-        out = diag(x)
-        assert_equal(out, [0, 4, 8])
-        assert_equal(out.mask, [0, 1, 0])
-        out = diag(out)
-        control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
-                        mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
-        assert_equal(out, control)
-
-    def test_axis_methods_nomask(self):
-        # Test the combination nomask & methods w/ axis
-        a = array([[1, 2, 3], [4, 5, 6]])
-        #
-        assert_equal(a.sum(0), [5, 7, 9])
-        assert_equal(a.sum(-1), [6, 15])
-        assert_equal(a.sum(1), [6, 15])
-        #
-        assert_equal(a.prod(0), [4, 10, 18])
-        assert_equal(a.prod(-1), [6, 120])
-        assert_equal(a.prod(1), [6, 120])
-        #
-        assert_equal(a.min(0), [1, 2, 3])
-        assert_equal(a.min(-1), [1, 4])
-        assert_equal(a.min(1), [1, 4])
-        #
-        assert_equal(a.max(0), [4, 5, 6])
-        assert_equal(a.max(-1), [3, 6])
-        assert_equal(a.max(1), [3, 6])
-
-
-#------------------------------------------------------------------------------
-class TestMaskedArrayMathMethodsComplex(TestCase):
-    # Test class for miscellaneous MaskedArrays methods.
-    def setUp(self):
-        # Base data definition.
-        x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
-                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
-                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
-                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
-                      7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
-                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
-        X = x.reshape(6, 6)
-        XX = x.reshape(3, 2, 2, 3)
-
-        m = np.array([0, 1, 0, 1, 0, 0,
-                      1, 0, 1, 1, 0, 1,
-                      0, 0, 0, 1, 0, 1,
-                      0, 0, 0, 1, 1, 1,
-                      1, 0, 0, 1, 0, 0,
-                      0, 0, 1, 0, 1, 0])
-        mx = array(data=x, mask=m)
-        mX = array(data=X, mask=m.reshape(X.shape))
-        mXX = array(data=XX, mask=m.reshape(XX.shape))
-
-        m2 = np.array([1, 1, 0, 1, 0, 0,
-                       1, 1, 1, 1, 0, 1,
-                       0, 0, 1, 1, 0, 1,
-                       0, 0, 0, 1, 1, 1,
-                       1, 0, 0, 1, 1, 0,
-                       0, 0, 1, 0, 1, 1])
-        m2x = array(data=x, mask=m2)
-        m2X = array(data=X, mask=m2.reshape(X.shape))
-        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
-        self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
-
-    def test_varstd(self):
-        # Tests var & std on MaskedArrays.
-        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
-        assert_almost_equal(mX.var(axis=None), mX.compressed().var())
-        assert_almost_equal(mX.std(axis=None), mX.compressed().std())
-        assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
-        assert_equal(mX.var().shape, X.var().shape)
-        (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
-        assert_almost_equal(mX.var(axis=None, ddof=2),
-                            mX.compressed().var(ddof=2))
-        assert_almost_equal(mX.std(axis=None, ddof=2),
-                            mX.compressed().std(ddof=2))
-        for k in range(6):
-            assert_almost_equal(mXvar1[k], mX[k].compressed().var())
-            assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
-            assert_almost_equal(np.sqrt(mXvar0[k]),
-                                mX[:, k].compressed().std())
-
-
-#------------------------------------------------------------------------------
-class TestMaskedArrayFunctions(TestCase):
-    # Test class for miscellaneous functions.
-
-    def setUp(self):
-        x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
-        y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
-        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
-        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
-        xm = masked_array(x, mask=m1)
-        ym = masked_array(y, mask=m2)
-        xm.set_fill_value(1e+20)
-        self.info = (xm, ym)
-
-    def test_masked_where_bool(self):
-        x = [1, 2]
-        y = masked_where(False, x)
-        assert_equal(y, [1, 2])
-        assert_equal(y[1], 2)
-
-    def test_masked_equal_wlist(self):
-        x = [1, 2, 3]
-        mx = masked_equal(x, 3)
-        assert_equal(mx, x)
-        assert_equal(mx._mask, [0, 0, 1])
-        mx = masked_not_equal(x, 3)
-        assert_equal(mx, x)
-        assert_equal(mx._mask, [1, 1, 0])
-
-    def test_masked_equal_fill_value(self):
-        x = [1, 2, 3]
-        mx = masked_equal(x, 3)
-        assert_equal(mx._mask, [0, 0, 1])
-        assert_equal(mx.fill_value, 3)
-
-    def test_masked_where_condition(self):
-        # Tests masking functions.
- x = array([1., 2., 3., 4., 5.]) - x[2] = masked - assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) - assert_equal(masked_where(greater_equal(x, 2), x), - masked_greater_equal(x, 2)) - assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) - assert_equal(masked_where(less_equal(x, 2), x), - masked_less_equal(x, 2)) - assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) - assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) - assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) - assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), - [99, 99, 3, 4, 5]) - - def test_masked_where_oddities(self): - # Tests some generic features. - atest = ones((10, 10, 10), dtype=float) - btest = zeros(atest.shape, MaskType) - ctest = masked_where(btest, atest) - assert_equal(atest, ctest) - - def test_masked_where_shape_constraint(self): - a = arange(10) - try: - test = masked_equal(1, a) - except IndexError: - pass - else: - raise AssertionError("Should have failed...") - test = masked_equal(a, 1) - assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) - - def test_masked_otherfunctions(self): - assert_equal(masked_inside(list(range(5)), 1, 3), - [0, 199, 199, 199, 4]) - assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]) - assert_equal(masked_inside(array(list(range(5)), - mask=[1, 0, 0, 0, 0]), 1, 3).mask, - [1, 1, 1, 1, 0]) - assert_equal(masked_outside(array(list(range(5)), - mask=[0, 1, 0, 0, 0]), 1, 3).mask, - [1, 1, 0, 0, 1]) - assert_equal(masked_equal(array(list(range(5)), - mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 0]) - assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], - mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 1]) - - def test_round(self): - a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890], - mask=[0, 1, 0, 0, 0]) - assert_equal(a.round(), [1., 2., 3., 5., 6.]) - assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7]) - assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679]) - b = empty_like(a) - a.round(out=b) - assert_equal(b, [1., 2., 3., 5., 6.]) - - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - - def test_round_with_output(self): - # Testing round with an explicit output - - xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) - xm[:, 0] = xm[0] = xm[-1, -1] = masked - - # A ndarray as explicit input - output = np.empty((3, 4), dtype=float) - output.fill(-9999) - result = np.round(xm, decimals=2, out=output) - # ... 
the result should be the given output - self.assertTrue(result is output) - assert_equal(result, xm.round(decimals=2, out=output)) - # - output = empty((3, 4), dtype=float) - result = xm.round(decimals=2, out=output) - self.assertTrue(result is output) - - def test_identity(self): - a = identity(5) - self.assertTrue(isinstance(a, MaskedArray)) - assert_equal(a, np.identity(5)) - - def test_power(self): - x = -1.1 - assert_almost_equal(power(x, 2.), 1.21) - self.assertTrue(power(x, masked) is masked) - x = array([-1.1, -1.1, 1.1, 1.1, 0.]) - b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) - y = power(x, b) - assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) - assert_equal(y._mask, [1, 0, 0, 0, 1]) - b.mask = nomask - y = power(x, b) - assert_equal(y._mask, [1, 0, 0, 0, 1]) - z = x ** b - assert_equal(z._mask, y._mask) - assert_almost_equal(z, y) - assert_almost_equal(z._data, y._data) - x **= b - assert_equal(x._mask, y._mask) - assert_almost_equal(x, y) - assert_almost_equal(x._data, y._data) - - def test_power_w_broadcasting(self): - # Test power w/ broadcasting - a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) - a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) - b1 = np.array([2, 4, 3]) - b2 = np.array([b1, b1]) - b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) - # - ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], - mask=[[1, 1, 0], [0, 1, 1]]) - # No broadcasting, base & exp w/ mask - test = a2m ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - # No broadcasting, base w/ mask, exp w/o mask - test = a2m ** b2 - assert_equal(test, ctrl) - assert_equal(test.mask, a2m.mask) - # No broadcasting, base w/o mask, exp w/ mask - test = a2 ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, b2m.mask) - # - ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], - mask=[[0, 1, 0], [0, 1, 0]]) - test = b1 ** b2m - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - test = b2m ** b1 - assert_equal(test, ctrl) - assert_equal(test.mask, ctrl.mask) - - def test_where(self): - # Test the where function - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - xm.set_fill_value(1e+20) - # - d = where(xm > 2, xm, -9) - assert_equal(d, [-9., -9., -9., -9., -9., 4., - -9., -9., 10., -9., -9., 3.]) - assert_equal(d._mask, xm._mask) - d = where(xm > 2, -9, ym) - assert_equal(d, [5., 0., 3., 2., -1., -9., - -9., -10., -9., 1., 0., -9.]) - assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) - d = where(xm > 2, xm, masked) - assert_equal(d, [-9., -9., -9., -9., -9., 4., - -9., -9., 10., -9., -9., 3.]) - tmp = xm._mask.copy() - tmp[(xm <= 2).filled(True)] = True - assert_equal(d._mask, tmp) - # - ixm = xm.astype(int) - d = where(ixm > 2, ixm, masked) - assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) - assert_equal(d.dtype, ixm.dtype) - - def test_where_with_masked_choice(self): - x = arange(10) - x[3] = masked - c = x >= 8 - # Set False to masked - z = where(c, x, masked) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is masked) - assert_(z[7] is masked) - assert_(z[8] is not masked) - assert_(z[9] is not masked) - assert_equal(x, z) - # Set True to masked - z = where(c, masked, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is 
not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - - def test_where_with_masked_condition(self): - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - c[0] = masked - z = where(c, x, -x) - assert_equal(z, [1., 2., 0., -4., -5]) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - # - x = arange(1, 6) - x[-1] = masked - y = arange(1, 6) * 10 - y[2] = masked - c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) - cm = c.filled(1) - z = where(c, x, y) - zm = where(cm, x, y) - assert_equal(z, zm) - assert_(getmask(zm) is nomask) - assert_equal(zm, [1, 2, 3, 40, 50]) - z = where(c, masked, 1) - assert_equal(z, [99, 99, 99, 1, 1]) - z = where(c, 1, masked) - assert_equal(z, [99, 1, 1, 99, 99]) - - def test_where_type(self): - # Test the type conservation with where - x = np.arange(4, dtype=np.int32) - y = np.arange(4, dtype=np.float32) * 2.2 - test = where(x > 1.5, y, x).dtype - control = np.find_common_type([np.int32, np.float32], []) - assert_equal(test, control) - - def test_choose(self): - # Test choose - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - chosen = choose([2, 3, 1, 0], choices) - assert_equal(chosen, array([20, 31, 12, 3])) - chosen = choose([2, 4, 1, 0], choices, mode='clip') - assert_equal(chosen, array([20, 31, 12, 3])) - chosen = choose([2, 4, 1, 0], choices, mode='wrap') - assert_equal(chosen, array([20, 1, 12, 3])) - # Check with some masked indices - indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) - chosen = choose(indices_, choices, mode='wrap') - assert_equal(chosen, array([99, 1, 12, 99])) - assert_equal(chosen.mask, [1, 0, 0, 1]) - # Check with some masked choices - choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], - [1, 0, 0, 0], [0, 0, 0, 0]]) - indices_ = [2, 3, 1, 0] - chosen = choose(indices_, choices, mode='wrap') - assert_equal(chosen, array([20, 31, 12, 3])) - assert_equal(chosen.mask, [1, 0, 0, 1]) - - def test_choose_with_out(self): - # Test choose with an explicit out keyword - choices = [[0, 1, 2, 3], [10, 11, 12, 13], - [20, 21, 22, 23], [30, 31, 32, 33]] - store = empty(4, dtype=int) - chosen = choose([2, 3, 1, 0], choices, out=store) - assert_equal(store, array([20, 31, 12, 3])) - self.assertTrue(store is chosen) - # Check with some masked indices + out - store = empty(4, dtype=int) - indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) - chosen = choose(indices_, choices, mode='wrap', out=store) - assert_equal(store, array([99, 31, 12, 99])) - assert_equal(store.mask, [1, 0, 0, 1]) - # Check with some masked choices + out in a ndarray!
- choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], - [1, 0, 0, 0], [0, 0, 0, 0]]) - indices_ = [2, 3, 1, 0] - store = empty(4, dtype=int).view(ndarray) - chosen = choose(indices_, choices, mode='wrap', out=store) - assert_equal(store, array([999999, 31, 12, 999999])) - - def test_reshape(self): - a = arange(10) - a[0] = masked - # Try the default - b = a.reshape((5, 2)) - assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['C']) - # Try w/ arguments as list instead of tuple - b = a.reshape(5, 2) - assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['C']) - # Try w/ order - b = a.reshape((5, 2), order='F') - assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['F']) - # Try w/ order - b = a.reshape(5, 2, order='F') - assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['F']) - # - c = np.reshape(a, (2, 5)) - self.assertTrue(isinstance(c, MaskedArray)) - assert_equal(c.shape, (2, 5)) - self.assertTrue(c[0, 0] is masked) - self.assertTrue(c.flags['C']) - - def test_make_mask_descr(self): - # Test make_mask_descr - # Flexible - ntype = [('a', np.float), ('b', np.float)] - test = make_mask_descr(ntype) - assert_equal(test, [('a', np.bool), ('b', np.bool)]) - # Standard w/ shape - ntype = (np.float, 2) - test = make_mask_descr(ntype) - assert_equal(test, (np.bool, 2)) - # Standard w/o shape - ntype = np.float - test = make_mask_descr(ntype) - assert_equal(test, np.dtype(np.bool)) - # Nested - ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])] - test = make_mask_descr(ntype) - control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) - assert_equal(test, control) - # Named + shape - ntype = [('a', (np.float, 2))] - test = make_mask_descr(ntype) - assert_equal(test, np.dtype([('a', (np.bool, 2))])) - # 2 names - ntype = [(('A', 'a'), float)] - test = make_mask_descr(ntype) - assert_equal(test, np.dtype([(('A', 'a'), bool)])) - - def test_make_mask(self): - # Test make_mask - # w/ a list as an input - mask = [0, 1] - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [0, 1]) - # w/ a ndarray as an input - mask = np.array([0, 1], dtype=np.bool) - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [0, 1]) - # w/ a flexible-type ndarray as an input - use default - mdtype = [('a', np.bool), ('b', np.bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask) - assert_equal(test.dtype, MaskType) - assert_equal(test, [1, 1]) - # w/ a flexible-type ndarray as an input - use input dtype - mdtype = [('a', np.bool), ('b', np.bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask, dtype=mask.dtype) - assert_equal(test.dtype, mdtype) - assert_equal(test, mask) - # w/ a flexible-type ndarray as an input - use input dtype - mdtype = [('a', np.float), ('b', np.float)] - bdtype = [('a', np.bool), ('b', np.bool)] - mask = np.array([(0, 0), (0, 1)], dtype=mdtype) - test = make_mask(mask, dtype=mask.dtype) - assert_equal(test.dtype, bdtype) - assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) - - def test_mask_or(self): - # Initialize - mtype = [('a', np.bool), ('b', np.bool)] - mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) - # Test using nomask as input - test = mask_or(mask, nomask) - assert_equal(test, mask) - test = mask_or(nomask, mask) - assert_equal(test, mask) - # Using False as input - test = mask_or(mask, False) - assert_equal(test, mask) - # Using True as input.
Won't work, but keep it for the kicks - # test = mask_or(mask, True) - # control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype) - # assert_equal(test, control) - # Using another array w/ the same dtype - other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) - test = mask_or(mask, other) - control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) - assert_equal(test, control) - # Using another array w/ a different dtype - othertype = [('A', np.bool), ('B', np.bool)] - other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) - try: - test = mask_or(mask, other) - except ValueError: - pass - # Using nested arrays - dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])] - amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) - bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) - cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) - assert_equal(mask_or(amask, bmask), cntrl) - - def test_flatten_mask(self): - # Tests flatten_mask - # Standard dtype - mask = np.array([0, 0, 1], dtype=np.bool) - assert_equal(flatten_mask(mask), mask) - # Flexible dtype - mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) - test = flatten_mask(mask) - control = np.array([0, 0, 0, 1], dtype=bool) - assert_equal(test, control) - - mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] - data = [(0, (0, 0)), (0, (0, 1))] - mask = np.array(data, dtype=mdtype) - test = flatten_mask(mask) - control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) - assert_equal(test, control) - - def test_on_ndarray(self): - # Test functions on ndarrays - a = np.array([1, 2, 3, 4]) - m = array(a, mask=False) - test = anom(a) - assert_equal(test, m.anom()) - test = reshape(a, (2, 2)) - assert_equal(test, m.reshape(2, 2)) - - def test_compress(self): - # Test compress function on ndarray and masked array - # Address Github #2495. - arr = np.arange(8) - arr.shape = 4, 2 - cond = np.array([True, False, True, True]) - control = arr[[0, 2, 3]] - test = np.ma.compress(cond, arr, axis=0) - assert_equal(test, control) - marr = np.ma.array(arr) - test = np.ma.compress(cond, marr, axis=0) - assert_equal(test, control) - - def test_compressed(self): - # Test ma.compressed function.
- # Address gh-4026 - a = np.ma.array([1, 2]) - test = np.ma.compressed(a) - assert_(type(test) is np.ndarray) - # Test case when input data is ndarray subclass - class A(np.ndarray): - pass - a = np.ma.array(A(shape=0)) - test = np.ma.compressed(a) - assert_(type(test) is A) - # Test that compress flattens - test = np.ma.compressed([[1],[2]]) - assert_equal(test.ndim, 1) - test = np.ma.compressed([[[[[1]]]]]) - assert_equal(test.ndim, 1) - # Test case when input is MaskedArray subclass - class M(MaskedArray): - pass - test = np.ma.compressed(M(shape=(0,1,2))) - assert_equal(test.ndim, 1) - # with .compressed() overridden - class M(MaskedArray): - def compressed(self): - return 42 - test = np.ma.compressed(M(shape=(0,1,2))) - assert_equal(test, 42) - -#------------------------------------------------------------------------------ -class TestMaskedFields(TestCase): - # - def setUp(self): - ilist = [1, 2, 3, 4, 5] - flist = [1.1, 2.2, 3.3, 4.4, 5.5] - slist = ['one', 'two', 'three', 'four', 'five'] - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mdtype = [('a', bool), ('b', bool), ('c', bool)] - mask = [0, 1, 0, 0, 1] - base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) - self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) - - def test_set_records_masks(self): - base = self.data['base'] - mdtype = self.data['mdtype'] - # Set w/ nomask or masked - base.mask = nomask - assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) - base.mask = masked - assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) - # Set w/ simple boolean - base.mask = False - assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) - base.mask = True - assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) - # Set w/ list - base.mask = [0, 0, 0, 1, 1] - assert_equal_records(base._mask, - np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], - dtype=mdtype)) - - def test_set_record_element(self): - # Check setting an element of a record - base = self.data['base'] - (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) - base[0] = (pi, pi, 'pi') - - assert_equal(base_a.dtype, int) - assert_equal(base_a._data, [3, 2, 3, 4, 5]) - - assert_equal(base_b.dtype, float) - assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) - - assert_equal(base_c.dtype, '|S8') - assert_equal(base_c._data, - asbytes_nested(['pi', 'two', 'three', 'four', 'five'])) - - def test_set_record_slice(self): - base = self.data['base'] - (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) - base[:3] = (pi, pi, 'pi') - - assert_equal(base_a.dtype, int) - assert_equal(base_a._data, [3, 3, 3, 4, 5]) - - assert_equal(base_b.dtype, float) - assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) - - assert_equal(base_c.dtype, '|S8') - assert_equal(base_c._data, - asbytes_nested(['pi', 'pi', 'pi', 'four', 'five'])) - - def test_mask_element(self): - "Check record access" - base = self.data['base'] - (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) - base[0] = masked - # - for n in ('a', 'b', 'c'): - assert_equal(base[n].mask, [1, 1, 0, 0, 1]) - assert_equal(base[n]._data, base._data[n]) - - def test_getmaskarray(self): - # Test getmaskarray on flexible dtype - ndtype = [('a', int), ('b', float)] - test = empty(3, dtype=ndtype) - assert_equal(getmaskarray(test), - np.array([(0, 0), (0, 0), (0, 0)], - dtype=[('a', '|b1'), ('b', '|b1')])) - test[:] = masked - assert_equal(getmaskarray(test), - np.array([(1, 1), (1, 1), (1, 1)], - dtype=[('a', '|b1'), ('b',
'|b1')])) - - def test_view(self): - # Test view w/ flexible dtype - iterator = list(zip(np.arange(10), np.random.rand(10))) - data = np.array(iterator) - a = array(iterator, dtype=[('a', float), ('b', float)]) - a.mask[0] = (1, 0) - controlmask = np.array([1] + 19 * [0], dtype=bool) - # Transform globally to simple dtype - test = a.view(float) - assert_equal(test, data.ravel()) - assert_equal(test.mask, controlmask) - # Transform globally to a subdtype - test = a.view((float, 2)) - assert_equal(test, data) - assert_equal(test.mask, controlmask.reshape(-1, 2)) - # - test = a.view((float, 2), np.matrix) - assert_equal(test, data) - self.assertTrue(isinstance(test, np.matrix)) - - def test_getitem(self): - ndtype = [('a', float), ('b', float)] - a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) - a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], - [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), - dtype=[('a', bool), ('b', bool)]) - # No mask - self.assertTrue(isinstance(a[1], MaskedArray)) - # One element masked - self.assertTrue(isinstance(a[0], MaskedArray)) - assert_equal_records(a[0]._data, a._data[0]) - assert_equal_records(a[0]._mask, a._mask[0]) - # All elements masked - self.assertTrue(isinstance(a[-2], MaskedArray)) - assert_equal_records(a[-2]._data, a._data[-2]) - assert_equal_records(a[-2]._mask, a._mask[-2]) - - def test_setitem(self): - # Issue 4866: check that one can set individual items in [record][col] - # and [col][record] order - ndtype = np.dtype([('a', float), ('b', int)]) - ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) - ma['a'][1] = 3.0 - assert_equal(ma['a'], np.array([1.0, 3.0])) - ma[1]['a'] = 4.0 - assert_equal(ma['a'], np.array([1.0, 4.0])) - # Issue 2403 - mdtype = np.dtype([('a', bool), ('b', bool)]) - # soft mask - control = np.array([(False, True), (True, True)], dtype=mdtype) - a = np.ma.masked_all((2,), dtype=ndtype) - a['a'][0] = 2 - assert_equal(a.mask, control) - a = np.ma.masked_all((2,), dtype=ndtype) - a[0]['a'] = 2 - assert_equal(a.mask, control) - # hard mask - control = np.array([(True, True), (True, True)], dtype=mdtype) - a = np.ma.masked_all((2,), dtype=ndtype) - a.harden_mask() - a['a'][0] = 2 - assert_equal(a.mask, control) - a = np.ma.masked_all((2,), dtype=ndtype) - a.harden_mask() - a[0]['a'] = 2 - assert_equal(a.mask, control) - - def test_element_len(self): - # check that len() works for mvoid (Github issue #576) - for rec in self.data['base']: - assert_equal(len(rec), len(self.data['ddtype'])) - - -#------------------------------------------------------------------------------ -class TestMaskedView(TestCase): - # - def setUp(self): - iterator = list(zip(np.arange(10), np.random.rand(10))) - data = np.array(iterator) - a = array(iterator, dtype=[('a', float), ('b', float)]) - a.mask[0] = (1, 0) - controlmask = np.array([1] + 19 * [0], dtype=bool) - self.data = (data, a, controlmask) - - def test_view_to_nothing(self): - (data, a, controlmask) = self.data - test = a.view() - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test._data, a._data) - assert_equal(test._mask, a._mask) - - def test_view_to_type(self): - (data, a, controlmask) = self.data - test = a.view(np.ndarray) - self.assertTrue(not isinstance(test, MaskedArray)) - assert_equal(test, a._data) - assert_equal_records(test, data.view(a.dtype).squeeze()) - - def test_view_to_simple_dtype(self): - (data, a, controlmask) = self.data - # View globally - test = a.view(float) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test,
data.ravel()) - assert_equal(test.mask, controlmask) - - def test_view_to_flexible_dtype(self): - (data, a, controlmask) = self.data - # - test = a.view([('A', float), ('B', float)]) - assert_equal(test.mask.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a']) - assert_equal(test['B'], a['b']) - # - test = a[0].view([('A', float), ('B', float)]) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test.mask.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a'][0]) - assert_equal(test['B'], a['b'][0]) - # - test = a[-1].view([('A', float), ('B', float)]) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test.dtype.names, ('A', 'B')) - assert_equal(test['A'], a['a'][-1]) - assert_equal(test['B'], a['b'][-1]) - - def test_view_to_subdtype(self): - (data, a, controlmask) = self.data - # View globally - test = a.view((float, 2)) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test, data) - assert_equal(test.mask, controlmask.reshape(-1, 2)) - # View on 1 masked element - test = a[0].view((float, 2)) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test, data[0]) - assert_equal(test.mask, (1, 0)) - # View on 1 unmasked element - test = a[-1].view((float, 2)) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test, data[-1]) - - def test_view_to_dtype_and_type(self): - (data, a, controlmask) = self.data - # - test = a.view((float, 2), np.matrix) - assert_equal(test, data) - self.assertTrue(isinstance(test, np.matrix)) - self.assertTrue(not isinstance(test, MaskedArray)) - - -def test_masked_array(): - a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) - assert_equal(np.argwhere(a), [[1], [3]]) - -def test_append_masked_array(): - a = np.ma.masked_equal([1,2,3], value=2) - b = np.ma.masked_equal([4,3,2], value=2) - - result = np.ma.append(a, b) - expected_data = [1, 2, 3, 4, 3, 2] - expected_mask = [False, True, False, False, False, True] - assert_array_equal(result.data, expected_data) - assert_array_equal(result.mask, expected_mask) - - a = np.ma.masked_all((2,2)) - b = np.ma.ones((3,1)) - - result = np.ma.append(a, b) - expected_data = [1] * 3 - expected_mask = [True] * 4 + [False] * 3 - assert_array_equal(result.data[-3:], expected_data) - assert_array_equal(result.mask, expected_mask) - - result = np.ma.append(a, b, axis=None) - assert_array_equal(result.data[-3:], expected_data) - assert_array_equal(result.mask, expected_mask) - - -def test_append_masked_array_along_axis(): - a = np.ma.masked_equal([1,2,3], value=2) - b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) - - # When `axis` is specified, `values` must have the correct shape. - assert_raises(ValueError, np.ma.append, a, b, axis=0) - - result = np.ma.append(a[np.newaxis,:], b, axis=0) - expected = np.ma.arange(1, 10) - expected[[1, 6]] = np.ma.masked - expected = expected.reshape((3,3)) - assert_array_equal(result.data, expected.data) - assert_array_equal(result.mask, expected.mask) - - -############################################################################### -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_extras.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_extras.py deleted file mode 100644 index 6ce1dc346a1d1..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_extras.py +++ /dev/null @@ -1,947 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511 -"""Tests suite for MaskedArray.
-Adapted from the original test_ma by Pierre Gerard-Marchant - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ - -""" -from __future__ import division, absolute_import, print_function - -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import numpy as np -from numpy.testing import TestCase, run_module_suite -from numpy.ma.testutils import (rand, assert_, assert_array_equal, - assert_equal, assert_almost_equal) -from numpy.ma.core import (array, arange, masked, MaskedArray, masked_array, - getmaskarray, shape, nomask, ones, zeros, count) -from numpy.ma.extras import ( - atleast_2d, mr_, dot, polyfit, - cov, corrcoef, median, average, - unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, ediff1d, - apply_over_axes, apply_along_axis, - compress_rowcols, mask_rowcols, - clump_masked, clump_unmasked, - flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, - masked_all, masked_all_like) - - -class TestGeneric(TestCase): - # - def test_masked_all(self): - # Tests masked_all - # Standard dtype - test = masked_all((2,), dtype=float) - control = array([1, 1], mask=[1, 1], dtype=float) - assert_equal(test, control) - # Flexible dtype - dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) - test = masked_all((2,), dtype=dt) - control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) - assert_equal(test, control) - test = masked_all((2, 2), dtype=dt) - control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], - mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], - dtype=dt) - assert_equal(test, control) - # Nested dtype - dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) - test = masked_all((2,), dtype=dt) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - assert_equal(test, control) - test = masked_all((2,), dtype=dt) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - assert_equal(test, control) - test = masked_all((1, 1), dtype=dt) - control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) - assert_equal(test, control) - - def test_masked_all_like(self): - # Tests masked_all - # Standard dtype - base = array([1, 2], dtype=float) - test = masked_all_like(base) - control = array([1, 1], mask=[1, 1], dtype=float) - assert_equal(test, control) - # Flexible dtype - dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) - base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) - test = masked_all_like(base) - control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) - assert_equal(test, control) - # Nested dtype - dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) - control = array([(1, (1, 1)), (1, (1, 1))], - mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) - test = masked_all_like(control) - assert_equal(test, control) - - def test_clump_masked(self): - # Test clump_masked - a = masked_array(np.arange(10)) - a[[0, 1, 2, 6, 8, 9]] = masked - # - test = clump_masked(a) - control = [slice(0, 3), slice(6, 7), slice(8, 10)] - assert_equal(test, control) - - def test_clump_unmasked(self): - # Test clump_unmasked - a = masked_array(np.arange(10)) - a[[0, 1, 2, 6, 8, 9]] = masked - test = clump_unmasked(a) - control = [slice(3, 6), slice(7, 8), ] - assert_equal(test, control) - - def test_flatnotmasked_contiguous(self): 
- # Test flatnotmasked_contiguous - a = arange(10) - # No mask - test = flatnotmasked_contiguous(a) - assert_equal(test, slice(0, a.size)) - # Some mask - a[(a < 3) | (a > 8) | (a == 5)] = masked - test = flatnotmasked_contiguous(a) - assert_equal(test, [slice(3, 5), slice(6, 9)]) - # - a[:] = masked - test = flatnotmasked_contiguous(a) - assert_equal(test, None) - - -class TestAverage(TestCase): - # Several tests of average. Why so many ? Good point... - def test_testAverage1(self): - # Test of average. - ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) - assert_equal(2.0, average(ott, axis=0)) - assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) - result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) - assert_equal(2.0, result) - self.assertTrue(wts == 4.0) - ott[:] = masked - assert_equal(average(ott, axis=0).mask, [True]) - ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) - ott = ott.reshape(2, 2) - ott[:, 1] = masked - assert_equal(average(ott, axis=0), [2.0, 0.0]) - assert_equal(average(ott, axis=1).mask[0], [True]) - assert_equal([2., 0.], average(ott, axis=0)) - result, wts = average(ott, axis=0, returned=1) - assert_equal(wts, [1., 0.]) - - def test_testAverage2(self): - # More tests of average. - w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = arange(6, dtype=np.float_) - assert_equal(average(x, axis=0), 2.5) - assert_equal(average(x, axis=0, weights=w1), 2.5) - y = array([arange(6, dtype=np.float_), 2.0 * arange(6)]) - assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) - assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) - assert_equal(average(y, axis=1), - [average(x, axis=0), average(x, axis=0) * 2.0]) - assert_equal(average(y, None, weights=w2), 20. / 6.) - assert_equal(average(y, axis=0, weights=w2), - [0., 1., 2., 3., 4., 10.]) - assert_equal(average(y, axis=1), - [average(x, axis=0), average(x, axis=0) * 2.0]) - m1 = zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = ones(6) - m5 = [0, 1, 1, 1, 1, 1] - assert_equal(average(masked_array(x, m1), axis=0), 2.5) - assert_equal(average(masked_array(x, m2), axis=0), 2.5) - assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) - assert_equal(average(masked_array(x, m5), axis=0), 0.0) - assert_equal(count(average(masked_array(x, m4), axis=0)), 0) - z = masked_array(y, m3) - assert_equal(average(z, None), 20. / 6.) - assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) - assert_equal(average(z, axis=1), [2.5, 5.0]) - assert_equal(average(z, axis=0, weights=w2), - [0., 1., 99., 99., 4.0, 10.0]) - - def test_testAverage3(self): - # Yet more tests of average! - a = arange(6) - b = arange(6) * 3 - r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) - assert_equal(shape(r1), shape(w1)) - assert_equal(r1.shape, w1.shape) - r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) - assert_equal(shape(w2), shape(r2)) - r2, w2 = average(ones((2, 2, 3)), returned=1) - assert_equal(shape(w2), shape(r2)) - r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) - assert_equal(shape(w2), shape(r2)) - a2d = array([[1, 2], [0, 4]], float) - a2dm = masked_array(a2d, [[False, False], [True, False]]) - a2da = average(a2d, axis=0) - assert_equal(a2da, [0.5, 3.0]) - a2dma = average(a2dm, axis=0) - assert_equal(a2dma, [1.0, 3.0]) - a2dma = average(a2dm, axis=None) - assert_equal(a2dma, 7. / 3.) 
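The average() assertions in this class all exercise one rule: a masked entry drops out of both the weighted sum and the normalizing weight total, and returned=1 hands back the weight total that was actually used. A standalone sketch of that rule, with illustrative values that are not part of this patch:

# Sketch only: masked entries are excluded from data and weights alike.
import numpy as np

x = np.ma.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
# Plain average over the unmasked entries: (1 + 2 + 3) / 3
assert np.ma.average(x) == 2.0
# The masked slot's weight drops out of the normalization too:
# (1*1 + 2*2 + 3*1) / (1 + 2 + 1) == 2.0
assert np.ma.average(x, weights=[1., 1., 2., 1.]) == 2.0
# returned=True also yields the sum of the weights actually used
avg, wsum = np.ma.average(x, weights=[1., 1., 2., 1.], returned=True)
assert wsum == 4.0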
- a2dma = average(a2dm, axis=1) - assert_equal(a2dma, [1.5, 4.0]) - - def test_onintegers_with_mask(self): - # Test average on integers with mask - a = average(array([1, 2])) - assert_equal(a, 1.5) - a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) - assert_equal(a, 1.5) - - def test_complex(self): - # Test with complex data. - # (Regression test for https://github.com/numpy/numpy/issues/2684) - mask = np.array([[0, 0, 0, 1, 0], - [0, 1, 0, 0, 0]], dtype=bool) - a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], - [9j, 0+1j, 2+3j, 4+5j, 7+7j]], - mask=mask) - - av = average(a) - expected = np.average(a.compressed()) - assert_almost_equal(av.real, expected.real) - assert_almost_equal(av.imag, expected.imag) - - av0 = average(a, axis=0) - expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j - assert_almost_equal(av0.real, expected0.real) - assert_almost_equal(av0.imag, expected0.imag) - - av1 = average(a, axis=1) - expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j - assert_almost_equal(av1.real, expected1.real) - assert_almost_equal(av1.imag, expected1.imag) - - # Test with the 'weights' argument. - wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], - [1.0, 1.0, 1.0, 1.0, 1.0]]) - wav = average(a, weights=wts) - expected = np.average(a.compressed(), weights=wts[~mask]) - assert_almost_equal(wav.real, expected.real) - assert_almost_equal(wav.imag, expected.imag) - - wav0 = average(a, weights=wts, axis=0) - expected0 = (average(a.real, weights=wts, axis=0) + - average(a.imag, weights=wts, axis=0)*1j) - assert_almost_equal(wav0.real, expected0.real) - assert_almost_equal(wav0.imag, expected0.imag) - - wav1 = average(a, weights=wts, axis=1) - expected1 = (average(a.real, weights=wts, axis=1) + - average(a.imag, weights=wts, axis=1)*1j) - assert_almost_equal(wav1.real, expected1.real) - assert_almost_equal(wav1.imag, expected1.imag) - - -class TestConcatenator(TestCase): - # Tests for mr_, the equivalent of r_ for masked arrays. - - def test_1d(self): - # Tests mr_ on 1D arrays. - assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) - b = ones(5) - m = [1, 0, 0, 0, 0] - d = masked_array(b, mask=m) - c = mr_[d, 0, 0, d] - self.assertTrue(isinstance(c, MaskedArray)) - assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) - assert_array_equal(c.mask, mr_[m, 0, 0, m]) - - def test_2d(self): - # Tests mr_ on 2D arrays. - a_1 = rand(5, 5) - a_2 = rand(5, 5) - m_1 = np.round_(rand(5, 5), 0) - m_2 = np.round_(rand(5, 5), 0) - b_1 = masked_array(a_1, mask=m_1) - b_2 = masked_array(a_2, mask=m_2) - # append columns - d = mr_['1', b_1, b_2] - self.assertTrue(d.shape == (5, 10)) - assert_array_equal(d[:, :5], b_1) - assert_array_equal(d[:, 5:], b_2) - assert_array_equal(d.mask, np.r_['1', m_1, m_2]) - d = mr_[b_1, b_2] - self.assertTrue(d.shape == (10, 5)) - assert_array_equal(d[:5,:], b_1) - assert_array_equal(d[5:,:], b_2) - assert_array_equal(d.mask, np.r_[m_1, m_2]) - - -class TestNotMasked(TestCase): - # Tests notmasked_edges and notmasked_contiguous. 
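The cases below pin down notmasked_edges and notmasked_contiguous. As a reading aid, a short sketch of the axis=None behaviour with assumed values, not part of this patch: notmasked_edges reports the flat indices of the first and last unmasked entries, and notmasked_contiguous reports the unmasked runs as slices.

# Sketch only, with illustrative data; behaviour as of this numpy vintage.
import numpy as np

a = np.ma.array(np.arange(6), mask=[1, 1, 0, 0, 0, 1])
first, last = np.ma.notmasked_edges(a, axis=None)
assert (first, last) == (2, 4)  # flat indices bounding the unmasked span
assert np.ma.notmasked_contiguous(a) == [slice(2, 5)]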
- - def test_edges(self): - # Tests notmasked_edges - data = masked_array(np.arange(25).reshape(5, 5), - mask=[[0, 0, 1, 0, 0], - [0, 0, 0, 1, 1], - [1, 1, 0, 0, 0], - [0, 0, 0, 0, 0], - [1, 1, 1, 0, 0]],) - test = notmasked_edges(data, None) - assert_equal(test, [0, 24]) - test = notmasked_edges(data, 0) - assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data, 1) - assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) - assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) - # - test = notmasked_edges(data.data, None) - assert_equal(test, [0, 24]) - test = notmasked_edges(data.data, 0) - assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data.data, -1) - assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) - assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) - # - data[-2] = masked - test = notmasked_edges(data, 0) - assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) - assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) - test = notmasked_edges(data, -1) - assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) - assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) - - def test_contiguous(self): - # Tests notmasked_contiguous - a = masked_array(np.arange(24).reshape(3, 8), - mask=[[0, 0, 0, 0, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1, 1, 1], - [0, 0, 0, 0, 0, 0, 1, 0], ]) - tmp = notmasked_contiguous(a, None) - assert_equal(tmp[-1], slice(23, 24, None)) - assert_equal(tmp[-2], slice(16, 22, None)) - assert_equal(tmp[-3], slice(0, 4, None)) - # - tmp = notmasked_contiguous(a, 0) - self.assertTrue(len(tmp[-1]) == 1) - self.assertTrue(tmp[-2] is None) - assert_equal(tmp[-3], tmp[-1]) - self.assertTrue(len(tmp[0]) == 2) - # - tmp = notmasked_contiguous(a, 1) - assert_equal(tmp[0][-1], slice(0, 4, None)) - self.assertTrue(tmp[1] is None) - assert_equal(tmp[2][-1], slice(7, 8, None)) - assert_equal(tmp[2][-2], slice(0, 6, None)) - - -class Test2DFunctions(TestCase): - # Tests 2D functions - def test_compress2d(self): - # Tests compress2d - x = array(np.arange(9).reshape(3, 3), - mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) - assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) - assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) - x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) - assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) - assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(compress_rowcols(x), [[8]]) - assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) - assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - assert_equal(compress_rowcols(x).size, 0) - assert_equal(compress_rowcols(x, 0).size, 0) - assert_equal(compress_rowcols(x, 1).size, 0) - - def test_mask_rowcols(self): - # Tests mask_rowcols.
- x = array(np.arange(9).reshape(3, 3), - mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, - [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) - assert_equal(mask_rowcols(x, 0).mask, - [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1).mask, - [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) - x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, - [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) - assert_equal(mask_rowcols(x, 0).mask, - [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1).mask, - [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) - assert_equal(mask_rowcols(x).mask, - [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) - assert_equal(mask_rowcols(x, 0).mask, - [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) - assert_equal(mask_rowcols(x, 1,).mask, - [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) - x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - self.assertTrue(mask_rowcols(x).all() is masked) - self.assertTrue(mask_rowcols(x, 0).all() is masked) - self.assertTrue(mask_rowcols(x, 1).all() is masked) - self.assertTrue(mask_rowcols(x).mask.all()) - self.assertTrue(mask_rowcols(x, 0).mask.all()) - self.assertTrue(mask_rowcols(x, 1).mask.all()) - - def test_dot(self): - # Tests dot product - n = np.arange(1, 7) - # - m = [1, 0, 0, 0, 0, 0] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[1, 1], [1, 0]]) - c = dot(b, a, True) - assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) - c = dot(a, b, False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - m = [0, 0, 0, 0, 0, 1] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[0, 1], [1, 1]]) - c = dot(b, a, True) - assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) - c = dot(a, b, False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - assert_equal(c, dot(a, b)) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - m = [0, 0, 0, 0, 0, 0] - a = masked_array(n, mask=m).reshape(2, 3) - b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b) - assert_equal(c.mask, nomask) - c = dot(b, a) - assert_equal(c.mask, nomask) - # - a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[1, 1], [0, 0]]) - c = dot(a, b, False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) - assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[0, 0], [1, 1]]) - c = dot(a, b) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) - assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - # - a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) - b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) - assert_equal(c.mask, [[1, 0], [1, 1]]) - c = dot(a, b, False) - assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) - assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 
1]]) - c = dot(b, a, False) - assert_equal(c, np.dot(b.filled(0), a.filled(0))) - - -class TestApplyAlongAxis(TestCase): - # Tests 2D functions - def test_3d(self): - a = arange(12.).reshape(2, 2, 3) - - def myfunc(b): - return b[1] - - xa = apply_along_axis(myfunc, 2, a) - assert_equal(xa, [[1, 4], [7, 10]]) - - # Tests kwargs functions - def test_3d_kwargs(self): - a = arange(12).reshape(2, 2, 3) - - def myfunc(b, offset=0): - return b[1+offset] - - xa = apply_along_axis(myfunc, 2, a, offset=1) - assert_equal(xa, [[2, 5], [8, 11]]) - - -class TestApplyOverAxes(TestCase): - # Tests apply_over_axes - def test_basic(self): - a = arange(24).reshape(2, 3, 4) - test = apply_over_axes(np.sum, a, [0, 2]) - ctrl = np.array([[[60], [92], [124]]]) - assert_equal(test, ctrl) - a[(a % 2).astype(np.bool)] = masked - test = apply_over_axes(np.sum, a, [0, 2]) - ctrl = np.array([[[28], [44], [60]]]) - assert_equal(test, ctrl) - - -class TestMedian(TestCase): - - def test_2d(self): - # Tests median w/ 2D - (n, p) = (101, 30) - x = masked_array(np.linspace(-1., 1., n),) - x[:10] = x[-10:] = masked - z = masked_array(np.empty((n, p), dtype=float)) - z[:, 0] = x[:] - idx = np.arange(len(x)) - for i in range(1, p): - np.random.shuffle(idx) - z[:, i] = x[idx] - assert_equal(median(z[:, 0]), 0) - assert_equal(median(z), 0) - assert_equal(median(z, axis=0), np.zeros(p)) - assert_equal(median(z.T, axis=1), np.zeros(p)) - - def test_2d_waxis(self): - # Tests median w/ 2D arrays and different axis. - x = masked_array(np.arange(30).reshape(10, 3)) - x[:3] = x[-3:] = masked - assert_equal(median(x), 14.5) - assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) - assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) - assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) - - def test_3d(self): - # Tests median w/ 3D - x = np.ma.arange(24).reshape(3, 4, 2) - x[x % 3 == 0] = masked - assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) - assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) - x = np.ma.arange(24).reshape(4, 3, 2) - x[x % 5 == 0] = masked - assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) - - def test_neg_axis(self): - x = masked_array(np.arange(30).reshape(10, 3)) - x[:3] = x[-3:] = masked - assert_equal(median(x, axis=-1), median(x, axis=1)) - - def test_out(self): - x = masked_array(np.arange(30).reshape(10, 3)) - x[:3] = x[-3:] = masked - out = masked_array(np.ones(10)) - r = median(x, axis=1, out=out) - assert_equal(r, out) - assert_(type(r) == MaskedArray) - - -class TestCov(TestCase): - - def setUp(self): - self.data = array(np.random.rand(12)) - - def test_1d_wo_missing(self): - # Test cov on 1D variable w/o missing values - x = self.data - assert_almost_equal(np.cov(x), cov(x)) - assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) - assert_almost_equal(np.cov(x, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - - def test_2d_wo_missing(self): - # Test cov on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) - assert_almost_equal(np.cov(x), cov(x)) - assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) - assert_almost_equal(np.cov(x, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - - def test_1d_w_missing(self): - # Test cov 1 1D variable w/missing values - x = self.data - x[-1] = masked - x -= x.mean() - nx = x.compressed() - assert_almost_equal(np.cov(nx), cov(x)) - assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) - 
assert_almost_equal(np.cov(nx, rowvar=False, bias=True), - cov(x, rowvar=False, bias=True)) - # - try: - cov(x, allow_masked=False) - except ValueError: - pass - # - # 2 1D variables w/ missing values - nx = x[1:-1] - assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) - assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), - cov(x, x[::-1], rowvar=False)) - assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), - cov(x, x[::-1], rowvar=False, bias=True)) - - def test_2d_w_missing(self): - # Test cov on 2D variable w/ missing value - x = self.data - x[-1] = masked - x = x.reshape(3, 4) - valid = np.logical_not(getmaskarray(x)).astype(int) - frac = np.dot(valid, valid.T) - xf = (x - x.mean(1)[:, None]).filled(0) - assert_almost_equal(cov(x), - np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) - assert_almost_equal(cov(x, bias=True), - np.cov(xf, bias=True) * x.shape[1] / frac) - frac = np.dot(valid.T, valid) - xf = (x - x.mean(0)).filled(0) - assert_almost_equal(cov(x, rowvar=False), - (np.cov(xf, rowvar=False) * - (x.shape[0] - 1) / (frac - 1.))) - assert_almost_equal(cov(x, rowvar=False, bias=True), - (np.cov(xf, rowvar=False, bias=True) * - x.shape[0] / frac)) - - -class TestCorrcoef(TestCase): - - def setUp(self): - self.data = array(np.random.rand(12)) - - def test_ddof(self): - # Test ddof keyword - x = self.data - assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) - - def test_1d_wo_missing(self): - # Test cov on 1D variable w/o missing values - x = self.data - assert_almost_equal(np.corrcoef(x), corrcoef(x)) - assert_almost_equal(np.corrcoef(x, rowvar=False), - corrcoef(x, rowvar=False)) - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - - def test_2d_wo_missing(self): - # Test corrcoef on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) - assert_almost_equal(np.corrcoef(x), corrcoef(x)) - assert_almost_equal(np.corrcoef(x, rowvar=False), - corrcoef(x, rowvar=False)) - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - - def test_1d_w_missing(self): - # Test corrcoef 1 1D variable w/missing values - x = self.data - x[-1] = masked - x -= x.mean() - nx = x.compressed() - assert_almost_equal(np.corrcoef(nx), corrcoef(x)) - assert_almost_equal(np.corrcoef(nx, rowvar=False), - corrcoef(x, rowvar=False)) - assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) - # - try: - corrcoef(x, allow_masked=False) - except ValueError: - pass - # - # 2 1D variables w/ missing values - nx = x[1:-1] - assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) - assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), - corrcoef(x, x[::-1], rowvar=False)) - assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False, bias=True), - corrcoef(x, x[::-1], rowvar=False, bias=True)) - - def test_2d_w_missing(self): - # Test corrcoef on 2D variable w/ missing value - x = self.data - x[-1] = masked - x = x.reshape(3, 4) - - test = corrcoef(x) - control = np.corrcoef(x) - assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - - -class TestPolynomial(TestCase): - # - def test_polyfit(self): - # Tests polyfit - # On ndarrays - x = np.random.rand(10) - y = np.random.rand(20).reshape(-1, 2) - assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) - # ON 1D maskedarrays - x = x.view(MaskedArray) - x[0] = masked - y = y.view(MaskedArray) - y[0, 0] = y[-1, -1] = masked - # - (C, R, K, S, D) = polyfit(x, 
y[:, 0], 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, - full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - # - (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - # - (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - # - w = np.random.rand(10) + 1 - wo = w.copy() - xs = x[1:-1] - ys = y[1:-1] - ws = w[1:-1] - (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) - (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) - assert_equal(w, wo) - for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): - assert_almost_equal(a, a_) - - -class TestArraySetOps(TestCase): - - def test_unique_onlist(self): - # Test unique on list - data = [1, 1, 1, 2, 2, 3] - test = unique(data, return_index=True, return_inverse=True) - self.assertTrue(isinstance(test[0], MaskedArray)) - assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) - assert_equal(test[1], [0, 3, 5]) - assert_equal(test[2], [0, 0, 0, 1, 1, 2]) - - def test_unique_onmaskedarray(self): - # Test unique on masked data w/use_mask=True - data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) - assert_equal(test[1], [0, 3, 5, 2]) - assert_equal(test[2], [0, 0, 3, 1, 3, 2]) - # - data.fill_value = 3 - data = masked_array(data=[1, 1, 1, 2, 2, 3], - mask=[0, 0, 1, 0, 1, 0], fill_value=3) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) - assert_equal(test[1], [0, 3, 5, 2]) - assert_equal(test[2], [0, 0, 3, 1, 3, 2]) - - def test_unique_allmasked(self): - # Test all masked - data = masked_array([1, 1, 1], mask=True) - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1, ], mask=[True])) - assert_equal(test[1], [0]) - assert_equal(test[2], [0, 0, 0]) - # - # Test masked - data = masked - test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array(masked)) - assert_equal(test[1], [0]) - assert_equal(test[2], [0]) - - def test_ediff1d(self): - # Tests ediff1d - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) - test = ediff1d(x) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_ediff1d_tobegin(self): - # Test ediff1d w/ to_begin - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_begin=masked) - control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_begin=[1, 2, 3]) - control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_ediff1d_toend(self): - # Test ediff1d w/ to_end - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_end=masked) - control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) - assert_equal(test, control) -
assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_end=[1, 2, 3]) - control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_ediff1d_tobegin_toend(self): - # Test ediff1d w/ to_begin and to_end - x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) - test = ediff1d(x, to_end=masked, to_begin=masked) - control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) - control = array([0, 1, 1, 1, 4, 1, 2, 3], - mask=[1, 1, 0, 0, 1, 0, 0, 0]) - assert_equal(test, control) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_ediff1d_ndarray(self): - # Test ediff1d w/ a ndarray - x = np.arange(5) - test = ediff1d(x) - control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) - assert_equal(test, control) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - # - test = ediff1d(x, to_end=masked, to_begin=masked) - control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) - self.assertTrue(isinstance(test, MaskedArray)) - assert_equal(test.data, control.data) - assert_equal(test.mask, control.mask) - - def test_intersect1d(self): - # Test intersect1d - x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) - y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - test = intersect1d(x, y) - control = array([1, 3, -1], mask=[0, 0, 1]) - assert_equal(test, control) - - def test_setxor1d(self): - # Test setxor1d - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - test = setxor1d(a, b) - assert_equal(test, array([3, 4, 7])) - # - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = [1, 2, 3, 4, 5] - test = setxor1d(a, b) - assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) - # - a = array([1, 2, 3]) - b = array([6, 5, 4]) - test = setxor1d(a, b) - assert_(isinstance(test, MaskedArray)) - assert_equal(test, [1, 2, 3, 4, 5, 6]) - # - a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) - b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) - test = setxor1d(a, b) - assert_(isinstance(test, MaskedArray)) - assert_equal(test, [1, 2, 3, 4, 5, 6]) - # - assert_array_equal([], setxor1d([], [])) - - def test_in1d(self): - # Test in1d - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - test = in1d(a, b) - assert_equal(test, [True, True, True, False, True]) - # - a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 5, -1], mask=[0, 0, 1]) - test = in1d(a, b) - assert_equal(test, [True, True, False, True, True]) - # - assert_array_equal([], in1d([], [])) - - def test_in1d_invert(self): - # Test in1d's invert parameter - a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) - - a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) - b = array([1, 5, -1], mask=[0, 0, 1]) - assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) - - assert_array_equal([], in1d([], [], invert=True)) - - def test_union1d(self): - # Test union1d - a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) - 
test = union1d(a, b) - control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) - assert_equal(test, control) - # - assert_array_equal([], union1d([], [])) - - def test_setdiff1d(self): - # Test setdiff1d - a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) - b = array([2, 4, 3, 3, 2, 1, 5]) - test = setdiff1d(a, b) - assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) - # - a = arange(10) - b = arange(8) - assert_equal(setdiff1d(a, b), array([8, 9])) - - def test_setdiff1d_char_array(self): - # Test setdiff1d_charray - a = np.array(['a', 'b', 'c']) - b = np.array(['a', 'b', 's']) - assert_array_equal(setdiff1d(a, b), np.array(['c'])) - - -class TestShapeBase(TestCase): - # - def test_atleast2d(self): - # Test atleast_2d - a = masked_array([0, 1, 2], mask=[0, 1, 0]) - b = atleast_2d(a) - assert_equal(b.shape, (1, 3)) - assert_equal(b.mask.shape, b.data.shape) - assert_equal(a.shape, (3,)) - assert_equal(a.mask.shape, a.data.shape) - - -############################################################################### -#------------------------------------------------------------------------------ -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_mrecords.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_mrecords.py deleted file mode 100644 index 54945e8f007f8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_mrecords.py +++ /dev/null @@ -1,521 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for mrecords. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import pickle - -import numpy as np -import numpy.ma as ma -from numpy import recarray -from numpy.core.records import (fromrecords as recfromrecords, - fromarrays as recfromarrays) - -from numpy.compat import asbytes, asbytes_nested -from numpy.ma.testutils import * -from numpy.ma import masked, nomask -from numpy.ma.mrecords import (MaskedRecords, mrecarray, fromarrays, - fromtextfile, fromrecords, addfield) - - -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - - -#.............................................................................. -class TestMRecords(TestCase): - # Base test class for MaskedArrays. - def __init__(self, *args, **kwds): - TestCase.__init__(self, *args, **kwds) - self.setup() - - def setup(self): - # Generic setup - ilist = [1, 2, 3, 4, 5] - flist = [1.1, 2.2, 3.3, 4.4, 5.5] - slist = asbytes_nested(['one', 'two', 'three', 'four', 'five']) - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mask = [0, 1, 0, 0, 1] - self.base = ma.array(list(zip(ilist, flist, slist)), - mask=mask, dtype=ddtype) - - def test_byview(self): - # Test creation by view - base = self.base - mbase = base.view(mrecarray) - assert_equal(mbase.recordmask, base.recordmask) - assert_equal_records(mbase._mask, base._mask) - assert_(isinstance(mbase._data, recarray)) - assert_equal_records(mbase._data, base._data.view(recarray)) - for field in ('a', 'b', 'c'): - assert_equal(base[field], mbase[field]) - assert_equal_records(mbase.view(mrecarray), mbase) - - def test_get(self): - # Tests fields retrieval - base = self.base.copy() - mbase = base.view(mrecarray) - # As fields.......... 
- for field in ('a', 'b', 'c'): - assert_equal(getattr(mbase, field), mbase[field]) - assert_equal(base[field], mbase[field]) - # as elements ....... - mbase_first = mbase[0] - assert_(isinstance(mbase_first, mrecarray)) - assert_equal(mbase_first.dtype, mbase.dtype) - assert_equal(mbase_first.tolist(), (1, 1.1, asbytes('one'))) - # Used to be mask, now it's recordmask - assert_equal(mbase_first.recordmask, nomask) - assert_equal(mbase_first._mask.item(), (False, False, False)) - assert_equal(mbase_first['a'], mbase['a'][0]) - mbase_last = mbase[-1] - assert_(isinstance(mbase_last, mrecarray)) - assert_equal(mbase_last.dtype, mbase.dtype) - assert_equal(mbase_last.tolist(), (None, None, None)) - # Used to be mask, now it's recordmask - assert_equal(mbase_last.recordmask, True) - assert_equal(mbase_last._mask.item(), (True, True, True)) - assert_equal(mbase_last['a'], mbase['a'][-1]) - assert_((mbase_last['a'] is masked)) - # as slice .......... - mbase_sl = mbase[:2] - assert_(isinstance(mbase_sl, mrecarray)) - assert_equal(mbase_sl.dtype, mbase.dtype) - # Used to be mask, now it's recordmask - assert_equal(mbase_sl.recordmask, [0, 1]) - assert_equal_records(mbase_sl.mask, - np.array([(False, False, False), - (True, True, True)], - dtype=mbase._mask.dtype)) - assert_equal_records(mbase_sl, base[:2].view(mrecarray)) - for field in ('a', 'b', 'c'): - assert_equal(getattr(mbase_sl, field), base[:2][field]) - - def test_set_fields(self): - # Tests setting fields. - base = self.base.copy() - mbase = base.view(mrecarray) - mbase = mbase.copy() - mbase.fill_value = (999999, 1e20, 'N/A') - # Change the data, the mask should be conserved - mbase.a._data[:] = 5 - assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) - assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) - # Change the elements, and the mask will follow - mbase.a = 1 - assert_equal(mbase['a']._data, [1]*5) - assert_equal(ma.getmaskarray(mbase['a']), [0]*5) - # Use to be _mask, now it's recordmask - assert_equal(mbase.recordmask, [False]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 0), - (0, 1, 1), - (0, 0, 0), - (0, 0, 0), - (0, 1, 1)], - dtype=bool)) - # Set a field to mask ........................ - mbase.c = masked - # Use to be mask, and now it's still mask ! - assert_equal(mbase.c.mask, [1]*5) - assert_equal(mbase.c.recordmask, [1]*5) - assert_equal(ma.getmaskarray(mbase['c']), [1]*5) - assert_equal(ma.getdata(mbase['c']), [asbytes('N/A')]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 1), - (0, 1, 1), - (0, 0, 1), - (0, 0, 1), - (0, 1, 1)], - dtype=bool)) - # Set fields by slices ....................... - mbase = base.view(mrecarray).copy() - mbase.a[3:] = 5 - assert_equal(mbase.a, [1, 2, 3, 5, 5]) - assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) - mbase.b[3:] = masked - assert_equal(mbase.b, base['b']) - assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) - # Set fields globally.......................... - ndtype = [('alpha', '|S1'), ('num', int)] - data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) - rdata = data.view(MaskedRecords) - val = ma.array([10, 20, 30], mask=[1, 0, 0]) - # - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - rdata['num'] = val - assert_equal(rdata.num, val) - assert_equal(rdata.num.mask, [1, 0, 0]) - - def test_set_fields_mask(self): - # Tests setting the mask of a field. - base = self.base.copy() - # This one has already a mask.... 
- mbase = base.view(mrecarray) - mbase['a'][-2] = masked - assert_equal(mbase.a, [1, 2, 3, 4, 5]) - assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) - # This one has not yet - mbase = fromarrays([np.arange(5), np.random.rand(5)], - dtype=[('a', int), ('b', float)]) - mbase['a'][-2] = masked - assert_equal(mbase.a, [0, 1, 2, 3, 4]) - assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) - - def test_set_mask(self): - base = self.base.copy() - mbase = base.view(mrecarray) - # Set the mask to True ....................... - mbase.mask = masked - assert_equal(ma.getmaskarray(mbase['b']), [1]*5) - assert_equal(mbase['a']._mask, mbase['b']._mask) - assert_equal(mbase['a']._mask, mbase['c']._mask) - assert_equal(mbase._mask.tolist(), - np.array([(1, 1, 1)]*5, dtype=bool)) - # Delete the mask ............................ - mbase.mask = nomask - assert_equal(ma.getmaskarray(mbase['c']), [0]*5) - assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 0)]*5, dtype=bool)) - - def test_set_mask_fromarray(self): - base = self.base.copy() - mbase = base.view(mrecarray) - # Sets the mask w/ an array - mbase.mask = [1, 0, 0, 0, 1] - assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) - assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) - assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) - # Yay, once more ! - mbase.mask = [0, 0, 0, 0, 1] - assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) - - def test_set_mask_fromfields(self): - mbase = self.base.copy().view(mrecarray) - # - nmask = np.array( - [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], - dtype=[('a', bool), ('b', bool), ('c', bool)]) - mbase.mask = nmask - assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) - assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) - assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) - # Reinitalizes and redo - mbase.mask = False - mbase.fieldmask = nmask - assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) - assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) - assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) - - def test_set_elements(self): - base = self.base.copy() - # Set an element to mask ..................... - mbase = base.view(mrecarray).copy() - mbase[-2] = masked - assert_equal( - mbase._mask.tolist(), - np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], - dtype=bool)) - # Used to be mask, now it's recordmask! - assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) - # Set slices ................................. - mbase = base.view(mrecarray).copy() - mbase[:2] = (5, 5, 5) - assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) - assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) - assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) - assert_equal(mbase.c._data, - asbytes_nested(['5', '5', 'three', 'four', 'five'])) - assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) - # - mbase = base.view(mrecarray).copy() - mbase[:2] = masked - assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) - assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) - assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) - assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) - assert_equal(mbase.c._data, - asbytes_nested(['one', 'two', 'three', 'four', 'five'])) - assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) - - def test_setslices_hardmask(self): - # Tests setting slices w/ hardmask. 
- base = self.base.copy() - mbase = base.view(mrecarray) - mbase.harden_mask() - try: - mbase[-2:] = (5, 5, 5) - assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) - assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) - assert_equal(mbase.c._data, - asbytes_nested(['one', 'two', 'three', '5', 'five'])) - assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) - assert_equal(mbase.b._mask, mbase.a._mask) - assert_equal(mbase.b._mask, mbase.c._mask) - except NotImplementedError: - # OK, not implemented yet... - pass - except AssertionError: - raise - else: - raise Exception("Flexible hard masks should be supported !") - # Not using a tuple should crash - try: - mbase[-2:] = 3 - except (NotImplementedError, TypeError): - pass - else: - raise TypeError("Should have expected a readable buffer object!") - - def test_hardmask(self): - # Test hardmask - base = self.base.copy() - mbase = base.view(mrecarray) - mbase.harden_mask() - self.assertTrue(mbase._hardmask) - mbase.mask = nomask - assert_equal_records(mbase._mask, base._mask) - mbase.soften_mask() - self.assertTrue(not mbase._hardmask) - mbase.mask = nomask - # So, the mask of a field is no longer set to nomask... - assert_equal_records(mbase._mask, - ma.make_mask_none(base.shape, base.dtype)) - self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask) - assert_equal(mbase['a']._mask, mbase['b']._mask) - - def test_pickling(self): - # Test pickling - base = self.base.copy() - mrec = base.view(mrecarray) - _ = pickle.dumps(mrec) - mrec_ = pickle.loads(_) - assert_equal(mrec_.dtype, mrec.dtype) - assert_equal_records(mrec_._data, mrec._data) - assert_equal(mrec_._mask, mrec._mask) - assert_equal_records(mrec_._mask, mrec._mask) - - def test_filled(self): - # Test filling the array - _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) - _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) - _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mrec = fromarrays([_a, _b, _c], dtype=ddtype, - fill_value=(99999, 99999., 'N/A')) - mrecfilled = mrec.filled() - assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) - assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), - dtype=float)) - assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), - dtype='|S8')) - - def test_tolist(self): - # Test tolist. 
-        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
-        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
-        _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
-        ddtype = [('a', int), ('b', float), ('c', '|S8')]
-        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
-                          fill_value=(99999, 99999., 'N/A'))
-        #
-        assert_equal(mrec.tolist(),
-                     [(1, 1.1, None), (2, 2.2, asbytes('two')),
-                      (None, None, asbytes('three'))])
-
-    def test_withnames(self):
-        # Test the creation w/ format and names
-        x = mrecarray(1, formats=float, names='base')
-        x[0]['base'] = 10
-        assert_equal(x['base'][0], 10)
-
-    def test_exotic_formats(self):
-        # Test that 'exotic' formats are processed properly
-        easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
-        easy[0] = masked
-        assert_equal(easy.filled(1).item(), (1, asbytes('1'), 1.))
-        #
-        solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
[... source dump truncated here: the remainder of test_mrecords.py and the opening of numpy/ma/tests/test_old_ma.py are missing ...]
-        if len(s) > 1:
-            self.assertTrue(eq(np.concatenate((x, y), 1),
-                               concatenate((xm, ym), 1)))
-            self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
-            self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
-            self.assertTrue(eq(np.product(x, 1), product(x, 1)))
-
-    def test_testCI(self):
-        # Test of conversions and indexing
-        x1 = np.array([1, 2, 4, 3])
-        x2 = array(x1, mask=[1, 0, 0, 0])
-        x3 = array(x1, mask=[0, 1, 0, 1])
-        x4 = array(x1)
-        # test conversion to strings
-        junk, garbage = str(x2), repr(x2)
-        assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
-        # tests of indexing
-        assert_(type(x2[1]) is type(x1[1]))
-        assert_(x1[1] == x2[1])
-        assert_(x2[0] is masked)
-        assert_(eq(x1[2], x2[2]))
-        assert_(eq(x1[2:5], x2[2:5]))
-        assert_(eq(x1[:], x2[:]))
-        assert_(eq(x1[1:], x3[1:]))
-        x1[2] = 9
-        x2[2] = 9
-        assert_(eq(x1, x2))
-        x1[1:3] = 99
-        x2[1:3] = 99
-        assert_(eq(x1, x2))
-        x2[1] = masked
-        assert_(eq(x1, x2))
-        x2[1:3] = masked
-        assert_(eq(x1, x2))
-        x2[:] = x1
-        x2[1] = masked
-        assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
-        x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
-        assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
-        x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
-        assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
-        assert_(allequal(x4, array([1, 2, 3, 4])))
-        x1 = np.arange(5) * 1.0
-        x2 = masked_values(x1, 3.0)
-        assert_(eq(x1, x2))
-        assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
-        assert_(eq(3.0, x2.fill_value))
-        x1 = array([1, 'hello', 2, 3], object)
-        x2 = np.array([1, 'hello', 2, 3], object)
-        s1 = x1[1]
-        s2 = x2[1]
-        self.assertEqual(type(s2), str)
-        self.assertEqual(type(s1), str)
-        self.assertEqual(s1, s2)
-        assert_(x1[1:1].shape == (0,))
-
-    def test_testCopySize(self):
-        # Tests of some subtle points of copying and sizing.
- n = [0, 0, 1, 0, 0] - m = make_mask(n) - m2 = make_mask(m) - self.assertTrue(m is m2) - m3 = make_mask(m, copy=1) - self.assertTrue(m is not m3) - - x1 = np.arange(5) - y1 = array(x1, mask=m) - self.assertTrue(y1._data is not x1) - self.assertTrue(allequal(x1, y1._data)) - self.assertTrue(y1.mask is m) - - y1a = array(y1, copy=0) - self.assertTrue(y1a.mask is y1.mask) - - y2 = array(x1, mask=m, copy=0) - self.assertTrue(y2.mask is m) - self.assertTrue(y2[2] is masked) - y2[2] = 9 - self.assertTrue(y2[2] is not masked) - self.assertTrue(y2.mask is not m) - self.assertTrue(allequal(y2.mask, 0)) - - y3 = array(x1 * 1.0, mask=m) - self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) - - x4 = arange(4) - x4[2] = masked - y4 = resize(x4, (8,)) - self.assertTrue(eq(concatenate([x4, x4]), y4)) - self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) - y5 = repeat(x4, (2, 2, 2, 2), axis=0) - self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) - y6 = repeat(x4, 2, axis=0) - self.assertTrue(eq(y5, y6)) - - def test_testPut(self): - # Test of put - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - x = array(d, mask=m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is masked) - x[[1, 4]] = [10, 40] - self.assertTrue(x.mask is not m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is not masked) - self.assertTrue(eq(x, [0, 10, 2, -1, 40])) - - x = array(d, mask=m) - x.put([0, 1, 2], [-1, 100, 200]) - self.assertTrue(eq(x, [-1, 100, 200, 0, 0])) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is masked) - - def test_testMaPut(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] - i = np.nonzero(m)[0] - put(ym, i, zm) - assert_(all(take(ym, i, axis=0) == zm)) - - def test_testOddFeatures(self): - # Test of other odd features - x = arange(20) - x = x.reshape(4, 5) - x.flat[5] = 12 - assert_(x[1, 0] == 12) - z = x + 10j * x - assert_(eq(z.real, x)) - assert_(eq(z.imag, 10 * x)) - assert_(eq((z * conjugate(z)).real, 101 * x * x)) - z.imag[...] 
= 0.0 - - x = arange(10) - x[3] = masked - assert_(str(x[3]) == str(masked)) - c = x >= 8 - assert_(count(where(c, masked, masked)) == 0) - assert_(shape(where(c, masked, masked)) == c.shape) - z = where(c, x, masked) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is masked) - assert_(z[7] is masked) - assert_(z[8] is not masked) - assert_(z[9] is not masked) - assert_(eq(x, z)) - z = where(c, masked, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - z = masked_where(c, x) - assert_(z.dtype is x.dtype) - assert_(z[3] is masked) - assert_(z[4] is not masked) - assert_(z[7] is not masked) - assert_(z[8] is masked) - assert_(z[9] is masked) - assert_(eq(x, z)) - x = array([1., 2., 3., 4., 5.]) - c = array([1, 1, 1, 0, 0]) - x[2] = masked - z = where(c, x, -x) - assert_(eq(z, [1., 2., 0., -4., -5])) - c[0] = masked - z = where(c, x, -x) - assert_(eq(z, [1., 2., 0., -4., -5])) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) - assert_(eq(masked_where(greater_equal(x, 2), x), - masked_greater_equal(x, 2))) - assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) - assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) - assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) - assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) - assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) - assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) - assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) - assert_(eq(masked_inside(array(list(range(5)), - mask=[1, 0, 0, 0, 0]), 1, 3).mask, - [1, 1, 1, 1, 0])) - assert_(eq(masked_outside(array(list(range(5)), - mask=[0, 1, 0, 0, 0]), 1, 3).mask, - [1, 1, 0, 0, 1])) - assert_(eq(masked_equal(array(list(range(5)), - mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 0])) - assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], - mask=[1, 0, 0, 0, 0]), 2).mask, - [1, 0, 1, 0, 1])) - assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), - [99, 99, 3, 4, 5])) - atest = ones((10, 10, 10), dtype=float32) - btest = zeros(atest.shape, MaskType) - ctest = masked_where(btest, atest) - assert_(eq(atest, ctest)) - z = choose(c, (-x, x)) - assert_(eq(z, [1., 2., 0., -4., -5])) - assert_(z[0] is masked) - assert_(z[1] is not masked) - assert_(z[2] is masked) - x = arange(6) - x[5] = masked - y = arange(6) * 10 - y[2] = masked - c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) - cm = c.filled(1) - z = where(c, x, y) - zm = where(cm, x, y) - assert_(eq(z, zm)) - assert_(getmask(zm) is nomask) - assert_(eq(zm, [0, 1, 2, 30, 40, 50])) - z = where(c, masked, 1) - assert_(eq(z, [99, 99, 99, 1, 1, 1])) - z = where(c, 1, masked) - assert_(eq(z, [99, 1, 1, 99, 99, 99])) - - def test_testMinMax2(self): - # Test of minumum, maximum. 
- assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) - assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) - x = arange(5) - y = arange(5) - 2 - x[3] = masked - y[0] = masked - assert_(eq(minimum(x, y), where(less(x, y), x, y))) - assert_(eq(maximum(x, y), where(greater(x, y), x, y))) - assert_(minimum(x) == 0) - assert_(maximum(x) == 4) - - def test_testTakeTransposeInnerOuter(self): - # Test of take, transpose, inner, outer products - x = arange(24) - y = np.arange(24) - x[5:6] = masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) - assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) - assert_(eq(np.inner(filled(x, 0), filled(y, 0)), - inner(x, y))) - assert_(eq(np.outer(filled(x, 0), filled(y, 0)), - outer(x, y))) - y = array(['abc', 1, 'def', 2, 3], object) - y[2] = masked - t = take(y, [0, 3, 4]) - assert_(t[0] == 'abc') - assert_(t[1] == 2) - assert_(t[2] == 3) - - def test_testInplace(self): - # Test of inplace operations and rich comparisons - y = arange(10) - - x = arange(10) - xm = arange(10) - xm[2] = masked - x += 1 - assert_(eq(x, y + 1)) - xm += 1 - assert_(eq(x, y + 1)) - - x = arange(10) - xm = arange(10) - xm[2] = masked - x -= 1 - assert_(eq(x, y - 1)) - xm -= 1 - assert_(eq(xm, y - 1)) - - x = arange(10) * 1.0 - xm = arange(10) * 1.0 - xm[2] = masked - x *= 2.0 - assert_(eq(x, y * 2)) - xm *= 2.0 - assert_(eq(xm, y * 2)) - - x = arange(10) * 2 - xm = arange(10) - xm[2] = masked - x //= 2 - assert_(eq(x, y)) - xm //= 2 - assert_(eq(x, y)) - - x = arange(10) * 1.0 - xm = arange(10) * 1.0 - xm[2] = masked - x /= 2.0 - assert_(eq(x, y / 2.0)) - xm /= arange(10) - assert_(eq(xm, ones((10,)))) - - x = arange(10).astype(float32) - xm = arange(10) - xm[2] = masked - x += 1. - assert_(eq(x, y + 1.)) - - def test_testPickle(self): - # Test of pickling - import pickle - x = arange(12) - x[4:10:2] = masked - x = x.reshape(4, 3) - s = pickle.dumps(x) - y = pickle.loads(s) - assert_(eq(x, y)) - - def test_testMasked(self): - # Test of masked element - xx = arange(6) - xx[1] = masked - self.assertTrue(str(masked) == '--') - self.assertTrue(xx[1] is masked) - self.assertEqual(filled(xx[1], 0), 0) - # don't know why these should raise an exception... - #self.assertRaises(Exception, lambda x,y: x+y, masked, masked) - #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) - #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) - #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) - - def test_testAverage1(self): - # Test of average. - ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - self.assertTrue(eq(2.0, average(ott, axis=0))) - self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) - result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) - self.assertTrue(eq(2.0, result)) - self.assertTrue(wts == 4.0) - ott[:] = masked - self.assertTrue(average(ott, axis=0) is masked) - ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - ott = ott.reshape(2, 2) - ott[:, 1] = masked - self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0])) - self.assertTrue(average(ott, axis=1)[0] is masked) - self.assertTrue(eq([2., 0.], average(ott, axis=0))) - result, wts = average(ott, axis=0, returned=1) - self.assertTrue(eq(wts, [1., 0.])) - - def test_testAverage2(self): - # More tests of average. 
- w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = arange(6) - self.assertTrue(allclose(average(x, axis=0), 2.5)) - self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5)) - y = array([arange(6), 2.0 * arange(6)]) - self.assertTrue(allclose(average(y, None), - np.add.reduce(np.arange(6)) * 3. / 12.)) - self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) - self.assertTrue(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0)*2.0])) - self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.)) - self.assertTrue(allclose(average(y, axis=0, weights=w2), - [0., 1., 2., 3., 4., 10.])) - self.assertTrue(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0)*2.0])) - m1 = zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = ones(6) - m5 = [0, 1, 1, 1, 1, 1] - self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5)) - self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5)) - self.assertTrue(average(masked_array(x, m4), axis=0) is masked) - self.assertEqual(average(masked_array(x, m5), axis=0), 0.0) - self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0) - z = masked_array(y, m3) - self.assertTrue(allclose(average(z, None), 20. / 6.)) - self.assertTrue(allclose(average(z, axis=0), - [0., 1., 99., 99., 4.0, 7.5])) - self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0])) - self.assertTrue(allclose(average(z, axis=0, weights=w2), - [0., 1., 99., 99., 4.0, 10.0])) - - a = arange(6) - b = arange(6) * 3 - r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) - self.assertEqual(shape(r1), shape(w1)) - self.assertEqual(r1.shape, w1.shape) - r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) - self.assertEqual(shape(w2), shape(r2)) - r2, w2 = average(ones((2, 2, 3)), returned=1) - self.assertEqual(shape(w2), shape(r2)) - r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) - self.assertTrue(shape(w2) == shape(r2)) - a2d = array([[1, 2], [0, 4]], float) - a2dm = masked_array(a2d, [[0, 0], [1, 0]]) - a2da = average(a2d, axis=0) - self.assertTrue(eq(a2da, [0.5, 3.0])) - a2dma = average(a2dm, axis=0) - self.assertTrue(eq(a2dma, [1.0, 3.0])) - a2dma = average(a2dm, axis=None) - self.assertTrue(eq(a2dma, 7. 
/ 3.)) - a2dma = average(a2dm, axis=1) - self.assertTrue(eq(a2dma, [1.5, 4.0])) - - def test_testToPython(self): - self.assertEqual(1, int(array(1))) - self.assertEqual(1.0, float(array(1))) - self.assertEqual(1, int(array([[[1]]]))) - self.assertEqual(1.0, float(array([[1]]))) - self.assertRaises(TypeError, float, array([1, 1])) - self.assertRaises(ValueError, bool, array([0, 1])) - self.assertRaises(ValueError, bool, array([0, 0], mask=[0, 1])) - - def test_testScalarArithmetic(self): - xm = array(0, mask=1) - #TODO FIXME: Find out what the following raises a warning in r8247 - with np.errstate(divide='ignore'): - self.assertTrue((1 / array(0)).mask) - self.assertTrue((1 + xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue(maximum(xm, xm).mask) - self.assertTrue(minimum(xm, xm).mask) - self.assertTrue(xm.filled().dtype is xm._data.dtype) - x = array(0, mask=0) - self.assertTrue(x.filled() == x._data) - self.assertEqual(str(xm), str(masked_print_option)) - - def test_testArrayMethods(self): - a = array([1, 3, 2]) - self.assertTrue(eq(a.any(), a._data.any())) - self.assertTrue(eq(a.all(), a._data.all())) - self.assertTrue(eq(a.argmax(), a._data.argmax())) - self.assertTrue(eq(a.argmin(), a._data.argmin())) - self.assertTrue(eq(a.choose(0, 1, 2, 3, 4), - a._data.choose(0, 1, 2, 3, 4))) - self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) - self.assertTrue(eq(a.conj(), a._data.conj())) - self.assertTrue(eq(a.conjugate(), a._data.conjugate())) - m = array([[1, 2], [3, 4]]) - self.assertTrue(eq(m.diagonal(), m._data.diagonal())) - self.assertTrue(eq(a.sum(), a._data.sum())) - self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2]))) - self.assertTrue(eq(m.transpose(), m._data.transpose())) - - def test_testArrayAttributes(self): - a = array([1, 3, 2]) - self.assertEqual(a.ndim, 1) - - def test_testAPI(self): - self.assertFalse([m for m in dir(np.ndarray) - if m not in dir(MaskedArray) and - not m.startswith('_')]) - - def test_testSingleElementSubscript(self): - a = array([1, 3, 2]) - b = array([1, 3, 2], mask=[1, 0, 1]) - self.assertEqual(a[0].shape, ()) - self.assertEqual(b[0].shape, ()) - self.assertEqual(b[1].shape, ()) - - -class TestUfuncs(TestCase): - def setUp(self): - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), - array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) - - def test_testUfuncRegression(self): - f_invalid_ignore = [ - 'sqrt', 'arctanh', 'arcsin', 'arccos', - 'arccosh', 'arctanh', 'log', 'log10', 'divide', - 'true_divide', 'floor_divide', 'remainder', 'fmod'] - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', - 'sin', 'cos', 'tan', - 'arcsin', 'arccos', 'arctan', - 'sinh', 'cosh', 'tanh', - 'arcsinh', - 'arccosh', - 'arctanh', - 'absolute', 'fabs', 'negative', - # 'nonzero', 'around', - 'floor', 'ceil', - # 'sometrue', 'alltrue', - 'logical_not', - 'add', 'subtract', 'multiply', - 'divide', 'true_divide', 'floor_divide', - 'remainder', 'fmod', 'hypot', 'arctan2', - 'equal', 'not_equal', 'less_equal', 'greater_equal', - 'less', 'greater', - 'logical_and', 'logical_or', 'logical_xor']: - try: - uf = getattr(umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(np.ma, f) - args = self.d[:uf.nin] - with np.errstate(): - if f in f_invalid_ignore: - np.seterr(invalid='ignore') - if f in ['arctanh', 'log', 'log10']: - np.seterr(divide='ignore') - ur = uf(*args) - mr = mf(*args) - self.assertTrue(eq(ur.filled(0), mr.filled(0), f)) - self.assertTrue(eqmask(ur.mask, 
mr.mask)) - - def test_reduce(self): - a = self.d[0] - self.assertFalse(alltrue(a, axis=0)) - self.assertTrue(sometrue(a, axis=0)) - self.assertEqual(sum(a[:3], axis=0), 0) - self.assertEqual(product(a, axis=0), 0) - - def test_minmax(self): - a = arange(1, 13).reshape(3, 4) - amask = masked_where(a < 5, a) - self.assertEqual(amask.max(), a.max()) - self.assertEqual(amask.min(), 5) - self.assertTrue((amask.max(0) == a.max(0)).all()) - self.assertTrue((amask.min(0) == [5, 6, 7, 8]).all()) - self.assertTrue(amask.max(1)[0].mask) - self.assertTrue(amask.min(1)[0].mask) - - def test_nonzero(self): - for t in "?bhilqpBHILQPfdgFDGO": - x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) - self.assertTrue(eq(nonzero(x), [0])) - - -class TestArrayMethods(TestCase): - - def setUp(self): - x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, - 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, - 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, - 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, - 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) - X = x.reshape(6, 6) - XX = x.reshape(3, 2, 2, 3) - - m = np.array([0, 1, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 1, - 0, 0, 0, 1, 1, 1, - 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0]) - mx = array(data=x, mask=m) - mX = array(data=X, mask=m.reshape(X.shape)) - mXX = array(data=XX, mask=m.reshape(XX.shape)) - - self.d = (x, X, XX, m, mx, mX, mXX) - - #------------------------------------------------------ - def test_trace(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXdiag = mX.diagonal() - self.assertEqual(mX.trace(), mX.diagonal().compressed().sum()) - self.assertTrue(eq(mX.trace(), - X.trace() - sum(mXdiag.mask * X.diagonal(), - axis=0))) - - def test_clip(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - clipped = mx.clip(2, 8) - self.assertTrue(eq(clipped.mask, mx.mask)) - self.assertTrue(eq(clipped._data, x.clip(2, 8))) - self.assertTrue(eq(clipped._data, mx._data.clip(2, 8))) - - def test_ptp(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - (n, m) = X.shape - self.assertEqual(mx.ptp(), mx.compressed().ptp()) - rows = np.zeros(n, np.float_) - cols = np.zeros(m, np.float_) - for k in range(m): - cols[k] = mX[:, k].compressed().ptp() - for k in range(n): - rows[k] = mX[k].compressed().ptp() - self.assertTrue(eq(mX.ptp(0), cols)) - self.assertTrue(eq(mX.ptp(1), rows)) - - def test_swapaxes(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXswapped = mX.swapaxes(0, 1) - self.assertTrue(eq(mXswapped[-1], mX[:, -1])) - mXXswapped = mXX.swapaxes(0, 2) - self.assertEqual(mXXswapped.shape, (2, 2, 3, 3)) - - def test_cumprod(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXcp = mX.cumprod(0) - self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(0))) - mXcp = mX.cumprod(1) - self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(1))) - - def test_cumsum(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - mXcp = mX.cumsum(0) - self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(0))) - mXcp = mX.cumsum(1) - self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(1))) - - def test_varstd(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - self.assertTrue(eq(mX.var(axis=None), mX.compressed().var())) - self.assertTrue(eq(mX.std(axis=None), mX.compressed().std())) - self.assertTrue(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) - self.assertTrue(eq(mX.var().shape, X.var().shape)) - (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) - for k in range(6): - self.assertTrue(eq(mXvar1[k], mX[k].compressed().var())) - self.assertTrue(eq(mXvar0[k], mX[:, k].compressed().var())) - 
self.assertTrue(eq(np.sqrt(mXvar0[k]), - mX[:, k].compressed().std())) - - -def eqmask(m1, m2): - if m1 is nomask: - return m2 is nomask - if m2 is nomask: - return m1 is nomask - return (m1 == m2).all() - -#def timingTest(): -# for f in [testf, testinplace]: -# for n in [1000,10000,50000]: -# t = testta(n, f) -# t1 = testtb(n, f) -# t2 = testtc(n, f) -# print f.test_name -# print """\ -#n = %7d -#numpy time (ms) %6.1f -#MA maskless ratio %6.1f -#MA masked ratio %6.1f -#""" % (n, t*1000.0, t1/t, t2/t) - -#def testta(n, f): -# x=np.arange(n) + 1.0 -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testtb(n, f): -# x=arange(n) + 1.0 -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testtc(n, f): -# x=arange(n) + 1.0 -# x[0] = masked -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testf(x): -# for i in range(25): -# y = x **2 + 2.0 * x - 1.0 -# w = x **2 + 1.0 -# z = (y / w) ** 2 -# return z -#testf.test_name = 'Simple arithmetic' - -#def testinplace(x): -# for i in range(25): -# y = x**2 -# y += 2.0*x -# y -= 1.0 -# y /= x -# return y -#testinplace.test_name = 'Inplace operations' - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_regression.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_regression.py deleted file mode 100644 index 7b32199ea6064..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_regression.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import * -from numpy.compat import sixu - -rlevel = 1 - - -class TestRegression(TestCase): - def test_masked_array_create(self,level=rlevel): - # Ticket #17 - x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], - mask=[0, 0, 0, 1, 1, 1, 0, 0]) - assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) - - def test_masked_array(self,level=rlevel): - # Ticket #61 - np.ma.array(1, mask=[1]) - - def test_mem_masked_where(self,level=rlevel): - # Ticket #62 - from numpy.ma import masked_where, MaskType - a = np.zeros((1, 1)) - b = np.zeros(a.shape, MaskType) - c = masked_where(b, a) - a-c - - def test_masked_array_multiply(self,level=rlevel): - # Ticket #254 - a = np.ma.zeros((4, 1)) - a[2, 0] = np.ma.masked - b = np.zeros((4, 2)) - a*b - b*a - - def test_masked_array_repeat(self, level=rlevel): - # Ticket #271 - np.ma.array([1], mask=False).repeat(10) - - def test_masked_array_repr_unicode(self): - # Ticket #1256 - repr(np.ma.array(sixu("Unicode"))) - - def test_atleast_2d(self): - # Ticket #1559 - a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) - b = np.atleast_2d(a) - assert_(a.mask.ndim == 1) - assert_(b.mask.ndim == 2) - - def test_set_fill_value_unicode_py3(self): - # Ticket #2733 - a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) - a.fill_value = 'X' - assert_(a.fill_value == 'X') - - def test_var_sets_maskedarray_scalar(self): - # Issue gh-2757 - a = np.ma.array(np.arange(5), mask=True) - mout = np.ma.array(-1, dtype=float) - a.var(out=mout) - assert_(mout._data == 0) - - def test_ddof_corrcoef(self): - # See gh-3336 - x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) - y = np.array([2, 2.5, 3.1, 3, 5]) - r0 = np.ma.corrcoef(x, y, ddof=0) - r1 = np.ma.corrcoef(x, y, ddof=1) - # ddof should not have an effect (it gets cancelled out) - assert_allclose(r0.data, r1.data) - -if __name__ == "__main__": - run_module_suite() diff --git 
a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_subclassing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_subclassing.py deleted file mode 100644 index ade5c59daebfc..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/tests/test_subclassing.py +++ /dev/null @@ -1,236 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for MaskedArray & subclassing. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ - -""" -from __future__ import division, absolute_import, print_function - -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = '1.0' -__revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' - -import numpy as np -from numpy.testing import * -from numpy.ma.testutils import * -from numpy.ma.core import * - - -class SubArray(np.ndarray): - # Defines a generic np.ndarray subclass, that stores some metadata - # in the dictionary `info`. - def __new__(cls,arr,info={}): - x = np.asanyarray(arr).view(cls) - x.info = info - return x - - def __array_finalize__(self, obj): - self.info = getattr(obj, 'info', {}) - return - - def __add__(self, other): - result = np.ndarray.__add__(self, other) - result.info.update({'added':result.info.pop('added', 0)+1}) - return result - -subarray = SubArray - - -class MSubArray(SubArray, MaskedArray): - - def __new__(cls, data, info={}, mask=nomask): - subarr = SubArray(data, info) - _data = MaskedArray.__new__(cls, data=subarr, mask=mask) - _data.info = subarr.info - return _data - - def __array_finalize__(self, obj): - MaskedArray.__array_finalize__(self, obj) - SubArray.__array_finalize__(self, obj) - return - - def _get_series(self): - _view = self.view(MaskedArray) - _view._sharedmask = False - return _view - _series = property(fget=_get_series) - -msubarray = MSubArray - - -class MMatrix(MaskedArray, np.matrix,): - - def __new__(cls, data, mask=nomask): - mat = np.matrix(data) - _data = MaskedArray.__new__(cls, data=mat, mask=mask) - return _data - - def __array_finalize__(self, obj): - np.matrix.__array_finalize__(self, obj) - MaskedArray.__array_finalize__(self, obj) - return - - def _get_series(self): - _view = self.view(MaskedArray) - _view._sharedmask = False - return _view - _series = property(fget=_get_series) - -mmatrix = MMatrix - - -# also a subclass that overrides __str__, __repr__ and __setitem__, disallowing -# setting to non-class values (and thus np.ma.core.masked_print_option) -class ComplicatedSubArray(SubArray): - def __str__(self): - return 'myprefix {0} mypostfix'.format( - super(ComplicatedSubArray, self).__str__()) - - def __repr__(self): - # Return a repr that does not start with 'name(' - return '<{0} {1}>'.format(self.__class__.__name__, self) - - def __setitem__(self, item, value): - # this ensures direct assignment to masked_print_option will fail - if not isinstance(value, ComplicatedSubArray): - raise ValueError("Can only set to MySubArray values") - super(ComplicatedSubArray, self).__setitem__(item, value) - - -class TestSubclassing(TestCase): - # Test suite for masked subclasses of ndarray. - - def setUp(self): - x = np.arange(5) - mx = mmatrix(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) - - def test_data_subclassing(self): - # Tests whether the subclass is kept. 
- x = np.arange(5) - m = [0, 0, 1, 0, 0] - xsub = SubArray(x) - xmsub = masked_array(xsub, mask=m) - self.assertTrue(isinstance(xmsub, MaskedArray)) - assert_equal(xmsub._data, xsub) - self.assertTrue(isinstance(xmsub._data, SubArray)) - - def test_maskedarray_subclassing(self): - # Tests subclassing MaskedArray - (x, mx) = self.data - self.assertTrue(isinstance(mx._data, np.matrix)) - - def test_masked_unary_operations(self): - # Tests masked_unary_operation - (x, mx) = self.data - with np.errstate(divide='ignore'): - self.assertTrue(isinstance(log(mx), mmatrix)) - assert_equal(log(x), np.log(x)) - - def test_masked_binary_operations(self): - # Tests masked_binary_operation - (x, mx) = self.data - # Result should be a mmatrix - self.assertTrue(isinstance(add(mx, mx), mmatrix)) - self.assertTrue(isinstance(add(mx, x), mmatrix)) - # Result should work - assert_equal(add(mx, x), mx+x) - self.assertTrue(isinstance(add(mx, mx)._data, np.matrix)) - self.assertTrue(isinstance(add.outer(mx, mx), mmatrix)) - self.assertTrue(isinstance(hypot(mx, mx), mmatrix)) - self.assertTrue(isinstance(hypot(mx, x), mmatrix)) - - def test_masked_binary_operations2(self): - # Tests domained_masked_binary_operation - (x, mx) = self.data - xmx = masked_array(mx.data.__array__(), mask=mx.mask) - self.assertTrue(isinstance(divide(mx, mx), mmatrix)) - self.assertTrue(isinstance(divide(mx, x), mmatrix)) - assert_equal(divide(mx, mx), divide(xmx, xmx)) - - def test_attributepropagation(self): - x = array(arange(5), mask=[0]+[1]*4) - my = masked_array(subarray(x)) - ym = msubarray(x) - # - z = (my+1) - self.assertTrue(isinstance(z, MaskedArray)) - self.assertTrue(not isinstance(z, MSubArray)) - self.assertTrue(isinstance(z._data, SubArray)) - assert_equal(z._data.info, {}) - # - z = (ym+1) - self.assertTrue(isinstance(z, MaskedArray)) - self.assertTrue(isinstance(z, MSubArray)) - self.assertTrue(isinstance(z._data, SubArray)) - self.assertTrue(z._data.info['added'] > 0) - # - ym._set_mask([1, 0, 0, 0, 1]) - assert_equal(ym._mask, [1, 0, 0, 0, 1]) - ym._series._set_mask([0, 0, 0, 0, 1]) - assert_equal(ym._mask, [0, 0, 0, 0, 1]) - # - xsub = subarray(x, info={'name':'x'}) - mxsub = masked_array(xsub) - self.assertTrue(hasattr(mxsub, 'info')) - assert_equal(mxsub.info, xsub.info) - - def test_subclasspreservation(self): - # Checks that masked_array(...,subok=True) preserves the class. 
- x = np.arange(5) - m = [0, 0, 1, 0, 0] - xinfo = [(i, j) for (i, j) in zip(x, m)] - xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) - # - mxsub = masked_array(xsub, subok=False) - self.assertTrue(not isinstance(mxsub, MSubArray)) - self.assertTrue(isinstance(mxsub, MaskedArray)) - assert_equal(mxsub._mask, m) - # - mxsub = asarray(xsub) - self.assertTrue(not isinstance(mxsub, MSubArray)) - self.assertTrue(isinstance(mxsub, MaskedArray)) - assert_equal(mxsub._mask, m) - # - mxsub = masked_array(xsub, subok=True) - self.assertTrue(isinstance(mxsub, MSubArray)) - assert_equal(mxsub.info, xsub.info) - assert_equal(mxsub._mask, xsub._mask) - # - mxsub = asanyarray(xsub) - self.assertTrue(isinstance(mxsub, MSubArray)) - assert_equal(mxsub.info, xsub.info) - assert_equal(mxsub._mask, m) - - def test_subclass_repr(self): - """test that repr uses the name of the subclass - and 'array' for np.ndarray""" - x = np.arange(5) - mx = masked_array(x, mask=[True, False, True, False, False]) - self.assertTrue(repr(mx).startswith('masked_array')) - xsub = SubArray(x) - mxsub = masked_array(xsub, mask=[True, False, True, False, False]) - self.assertTrue(repr(mxsub).startswith( - 'masked_{0}(data = [-- 1 -- 3 4]'.format(SubArray.__name__))) - - def test_subclass_str(self): - """test str with subclass that has overridden str, setitem""" - # first without override - x = np.arange(5) - xsub = SubArray(x) - mxsub = masked_array(xsub, mask=[True, False, True, False, False]) - self.assertTrue(str(mxsub) == '[-- 1 -- 3 4]') - - xcsub = ComplicatedSubArray(x) - assert_raises(ValueError, xcsub.__setitem__, 0, - np.ma.core.masked_print_option) - mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) - self.assertTrue(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix') - - -############################################################################### -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/testutils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/testutils.py deleted file mode 100644 index feff3e8793d61..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/testutils.py +++ /dev/null @@ -1,240 +0,0 @@ -"""Miscellaneous functions for testing masked arrays and subclasses - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ - -""" -from __future__ import division, absolute_import, print_function - -__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" -__version__ = "1.0" -__revision__ = "$Revision: 3529 $" -__date__ = "$Date: 2007-11-13 10:01:14 +0200 (Tue, 13 Nov 2007) $" - - -import operator - -import numpy as np -from numpy import ndarray, float_ -import numpy.core.umath as umath -from numpy.testing import * -import numpy.testing.utils as utils - -from .core import mask_or, getmask, masked_array, nomask, masked, filled, \ - equal, less - -#------------------------------------------------------------------------------ -def approx (a, b, fill_value=True, rtol=1e-5, atol=1e-8): - """Returns true if all components of a and b are equal subject to given tolerances. - -If fill_value is True, masked values considered equal. Otherwise, masked values -are considered unequal. -The relative error rtol should be positive and << 1.0 -The absolute error atol comes into play for those elements of b that are very -small or zero; it says how small a must be also. 
- """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - if d1.dtype.char == "O" or d2.dtype.char == "O": - return np.equal(d1, d2).ravel() - x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) - y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) - d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)) - return d.ravel() - - -def almost(a, b, decimal=6, fill_value=True): - """Returns True if a and b are equal up to decimal places. -If fill_value is True, masked values considered equal. Otherwise, masked values -are considered unequal. - """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - if d1.dtype.char == "O" or d2.dtype.char == "O": - return np.equal(d1, d2).ravel() - x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) - y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) - d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) - return d.ravel() - - -#................................................ -def _assert_equal_on_sequences(actual, desired, err_msg=''): - "Asserts the equality of two non-array sequences." - assert_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) - return - -def assert_equal_records(a, b): - """Asserts that two records are equal. Pretty crude for now.""" - assert_equal(a.dtype, b.dtype) - for f in a.dtype.names: - (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) - if not (af is masked) and not (bf is masked): - assert_equal(operator.getitem(a, f), operator.getitem(b, f)) - return - - -def assert_equal(actual, desired, err_msg=''): - "Asserts that two items are equal." - # Case #1: dictionary ..... - if isinstance(desired, dict): - if not isinstance(actual, dict): - raise AssertionError(repr(type(actual))) - assert_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): - if not k in actual: - raise AssertionError("%s not in %s" % (k, actual)) - assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) - return - # Case #2: lists ..... - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - return _assert_equal_on_sequences(actual, desired, err_msg='') - if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): - msg = build_err_msg([actual, desired], err_msg,) - if not desired == actual: - raise AssertionError(msg) - return - # Case #4. 
arrays or equivalent - if ((actual is masked) and not (desired is masked)) or \ - ((desired is masked) and not (actual is masked)): - msg = build_err_msg([actual, desired], - err_msg, header='', names=('x', 'y')) - raise ValueError(msg) - actual = np.array(actual, copy=False, subok=True) - desired = np.array(desired, copy=False, subok=True) - (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) - if actual_dtype.char == "S" and desired_dtype.char == "S": - return _assert_equal_on_sequences(actual.tolist(), - desired.tolist(), - err_msg='') -# elif actual_dtype.char in "OV" and desired_dtype.char in "OV": -# if (actual_dtype != desired_dtype) and actual_dtype: -# msg = build_err_msg([actual_dtype, desired_dtype], -# err_msg, header='', names=('actual', 'desired')) -# raise ValueError(msg) -# return _assert_equal_on_sequences(actual.tolist(), -# desired.tolist(), -# err_msg='') - return assert_array_equal(actual, desired, err_msg) - - -def fail_if_equal(actual, desired, err_msg='',): - """Raises an assertion error if two items are equal. - """ - if isinstance(desired, dict): - if not isinstance(actual, dict): - raise AssertionError(repr(type(actual))) - fail_if_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): - if not k in actual: - raise AssertionError(repr(k)) - fail_if_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg)) - return - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - fail_if_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - fail_if_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg)) - return - if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): - return fail_if_array_equal(actual, desired, err_msg) - msg = build_err_msg([actual, desired], err_msg) - if not desired != actual: - raise AssertionError(msg) - -assert_not_equal = fail_if_equal - - -def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): - """Asserts that two items are almost equal. 
- The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal) - """ - if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): - return assert_array_almost_equal(actual, desired, decimal=decimal, - err_msg=err_msg, verbose=verbose) - msg = build_err_msg([actual, desired], - err_msg=err_msg, verbose=verbose) - if not round(abs(desired - actual), decimal) == 0: - raise AssertionError(msg) - - -assert_close = assert_almost_equal - - -def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', - fill_value=True): - """Asserts that a comparison relation between two masked arrays is satisfied - elementwise.""" - # Fill the data first -# xf = filled(x) -# yf = filled(y) - # Allocate a common mask and refill - m = mask_or(getmask(x), getmask(y)) - x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) - y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) - if ((x is masked) and not (y is masked)) or \ - ((y is masked) and not (x is masked)): - msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, - header=header, names=('x', 'y')) - raise ValueError(msg) - # OK, now run the basic tests on filled versions - return utils.assert_array_compare(comparison, - x.filled(fill_value), - y.filled(fill_value), - err_msg=err_msg, - verbose=verbose, header=header) - - -def assert_array_equal(x, y, err_msg='', verbose=True): - """Checks the elementwise equality of two masked arrays.""" - assert_array_compare(operator.__eq__, x, y, - err_msg=err_msg, verbose=verbose, - header='Arrays are not equal') - - -def fail_if_array_equal(x, y, err_msg='', verbose=True): - "Raises an assertion error if two masked arrays are not equal (elementwise)." - def compare(x, y): - return (not np.alltrue(approx(x, y))) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not equal') - - -def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True): - """Checks the elementwise equality of two masked arrays, up to a given - number of decimals.""" - def compare(x, y): - "Returns the result of the loose comparison between x and y)." - return approx(x, y, rtol=10. ** -decimal) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not almost equal') - - -def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): - """Checks the elementwise equality of two masked arrays, up to a given - number of decimals.""" - def compare(x, y): - "Returns the result of the loose comparison between x and y)." - return almost(x, y, decimal) - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header='Arrays are not almost equal') - - -def assert_array_less(x, y, err_msg='', verbose=True): - "Checks that x is smaller than y elementwise." 
-    assert_array_compare(operator.__lt__, x, y,
-                         err_msg=err_msg, verbose=verbose,
-                         header='Arrays are not less-ordered')
-
-
-def assert_mask_equal(m1, m2, err_msg=''):
-    """Asserts the equality of two masks."""
-    if m1 is nomask:
-        assert_(m2 is nomask)
-    if m2 is nomask:
-        assert_(m1 is nomask)
-    assert_array_equal(m1, m2, err_msg=err_msg)
diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/timer_comparison.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/timer_comparison.py
deleted file mode 100644
index b1c056cfc56cf..0000000000000
--- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/timer_comparison.py
+++ /dev/null
@@ -1,459 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import timeit
-from functools import reduce
-
-import numpy as np
-from numpy import float_
-import numpy.core.fromnumeric as fromnumeric
-
-from numpy.testing.utils import build_err_msg
-
-# Fixme: this does not look right.
-np.seterr(all='ignore')
-
-pi = np.pi
-
-
-class moduletester(object):
-    def __init__(self, module):
-        self.module = module
-        self.allequal = module.allequal
-        self.arange = module.arange
-        self.array = module.array
-#        self.average = module.average
-        self.concatenate = module.concatenate
-        self.count = module.count
-        self.equal = module.equal
-        self.filled = module.filled
-        self.getmask = module.getmask
-        self.getmaskarray = module.getmaskarray
-        self.id = id
-        self.inner = module.inner
-        self.make_mask = module.make_mask
-        self.masked = module.masked
-        self.masked_array = module.masked_array
-        self.masked_values = module.masked_values
-        self.mask_or = module.mask_or
-        self.nomask = module.nomask
-        self.ones = module.ones
-        self.outer = module.outer
-        self.repeat = module.repeat
-        self.resize = module.resize
-        self.sort = module.sort
-        self.take = module.take
-        self.transpose = module.transpose
-        self.zeros = module.zeros
-        self.MaskType = module.MaskType
-        try:
-            self.umath = module.umath
-        except AttributeError:
-            self.umath = module.core.umath
-        self.testnames = []
-
-    def assert_array_compare(self, comparison, x, y, err_msg='', header='',
-                             fill_value=True):
-        """Asserts that a comparison relation between two masked arrays is satisfied
-        elementwise."""
-        xf = self.filled(x)
-        yf = self.filled(y)
-        m = self.mask_or(self.getmask(x), self.getmask(y))
-
-        x = self.filled(self.masked_array(xf, mask=m), fill_value)
-        y = self.filled(self.masked_array(yf, mask=m), fill_value)
-        if (x.dtype.char != "O"):
-            x = x.astype(float_)
-            if isinstance(x, np.ndarray) and x.size > 1:
-                x[np.isnan(x)] = 0
-            elif np.isnan(x):
-                x = 0
-        if (y.dtype.char != "O"):
-            y = y.astype(float_)
-            if isinstance(y, np.ndarray) and y.size > 1:
-                y[np.isnan(y)] = 0
-            elif np.isnan(y):
-                y = 0
-        try:
-            cond = (x.shape==() or y.shape==()) or x.shape == y.shape
-            if not cond:
-                msg = build_err_msg([x, y],
-                                    err_msg
-                                    + '\n(shapes %s, %s mismatch)' % (x.shape,
-                                                                      y.shape),
-                                    header=header,
-                                    names=('x', 'y'))
-                assert cond, msg
-            val = comparison(x, y)
-            if m is not self.nomask and fill_value:
-                val = self.masked_array(val, mask=m)
-            if isinstance(val, bool):
-                cond = val
-                reduced = [0]
-            else:
-                reduced = val.ravel()
-                cond = reduced.all()
-                reduced = reduced.tolist()
-            if not cond:
-                match = 100-100.0*reduced.count(1)/len(reduced)
-                msg = build_err_msg([x, y],
-                                    err_msg
-                                    + '\n(mismatch %s%%)' % (match,),
-                                    header=header,
-                                    names=('x', 'y'))
-                assert cond, msg
-        except ValueError:
-            msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
-            raise ValueError(msg)
-
-    def
assert_array_equal(self, x, y, err_msg=''): - """Checks the elementwise equality of two masked arrays.""" - self.assert_array_compare(self.equal, x, y, err_msg=err_msg, - header='Arrays are not equal') - - def test_0(self): - "Tests creation" - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - xm = self.masked_array(x, mask=m) - xm[0] - - def test_1(self): - "Tests creation" - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - a10 = 10. - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = self.masked_array(x, mask=m1) - ym = self.masked_array(y, mask=m2) - z = np.array([-.5, 0., .5, .8]) - zm = self.masked_array(z, mask=[0, 1, 0, 0]) - xf = np.where(m1, 1.e+20, x) - xm.set_fill_value(1.e+20) - - assert((xm-ym).filled(0).any()) - #fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_)) - s = x.shape - assert(xm.size == reduce(lambda x, y:x*y, s)) - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) - - for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) - - def test_2(self): - "Tests conversions and indexing" - x1 = np.array([1, 2, 4, 3]) - x2 = self.array(x1, mask=[1, 0, 0, 0]) - x3 = self.array(x1, mask=[0, 1, 0, 1]) - x4 = self.array(x1) - # test conversion to strings - junk, garbage = str(x2), repr(x2) -# assert_equal(np.sort(x1), self.sort(x2, fill_value=0)) - # tests of indexing - assert type(x2[1]) is type(x1[1]) - assert x1[1] == x2[1] -# assert self.allequal(x1[2],x2[2]) -# assert self.allequal(x1[2:5],x2[2:5]) -# assert self.allequal(x1[:],x2[:]) -# assert self.allequal(x1[1:], x3[1:]) - x1[2] = 9 - x2[2] = 9 - self.assert_array_equal(x1, x2) - x1[1:3] = 99 - x2[1:3] = 99 -# assert self.allequal(x1,x2) - x2[1] = self.masked -# assert self.allequal(x1,x2) - x2[1:3] = self.masked -# assert self.allequal(x1,x2) - x2[:] = x1 - x2[1] = self.masked -# assert self.allequal(self.getmask(x2),self.array([0,1,0,0])) - x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) -# assert self.allequal(self.getmask(x3), self.array([0,1,1,0])) - x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) -# assert self.allequal(self.getmask(x4), self.array([0,1,1,0])) -# assert self.allequal(x4, self.array([1,2,3,4])) - x1 = np.arange(5)*1.0 - x2 = self.masked_values(x1, 3.0) -# assert self.allequal(x1,x2) -# assert self.allequal(self.array([0,0,0,1,0], self.MaskType), x2.mask) - x1 = self.array([1, 'hello', 2, 3], object) - x2 = np.array([1, 'hello', 2, 3], object) - s1 = x1[1] - s2 = x2[1] - assert x1[1:1].shape == (0,) - # Tests copy-size - n = [0, 0, 1, 0, 0] - m = self.make_mask(n) - m2 = self.make_mask(m) - assert(m is m2) - m3 = self.make_mask(m, copy=1) - assert(m is not m3) - - - def test_3(self): - "Tests resize/repeat" - x4 = self.arange(4) - x4[2] = self.masked - y4 = self.resize(x4, (8,)) - assert self.allequal(self.concatenate([x4, x4]), y4) - assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) - y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) - self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) - y6 = self.repeat(x4, 2, axis=0) - assert self.allequal(y5, y6) - y7 = x4.repeat((2, 2, 2, 2), axis=0) - assert self.allequal(y5, y7) - y8 = x4.repeat(2, 0) - assert self.allequal(y5, y8) - - #---------------------------------- - def test_4(self): - "Test of take, 
transpose, inner, outer products" - x = self.arange(24) - y = np.arange(24) - x[5:6] = self.masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) - assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) - assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), - self.inner(x, y)) - assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), - self.outer(x, y)) - y = self.array(['abc', 1, 'def', 2, 3], object) - y[2] = self.masked - t = self.take(y, [0, 3, 4]) - assert t[0] == 'abc' - assert t[1] == 2 - assert t[2] == 3 - #---------------------------------- - def test_5(self): - "Tests inplace w/ scalar" - - x = self.arange(10) - y = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x += 1 - assert self.allequal(x, y+1) - xm += 1 - assert self.allequal(xm, y+1) - - x = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x -= 1 - assert self.allequal(x, y-1) - xm -= 1 - assert self.allequal(xm, y-1) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = self.masked - x *= 2.0 - assert self.allequal(x, y*2) - xm *= 2.0 - assert self.allequal(xm, y*2) - - x = self.arange(10)*2 - xm = self.arange(10)*2 - xm[2] = self.masked - x /= 2 - assert self.allequal(x, y) - xm /= 2 - assert self.allequal(xm, y) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = self.masked - x /= 2.0 - assert self.allequal(x, y/2.0) - xm /= self.arange(10) - self.assert_array_equal(xm, self.ones((10,))) - - x = self.arange(10).astype(float_) - xm = self.arange(10) - xm[2] = self.masked - id1 = self.id(x.raw_data()) - x += 1. - #assert id1 == self.id(x.raw_data()) - assert self.allequal(x, y+1.) - - - def test_6(self): - "Tests inplace w/ array" - - x = self.arange(10, dtype=float_) - y = self.arange(10) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x += a - xm += a - assert self.allequal(x, y+a) - assert self.allequal(xm, y+a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x -= a - xm -= a - assert self.allequal(x, y-a) - assert self.allequal(xm, y-a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x *= a - xm *= a - assert self.allequal(x, y*a) - assert self.allequal(xm, y*a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=float_) - xm = self.arange(10, dtype=float_) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=float_) - a[-1] = self.masked - x /= a - xm /= a - - #---------------------------------- - def test_7(self): - "Tests ufunc" - d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), - self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', -# 'sin', 'cos', 'tan', -# 'arcsin', 'arccos', 'arctan', -# 'sinh', 'cosh', 'tanh', -# 'arcsinh', -# 'arccosh', -# 'arctanh', -# 'absolute', 'fabs', 'negative', -# # 'nonzero', 'around', -# 'floor', 'ceil', -# # 'sometrue', 'alltrue', -# 'logical_not', -# 'add', 'subtract', 'multiply', -# 'divide', 'true_divide', 'floor_divide', -# 'remainder', 'fmod', 
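#   (editorial aside) the loop body below slices the operand tuple with
#   ``args = d[:uf.nin]``: ``nin`` is the ufunc's input count, so unary
#   ufuncs (np.sqrt.nin == 1) receive one masked operand and binary
#   ufuncs (np.hypot.nin == 2) receive both.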
'hypot', 'arctan2', -# 'equal', 'not_equal', 'less_equal', 'greater_equal', -# 'less', 'greater', -# 'logical_and', 'logical_or', 'logical_xor', - ]: - #print f - try: - uf = getattr(self.umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(self.module, f) - args = d[:uf.nin] - ur = uf(*args) - mr = mf(*args) - self.assert_array_equal(ur.filled(0), mr.filled(0), f) - self.assert_array_equal(ur._mask, mr._mask) - - #---------------------------------- - def test_99(self): - # test average - ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - self.assert_array_equal(2.0, self.average(ott, axis=0)) - self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) - result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) - self.assert_array_equal(2.0, result) - assert(wts == 4.0) - ott[:] = self.masked - assert(self.average(ott, axis=0) is self.masked) - ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - ott = ott.reshape(2, 2) - ott[:, 1] = self.masked - self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) - assert(self.average(ott, axis=1)[0] is self.masked) - self.assert_array_equal([2., 0.], self.average(ott, axis=0)) - result, wts = self.average(ott, axis=0, returned=1) - self.assert_array_equal(wts, [1., 0.]) - w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = self.arange(6) - self.assert_array_equal(self.average(x, axis=0), 2.5) - self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) - y = self.array([self.arange(6), 2.0*self.arange(6)]) - self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) - self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) - self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) - self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) - self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) - self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) - m1 = self.zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = self.ones(6) - m5 = [0, 1, 1, 1, 1, 1] - self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) - # assert(self.average(masked_array(x, m4),axis=0) is masked) - self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) - self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) - z = self.masked_array(y, m3) - self.assert_array_equal(self.average(z, None), 20./6.) 
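# (editorial aside, not part of the timed benchmark) masked entries
# contribute zero weight to the average; with plain numpy.ma:
#   >>> a = np.ma.array([1., 2., 3., 4.], mask=[0, 0, 1, 0])
#   >>> np.ma.average(a)   # mean of the unmasked 1., 2., 4.
#   2.3333333333333335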
- self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) - self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) - self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) - #------------------------ - def test_A(self): - x = self.arange(24) - y = np.arange(24) - x[5:6] = self.masked - x = x.reshape(2, 3, 4) - - -################################################################################ -if __name__ == '__main__': - - setup_base = "from __main__ import moduletester \n"\ - "import numpy\n" \ - "tester = moduletester(module)\n" -# setup_new = "import np.ma.core_ini as module\n"+setup_base - setup_cur = "import np.ma.core as module\n"+setup_base -# setup_alt = "import np.ma.core_alt as module\n"+setup_base -# setup_tmp = "import np.ma.core_tmp as module\n"+setup_base - - (nrepeat, nloop) = (10, 10) - - if 1: - for i in range(1, 8): - func = 'tester.test_%i()' % i -# new = timeit.Timer(func, setup_new).repeat(nrepeat, nloop*10) - cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) -# alt = timeit.Timer(func, setup_alt).repeat(nrepeat, nloop*10) -# tmp = timeit.Timer(func, setup_tmp).repeat(nrepeat, nloop*10) -# new = np.sort(new) - cur = np.sort(cur) -# alt = np.sort(alt) -# tmp = np.sort(tmp) - print("#%i" % i +50*'.') - print(eval("moduletester.test_%i.__doc__" % i)) -# print "core_ini : %.3f - %.3f" % (new[0], new[1]) - print("core_current : %.3f - %.3f" % (cur[0], cur[1])) -# print "core_alt : %.3f - %.3f" % (alt[0], alt[1]) -# print "core_tmp : %.3f - %.3f" % (tmp[0], tmp[1]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/version.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/version.py deleted file mode 100644 index a2c5c42a806ac..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/ma/version.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Version number - -""" -from __future__ import division, absolute_import, print_function - -version = '1.00' -release = False - -if not release: - from . import core - from . import extras - revision = [core.__revision__.split(':')[-1][:-1].strip(), - extras.__revision__.split(':')[-1][:-1].strip(),] - version += '.dev%04i' % max([int(rev) for rev in revision]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matlib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matlib.py deleted file mode 100644 index 677400367b00b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matlib.py +++ /dev/null @@ -1,358 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.matrixlib.defmatrix import matrix, asmatrix -# need * as we're copying the numpy namespace -from numpy import * - -__version__ = np.__version__ - -__all__ = np.__all__[:] # copy numpy namespace -__all__ += ['rand', 'randn', 'repmat'] - -def empty(shape, dtype=None, order='C'): - """ - Return a new matrix of given shape and type, without initializing entries. - - Parameters - ---------- - shape : int or tuple of int - Shape of the empty matrix. - dtype : data-type, optional - Desired output data-type. - order : {'C', 'F'}, optional - Whether to store multi-dimensional data in C (row-major) or - Fortran (column-major) order in memory. - - See Also - -------- - empty_like, zeros - - Notes - ----- - `empty`, unlike `zeros`, does not set the matrix values to zero, - and may therefore be marginally faster. On the other hand, it requires - the user to manually set all the values in the array, and should be - used with caution. 
- - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.empty((2, 2)) # filled with random data - matrix([[ 6.76425276e-320, 9.79033856e-307], - [ 7.39337286e-309, 3.22135945e-309]]) #random - >>> np.matlib.empty((2, 2), dtype=int) - matrix([[ 6600475, 0], - [ 6586976, 22740995]]) #random - - """ - return ndarray.__new__(matrix, shape, dtype, order=order) - -def ones(shape, dtype=None, order='C'): - """ - Matrix of ones. - - Return a matrix of given shape and type, filled with ones. - - Parameters - ---------- - shape : {sequence of ints, int} - Shape of the matrix - dtype : data-type, optional - The desired data-type for the matrix, default is np.float64. - order : {'C', 'F'}, optional - Whether to store matrix in C- or Fortran-contiguous order, - default is 'C'. - - Returns - ------- - out : matrix - Matrix of ones of given shape, dtype, and order. - - See Also - -------- - ones : Array of ones. - matlib.zeros : Zero matrix. - - Notes - ----- - If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, - `out` becomes a single row matrix of shape ``(1,N)``. - - Examples - -------- - >>> np.matlib.ones((2,3)) - matrix([[ 1., 1., 1.], - [ 1., 1., 1.]]) - - >>> np.matlib.ones(2) - matrix([[ 1., 1.]]) - - """ - a = ndarray.__new__(matrix, shape, dtype, order=order) - a.fill(1) - return a - -def zeros(shape, dtype=None, order='C'): - """ - Return a matrix of given shape and type, filled with zeros. - - Parameters - ---------- - shape : int or sequence of ints - Shape of the matrix - dtype : data-type, optional - The desired data-type for the matrix, default is float. - order : {'C', 'F'}, optional - Whether to store the result in C- or Fortran-contiguous order, - default is 'C'. - - Returns - ------- - out : matrix - Zero matrix of given shape, dtype, and order. - - See Also - -------- - numpy.zeros : Equivalent array function. - matlib.ones : Return a matrix of ones. - - Notes - ----- - If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, - `out` becomes a single row matrix of shape ``(1,N)``. - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.zeros((2, 3)) - matrix([[ 0., 0., 0.], - [ 0., 0., 0.]]) - - >>> np.matlib.zeros(2) - matrix([[ 0., 0.]]) - - """ - a = ndarray.__new__(matrix, shape, dtype, order=order) - a.fill(0) - return a - -def identity(n,dtype=None): - """ - Returns the square identity matrix of given size. - - Parameters - ---------- - n : int - Size of the returned identity matrix. - dtype : data-type, optional - Data-type of the output. Defaults to ``float``. - - Returns - ------- - out : matrix - `n` x `n` matrix with its main diagonal set to one, - and all other elements zero. - - See Also - -------- - numpy.identity : Equivalent array function. - matlib.eye : More general matrix identity function. - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.identity(3, dtype=int) - matrix([[1, 0, 0], - [0, 1, 0], - [0, 0, 1]]) - - """ - a = array([1]+n*[0], dtype=dtype) - b = empty((n, n), dtype=dtype) - b.flat = a - return b - -def eye(n,M=None, k=0, dtype=float): - """ - Return a matrix with ones on the diagonal and zeros elsewhere. - - Parameters - ---------- - n : int - Number of rows in the output. - M : int, optional - Number of columns in the output, defaults to `n`. - k : int, optional - Index of the diagonal: 0 refers to the main diagonal, - a positive value refers to an upper diagonal, - and a negative value to a lower diagonal. - dtype : dtype, optional - Data-type of the returned matrix. 
- - Returns - ------- - I : matrix - A `n` x `M` matrix where all elements are equal to zero, - except for the `k`-th diagonal, whose values are equal to one. - - See Also - -------- - numpy.eye : Equivalent array function. - identity : Square identity matrix. - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.eye(3, k=1, dtype=float) - matrix([[ 0., 1., 0.], - [ 0., 0., 1.], - [ 0., 0., 0.]]) - - """ - return asmatrix(np.eye(n, M, k, dtype)) - -def rand(*args): - """ - Return a matrix of random values with given shape. - - Create a matrix of the given shape and propagate it with - random samples from a uniform distribution over ``[0, 1)``. - - Parameters - ---------- - \\*args : Arguments - Shape of the output. - If given as N integers, each integer specifies the size of one - dimension. - If given as a tuple, this tuple gives the complete shape. - - Returns - ------- - out : ndarray - The matrix of random values with shape given by `\\*args`. - - See Also - -------- - randn, numpy.random.rand - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.rand(2, 3) - matrix([[ 0.68340382, 0.67926887, 0.83271405], - [ 0.00793551, 0.20468222, 0.95253525]]) #random - >>> np.matlib.rand((2, 3)) - matrix([[ 0.84682055, 0.73626594, 0.11308016], - [ 0.85429008, 0.3294825 , 0.89139555]]) #random - - If the first argument is a tuple, other arguments are ignored: - - >>> np.matlib.rand((2, 3), 4) - matrix([[ 0.46898646, 0.15163588, 0.95188261], - [ 0.59208621, 0.09561818, 0.00583606]]) #random - - """ - if isinstance(args[0], tuple): - args = args[0] - return asmatrix(np.random.rand(*args)) - -def randn(*args): - """ - Return a random matrix with data from the "standard normal" distribution. - - `randn` generates a matrix filled with random floats sampled from a - univariate "normal" (Gaussian) distribution of mean 0 and variance 1. - - Parameters - ---------- - \\*args : Arguments - Shape of the output. - If given as N integers, each integer specifies the size of one - dimension. If given as a tuple, this tuple gives the complete shape. - - Returns - ------- - Z : matrix of floats - A matrix of floating-point samples drawn from the standard normal - distribution. - - See Also - -------- - rand, random.randn - - Notes - ----- - For random samples from :math:`N(\\mu, \\sigma^2)`, use: - - ``sigma * np.matlib.randn(...) + mu`` - - Examples - -------- - >>> import numpy.matlib - >>> np.matlib.randn(1) - matrix([[-0.09542833]]) #random - >>> np.matlib.randn(1, 2, 3) - matrix([[ 0.16198284, 0.0194571 , 0.18312985], - [-0.7509172 , 1.61055 , 0.45298599]]) #random - - Two-by-four matrix of samples from :math:`N(3, 6.25)`: - - >>> 2.5 * np.matlib.randn((2, 4)) + 3 - matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922], - [ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random - - """ - if isinstance(args[0], tuple): - args = args[0] - return asmatrix(np.random.randn(*args)) - -def repmat(a, m, n): - """ - Repeat a 0-D to 2-D array or matrix MxN times. - - Parameters - ---------- - a : array_like - The array or matrix to be repeated. - m, n : int - The number of times `a` is repeated along the first and second axes. - - Returns - ------- - out : ndarray - The result of repeating `a`. 
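Notes
-----
(editorial sketch) For 0-D to 2-D ndarray input this matches
``np.tile(a, (m, n))``; `repmat` additionally preserves the `matrix`
type of its argument. Assuming the imports used in the Examples below:

>>> a1 = np.arange(4)
>>> (np.matlib.repmat(a1, 2, 2) == np.tile(a1, (2, 2))).all()
True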
- - Examples - -------- - >>> import numpy.matlib - >>> a0 = np.array(1) - >>> np.matlib.repmat(a0, 2, 3) - array([[1, 1, 1], - [1, 1, 1]]) - - >>> a1 = np.arange(4) - >>> np.matlib.repmat(a1, 2, 2) - array([[0, 1, 2, 3, 0, 1, 2, 3], - [0, 1, 2, 3, 0, 1, 2, 3]]) - - >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) - >>> np.matlib.repmat(a2, 2, 3) - matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5, 3, 4, 5], - [0, 1, 2, 0, 1, 2, 0, 1, 2], - [3, 4, 5, 3, 4, 5, 3, 4, 5]]) - - """ - a = asanyarray(a) - ndim = a.ndim - if ndim == 0: - origrows, origcols = (1, 1) - elif ndim == 1: - origrows, origcols = (1, a.shape[0]) - else: - origrows, origcols = a.shape - rows = origrows * m - cols = origcols * n - c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) - return c.reshape(rows, cols) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/__init__.py deleted file mode 100644 index d20696154ab25..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Sub-package containing the matrix class and related functions. - -""" -from __future__ import division, absolute_import, print_function - -from .defmatrix import * - -__all__ = defmatrix.__all__ - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/defmatrix.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/defmatrix.py deleted file mode 100644 index 0fd5db66a21ea..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/defmatrix.py +++ /dev/null @@ -1,1094 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] - -import sys -import numpy.core.numeric as N -from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray -from numpy.core.numerictypes import issubdtype - -# make translation table -_numchars = '0123456789.-+jeEL' - -if sys.version_info[0] >= 3: - class _NumCharTable: - def __getitem__(self, i): - if chr(i) in _numchars: - return chr(i) - else: - return None - _table = _NumCharTable() - def _eval(astr): - str_ = astr.translate(_table) - if not str_: - raise TypeError("Invalid data string supplied: " + astr) - else: - return eval(str_) - -else: - _table = [None]*256 - for k in range(256): - _table[k] = chr(k) - _table = ''.join(_table) - - _todelete = [] - for k in _table: - if k not in _numchars: - _todelete.append(k) - _todelete = ''.join(_todelete) - del k - - def _eval(astr): - str_ = astr.translate(_table, _todelete) - if not str_: - raise TypeError("Invalid data string supplied: " + astr) - else: - return eval(str_) - -def _convert_from_string(data): - rows = data.split(';') - newdata = [] - count = 0 - for row in rows: - trow = row.split(',') - newrow = [] - for col in trow: - temp = col.split() - newrow.extend(map(_eval, temp)) - if count == 0: - Ncols = len(newrow) - elif len(newrow) != Ncols: - raise ValueError("Rows not the same size.") - count += 1 - newdata.append(newrow) - return newdata - -def asmatrix(data, dtype=None): - """ - Interpret the input as a matrix. - - Unlike `matrix`, `asmatrix` does not make a copy if the input is already - a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. - - Parameters - ---------- - data : array_like - Input data. - - Returns - ------- - mat : matrix - `data` interpreted as a matrix. 
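(editorial note) Because no copy is made for ndarray input, the matrix
below is a view: mutating `x` is visible through `m`.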
- - Examples - -------- - >>> x = np.array([[1, 2], [3, 4]]) - - >>> m = np.asmatrix(x) - - >>> x[0,0] = 5 - - >>> m - matrix([[5, 2], - [3, 4]]) - - """ - return matrix(data, dtype=dtype, copy=False) - -def matrix_power(M, n): - """ - Raise a square matrix to the (integer) power `n`. - - For positive integers `n`, the power is computed by repeated matrix - squarings and matrix multiplications. If ``n == 0``, the identity matrix - of the same shape as M is returned. If ``n < 0``, the inverse - is computed and then raised to the ``abs(n)``. - - Parameters - ---------- - M : ndarray or matrix object - Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``, - with `m` a positive integer. - n : int - The exponent can be any integer or long integer, positive, - negative, or zero. - - Returns - ------- - M**n : ndarray or matrix object - The return value is the same shape and type as `M`; - if the exponent is positive or zero then the type of the - elements is the same as those of `M`. If the exponent is - negative the elements are floating-point. - - Raises - ------ - LinAlgError - If the matrix is not numerically invertible. - - See Also - -------- - matrix - Provides an equivalent function as the exponentiation operator - (``**``, not ``^``). - - Examples - -------- - >>> from numpy import linalg as LA - >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit - >>> LA.matrix_power(i, 3) # should = -i - array([[ 0, -1], - [ 1, 0]]) - >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix - matrix([[ 0, -1], - [ 1, 0]]) - >>> LA.matrix_power(i, 0) - array([[1, 0], - [0, 1]]) - >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements - array([[ 0., 1.], - [-1., 0.]]) - - Somewhat more sophisticated example - - >>> q = np.zeros((4, 4)) - >>> q[0:2, 0:2] = -i - >>> q[2:4, 2:4] = i - >>> q # one of the three quarternion units not equal to 1 - array([[ 0., -1., 0., 0.], - [ 1., 0., 0., 0.], - [ 0., 0., 0., 1.], - [ 0., 0., -1., 0.]]) - >>> LA.matrix_power(q, 2) # = -np.eye(4) - array([[-1., 0., 0., 0.], - [ 0., -1., 0., 0.], - [ 0., 0., -1., 0.], - [ 0., 0., 0., -1.]]) - - """ - M = asanyarray(M) - if len(M.shape) != 2 or M.shape[0] != M.shape[1]: - raise ValueError("input must be a square array") - if not issubdtype(type(n), int): - raise TypeError("exponent must be an integer") - - from numpy.linalg import inv - - if n==0: - M = M.copy() - M[:] = identity(M.shape[0]) - return M - elif n<0: - M = inv(M) - n *= -1 - - result = M - if n <= 3: - for _ in range(n-1): - result=N.dot(result, M) - return result - - # binary decomposition to reduce the number of Matrix - # multiplications for n > 3. - beta = binary_repr(n) - Z, q, t = M, 0, len(beta) - while beta[t-q-1] == '0': - Z = N.dot(Z, Z) - q += 1 - result = Z - for k in range(q+1, t): - Z = N.dot(Z, Z) - if beta[t-k-1] == '1': - result = N.dot(result, Z) - return result - - -class matrix(N.ndarray): - """ - matrix(data, dtype=None, copy=True) - - Returns a matrix from an array-like object, or from a string of data. - A matrix is a specialized 2-D array that retains its 2-D nature - through operations. It has certain special operators, such as ``*`` - (matrix multiplication) and ``**`` (matrix power). - - Parameters - ---------- - data : array_like or string - If `data` is a string, it is interpreted as a matrix with commas - or spaces separating columns, and semicolons separating rows. - dtype : data-type - Data-type of the output matrix. 
- copy : bool - If `data` is already an `ndarray`, then this flag determines - whether the data is copied (the default), or whether a view is - constructed. - - See Also - -------- - array - - Examples - -------- - >>> a = np.matrix('1 2; 3 4') - >>> print a - [[1 2] - [3 4]] - - >>> np.matrix([[1, 2], [3, 4]]) - matrix([[1, 2], - [3, 4]]) - - """ - __array_priority__ = 10.0 - def __new__(subtype, data, dtype=None, copy=True): - if isinstance(data, matrix): - dtype2 = data.dtype - if (dtype is None): - dtype = dtype2 - if (dtype2 == dtype) and (not copy): - return data - return data.astype(dtype) - - if isinstance(data, N.ndarray): - if dtype is None: - intype = data.dtype - else: - intype = N.dtype(dtype) - new = data.view(subtype) - if intype != data.dtype: - return new.astype(intype) - if copy: return new.copy() - else: return new - - if isinstance(data, str): - data = _convert_from_string(data) - - # now convert data to an array - arr = N.array(data, dtype=dtype, copy=copy) - ndim = arr.ndim - shape = arr.shape - if (ndim > 2): - raise ValueError("matrix must be 2-dimensional") - elif ndim == 0: - shape = (1, 1) - elif ndim == 1: - shape = (1, shape[0]) - - order = False - if (ndim == 2) and arr.flags.fortran: - order = True - - if not (order or arr.flags.contiguous): - arr = arr.copy() - - ret = N.ndarray.__new__(subtype, shape, arr.dtype, - buffer=arr, - order=order) - return ret - - def __array_finalize__(self, obj): - self._getitem = False - if (isinstance(obj, matrix) and obj._getitem): return - ndim = self.ndim - if (ndim == 2): - return - if (ndim > 2): - newshape = tuple([x for x in self.shape if x > 1]) - ndim = len(newshape) - if ndim == 2: - self.shape = newshape - return - elif (ndim > 2): - raise ValueError("shape too large to be a matrix.") - else: - newshape = self.shape - if ndim == 0: - self.shape = (1, 1) - elif ndim == 1: - self.shape = (1, newshape[0]) - return - - def __getitem__(self, index): - self._getitem = True - - try: - out = N.ndarray.__getitem__(self, index) - finally: - self._getitem = False - - if not isinstance(out, N.ndarray): - return out - - if out.ndim == 0: - return out[()] - if out.ndim == 1: - sh = out.shape[0] - # Determine when we should have a column array - try: - n = len(index) - except: - n = 0 - if n > 1 and isscalar(index[1]): - out.shape = (sh, 1) - else: - out.shape = (1, sh) - return out - - def __mul__(self, other): - if isinstance(other, (N.ndarray, list, tuple)) : - # This promotes 1-D vectors to row vectors - return N.dot(self, asmatrix(other)) - if isscalar(other) or not hasattr(other, '__rmul__') : - return N.dot(self, other) - return NotImplemented - - def __rmul__(self, other): - return N.dot(other, self) - - def __imul__(self, other): - self[:] = self * other - return self - - def __pow__(self, other): - return matrix_power(self, other) - - def __ipow__(self, other): - self[:] = self ** other - return self - - def __rpow__(self, other): - return NotImplemented - - def __repr__(self): - s = repr(self.__array__()).replace('array', 'matrix') - # now, 'matrix' has 6 letters, and 'array' 5, so the columns don't - # line up anymore. We need to add a space. - l = s.splitlines() - for i in range(1, len(l)): - if l[i]: - l[i] = ' ' + l[i] - return '\n'.join(l) - - def __str__(self): - return str(self.__array__()) - - def _align(self, axis): - """A convenience function for operations that need to preserve axis - orientation. 
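For instance, ``argmax(axis=1)`` on a 3x4 matrix is returned as a
(3, 1) column matrix rather than a flat array (editorial note; see
the `argmax` example further down).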
- """ - if axis is None: - return self[0, 0] - elif axis==0: - return self - elif axis==1: - return self.transpose() - else: - raise ValueError("unsupported axis") - - def _collapse(self, axis): - """A convenience function for operations that want to collapse - to a scalar like _align, but are using keepdims=True - """ - if axis is None: - return self[0, 0] - else: - return self - - # Necessary because base-class tolist expects dimension - # reduction by x[0] - def tolist(self): - """ - Return the matrix as a (possibly nested) list. - - See `ndarray.tolist` for full documentation. - - See Also - -------- - ndarray.tolist - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.tolist() - [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] - - """ - return self.__array__().tolist() - - # To preserve orientation of result... - def sum(self, axis=None, dtype=None, out=None): - """ - Returns the sum of the matrix elements, along the given axis. - - Refer to `numpy.sum` for full documentation. - - See Also - -------- - numpy.sum - - Notes - ----- - This is the same as `ndarray.sum`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. - - Examples - -------- - >>> x = np.matrix([[1, 2], [4, 3]]) - >>> x.sum() - 10 - >>> x.sum(axis=1) - matrix([[3], - [7]]) - >>> x.sum(axis=1, dtype='float') - matrix([[ 3.], - [ 7.]]) - >>> out = np.zeros((1, 2), dtype='float') - >>> x.sum(axis=1, dtype='float', out=out) - matrix([[ 3.], - [ 7.]]) - - """ - return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) - - def mean(self, axis=None, dtype=None, out=None): - """ - Returns the average of the matrix elements along the given axis. - - Refer to `numpy.mean` for full documentation. - - See Also - -------- - numpy.mean - - Notes - ----- - Same as `ndarray.mean` except that, where that returns an `ndarray`, - this returns a `matrix` object. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.mean() - 5.5 - >>> x.mean(0) - matrix([[ 4., 5., 6., 7.]]) - >>> x.mean(1) - matrix([[ 1.5], - [ 5.5], - [ 9.5]]) - - """ - return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) - - def std(self, axis=None, dtype=None, out=None, ddof=0): - """ - Return the standard deviation of the array elements along the given axis. - - Refer to `numpy.std` for full documentation. - - See Also - -------- - numpy.std - - Notes - ----- - This is the same as `ndarray.std`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.std() - 3.4520525295346629 - >>> x.std(0) - matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) - >>> x.std(1) - matrix([[ 1.11803399], - [ 1.11803399], - [ 1.11803399]]) - - """ - return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) - - def var(self, axis=None, dtype=None, out=None, ddof=0): - """ - Returns the variance of the matrix elements, along the given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var - - Notes - ----- - This is the same as `ndarray.var`, except that where an `ndarray` would - be returned, a `matrix` object is returned instead. 
- - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3, 4))) - >>> x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.var() - 11.916666666666666 - >>> x.var(0) - matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) - >>> x.var(1) - matrix([[ 1.25], - [ 1.25], - [ 1.25]]) - - """ - return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) - - def prod(self, axis=None, dtype=None, out=None): - """ - Return the product of the array elements over the given axis. - - Refer to `prod` for full documentation. - - See Also - -------- - prod, ndarray.prod - - Notes - ----- - Same as `ndarray.prod`, except, where that returns an `ndarray`, this - returns a `matrix` object instead. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.prod() - 0 - >>> x.prod(0) - matrix([[ 0, 45, 120, 231]]) - >>> x.prod(1) - matrix([[ 0], - [ 840], - [7920]]) - - """ - return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) - - def any(self, axis=None, out=None): - """ - Test whether any array element along a given axis evaluates to True. - - Refer to `numpy.any` for full documentation. - - Parameters - ---------- - axis : int, optional - Axis along which logical OR is performed - out : ndarray, optional - Output to existing array instead of creating new one, must have - same shape as expected output - - Returns - ------- - any : bool, ndarray - Returns a single bool if `axis` is ``None``; otherwise, - returns `ndarray` - - """ - return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) - - def all(self, axis=None, out=None): - """ - Test whether all matrix elements along a given axis evaluate to True. - - Parameters - ---------- - See `numpy.all` for complete descriptions - - See Also - -------- - numpy.all - - Notes - ----- - This is the same as `ndarray.all`, but it returns a `matrix` object. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> y = x[0]; y - matrix([[0, 1, 2, 3]]) - >>> (x == y) - matrix([[ True, True, True, True], - [False, False, False, False], - [False, False, False, False]], dtype=bool) - >>> (x == y).all() - False - >>> (x == y).all(0) - matrix([[False, False, False, False]], dtype=bool) - >>> (x == y).all(1) - matrix([[ True], - [False], - [False]], dtype=bool) - - """ - return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) - - def max(self, axis=None, out=None): - """ - Return the maximum value along an axis. - - Parameters - ---------- - See `amax` for complete descriptions - - See Also - -------- - amax, ndarray.max - - Notes - ----- - This is the same as `ndarray.max`, but returns a `matrix` object - where `ndarray.max` would return an ndarray. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.max() - 11 - >>> x.max(0) - matrix([[ 8, 9, 10, 11]]) - >>> x.max(1) - matrix([[ 3], - [ 7], - [11]]) - - """ - return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) - - def argmax(self, axis=None, out=None): - """ - Indices of the maximum values along an axis. 
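(editorial note) When `axis` is None, the returned index refers to
the flattened matrix, as in the first example below.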
- - Parameters - ---------- - See `numpy.argmax` for complete descriptions - - See Also - -------- - numpy.argmax - - Notes - ----- - This is the same as `ndarray.argmax`, but returns a `matrix` object - where `ndarray.argmax` would return an `ndarray`. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.argmax() - 11 - >>> x.argmax(0) - matrix([[2, 2, 2, 2]]) - >>> x.argmax(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.argmax(self, axis, out)._align(axis) - - def min(self, axis=None, out=None): - """ - Return the minimum value along an axis. - - Parameters - ---------- - See `amin` for complete descriptions. - - See Also - -------- - amin, ndarray.min - - Notes - ----- - This is the same as `ndarray.min`, but returns a `matrix` object - where `ndarray.min` would return an ndarray. - - Examples - -------- - >>> x = -np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, -1, -2, -3], - [ -4, -5, -6, -7], - [ -8, -9, -10, -11]]) - >>> x.min() - -11 - >>> x.min(0) - matrix([[ -8, -9, -10, -11]]) - >>> x.min(1) - matrix([[ -3], - [ -7], - [-11]]) - - """ - return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) - - def argmin(self, axis=None, out=None): - """ - Return the indices of the minimum values along an axis. - - Parameters - ---------- - See `numpy.argmin` for complete descriptions. - - See Also - -------- - numpy.argmin - - Notes - ----- - This is the same as `ndarray.argmin`, but returns a `matrix` object - where `ndarray.argmin` would return an `ndarray`. - - Examples - -------- - >>> x = -np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, -1, -2, -3], - [ -4, -5, -6, -7], - [ -8, -9, -10, -11]]) - >>> x.argmin() - 11 - >>> x.argmin(0) - matrix([[2, 2, 2, 2]]) - >>> x.argmin(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.argmin(self, axis, out)._align(axis) - - def ptp(self, axis=None, out=None): - """ - Peak-to-peak (maximum - minimum) value along the given axis. - - Refer to `numpy.ptp` for full documentation. - - See Also - -------- - numpy.ptp - - Notes - ----- - Same as `ndarray.ptp`, except, where that would return an `ndarray` object, - this returns a `matrix` object. - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.ptp() - 11 - >>> x.ptp(0) - matrix([[8, 8, 8, 8]]) - >>> x.ptp(1) - matrix([[3], - [3], - [3]]) - - """ - return N.ndarray.ptp(self, axis, out)._align(axis) - - def getI(self): - """ - Returns the (multiplicative) inverse of invertible `self`. - - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - If `self` is non-singular, `ret` is such that ``ret * self`` == - ``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return - ``True``. - - Raises - ------ - numpy.linalg.LinAlgError: Singular matrix - If `self` is singular. - - See Also - -------- - linalg.inv - - Examples - -------- - >>> m = np.matrix('[1, 2; 3, 4]'); m - matrix([[1, 2], - [3, 4]]) - >>> m.getI() - matrix([[-2. , 1. ], - [ 1.5, -0.5]]) - >>> m.getI() * m - matrix([[ 1., 0.], - [ 0., 1.]]) - - """ - M, N = self.shape - if M == N: - from numpy.dual import inv as func - else: - from numpy.dual import pinv as func - return asmatrix(func(self)) - - def getA(self): - """ - Return `self` as an `ndarray` object. - - Equivalent to ``np.asarray(self)``. 
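(editorial note) Like ``np.asarray``, this returns a view on the same
data whenever possible rather than a copy.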
- - Parameters - ---------- - None - - Returns - ------- - ret : ndarray - `self` as an `ndarray` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.getA() - array([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - - """ - return self.__array__() - - def getA1(self): - """ - Return `self` as a flattened `ndarray`. - - Equivalent to ``np.asarray(x).ravel()`` - - Parameters - ---------- - None - - Returns - ------- - ret : ndarray - `self`, 1-D, as an `ndarray` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))); x - matrix([[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]]) - >>> x.getA1() - array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - - """ - return self.__array__().ravel() - - def getT(self): - """ - Returns the transpose of the matrix. - - Does *not* conjugate! For the complex conjugate transpose, use ``.H``. - - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - The (non-conjugated) transpose of the matrix. - - See Also - -------- - transpose, getH - - Examples - -------- - >>> m = np.matrix('[1, 2; 3, 4]') - >>> m - matrix([[1, 2], - [3, 4]]) - >>> m.getT() - matrix([[1, 3], - [2, 4]]) - - """ - return self.transpose() - - def getH(self): - """ - Returns the (complex) conjugate transpose of `self`. - - Equivalent to ``np.transpose(self)`` if `self` is real-valued. - - Parameters - ---------- - None - - Returns - ------- - ret : matrix object - complex conjugate transpose of `self` - - Examples - -------- - >>> x = np.matrix(np.arange(12).reshape((3,4))) - >>> z = x - 1j*x; z - matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], - [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], - [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) - >>> z.getH() - matrix([[ 0. +0.j, 4. +4.j, 8. +8.j], - [ 1. +1.j, 5. +5.j, 9. +9.j], - [ 2. +2.j, 6. +6.j, 10.+10.j], - [ 3. +3.j, 7. +7.j, 11.+11.j]]) - - """ - if issubclass(self.dtype.type, N.complexfloating): - return self.transpose().conjugate() - else: - return self.transpose() - - T = property(getT, None) - A = property(getA, None) - A1 = property(getA1, None) - H = property(getH, None) - I = property(getI, None) - -def _from_string(str, gdict, ldict): - rows = str.split(';') - rowtup = [] - for row in rows: - trow = row.split(',') - newrow = [] - for x in trow: - newrow.extend(x.split()) - trow = newrow - coltup = [] - for col in trow: - col = col.strip() - try: - thismat = ldict[col] - except KeyError: - try: - thismat = gdict[col] - except KeyError: - raise KeyError("%s not found" % (col,)) - - coltup.append(thismat) - rowtup.append(concatenate(coltup, axis=-1)) - return concatenate(rowtup, axis=0) - - -def bmat(obj, ldict=None, gdict=None): - """ - Build a matrix object from a string, nested sequence, or array. - - Parameters - ---------- - obj : str or array_like - Input data. Names of variables in the current scope may be - referenced, even if `obj` is a string. - - Returns - ------- - out : matrix - Returns a matrix object, which is a specialized 2-D array. 
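(editorial note) The optional `ldict` and `gdict` arguments are
dictionaries of local and global names used to resolve variable
references when `obj` is a string; when `gdict` is None the caller's
frame is inspected instead, which is what lets the bare
``np.bmat('A,B; C,D')`` form below find `A` through `D`.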
- - See Also - -------- - matrix - - Examples - -------- - >>> A = np.mat('1 1; 1 1') - >>> B = np.mat('2 2; 2 2') - >>> C = np.mat('3 4; 5 6') - >>> D = np.mat('7 8; 9 0') - - All the following expressions construct the same block matrix: - - >>> np.bmat([[A, B], [C, D]]) - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - >>> np.bmat('A,B; C,D') - matrix([[1, 1, 2, 2], - [1, 1, 2, 2], - [3, 4, 7, 8], - [5, 6, 9, 0]]) - - """ - if isinstance(obj, str): - if gdict is None: - # get previous frame - frame = sys._getframe().f_back - glob_dict = frame.f_globals - loc_dict = frame.f_locals - else: - glob_dict = gdict - loc_dict = ldict - - return matrix(_from_string(obj, glob_dict, loc_dict)) - - if isinstance(obj, (tuple, list)): - # [[A,B],[C,D]] - arr_rows = [] - for row in obj: - if isinstance(row, N.ndarray): # not 2-d - return matrix(concatenate(obj, axis=-1)) - else: - arr_rows.append(concatenate(row, axis=-1)) - return matrix(concatenate(arr_rows, axis=0)) - if isinstance(obj, N.ndarray): - return matrix(obj) - -mat = asmatrix diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/setup.py deleted file mode 100644 index 8c383cecec7b8..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -import os - -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('matrixlib', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_defmatrix.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_defmatrix.py deleted file mode 100644 index a06a564aa8bae..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_defmatrix.py +++ /dev/null @@ -1,400 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import * -from numpy.core import * -from numpy import matrix, asmatrix, bmat -from numpy.matrixlib.defmatrix import matrix_power -from numpy.matrixlib import mat -import numpy as np -import collections - -class TestCtor(TestCase): - def test_basic(self): - A = array([[1, 2], [3, 4]]) - mA = matrix(A) - assert_(all(mA.A == A)) - - B = bmat("A,A;A,A") - C = bmat([[A, A], [A, A]]) - D = array([[1, 2, 1, 2], - [3, 4, 3, 4], - [1, 2, 1, 2], - [3, 4, 3, 4]]) - assert_(all(B.A == D)) - assert_(all(C.A == D)) - - E = array([[5, 6], [7, 8]]) - AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) - assert_(all(bmat([A, E]) == AEresult)) - - vec = arange(5) - mvec = matrix(vec) - assert_(mvec.shape == (1, 5)) - - def test_exceptions(self): - # Check for TypeError when called with invalid string data. 
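# ("invalid" contains none of the numeric characters kept by _eval's
# translation table, so _eval receives an empty string and raises
# TypeError.)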
- assert_raises(TypeError, matrix, "invalid") - - def test_bmat_nondefault_str(self): - A = array([[1, 2], [3, 4]]) - B = array([[5, 6], [7, 8]]) - Aresult = array([[1, 2, 1, 2], - [3, 4, 3, 4], - [1, 2, 1, 2], - [3, 4, 3, 4]]) - Bresult = array([[5, 6, 5, 6], - [7, 8, 7, 8], - [5, 6, 5, 6], - [7, 8, 7, 8]]) - mixresult = array([[1, 2, 5, 6], - [3, 4, 7, 8], - [5, 6, 1, 2], - [7, 8, 3, 4]]) - assert_(all(bmat("A,A;A,A") == Aresult)) - assert_(all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) - assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) - assert_(all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) - b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) - assert_(all(b2 == mixresult)) - - -class TestProperties(TestCase): - def test_sum(self): - """Test whether matrix.sum(axis=1) preserves orientation. - Fails in NumPy <= 0.9.6.2127. - """ - M = matrix([[1, 2, 0, 0], - [3, 4, 0, 0], - [1, 2, 1, 2], - [3, 4, 3, 4]]) - sum0 = matrix([8, 12, 4, 6]) - sum1 = matrix([3, 7, 6, 14]).T - sumall = 30 - assert_array_equal(sum0, M.sum(axis=0)) - assert_array_equal(sum1, M.sum(axis=1)) - assert_equal(sumall, M.sum()) - - assert_array_equal(sum0, np.sum(M, axis=0)) - assert_array_equal(sum1, np.sum(M, axis=1)) - assert_equal(sumall, np.sum(M)) - - - def test_prod(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(x.prod(), 720) - assert_equal(x.prod(0), matrix([[4, 10, 18]])) - assert_equal(x.prod(1), matrix([[6], [120]])) - - assert_equal(np.prod(x), 720) - assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) - assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) - - y = matrix([0, 1, 3]) - assert_(y.prod() == 0) - - def test_max(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(x.max(), 6) - assert_equal(x.max(0), matrix([[4, 5, 6]])) - assert_equal(x.max(1), matrix([[3], [6]])) - - assert_equal(np.max(x), 6) - assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) - assert_equal(np.max(x, axis=1), matrix([[3], [6]])) - - def test_min(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(x.min(), 1) - assert_equal(x.min(0), matrix([[1, 2, 3]])) - assert_equal(x.min(1), matrix([[1], [4]])) - - assert_equal(np.min(x), 1) - assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) - assert_equal(np.min(x, axis=1), matrix([[1], [4]])) - - def test_ptp(self): - x = np.arange(4).reshape((2, 2)) - assert_(x.ptp() == 3) - assert_(all(x.ptp(0) == array([2, 2]))) - assert_(all(x.ptp(1) == array([1, 1]))) - - def test_var(self): - x = np.arange(9).reshape((3, 3)) - mx = x.view(np.matrix) - assert_equal(x.var(ddof=0), mx.var(ddof=0)) - assert_equal(x.var(ddof=1), mx.var(ddof=1)) - - def test_basic(self): - import numpy.linalg as linalg - - A = array([[1., 2.], - [3., 4.]]) - mA = matrix(A) - assert_(allclose(linalg.inv(A), mA.I)) - assert_(all(array(transpose(A) == mA.T))) - assert_(all(array(transpose(A) == mA.H))) - assert_(all(A == mA.A)) - - B = A + 2j*A - mB = matrix(B) - assert_(allclose(linalg.inv(B), mB.I)) - assert_(all(array(transpose(B) == mB.T))) - assert_(all(array(conjugate(transpose(B)) == mB.H))) - - def test_pinv(self): - x = matrix(arange(6).reshape(2, 3)) - xpinv = matrix([[-0.77777778, 0.27777778], - [-0.11111111, 0.11111111], - [ 0.55555556, -0.05555556]]) - assert_almost_equal(x.I, xpinv) - - def test_comparisons(self): - A = arange(100).reshape(10, 10) - mA = matrix(A) - mB = matrix(A) + 0.1 - assert_(all(mB == A+0.1)) - assert_(all(mB == matrix(A+0.1))) - assert_(not any(mB == matrix(A-0.1))) - assert_(all(mA < mB)) - assert_(all(mA <= mB)) 
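# (editorial aside) elementwise comparisons on matrices yield boolean
# matrices; the `all`/`any` used here are numpy's reductions (via
# ``from numpy.core import *``), collapsing them to a single bool.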
- assert_(all(mA <= mA)) - assert_(not any(mA < mA)) - - assert_(not any(mB < mA)) - assert_(all(mB >= mA)) - assert_(all(mB >= mB)) - assert_(not any(mB > mB)) - - assert_(all(mA == mA)) - assert_(not any(mA == mB)) - assert_(all(mB != mA)) - - assert_(not all(abs(mA) > 0)) - assert_(all(abs(mB > 0))) - - def test_asmatrix(self): - A = arange(100).reshape(10, 10) - mA = asmatrix(A) - A[0, 0] = -10 - assert_(A[0, 0] == mA[0, 0]) - - def test_noaxis(self): - A = matrix([[1, 0], [0, 1]]) - assert_(A.sum() == matrix(2)) - assert_(A.mean() == matrix(0.5)) - - def test_repr(self): - A = matrix([[1, 0], [0, 1]]) - assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") - -class TestCasting(TestCase): - def test_basic(self): - A = arange(100).reshape(10, 10) - mA = matrix(A) - - mB = mA.copy() - O = ones((10, 10), float64) * 0.1 - mB = mB + O - assert_(mB.dtype.type == float64) - assert_(all(mA != mB)) - assert_(all(mB == mA+0.1)) - - mC = mA.copy() - O = ones((10, 10), complex128) - mC = mC * O - assert_(mC.dtype.type == complex128) - assert_(all(mA != mB)) - - -class TestAlgebra(TestCase): - def test_basic(self): - import numpy.linalg as linalg - - A = array([[1., 2.], - [3., 4.]]) - mA = matrix(A) - - B = identity(2) - for i in range(6): - assert_(allclose((mA ** i).A, B)) - B = dot(B, A) - - Ainv = linalg.inv(A) - B = identity(2) - for i in range(6): - assert_(allclose((mA ** -i).A, B)) - B = dot(B, Ainv) - - assert_(allclose((mA * mA).A, dot(A, A))) - assert_(allclose((mA + mA).A, (A + A))) - assert_(allclose((3*mA).A, (3*A))) - - mA2 = matrix(A) - mA2 *= 3 - assert_(allclose(mA2.A, 3*A)) - - def test_pow(self): - """Test raising a matrix to an integer power works as expected.""" - m = matrix("1. 2.; 3. 4.") - m2 = m.copy() - m2 **= 2 - mi = m.copy() - mi **= -1 - m4 = m2.copy() - m4 **= 2 - assert_array_almost_equal(m2, m**2) - assert_array_almost_equal(m4, np.dot(m2, m2)) - assert_array_almost_equal(np.dot(mi, m), np.eye(2)) - - def test_notimplemented(self): - '''Check that 'not implemented' operations produce a failure.''' - A = matrix([[1., 2.], - [3., 4.]]) - - # __rpow__ - try: - 1.0**A - except TypeError: - pass - else: - self.fail("matrix.__rpow__ doesn't raise a TypeError") - - # __mul__ with something not a list, ndarray, tuple, or scalar - try: - A*object() - except TypeError: - pass - else: - self.fail("matrix.__mul__ with non-numeric object doesn't raise" - "a TypeError") - -class TestMatrixReturn(TestCase): - def test_instance_methods(self): - a = matrix([1.0], dtype='f8') - methodargs = { - 'astype': ('intc',), - 'clip': (0.0, 1.0), - 'compress': ([1],), - 'repeat': (1,), - 'reshape': (1,), - 'swapaxes': (0, 0), - 'dot': np.array([1.0]), - } - excluded_methods = [ - 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', - 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', - 'searchsorted', 'setflags', 'setfield', 'sort', - 'partition', 'argpartition', - 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', - 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', - 'prod', 'std', 'ctypes', 'itemset', 'setasflat' - ] - for attrib in dir(a): - if attrib.startswith('_') or attrib in excluded_methods: - continue - f = getattr(a, attrib) - if isinstance(f, collections.Callable): - # reset contents of a - a.astype('f8') - a.fill(1.0) - if attrib in methodargs: - args = methodargs[attrib] - else: - args = () - b = f(*args) - assert_(type(b) is matrix, "%s" % attrib) - assert_(type(a.real) is matrix) - assert_(type(a.imag) is matrix) - c, d = 
matrix([0.0]).nonzero() - assert_(type(c) is matrix) - assert_(type(d) is matrix) - - -class TestIndexing(TestCase): - def test_basic(self): - x = asmatrix(zeros((3, 2), float)) - y = zeros((3, 1), float) - y[:, 0] = [0.8, 0.2, 0.3] - x[:, 1] = y>0.5 - assert_equal(x, [[0, 1], [0, 0], [0, 0]]) - - -class TestNewScalarIndexing(TestCase): - def setUp(self): - self.a = matrix([[1, 2], [3, 4]]) - - def test_dimesions(self): - a = self.a - x = a[0] - assert_equal(x.ndim, 2) - - def test_array_from_matrix_list(self): - a = self.a - x = array([a, a]) - assert_equal(x.shape, [2, 2, 2]) - - def test_array_to_list(self): - a = self.a - assert_equal(a.tolist(), [[1, 2], [3, 4]]) - - def test_fancy_indexing(self): - a = self.a - x = a[1, [0, 1, 0]] - assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[3, 4, 3]])) - x = a[[1, 0]] - assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[3, 4], [1, 2]])) - x = a[[[1], [0]], [[1, 0], [0, 1]]] - assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[4, 3], [1, 2]])) - - def test_matrix_element(self): - x = matrix([[1, 2, 3], [4, 5, 6]]) - assert_equal(x[0][0], matrix([[1, 2, 3]])) - assert_equal(x[0][0].shape, (1, 3)) - assert_equal(x[0].shape, (1, 3)) - assert_equal(x[:, 0].shape, (2, 1)) - - x = matrix(0) - assert_equal(x[0, 0], 0) - assert_equal(x[0], 0) - assert_equal(x[:, 0].shape, x.shape) - - def test_scalar_indexing(self): - x = asmatrix(zeros((3, 2), float)) - assert_equal(x[0, 0], x[0][0]) - - def test_row_column_indexing(self): - x = asmatrix(np.eye(2)) - assert_array_equal(x[0,:], [[1, 0]]) - assert_array_equal(x[1,:], [[0, 1]]) - assert_array_equal(x[:, 0], [[1], [0]]) - assert_array_equal(x[:, 1], [[0], [1]]) - - def test_boolean_indexing(self): - A = arange(6) - A.shape = (3, 2) - x = asmatrix(A) - assert_array_equal(x[:, array([True, False])], x[:, 0]) - assert_array_equal(x[array([True, False, False]),:], x[0,:]) - - def test_list_indexing(self): - A = arange(6) - A.shape = (3, 2) - x = asmatrix(A) - assert_array_equal(x[:, [1, 0]], x[:, ::-1]) - assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) - -class TestPower(TestCase): - def test_returntype(self): - a = array([[0, 1], [0, 0]]) - assert_(type(matrix_power(a, 2)) is ndarray) - a = mat(a) - assert_(type(matrix_power(a, 2)) is matrix) - - def test_list(self): - assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_multiarray.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_multiarray.py deleted file mode 100644 index fc5b1df17d7e1..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/matrixlib/tests/test_multiarray.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import * - -class TestView(TestCase): - def test_type(self): - x = np.array([1, 2, 3]) - assert_(isinstance(x.view(np.matrix), np.matrix)) - - def test_keywords(self): - x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) - # We must be specific about the endianness here: - y = x.view(dtype='= 2.6. - -""" -from __future__ import division, absolute_import, print_function - -from abc import ABCMeta, abstractmethod, abstractproperty -from numbers import Number - -import numpy as np -from . import polyutils as pu - -__all__ = ['ABCPolyBase'] - -class ABCPolyBase(object): - """An abstract base class for series classes. 
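(editorial note) Concrete series classes such as
`numpy.polynomial.Polynomial` and `numpy.polynomial.Chebyshev` derive
from this base and therefore share the arithmetic described below,
e.g.

>>> from numpy.polynomial import Polynomial as P
>>> P([1, 2]) * P([1, 2])    # (1 + 2x)**2
Polynomial([ 1.,  4.,  4.], [-1.,  1.], [-1.,  1.])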
- - ABCPolyBase provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the - methods listed below. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - coef : array_like - Series coefficients in order of increasing degree, i.e., - ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where - ``P_i`` is the basis polynomials of degree ``i``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is the derived class domain. - window : (2,) array_like, optional - Window, see domain for its use. The default value is the - derived class window. - - Attributes - ---------- - coef : (N,) ndarray - Series coefficients in order of increasing degree. - domain : (2,) ndarray - Domain that is mapped to window. - window : (2,) ndarray - Window that domain is mapped to. - - Class Attributes - ---------------- - maxpower : int - Maximum power allowed, i.e., the largest number ``n`` such that - ``p(x)**n`` is allowed. This is to limit runaway polynomial size. - domain : (2,) ndarray - Default domain of the class. - window : (2,) ndarray - Default window of the class. - - """ - __metaclass__ = ABCMeta - - # Not hashable - __hash__ = None - - # Don't let participate in array operations. Value doesn't matter. - __array_priority__ = 1000 - - # Limit runaway size. T_n^m has degree n*m - maxpower = 100 - - @abstractproperty - def domain(self): - pass - - @abstractproperty - def window(self): - pass - - @abstractproperty - def nickname(self): - pass - - @abstractmethod - def _add(self): - pass - - @abstractmethod - def _sub(self): - pass - - @abstractmethod - def _mul(self): - pass - - @abstractmethod - def _div(self): - pass - - @abstractmethod - def _pow(self): - pass - - @abstractmethod - def _val(self): - pass - - @abstractmethod - def _int(self): - pass - - @abstractmethod - def _der(self): - pass - - @abstractmethod - def _fit(self): - pass - - @abstractmethod - def _line(self): - pass - - @abstractmethod - def _roots(self): - pass - - @abstractmethod - def _fromroots(self): - pass - - def has_samecoef(self, other): - """Check if coefficients match. - - .. versionadded:: 1.6.0 - - Parameters - ---------- - other : class instance - The other class must have the ``coef`` attribute. - - Returns - ------- - bool : boolean - True if the coefficients are the same, False otherwise. - - """ - if len(self.coef) != len(other.coef): - return False - elif not np.all(self.coef == other.coef): - return False - else: - return True - - def has_samedomain(self, other): - """Check if domains match. - - .. versionadded:: 1.6.0 - - Parameters - ---------- - other : class instance - The other class must have the ``domain`` attribute. - - Returns - ------- - bool : boolean - True if the domains are the same, False otherwise. - - """ - return np.all(self.domain == other.domain) - - def has_samewindow(self, other): - """Check if windows match. - - .. versionadded:: 1.6.0 - - Parameters - ---------- - other : class instance - The other class must have the ``window`` attribute. - - Returns - ------- - bool : boolean - True if the windows are the same, False otherwise. - - """ - return np.all(self.window == other.window) - - def has_sametype(self, other): - """Check if types match. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - other : object - Class instance. 
- - Returns - ------- - bool : boolean - True if other is same class as self - - """ - return isinstance(other, self.__class__) - - def _get_coefficients(self, other): - """Interpret other as polynomial coefficients. - - The `other` argument is checked to see if it is of the same - class as self with identical domain and window. If so, - return its coefficients, otherwise return `other`. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - other : anything - Object to be checked. - - Returns - ------- - coef: - The coefficients of`other` if it is a compatible instance, - of ABCPolyBase, otherwise `other`. - - Raises - ------ - TypeError: - When `other` is an incompatible instance of ABCPolyBase. - - """ - if isinstance(other, ABCPolyBase): - if not isinstance(other, self.__class__): - raise TypeError("Polynomial types differ") - elif not np.all(self.domain == other.domain): - raise TypeError("Domains differ") - elif not np.all(self.window == other.window): - raise TypeError("Windows differ") - return other.coef - return other - - def __init__(self, coef, domain=None, window=None): - [coef] = pu.as_series([coef], trim=False) - self.coef = coef - - if domain is not None: - [domain] = pu.as_series([domain], trim=False) - if len(domain) != 2: - raise ValueError("Domain has wrong number of elements.") - self.domain = domain - - if window is not None: - [window] = pu.as_series([window], trim=False) - if len(window) != 2: - raise ValueError("Window has wrong number of elements.") - self.window = window - - def __repr__(self): - format = "%s(%s, %s, %s)" - coef = repr(self.coef)[6:-1] - domain = repr(self.domain)[6:-1] - window = repr(self.window)[6:-1] - name = self.__class__.__name__ - return format % (name, coef, domain, window) - - def __str__(self): - format = "%s(%s)" - coef = str(self.coef) - name = self.nickname - return format % (name, coef) - - # Pickle and copy - - def __getstate__(self): - ret = self.__dict__.copy() - ret['coef'] = self.coef.copy() - ret['domain'] = self.domain.copy() - ret['window'] = self.window.copy() - return ret - - def __setstate__(self, dict): - self.__dict__ = dict - - # Call - - def __call__(self, arg): - off, scl = pu.mapparms(self.domain, self.window) - arg = off + scl*arg - return self._val(arg, self.coef) - - def __iter__(self): - return iter(self.coef) - - def __len__(self): - return len(self.coef) - - # Numeric properties. - - def __neg__(self): - return self.__class__(-self.coef, self.domain, self.window) - - def __pos__(self): - return self - - def __add__(self, other): - try: - othercoef = self._get_coefficients(other) - coef = self._add(self.coef, othercoef) - except TypeError as e: - raise e - except: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __sub__(self, other): - try: - othercoef = self._get_coefficients(other) - coef = self._sub(self.coef, othercoef) - except TypeError as e: - raise e - except: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __mul__(self, other): - try: - othercoef = self._get_coefficients(other) - coef = self._mul(self.coef, othercoef) - except TypeError as e: - raise e - except: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __div__(self, other): - # set to __floordiv__, /, for now. - return self.__floordiv__(other) - - def __truediv__(self, other): - # there is no true divide if the rhs is not a Number, although it - # could return the first n elements of an infinite series. 
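[editor's note] The arithmetic methods above funnel the right-hand operand through `_get_coefficients`, so plain sequences are accepted while incompatible series raise. A small sketch of both paths, assuming the public `Polynomial` and `Chebyshev` subclasses:

    >>> from numpy.polynomial import Polynomial, Chebyshev
    >>> p = Polynomial([1, 2, 3])
    >>> (p + [1, 1]).coef              # plain sequences are taken as coefficients
    array([ 2.,  3.,  3.])
    >>> p + Chebyshev([1])             # mismatched subclasses refuse to mix
    Traceback (most recent call last):
        ...
    TypeError: Polynomial types differ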
- # It is hard to see where n would come from, though. - if not isinstance(other, Number) or isinstance(other, bool): - form = "unsupported types for true division: '%s', '%s'" - raise TypeError(form % (type(self), type(other))) - return self.__floordiv__(other) - - def __floordiv__(self, other): - res = self.__divmod__(other) - if res is NotImplemented: - return res - return res[0] - - def __mod__(self, other): - res = self.__divmod__(other) - if res is NotImplemented: - return res - return res[1] - - def __divmod__(self, other): - try: - othercoef = self._get_coefficients(other) - quo, rem = self._div(self.coef, othercoef) - except (TypeError, ZeroDivisionError) as e: - raise e - except: - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - def __pow__(self, other): - coef = self._pow(self.coef, other, maxpower=self.maxpower) - res = self.__class__(coef, self.domain, self.window) - return res - - def __radd__(self, other): - try: - coef = self._add(other, self.coef) - except: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rsub__(self, other): - try: - coef = self._sub(other, self.coef) - except: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rmul__(self, other): - try: - coef = self._mul(other, self.coef) - except: - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rdiv__(self, other): - # set to __floordiv__ /. - return self.__rfloordiv__(other) - - def __rtruediv__(self, other): - # An instance of ABCPolyBase is not considered a - # Number. - return NotImplemented - - def __rfloordiv__(self, other): - res = self.__rdivmod__(other) - if res is NotImplemented: - return res - return res[0] - - def __rmod__(self, other): - res = self.__rdivmod__(other) - if res is NotImplemented: - return res - return res[1] - - def __rdivmod__(self, other): - try: - quo, rem = self._div(other, self.coef) - except ZeroDivisionError as e: - raise e - except: - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - # Enhance me - # some augmented arithmetic operations could be added here - - def __eq__(self, other): - res = (isinstance(other, self.__class__) and - np.all(self.domain == other.domain) and - np.all(self.window == other.window) and - (self.coef.shape == other.coef.shape) and - np.all(self.coef == other.coef)) - return res - - def __ne__(self, other): - return not self.__eq__(other) - - # - # Extra methods. - # - - def copy(self): - """Return a copy. - - Returns - ------- - new_series : series - Copy of self. - - """ - return self.__class__(self.coef, self.domain, self.window) - - def degree(self): - """The degree of the series. - - .. versionadded:: 1.5.0 - - Returns - ------- - degree : int - Degree of the series, one less than the number of coefficients. - - """ - return len(self) - 1 - - def cutdeg(self, deg): - """Truncate series to the given degree. - - Reduce the degree of the series to `deg` by discarding the - high order terms. If `deg` is greater than the current degree a - copy of the current series is returned. This can be useful in least - squares where the coefficients of the high degree terms may be very - small. - - .. 
versionadded:: 1.5.0 - - Parameters - ---------- - deg : non-negative int - The series is reduced to degree `deg` by discarding the high - order terms. The value of `deg` must be a non-negative integer. - - Returns - ------- - new_series : series - New instance of series with reduced degree. - - """ - return self.truncate(deg + 1) - - def trim(self, tol=0): - """Remove trailing coefficients - - Remove trailing coefficients until a coefficient is reached whose - absolute value greater than `tol` or the beginning of the series is - reached. If all the coefficients would be removed the series is set - to ``[0]``. A new series instance is returned with the new - coefficients. The current instance remains unchanged. - - Parameters - ---------- - tol : non-negative number. - All trailing coefficients less than `tol` will be removed. - - Returns - ------- - new_series : series - Contains the new set of coefficients. - - """ - coef = pu.trimcoef(self.coef, tol) - return self.__class__(coef, self.domain, self.window) - - def truncate(self, size): - """Truncate series to length `size`. - - Reduce the series to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. This - can be useful in least squares where the coefficients of the - high degree terms may be very small. - - Parameters - ---------- - size : positive int - The series is reduced to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. - - Returns - ------- - new_series : series - New instance of series with truncated coefficients. - - """ - isize = int(size) - if isize != size or isize < 1: - raise ValueError("size must be a positive integer") - if isize >= len(self.coef): - coef = self.coef - else: - coef = self.coef[:isize] - return self.__class__(coef, self.domain, self.window) - - def convert(self, domain=None, kind=None, window=None): - """Convert series to a different kind and/or domain and/or window. - - Parameters - ---------- - domain : array_like, optional - The domain of the converted series. If the value is None, - the default domain of `kind` is used. - kind : class, optional - The polynomial series type class to which the current instance - should be converted. If kind is None, then the class of the - current instance is used. - window : array_like, optional - The window of the converted series. If the value is None, - the default window of `kind` is used. - - Returns - ------- - new_series : series - The returned class can be of different type than the current - instance and/or have a different domain and/or different - window. - - Notes - ----- - Conversion between domains and class types can result in - numerically ill defined series. - - Examples - -------- - - """ - if kind is None: - kind = self.__class__ - if domain is None: - domain = kind.domain - if window is None: - window = kind.window - return self(kind.identity(domain, window=window)) - - def mapparms(self): - """Return the mapping parameters. - - The returned values define a linear map ``off + scl*x`` that is - applied to the input arguments before the series is evaluated. The - map depends on the ``domain`` and ``window``; if the current - ``domain`` is equal to the ``window`` the resulting map is the - identity. If the coefficients of the series instance are to be - used by themselves outside this class, then the linear function - must be substituted for the ``x`` in the standard representation of - the base polynomials. 
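[editor's note] A quick numerical check of the `mapparms` description above, using the public `Chebyshev` class: a series with domain ``[0, 4]`` and the default window ``[-1, 1]`` is evaluated through the map ``L(x) = -1 + 0.5*x``, since ``L(0) = -1`` and ``L(4) = 1``.

    >>> from numpy.polynomial import Chebyshev
    >>> Chebyshev([1, 2, 3], domain=[0, 4]).mapparms()
    (-1.0, 0.5)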
- - Returns - ------- - off, scl : float or complex - The mapping function is defined by ``off + scl*x``. - - Notes - ----- - If the current domain is the interval ``[l1, r1]`` and the window - is ``[l2, r2]``, then the linear mapping function ``L`` is - defined by the equations:: - - L(l1) = l2 - L(r1) = r2 - - """ - return pu.mapparms(self.domain, self.window) - - def integ(self, m=1, k=[], lbnd=None): - """Integrate. - - Return a series instance that is the definite integral of the - current series. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. - k : array_like - Integration constants. The first constant is applied to the - first integration, the second to the second, and so on. The - list of values must less than or equal to `m` in length and any - missing values are set to zero. - lbnd : Scalar - The lower bound of the definite integral. - - Returns - ------- - new_series : series - A new series representing the integral. The domain is the same - as the domain of the integrated series. - - """ - off, scl = self.mapparms() - if lbnd is None: - lbnd = 0 - else: - lbnd = off + scl*lbnd - coef = self._int(self.coef, m, k, lbnd, 1./scl) - return self.__class__(coef, self.domain, self.window) - - def deriv(self, m=1): - """Differentiate. - - Return a series instance of that is the derivative of the current - series. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. - - Returns - ------- - new_series : series - A new series representing the derivative. The domain is the same - as the domain of the differentiated series. - - """ - off, scl = self.mapparms() - coef = self._der(self.coef, m, scl) - return self.__class__(coef, self.domain, self.window) - - def roots(self): - """Return the roots of the series polynomial. - - Compute the roots for the series. Note that the accuracy of the - roots decrease the further outside the domain they lie. - - Returns - ------- - roots : ndarray - Array containing the roots of the series. - - """ - roots = self._roots(self.coef) - return pu.mapdomain(roots, self.window, self.domain) - - def linspace(self, n=100, domain=None): - """Return x, y values at equally spaced points in domain. - - Returns the x, y values at `n` linearly spaced points across the - domain. Here y is the value of the polynomial at the points x. By - default the domain is the same as that of the series instance. - This method is intended mostly as a plotting aid. - - .. versionadded:: 1.5.0 - - Parameters - ---------- - n : int, optional - Number of point pairs to return. The default value is 100. - domain : {None, array_like}, optional - If not None, the specified domain is used instead of that of - the calling instance. It should be of the form ``[beg,end]``. - The default is None which case the class domain is used. - - Returns - ------- - x, y : ndarray - x is equal to linspace(self.domain[0], self.domain[1], n) and - y is the series evaluated at element of x. - - """ - if domain is None: - domain = self.domain - x = np.linspace(domain[0], domain[1], n) - y = self(x) - return x, y - - @classmethod - def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, - window=None): - """Least squares fit to data. - - Return a series instance that is the least squares fit to the data - `y` sampled at `x`. The domain of the returned instance can be - specified and this will often result in a superior fit with less - chance of ill conditioning. 
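[editor's note] A minimal sketch of `fit` with an assumed quadratic data set; the fit recovers the data to roundoff, and the domain defaults to the interval covering the sample points:

    >>> import numpy as np
    >>> from numpy.polynomial import Chebyshev
    >>> x = np.linspace(-1, 3, 30)
    >>> y = x**2 - 2*x
    >>> f = Chebyshev.fit(x, y, 2)
    >>> f.domain                       # minimal domain covering the sample points
    array([-1.,  3.])
    >>> np.allclose(f(x), y)
    True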
- - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial. - domain : {None, [beg, end], []}, optional - Domain to use for the returned series. If ``None``, - then a minimal domain that covers the points `x` is chosen. If - ``[]`` the class domain is used. The default value was the - class domain in NumPy 1.4 and ``None`` in later versions. - The ``[]`` option was added in numpy 1.5.0. - rcond : float, optional - Relative condition number of the fit. Singular values smaller - than this relative to the largest singular value will be - ignored. The default value is len(x)*eps, where eps is the - relative precision of the float type, about 2e-16 in most - cases. - full : bool, optional - Switch determining nature of return value. When it is False - (the default) just the coefficients are returned, when True - diagnostic information from the singular value decomposition is - also returned. - w : array_like, shape (M,), optional - Weights. If not None the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products - ``w[i]*y[i]`` all have the same variance. The default value is - None. - - .. versionadded:: 1.5.0 - window : {[beg, end]}, optional - Window to use for the returned series. The default - value is the default class domain - - .. versionadded:: 1.6.0 - - Returns - ------- - new_series : series - A series that represents the least squares fit to the data and - has the domain specified in the call. - - [resid, rank, sv, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - """ - if domain is None: - domain = pu.getdomain(x) - elif type(domain) is list and len(domain) == 0: - domain = cls.domain - - if window is None: - window = cls.window - - xnew = pu.mapdomain(x, domain, window) - res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) - if full: - [coef, status] = res - return cls(coef, domain=domain, window=window), status - else: - coef = res - return cls(coef, domain=domain, window=window) - - @classmethod - def fromroots(cls, roots, domain=[], window=None): - """Return series instance that has the specified roots. - - Returns a series representing the product - ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a - list of roots. - - Parameters - ---------- - roots : array_like - List of roots. - domain : {[], None, array_like}, optional - Domain for the resulting series. If None the domain is the - interval from the smallest root to the largest. If [] the - domain is the class domain. The default is []. - window : {None, array_like}, optional - Window for the returned series. If None the class window is - used. The default is None. - - Returns - ------- - new_series : series - Series with the specified roots. 
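[editor's note] A short illustration of `fromroots` for the polynomial ``(x - 1)*(x - 2) = 2 - 3*x + x**2``, first in the power basis and then in the Chebyshev basis, where ``x**2 = (T_0 + T_2)/2``:

    >>> from numpy.polynomial import Polynomial, Chebyshev
    >>> Polynomial.fromroots([1, 2]).coef
    array([ 2., -3.,  1.])
    >>> Chebyshev.fromroots([1, 2]).coef   # same polynomial in the T basis
    array([ 2.5, -3. ,  0.5])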
- - """ - [roots] = pu.as_series([roots], trim=False) - if domain is None: - domain = pu.getdomain(roots) - elif type(domain) is list and len(domain) == 0: - domain = cls.domain - - if window is None: - window = cls.window - - deg = len(roots) - off, scl = pu.mapparms(domain, window) - rnew = off + scl*roots - coef = cls._fromroots(rnew) / scl**deg - return cls(coef, domain=domain, window=window) - - @classmethod - def identity(cls, domain=None, window=None): - """Identity function. - - If ``p`` is the returned series, then ``p(x) == x`` for all - values of x. - - Parameters - ---------- - domain : {None, array_like}, optional - If given, the array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. If None is - given then the class domain is used. The default is None. - window : {None, array_like}, optional - If given, the resulting array must be if the form - ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of - the window. If None is given then the class window is used. The - default is None. - - Returns - ------- - new_series : series - Series of representing the identity. - - """ - if domain is None: - domain = cls.domain - if window is None: - window = cls.window - off, scl = pu.mapparms(window, domain) - coef = cls._line(off, scl) - return cls(coef, domain, window) - - @classmethod - def basis(cls, deg, domain=None, window=None): - """Series basis polynomial of degree `deg`. - - Returns the series representing the basis polynomial of degree `deg`. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - deg : int - Degree of the basis polynomial for the series. Must be >= 0. - domain : {None, array_like}, optional - If given, the array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. If None is - given then the class domain is used. The default is None. - window : {None, array_like}, optional - If given, the resulting array must be if the form - ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of - the window. If None is given then the class window is used. The - default is None. - - Returns - ------- - new_series : series - A series with the coefficient of the `deg` term set to one and - all others zero. - - """ - if domain is None: - domain = cls.domain - if window is None: - window = cls.window - ideg = int(deg) - - if ideg != deg or ideg < 0: - raise ValueError("deg must be non-negative integer") - return cls([0]*ideg + [1], domain, window) - - @classmethod - def cast(cls, series, domain=None, window=None): - """Convert series to series of this class. - - The `series` is expected to be an instance of some polynomial - series of one of the types supported by by the numpy.polynomial - module, but could be some other class that supports the convert - method. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - series : series - The series instance to be converted. - domain : {None, array_like}, optional - If given, the array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. If None is - given then the class domain is used. The default is None. - window : {None, array_like}, optional - If given, the resulting array must be if the form - ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of - the window. If None is given then the class window is used. The - default is None. - - Returns - ------- - new_series : series - A series of the same kind as the calling class and equal to - `series` when evaluated. 
- - See Also - -------- - convert : similar instance method - - """ - if domain is None: - domain = cls.domain - if window is None: - window = cls.window - return series.convert(domain, cls, window) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/chebyshev.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/chebyshev.py deleted file mode 100644 index f213ab3fd0497..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/chebyshev.py +++ /dev/null @@ -1,2056 +0,0 @@ -""" -Objects for dealing with Chebyshev series. - -This module provides a number of objects (mostly functions) useful for -dealing with Chebyshev series, including a `Chebyshev` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `chebdomain` -- Chebyshev series default domain, [-1,1]. -- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates - identically to 0. -- `chebone` -- (Coefficients of the) Chebyshev series that evaluates - identically to 1. -- `chebx` -- (Coefficients of the) Chebyshev series for the identity map, - ``f(x) = x``. - -Arithmetic ----------- -- `chebadd` -- add two Chebyshev series. -- `chebsub` -- subtract one Chebyshev series from another. -- `chebmul` -- multiply two Chebyshev series. -- `chebdiv` -- divide one Chebyshev series by another. -- `chebpow` -- raise a Chebyshev series to an positive integer power -- `chebval` -- evaluate a Chebyshev series at given points. -- `chebval2d` -- evaluate a 2D Chebyshev series at given points. -- `chebval3d` -- evaluate a 3D Chebyshev series at given points. -- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product. -- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product. - -Calculus --------- -- `chebder` -- differentiate a Chebyshev series. -- `chebint` -- integrate a Chebyshev series. - -Misc Functions --------------- -- `chebfromroots` -- create a Chebyshev series with specified roots. -- `chebroots` -- find the roots of a Chebyshev series. -- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials. -- `chebvander2d` -- Vandermonde-like matrix for 2D power series. -- `chebvander3d` -- Vandermonde-like matrix for 3D power series. -- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights. -- `chebweight` -- Chebyshev weight function. -- `chebcompanion` -- symmetrized companion matrix in Chebyshev form. -- `chebfit` -- least-squares fit returning a Chebyshev series. -- `chebpts1` -- Chebyshev points of the first kind. -- `chebpts2` -- Chebyshev points of the second kind. -- `chebtrim` -- trim leading coefficients from a Chebyshev series. -- `chebline` -- Chebyshev series representing given straight line. -- `cheb2poly` -- convert a Chebyshev series to a polynomial. -- `poly2cheb` -- convert a polynomial to a Chebyshev series. - -Classes -------- -- `Chebyshev` -- A Chebyshev series class. - -See also --------- -`numpy.polynomial` - -Notes ------ -The implementations of multiplication, division, integration, and -differentiation use the algebraic identities [1]_: - -.. math :: - T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ - z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. - -where - -.. math :: x = \\frac{z + z^{-1}}{2}. - -These identities allow a Chebyshev series to be expressed as a finite, -symmetric Laurent series. 
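[editor's note] The identities above can be spot-checked numerically: on the unit circle ``z = exp(i*theta)`` one has ``x = (z + 1/z)/2 = cos(theta)`` and ``T_n(x) = cos(n*theta)``. A quick sketch:

    >>> import numpy as np
    >>> theta = 0.7
    >>> z = np.exp(1j*theta)
    >>> np.allclose(((z + 1/z)/2).real, np.cos(theta))
    True
    >>> np.allclose((z**3 + z**-3)/2, np.cos(3*theta))   # T_3(cos t) = cos 3t
    True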
In this module, this sort of Laurent series -is referred to as a "z-series." - -References ----------- -.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev - Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 - (preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', - 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', - 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', - 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', - 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', - 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', - 'chebgauss', 'chebweight'] - -chebtrim = pu.trimcoef - -# -# A collection of functions for manipulating z-series. These are private -# functions and do minimal error checking. -# - -def _cseries_to_zseries(c): - """Covert Chebyshev series to z-series. - - Covert a Chebyshev series to the equivalent z-series. The result is - never an empty array. The dtype of the return is the same as that of - the input. No checks are run on the arguments as this routine is for - internal use. - - Parameters - ---------- - c : 1-D ndarray - Chebyshev coefficients, ordered from low to high - - Returns - ------- - zs : 1-D ndarray - Odd length symmetric z-series, ordered from low to high. - - """ - n = c.size - zs = np.zeros(2*n-1, dtype=c.dtype) - zs[n-1:] = c/2 - return zs + zs[::-1] - - -def _zseries_to_cseries(zs): - """Covert z-series to a Chebyshev series. - - Covert a z series to the equivalent Chebyshev series. The result is - never an empty array. The dtype of the return is the same as that of - the input. No checks are run on the arguments as this routine is for - internal use. - - Parameters - ---------- - zs : 1-D ndarray - Odd length symmetric z-series, ordered from low to high. - - Returns - ------- - c : 1-D ndarray - Chebyshev coefficients, ordered from low to high. - - """ - n = (zs.size + 1)//2 - c = zs[n-1:].copy() - c[1:n] *= 2 - return c - - -def _zseries_mul(z1, z2): - """Multiply two z-series. - - Multiply two z-series to produce a z-series. - - Parameters - ---------- - z1, z2 : 1-D ndarray - The arrays must be 1-D but this is not checked. - - Returns - ------- - product : 1-D ndarray - The product z-series. - - Notes - ----- - This is simply convolution. If symmetric/anti-symmetric z-series are - denoted by S/A then the following rules apply: - - S*S, A*A -> S - S*A, A*S -> A - - """ - return np.convolve(z1, z2) - - -def _zseries_div(z1, z2): - """Divide the first z-series by the second. - - Divide `z1` by `z2` and return the quotient and remainder as z-series. - Warning: this implementation only applies when both z1 and z2 have the - same symmetry, which is sufficient for present purposes. - - Parameters - ---------- - z1, z2 : 1-D ndarray - The arrays must be 1-D and have the same symmetry, but this is not - checked. - - Returns - ------- - - (quotient, remainder) : 1-D ndarrays - Quotient and remainder as z-series. - - Notes - ----- - This is not the same as polynomial division on account of the desired form - of the remainder. 
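[editor's note] Since the converters above are private, a self-contained sketch of the round trip and of multiplication-as-convolution (standalone copies for illustration only, not calls into the module):

    import numpy as np

    def cseries_to_zseries(c):
        # mirrors the private helper above: T-coefficients -> symmetric z-series
        n = c.size
        zs = np.zeros(2*n - 1, dtype=c.dtype)
        zs[n - 1:] = c/2
        return zs + zs[::-1]

    def zseries_to_cseries(zs):
        # inverse map: fold the symmetric z-series back onto T-coefficients
        n = (zs.size + 1)//2
        c = zs[n - 1:].copy()
        c[1:n] *= 2
        return c

    c = np.array([1., 2., 3.])
    zs = cseries_to_zseries(c)      # -> [1.5, 1., 1., 1., 1.5]
    assert np.all(zseries_to_cseries(zs) == c)
    # z-series multiplication is plain convolution: T_1*T_1 = (T_0 + T_2)/2
    t1 = cseries_to_zseries(np.array([0., 1.]))
    assert np.all(zseries_to_cseries(np.convolve(t1, t1)) == [0.5, 0., 0.5])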
If symmetric/anti-symmetric z-series are denoted by S/A - then the following rules apply: - - S/S -> S,S - A/A -> S,A - - The restriction to types of the same symmetry could be fixed but seems like - unneeded generality. There is no natural form for the remainder in the case - where there is no symmetry. - - """ - z1 = z1.copy() - z2 = z2.copy() - len1 = len(z1) - len2 = len(z2) - if len2 == 1: - z1 /= z2 - return z1, z1[:1]*0 - elif len1 < len2: - return z1[:1]*0, z1 - else: - dlen = len1 - len2 - scl = z2[0] - z2 /= scl - quo = np.empty(dlen + 1, dtype=z1.dtype) - i = 0 - j = dlen - while i < j: - r = z1[i] - quo[i] = z1[i] - quo[dlen - i] = r - tmp = r*z2 - z1[i:i+len2] -= tmp - z1[j:j+len2] -= tmp - i += 1 - j -= 1 - r = z1[i] - quo[i] = r - tmp = r*z2 - z1[i:i+len2] -= tmp - quo /= scl - rem = z1[i+1:i-1+len2].copy() - return quo, rem - - -def _zseries_der(zs): - """Differentiate a z-series. - - The derivative is with respect to x, not z. This is achieved using the - chain rule and the value of dx/dz given in the module notes. - - Parameters - ---------- - zs : z-series - The z-series to differentiate. - - Returns - ------- - derivative : z-series - The derivative - - Notes - ----- - The zseries for x (ns) has been multiplied by two in order to avoid - using floats that are incompatible with Decimal and likely other - specialized scalar types. This scaling has been compensated by - multiplying the value of zs by two also so that the two cancels in the - division. - - """ - n = len(zs)//2 - ns = np.array([-1, 0, 1], dtype=zs.dtype) - zs *= np.arange(-n, n+1)*2 - d, r = _zseries_div(zs, ns) - return d - - -def _zseries_int(zs): - """Integrate a z-series. - - The integral is with respect to x, not z. This is achieved by a change - of variable using dx/dz given in the module notes. - - Parameters - ---------- - zs : z-series - The z-series to integrate - - Returns - ------- - integral : z-series - The indefinite integral - - Notes - ----- - The zseries for x (ns) has been multiplied by two in order to avoid - using floats that are incompatible with Decimal and likely other - specialized scalar types. This scaling has been compensated by - dividing the resulting zs by two. - - """ - n = 1 + len(zs)//2 - ns = np.array([-1, 0, 1], dtype=zs.dtype) - zs = _zseries_mul(zs, ns) - div = np.arange(-n, n+1)*2 - zs[:n] /= div[:n] - zs[n+1:] /= div[n+1:] - zs[n] = 0 - return zs - -# -# Chebyshev series functions -# - - -def poly2cheb(pol): - """ - Convert a polynomial to a Chebyshev series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Chebyshev series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Chebyshev - series. - - See Also - -------- - cheb2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> p = P.Polynomial(range(4)) - >>> p - Polynomial([ 0., 1., 2., 3.], [-1., 1.]) - >>> c = p.convert(kind=P.Chebyshev) - >>> c - Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.]) - >>> P.poly2cheb(range(4)) - array([ 1. , 3.25, 1. 
, 0.75]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = chebadd(chebmulx(res), pol[i]) - return res - - -def cheb2poly(c): - """ - Convert a Chebyshev series to a polynomial. - - Convert an array representing the coefficients of a Chebyshev series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Chebyshev series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2cheb - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> c = P.Chebyshev(range(4)) - >>> c - Chebyshev([ 0., 1., 2., 3.], [-1., 1.]) - >>> p = c.convert(kind=P.Polynomial) - >>> p - Polynomial([ -2., -8., 4., 12.], [-1., 1.]) - >>> P.cheb2poly(range(4)) - array([ -2., -8., 4., 12.]) - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n < 3: - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], c1) - c1 = polyadd(tmp, polymulx(c1)*2) - return polyadd(c0, polymulx(c1)) - - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Chebyshev default domain. -chebdomain = np.array([-1, 1]) - -# Chebyshev coefficients representing zero. -chebzero = np.array([0]) - -# Chebyshev coefficients representing one. -chebone = np.array([1]) - -# Chebyshev coefficients representing the identity x. -chebx = np.array([0, 1]) - - -def chebline(off, scl): - """ - Chebyshev series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Chebyshev series for - ``off + scl*x``. - - See Also - -------- - polyline - - Examples - -------- - >>> import numpy.polynomial.chebyshev as C - >>> C.chebline(3,2) - array([3, 2]) - >>> C.chebval(-3, C.chebline(3,2)) # should be -3 - -3.0 - - """ - if scl != 0: - return np.array([off, scl]) - else: - return np.array([off]) - - -def chebfromroots(roots): - """ - Generate a Chebyshev series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in Chebyshev form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in Chebyshev form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. 
If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, legfromroots, lagfromroots, hermfromroots, - hermefromroots. - - Examples - -------- - >>> import numpy.polynomial.chebyshev as C - >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis - array([ 0. , -0.25, 0. , 0.25]) - >>> j = complex(0,1) - >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis - array([ 1.5+0.j, 0.0+0.j, 0.5+0.j]) - - """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [chebline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [chebmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = chebmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def chebadd(c1, c2): - """ - Add one Chebyshev series to another. - - Returns the sum of two Chebyshev series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Chebyshev series of their sum. - - See Also - -------- - chebsub, chebmul, chebdiv, chebpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Chebyshev series - is a Chebyshev series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebadd(c1,c2) - array([ 4., 4., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def chebsub(c1, c2): - """ - Subtract one Chebyshev series from another. - - Returns the difference of two Chebyshev series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Chebyshev series coefficients representing their difference. - - See Also - -------- - chebadd, chebmul, chebdiv, chebpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Chebyshev - series is a Chebyshev series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebsub(c1,c2) - array([-2., 0., 2.]) - >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) - array([ 2., 0., -2.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def chebmulx(c): - """Multiply a Chebyshev series by x. - - Multiply the polynomial `c` by x, where x is the independent - variable. 
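[editor's note] The smallest nontrivial case of `chebmulx`: multiplying ``T_1`` by ``x`` gives ``(T_0 + T_2)/2``, which is exactly the recursion the implementation below encodes.

    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebmulx([0, 1])
    array([ 0.5,  0. ,  0.5])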
- - - Parameters - ---------- - c : array_like - 1-D array of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - - .. versionadded:: 1.5.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0] - if len(c) > 1: - tmp = c[1:]/2 - prd[2:] = tmp - prd[0:-2] += tmp - return prd - - -def chebmul(c1, c2): - """ - Multiply one Chebyshev series by another. - - Returns the product of two Chebyshev series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Chebyshev series coefficients representing their product. - - See Also - -------- - chebadd, chebsub, chebdiv, chebpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Chebyshev polynomial basis set. Thus, to express - the product as a C-series, it is typically necessary to "reproject" - the product onto said basis set, which typically produces - "unintuitive live" (but correct) results; see Examples section below. - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebmul(c1,c2) # multiplication requires "reprojection" - array([ 6.5, 12. , 12. , 4. , 1.5]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - z1 = _cseries_to_zseries(c1) - z2 = _cseries_to_zseries(c2) - prd = _zseries_mul(z1, z2) - ret = _zseries_to_cseries(prd) - return pu.trimseq(ret) - - -def chebdiv(c1, c2): - """ - Divide one Chebyshev series by another. - - Returns the quotient-with-remainder of two Chebyshev series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``T_0 + 2*T_1 + 3*T_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Chebyshev series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Chebyshev series coefficients representing the quotient and - remainder. - - See Also - -------- - chebadd, chebsub, chebmul, chebpow - - Notes - ----- - In general, the (polynomial) division of one C-series by another - results in quotient and remainder terms that are not in the Chebyshev - polynomial basis set. Thus, to express these results as C-series, it - is typically necessary to "reproject" the results onto said basis - set, which typically produces "unintuitive" (but correct) results; - see Examples section below. 
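[editor's note] Complementing the examples that follow, a round-trip check showing that `chebdiv` really is division with remainder in the Chebyshev basis, i.e. ``quo*c2 + rem`` reconstructs ``c1``:

    >>> from numpy.polynomial import chebyshev as C
    >>> quo, rem = C.chebdiv((1, 2, 3), (3, 2, 1))
    >>> C.chebadd(C.chebmul(quo, (3, 2, 1)), rem)
    array([ 1.,  2.,  3.])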
- - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not - (array([ 3.]), array([-8., -4.])) - >>> c2 = (0,1,2,3) - >>> C.chebdiv(c2,c1) # neither "intuitive" - (array([ 0., 2.]), array([-2., -4.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - z1 = _cseries_to_zseries(c1) - z2 = _cseries_to_zseries(c2) - quo, rem = _zseries_div(z1, z2) - quo = pu.trimseq(_zseries_to_cseries(quo)) - rem = pu.trimseq(_zseries_to_cseries(rem)) - return quo, rem - - -def chebpow(c, pow, maxpower=16): - """Raise a Chebyshev series to a power. - - Returns the Chebyshev series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Chebyshev series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Chebyshev series of power. - - See Also - -------- - chebadd, chebsub, chebmul, chebdiv - - Examples - -------- - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - zs = _cseries_to_zseries(c) - prd = zs - for i in range(2, power + 1): - prd = np.convolve(prd, zs) - return _zseries_to_cseries(prd) - - -def chebder(c, m=1, scl=1, axis=0): - """ - Differentiate a Chebyshev series. - - Returns the Chebyshev series coefficients `c` differentiated `m` times - along `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` - while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + - 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is - ``y``. - - Parameters - ---------- - c : array_like - Array of Chebyshev series coefficients. If c is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Chebyshev series of the derivative. - - See Also - -------- - chebint - - Notes - ----- - In general, the result of differentiating a C-series needs to be - "reprojected" onto the C-series basis set. 
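[editor's note] Two quick checks of `chebder`: the derivative of ``T_2 = 2*x**2 - 1`` is ``4*x = 4*T_1``, and differentiation undoes `chebint` (defined further down in this module):

    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebder([0, 0, 1])
    array([ 0.,  4.])
    >>> C.chebder(C.chebint([1, 2, 3]))
    array([ 1.,  2.,  3.])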
Thus, typically, the - result of this function is "unintuitive," albeit correct; see Examples - section below. - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c = (1,2,3,4) - >>> C.chebder(c) - array([ 14., 12., 24.]) - >>> C.chebder(c,3) - array([ 96.]) - >>> C.chebder(c,scl=-1) - array([-14., -12., -24.]) - >>> C.chebder(c,2,-1) - array([ 12., 96.]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 2, -1): - der[j - 1] = (2*j)*c[j] - c[j - 2] += (j*c[j])/(j - 2) - if n > 1: - der[1] = 4*c[2] - der[0] = c[1] - c = der - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Chebyshev series. - - Returns the Chebyshev series coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] - represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + - 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of Chebyshev series coefficients. If c is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at zero - is the first value in the list, the value of the second integral - at zero is the second value, etc. If ``k == []`` (the default), - all constants are set to zero. If ``m == 1``, a single scalar can - be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - C-series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - chebder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. 
Then - .. math::`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a`- perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial import chebyshev as C - >>> c = (1,2,3) - >>> C.chebint(c) - array([ 0.5, -0.5, 0.5, 0.5]) - >>> C.chebint(c,3) - array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, - 0.00625 ]) - >>> C.chebint(c, k=3) - array([ 3.5, -0.5, 0.5, 0.5]) - >>> C.chebint(c,lbnd=-2) - array([ 8.5, -0.5, 0.5, 0.5]) - >>> C.chebint(c,scl=-2) - array([-1., 1., -1., -1.]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0] - if n > 1: - tmp[2] = c[1]/4 - for j in range(2, n): - t = c[j]/(2*j + 1) - tmp[j + 1] = c[j]/(2*(j + 1)) - tmp[j - 1] -= c[j]/(2*(j - 1)) - tmp[0] += k[i] - chebval(lbnd, tmp) - c = tmp - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def chebval(x, c, tensor=True): - """ - Evaluate a Chebyshev series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. 
- tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - chebval2d, chebgrid2d, chebval3d, chebgrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - x2 = 2*x - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - c0 = c[-i] - c1 - c1 = tmp + c1*x2 - return c0 + c1*x - - -def chebval2d(x, y, c): - """ - Evaluate a 2-D Chebyshev series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than 2 the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Chebyshev series at points formed - from pairs of corresponding values from `x` and `y`. - - See Also - -------- - chebval, chebgrid2d, chebval3d, chebgrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y = np.array((x, y), copy=0) - except: - raise ValueError('x, y are incompatible') - - c = chebval(x, c) - c = chebval(y, c, tensor=False) - return c - - -def chebgrid2d(x, y, c): - """ - Evaluate a 2-D Chebyshev series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \sum_{i,j} c_{i,j} * T_i(a) * T_j(b), - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. 
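[editor's note] A sketch of the `chebval` evaluation rules just described: a scalar point with a 1-D ``c``, and the `tensor` broadcasting for a 2-D ``c`` whose columns are separate series. At ``x = 0.5``: ``1*T_0 + 2*T_1 + 3*T_2 = 1 + 1 - 1.5 = 0.5``.

    >>> import numpy as np
    >>> from numpy.polynomial.chebyshev import chebval
    >>> chebval(0.5, [1, 2, 3])
    0.5
    >>> c = np.ones((3, 4))            # four degree-2 series, one per column
    >>> chebval(np.zeros(5), c).shape  # tensor=True: c.shape[1:] + x.shape
    (4, 5)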
In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape + y.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j is contained in `c[i,j]`. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Chebyshev series at points in the - Cartesian product of `x` and `y`. - - See Also - -------- - chebval, chebval2d, chebval3d, chebgrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = chebval(x, c) - c = chebval(y, c) - return c - - -def chebval3d(x, y, z, c): - """ - Evaluate a 3-D Chebyshev series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - chebval, chebval2d, chebgrid2d, chebgrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y, z = np.array((x, y, z), copy=0) - except: - raise ValueError('x, y, z are incompatible') - - c = chebval(x, c) - c = chebval(y, c, tensor=False) - c = chebval(z, c, tensor=False) - return c - - -def chebgrid3d(x, y, z, c): - """ - Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. 
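[editor's note] The shape contrast between paired-point and grid evaluation, sketched for the 2-D case: `chebval2d` consumes same-shape ``x`` and ``y``, while `chebgrid2d` forms their Cartesian product.

    >>> import numpy as np
    >>> from numpy.polynomial.chebyshev import chebval2d, chebgrid2d
    >>> c = np.ones((2, 3))
    >>> chebgrid2d(np.zeros(4), np.zeros(5), c).shape   # x.shape + y.shape
    (4, 5)
    >>> chebval2d(np.zeros(4), np.zeros(4), c).shape    # paired points instead
    (4,)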
- - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - chebval, chebval2d, chebgrid2d, chebval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = chebval(x, c) - c = chebval(y, c) - c = chebval(z, c) - return c - - -def chebvander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., i] = T_i(x), - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the degree of the Chebyshev polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and - ``chebval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of Chebyshev series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding Chebyshev polynomial. The dtype will be the same as - the converted `x`. - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - # Use forward recursion to generate the entries. - v[0] = x*0 + 1 - if ideg > 0: - x2 = 2*x - v[1] = x - for i in range(2, ideg + 1): - v[i] = v[i-1]*x2 - v[i-2] - return np.rollaxis(v, 0, v.ndim) - - -def chebvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., deg[1]*i + j] = T_i(x) * T_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. 
The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the Chebyshev polynomials. - - If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D Chebyshev - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - chebvander, chebvander3d. chebval2d, chebval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = chebvander(x, degx) - vy = chebvander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) - - -def chebvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the Chebyshev polynomials. - - If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D Chebyshev - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - chebvander, chebvander3d. chebval2d, chebval3d - - Notes - ----- - - .. 
versionadded:: 1.7.0
-
-    """
-    ideg = [int(d) for d in deg]
-    is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
-    if is_valid != [1, 1, 1]:
-        raise ValueError("degrees must be non-negative integers")
-    degx, degy, degz = ideg
-    x, y, z = np.array((x, y, z), copy=0) + 0.0
-
-    vx = chebvander(x, degx)
-    vy = chebvander(y, degy)
-    vz = chebvander(z, degz)
-    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
-    return v.reshape(v.shape[:-3] + (-1,))
-
-
-def chebfit(x, y, deg, rcond=None, full=False, w=None):
-    """
-    Least squares fit of Chebyshev series to data.
-
-    Return the coefficients of a Chebyshev series of degree `deg` that is
-    the least squares fit to the data values `y` given at points `x`. If
-    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
-    multiple fits are done, one for each column of `y`, and the resulting
-    coefficients are stored in the corresponding columns of a 2-D return.
-    The fitted polynomial(s) are in the form
-
-    .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
-
-    where `n` is `deg`.
-
-    Parameters
-    ----------
-    x : array_like, shape (M,)
-        x-coordinates of the M sample points ``(x[i], y[i])``.
-    y : array_like, shape (M,) or (M, K)
-        y-coordinates of the sample points. Several data sets of sample
-        points sharing the same x-coordinates can be fitted at once by
-        passing in a 2-D array that contains one dataset per column.
-    deg : int
-        Degree of the fitting series.
-    rcond : float, optional
-        Relative condition number of the fit. Singular values smaller than
-        this relative to the largest singular value will be ignored. The
-        default value is len(x)*eps, where eps is the relative precision of
-        the float type, about 2e-16 in most cases.
-    full : bool, optional
-        Switch determining the nature of the return value. When it is False
-        (the default) just the coefficients are returned; when True,
-        diagnostic information from the singular value decomposition is
-        also returned.
-    w : array_like, shape (`M`,), optional
-        Weights. If not None, the contribution of each point
-        ``(x[i], y[i])`` to the fit is weighted by `w[i]`. Ideally the
-        weights are chosen so that the errors of the products ``w[i]*y[i]``
-        all have the same variance. The default value is None.
-
-        .. versionadded:: 1.5.0
-
-    Returns
-    -------
-    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
-        Chebyshev coefficients ordered from low to high. If `y` was 2-D,
-        the coefficients for the data in column k of `y` are in column
-        `k`.
-
-    [residuals, rank, singular_values, rcond] : list
-        These values are only returned if `full` = True
-
-        resid -- sum of squared residuals of the least squares fit
-        rank -- the numerical rank of the scaled Vandermonde matrix
-        sv -- singular values of the scaled Vandermonde matrix
-        rcond -- value of `rcond`.
-
-        For more details, see `linalg.lstsq`.
-
-    Warns
-    -----
-    RankWarning
-        The rank of the coefficient matrix in the least-squares fit is
-        deficient. The warning is only raised if `full` = False. The
-        warnings can be turned off by
-
-        >>> import warnings
-        >>> warnings.simplefilter('ignore', RankWarning)
-
-    See Also
-    --------
-    polyfit, legfit, lagfit, hermfit, hermefit
-    chebval : Evaluates a Chebyshev series.
-    chebvander : Vandermonde matrix of Chebyshev series.
-    chebweight : Chebyshev weight function.
-    linalg.lstsq : Computes a least-squares fit from the matrix.
-    scipy.interpolate.UnivariateSpline : Computes spline fits.
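[Editor's note: the Examples section of chebfit below is empty in the original source. A minimal editorial sketch of the intended usage; since x**2 == (T_0(x) + T_2(x))/2, fitting the noise-free quadratic 1 - x + x**2 should recover the series 1.5*T_0 - 1*T_1 + 0.5*T_2 up to roundoff:]

>>> import numpy as np
>>> from numpy.polynomial import chebyshev as C
>>> x = np.linspace(-1, 1, 51)
>>> y = 1 - x + x**2                    # an exact degree-2 polynomial, no noise
>>> coef = C.chebfit(x, y, 2)
>>> np.allclose(coef, [1.5, -1.0, 0.5])
True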
-
-    Notes
-    -----
-    The solution is the coefficients of the Chebyshev series `p` that
-    minimizes the sum of the weighted squared errors
-
-    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
-
-    where :math:`w_j` are the weights. This problem is solved by setting up
-    the (typically) overdetermined matrix equation
-
-    .. math:: V(x) * c = w * y,
-
-    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
-    coefficients to be solved for, `w` are the weights, and `y` are the
-    observed values. This equation is then solved using the singular value
-    decomposition of `V`.
-
-    If some of the singular values of `V` are so small that they are
-    neglected, then a `RankWarning` will be issued. This means that the
-    coefficient values may be poorly determined. Using a lower order fit
-    will usually get rid of the warning. The `rcond` parameter can also be
-    set to a value smaller than its default, but the resulting fit may be
-    spurious and have large contributions from roundoff error.
-
-    Fits using Chebyshev series are usually better conditioned than fits
-    using power series, but much can depend on the distribution of the
-    sample points and the smoothness of the data. If the quality of the fit
-    is inadequate, splines may be a good alternative.
-
-    References
-    ----------
-    .. [1] Wikipedia, "Curve fitting",
-           http://en.wikipedia.org/wiki/Curve_fitting
-
-    Examples
-    --------
-
-    """
-    order = int(deg) + 1
-    x = np.asarray(x) + 0.0
-    y = np.asarray(y) + 0.0
-
-    # check arguments.
-    if deg < 0:
-        raise ValueError("expected deg >= 0")
-    if x.ndim != 1:
-        raise TypeError("expected 1D vector for x")
-    if x.size == 0:
-        raise TypeError("expected non-empty vector for x")
-    if y.ndim < 1 or y.ndim > 2:
-        raise TypeError("expected 1D or 2D array for y")
-    if len(x) != len(y):
-        raise TypeError("expected x and y to have same length")
-
-    # set up the least squares matrices in transposed form
-    lhs = chebvander(x, deg).T
-    rhs = y.T
-    if w is not None:
-        w = np.asarray(w) + 0.0
-        if w.ndim != 1:
-            raise TypeError("expected 1D vector for w")
-        if len(x) != len(w):
-            raise TypeError("expected x and w to have same length")
-        # apply weights. Don't use inplace operations as they
-        # can cause problems with NA.
-        lhs = lhs * w
-        rhs = rhs * w
-
-    # set rcond
-    if rcond is None:
-        rcond = len(x)*np.finfo(x.dtype).eps
-
-    # Determine the norms of the design matrix columns.
-    if issubclass(lhs.dtype.type, np.complexfloating):
-        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
-    else:
-        scl = np.sqrt(np.square(lhs).sum(1))
-    scl[scl == 0] = 1
-
-    # Solve the least squares problem.
-    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
-    c = (c.T/scl).T
-
-    # warn on rank reduction
-    if rank != order and not full:
-        msg = "The fit may be poorly conditioned"
-        warnings.warn(msg, pu.RankWarning)
-
-    if full:
-        return c, [resids, rank, s, rcond]
-    else:
-        return c
-
-
-def chebcompanion(c):
-    """Return the scaled companion matrix of c.
-
-    The basis polynomials are scaled so that the companion matrix is
-    symmetric when `c` is a Chebyshev basis polynomial. This provides
-    better eigenvalue estimates than the unscaled case, and for basis
-    polynomials the eigenvalues are guaranteed to be real if
-    `numpy.linalg.eigvalsh` is used to obtain them.
-
-    Parameters
-    ----------
-    c : array_like
-        1-D array of Chebyshev series coefficients ordered from low to high
-        degree.
-
-    Returns
-    -------
-    mat : ndarray
-        Scaled companion matrix of dimensions (deg, deg).
-
-    Notes
-    -----
-
-    .. versionadded:: 1.7.0
-
-    """
-    # c is a trimmed copy
-    [c] = pu.as_series([c])
-    if len(c) < 2:
-        raise ValueError('Series must have maximum degree of at least 1.')
-    if len(c) == 2:
-        return np.array([[-c[0]/c[1]]])
-
-    n = len(c) - 1
-    mat = np.zeros((n, n), dtype=c.dtype)
-    scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
-    top = mat.reshape(-1)[1::n+1]
-    bot = mat.reshape(-1)[n::n+1]
-    top[0] = np.sqrt(.5)
-    top[1:] = 1/2
-    bot[...] = top
-    mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
-    return mat
-
-
-def chebroots(c):
-    """
-    Compute the roots of a Chebyshev series.
-
-    Return the roots (a.k.a. "zeros") of the polynomial
-
-    .. math:: p(x) = \\sum_i c[i] * T_i(x).
-
-    Parameters
-    ----------
-    c : 1-D array_like
-        1-D array of coefficients.
-
-    Returns
-    -------
-    out : ndarray
-        Array of the roots of the series. If all the roots are real,
-        then `out` is also real, otherwise it is complex.
-
-    See Also
-    --------
-    polyroots, legroots, lagroots, hermroots, hermeroots
-
-    Notes
-    -----
-    The root estimates are obtained as the eigenvalues of the companion
-    matrix. Roots far from the origin of the complex plane may have large
-    errors due to the numerical instability of the series for such
-    values. Roots with multiplicity greater than 1 will also show larger
-    errors as the value of the series near such points is relatively
-    insensitive to errors in the roots. Isolated roots near the origin can
-    be improved by a few iterations of Newton's method.
-
-    The Chebyshev series basis polynomials aren't powers of `x` so the
-    results of this function may seem unintuitive.
-
-    Examples
-    --------
-    >>> import numpy.polynomial.chebyshev as cheb
-    >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
-    array([ -5.00000000e-01,   2.60860684e-17,   1.00000000e+00])
-
-    """
-    # c is a trimmed copy
-    [c] = pu.as_series([c])
-    if len(c) < 2:
-        return np.array([], dtype=c.dtype)
-    if len(c) == 2:
-        return np.array([-c[0]/c[1]])
-
-    m = chebcompanion(c)
-    r = la.eigvals(m)
-    r.sort()
-    return r
-
-
-def chebgauss(deg):
-    """
-    Gauss-Chebyshev quadrature.
-
-    Computes the sample points and weights for Gauss-Chebyshev quadrature.
-    These sample points and weights will correctly integrate polynomials of
-    degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
-    the weight function :math:`f(x) = 1/\sqrt{1 - x^2}`.
-
-    Parameters
-    ----------
-    deg : int
-        Number of sample points and weights. It must be >= 1.
-
-    Returns
-    -------
-    x : ndarray
-        1-D ndarray containing the sample points.
-    w : ndarray
-        1-D ndarray containing the weights.
-
-    Notes
-    -----
-
-    .. versionadded:: 1.7.0
-
-    The results have only been tested up to degree 100; higher degrees may
-    be problematic. For Gauss-Chebyshev there are closed form solutions for
-    the sample points and weights. If n = `deg`, then
-
-    .. math:: x_i = \cos(\pi (2 i - 1) / (2 n))
-
-    .. math:: w_i = \pi / n
-
-    """
-    ideg = int(deg)
-    if ideg != deg or ideg < 1:
-        raise ValueError("deg must be a positive integer")
-
-    x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
-    w = np.ones(ideg)*(np.pi/ideg)
-
-    return x, w
-
-
-def chebweight(x):
-    """
-    The weight function of the Chebyshev polynomials.
-
-    The weight function is :math:`1/\sqrt{1 - x^2}` and the interval of
-    integration is :math:`[-1, 1]`. The Chebyshev polynomials are
-    orthogonal, but not normalized, with respect to this weight function.
-
-    Parameters
-    ----------
-    x : array_like
-        Values at which the weight function will be computed.
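[Editor's note: chebweight here is exactly the weight that chebgauss above integrates against, so the two can be sanity-checked together. An editorial sketch, not part of the original source; the reference value pi/2 is the classical integral of x**2/sqrt(1 - x**2) over [-1, 1]:]

>>> import numpy as np
>>> from numpy.polynomial.chebyshev import chebgauss
>>> x, w = chebgauss(5)                 # exact for polynomial degree <= 9
>>> np.allclose(np.dot(w, x**2), np.pi/2)
True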
- - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) - return w - - -def chebpts1(npts): - """ - Chebyshev points of the first kind. - - The Chebyshev points of the first kind are the points ``cos(x)``, - where ``x = [pi*(k + .5)/npts for k in range(npts)]``. - - Parameters - ---------- - npts : int - Number of sample points desired. - - Returns - ------- - pts : ndarray - The Chebyshev points of the first kind. - - See Also - -------- - chebpts2 - - Notes - ----- - - .. versionadded:: 1.5.0 - - """ - _npts = int(npts) - if _npts != npts: - raise ValueError("npts must be integer") - if _npts < 1: - raise ValueError("npts must be >= 1") - - x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts) - return np.cos(x) - - -def chebpts2(npts): - """ - Chebyshev points of the second kind. - - The Chebyshev points of the second kind are the points ``cos(x)``, - where ``x = [pi*k/(npts - 1) for k in range(npts)]``. - - Parameters - ---------- - npts : int - Number of sample points desired. - - Returns - ------- - pts : ndarray - The Chebyshev points of the second kind. - - Notes - ----- - - .. versionadded:: 1.5.0 - - """ - _npts = int(npts) - if _npts != npts: - raise ValueError("npts must be integer") - if _npts < 2: - raise ValueError("npts must be >= 2") - - x = np.linspace(-np.pi, 0, _npts) - return np.cos(x) - - -# -# Chebyshev series class -# - -class Chebyshev(ABCPolyBase): - """A Chebyshev series class. - - The Chebyshev class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - methods listed below. - - Parameters - ---------- - coef : array_like - Chebyshev coefficients in order of increasing degree, i.e., - ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(chebadd) - _sub = staticmethod(chebsub) - _mul = staticmethod(chebmul) - _div = staticmethod(chebdiv) - _pow = staticmethod(chebpow) - _val = staticmethod(chebval) - _int = staticmethod(chebint) - _der = staticmethod(chebder) - _fit = staticmethod(chebfit) - _line = staticmethod(chebline) - _roots = staticmethod(chebroots) - _fromroots = staticmethod(chebfromroots) - - # Virtual properties - nickname = 'cheb' - domain = np.array(chebdomain) - window = np.array(chebdomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite.py deleted file mode 100644 index 1fd49d7745fac..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite.py +++ /dev/null @@ -1,1789 +0,0 @@ -""" -Objects for dealing with Hermite series. - -This module provides a number of objects (mostly functions) useful for -dealing with Hermite series, including a `Hermite` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `hermdomain` -- Hermite series default domain, [-1,1]. 
-- `hermzero` -- Hermite series that evaluates identically to 0. -- `hermone` -- Hermite series that evaluates identically to 1. -- `hermx` -- Hermite series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``. -- `hermadd` -- add two Hermite series. -- `hermsub` -- subtract one Hermite series from another. -- `hermmul` -- multiply two Hermite series. -- `hermdiv` -- divide one Hermite series by another. -- `hermval` -- evaluate a Hermite series at given points. -- `hermval2d` -- evaluate a 2D Hermite series at given points. -- `hermval3d` -- evaluate a 3D Hermite series at given points. -- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product. -- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product. - -Calculus --------- -- `hermder` -- differentiate a Hermite series. -- `hermint` -- integrate a Hermite series. - -Misc Functions --------------- -- `hermfromroots` -- create a Hermite series with specified roots. -- `hermroots` -- find the roots of a Hermite series. -- `hermvander` -- Vandermonde-like matrix for Hermite polynomials. -- `hermvander2d` -- Vandermonde-like matrix for 2D power series. -- `hermvander3d` -- Vandermonde-like matrix for 3D power series. -- `hermgauss` -- Gauss-Hermite quadrature, points and weights. -- `hermweight` -- Hermite weight function. -- `hermcompanion` -- symmetrized companion matrix in Hermite form. -- `hermfit` -- least-squares fit returning a Hermite series. -- `hermtrim` -- trim leading coefficients from a Hermite series. -- `hermline` -- Hermite series of given straight line. -- `herm2poly` -- convert a Hermite series to a polynomial. -- `poly2herm` -- convert a polynomial to a Hermite series. - -Classes -------- -- `Hermite` -- A Hermite series class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', - 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', - 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', - 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', - 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', - 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] - -hermtrim = pu.trimcoef - - -def poly2herm(pol): - """ - poly2herm(pol) - - Convert a polynomial to a Hermite series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Hermite series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Hermite - series. - - See Also - -------- - herm2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite import poly2herm - >>> poly2herm(np.arange(4)) - array([ 1. 
, 2.75 , 0.5 , 0.375]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = hermadd(hermmulx(res), pol[i]) - return res - - -def herm2poly(c): - """ - Convert a Hermite series to a polynomial. - - Convert an array representing the coefficients of a Hermite series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Hermite series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2herm - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite import herm2poly - >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) - array([ 0., 1., 2., 3.]) - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n == 1: - return c - if n == 2: - c[1] *= 2 - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], c1*(2*(i - 1))) - c1 = polyadd(tmp, polymulx(c1)*2) - return polyadd(c0, polymulx(c1)*2) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Hermite -hermdomain = np.array([-1, 1]) - -# Hermite coefficients representing zero. -hermzero = np.array([0]) - -# Hermite coefficients representing one. -hermone = np.array([1]) - -# Hermite coefficients representing the identity x. -hermx = np.array([0, 1/2]) - - -def hermline(off, scl): - """ - Hermite series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Hermite series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.hermite import hermline, hermval - >>> hermval(0,hermline(3, 2)) - 3.0 - >>> hermval(1,hermline(3, 2)) - 5.0 - - """ - if scl != 0: - return np.array([off, scl/2]) - else: - return np.array([off]) - - -def hermfromroots(roots): - """ - Generate a Hermite series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in Hermite form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in Hermite form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. 
If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, legfromroots, lagfromroots, chebfromroots, - hermefromroots. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermfromroots, hermval - >>> coef = hermfromroots((-1, 0, 1)) - >>> hermval((-1, 0, 1), coef) - array([ 0., 0., 0.]) - >>> coef = hermfromroots((-1j, 1j)) - >>> hermval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) - - """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [hermline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [hermmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = hermmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def hermadd(c1, c2): - """ - Add one Hermite series to another. - - Returns the sum of two Hermite series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Hermite series of their sum. - - See Also - -------- - hermsub, hermmul, hermdiv, hermpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Hermite series - is a Hermite series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite import hermadd - >>> hermadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def hermsub(c1, c2): - """ - Subtract one Hermite series from another. - - Returns the difference of two Hermite series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their difference. - - See Also - -------- - hermadd, hermmul, hermdiv, hermpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Hermite - series is a Hermite series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite import hermsub - >>> hermsub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def hermmulx(c): - """Multiply a Hermite series by x. - - Multiply the Hermite series `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to - high. 
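[Editor's note: a compact property check for the recursion implemented by hermmulx, an editorial sketch using only functions defined in this file: multiplying the coefficients by x must agree with multiplying the evaluated series by x, up to roundoff:]

>>> import numpy as np
>>> from numpy.polynomial.hermite import hermmulx, hermval
>>> c = [1.0, 2.0, 3.0]
>>> x = np.linspace(-2.0, 2.0, 5)
>>> np.allclose(hermval(x, hermmulx(c)), x * hermval(x, c))
True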
- - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Hermite - polynomials in the form - - .. math:: - - xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) - - Examples - -------- - >>> from numpy.polynomial.hermite import hermmulx - >>> hermmulx([1, 2, 3]) - array([ 2. , 6.5, 1. , 1.5]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0]/2 - for i in range(1, len(c)): - prd[i + 1] = c[i]/2 - prd[i - 1] += c[i]*i - return prd - - -def hermmul(c1, c2): - """ - Multiply one Hermite series by another. - - Returns the product of two Hermite series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their product. - - See Also - -------- - hermadd, hermsub, hermdiv, hermpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Hermite polynomial basis set. Thus, to express - the product as a Hermite series, it is necessary to "reproject" the - product onto said basis set, which may produce "unintuitive" (but - correct) results; see Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermmul - >>> hermmul([1, 2, 3], [0, 1, 2]) - array([ 52., 29., 52., 7., 6.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - c = c2 - xs = c1 - else: - c = c1 - xs = c2 - - if len(c) == 1: - c0 = c[0]*xs - c1 = 0 - elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs - else: - nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) - c1 = hermadd(tmp, hermmulx(c1)*2) - return hermadd(c0, hermmulx(c1)*2) - - -def hermdiv(c1, c2): - """ - Divide one Hermite series by another. - - Returns the quotient-with-remainder of two Hermite series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Hermite series coefficients representing the quotient and - remainder. - - See Also - -------- - hermadd, hermsub, hermmul, hermpow - - Notes - ----- - In general, the (polynomial) division of one Hermite series by another - results in quotient and remainder terms that are not in the Hermite - polynomial basis set. Thus, to express these results as a Hermite - series, it is necessary to "reproject" the results onto the Hermite - basis set, which may produce "unintuitive" (but correct) results; see - Examples section below. 
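[Editor's note: the quotient and remainder returned by hermdiv satisfy c1 == quo*c2 + rem in the Hermite basis, which gives a one-line check. An editorial sketch, not part of the original source, reusing the coefficients from the examples that follow:]

>>> import numpy as np
>>> from numpy.polynomial.hermite import hermadd, hermmul, hermdiv
>>> c1 = [54., 31., 52., 7., 6.]
>>> c2 = [0., 1., 2.]
>>> quo, rem = hermdiv(c1, c2)
>>> np.allclose(hermadd(hermmul(quo, c2), rem), c1)
True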
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermdiv - >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) - >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 2., 2.])) - >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 1.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = hermmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) - - -def hermpow(c, pow, maxpower=16): - """Raise a Hermite series to a power. - - Returns the Hermite series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Hermite series of power. - - See Also - -------- - hermadd, hermsub, hermmul, hermdiv - - Examples - -------- - >>> from numpy.polynomial.hermite import hermpow - >>> hermpow([1, 2, 3], 2) - array([ 81., 52., 82., 12., 9.]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - prd = c - for i in range(2, power + 1): - prd = hermmul(prd, c) - return prd - - -def hermder(c, m=1, scl=1, axis=0): - """ - Differentiate a Hermite series. - - Returns the Hermite series coefficients `c` differentiated `m` times - along `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` - while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + - 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is - ``y``. - - Parameters - ---------- - c : array_like - Array of Hermite series coefficients. If `c` is multidimensional the - different axis correspond to different variables with the degree in - each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Hermite series of the derivative. 
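[Editor's note: hermder and the companion hermint defined further below invert each other when the integration constants are zero, which gives a quick round-trip check consistent with this file's own doctests. An editorial sketch:]

>>> import numpy as np
>>> from numpy.polynomial.hermite import hermder, hermint
>>> c = [1., 2., 3.]
>>> np.allclose(hermder(hermint(c)), c)
True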
- - See Also - -------- - hermint - - Notes - ----- - In general, the result of differentiating a Hermite series does not - resemble the same operation on a power series. Thus the result of this - function may be "unintuitive," albeit correct; see Examples section - below. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermder - >>> hermder([ 1. , 0.5, 0.5, 0.5]) - array([ 1., 2., 3.]) - >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) - array([ 1., 2., 3.]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 0, -1): - der[j - 1] = (2*j)*c[j] - c = der - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Hermite series. - - Returns the Hermite series coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] - represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + - 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of Hermite series coefficients. If c is multidimensional the - different axis correspond to different variables with the degree in - each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Hermite series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - hermder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? 
Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - .. math::`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermint - >>> hermint([1,2,3]) # integrate once, value 0 at 0. - array([ 1. , 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 - array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) - >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. - array([ 2. , 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 - array([-2. , 0.5, 0.5, 0.5]) - >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) - array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0]/2 - for j in range(1, n): - tmp[j + 1] = c[j]/(2*(j + 1)) - tmp[0] += k[i] - hermval(lbnd, tmp) - c = tmp - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def hermval(x, c, tensor=True): - """ - Evaluate an Hermite series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. 
In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - hermval2d, hermgrid2d, hermval3d, hermgrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermval - >>> coef = [1,2,3] - >>> hermval(1, coef) - 11.0 - >>> hermval([[1,2],[3,4]], coef) - array([[ 11., 51.], - [ 115., 203.]]) - - """ - c = np.array(c, ndmin=1, copy=0) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - x2 = x*2 - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - nd = len(c) - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = c[-i] - c1*(2*(nd - 1)) - c1 = tmp + c1*x2 - return c0 + c1*x2 - - -def hermval2d(x, y, c): - """ - Evaluate a 2-D Hermite series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points formed with - pairs of corresponding values from `x` and `y`. - - See Also - -------- - hermval, hermgrid2d, hermval3d, hermgrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y = np.array((x, y), copy=0) - except: - raise ValueError('x, y are incompatible') - - c = hermval(x, c) - c = hermval(y, c, tensor=False) - return c - - -def hermgrid2d(x, y, c): - """ - Evaluate a 2-D Hermite series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \sum_{i,j} c_{i,j} * H_i(a) * H_j(b) - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. 
The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - hermval, hermval2d, hermval3d, hermgrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = hermval(x, c) - c = hermval(y, c) - return c - - -def hermval3d(x, y, z, c): - """ - Evaluate a 3-D Hermite series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - hermval, hermval2d, hermgrid2d, hermgrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y, z = np.array((x, y, z), copy=0) - except: - raise ValueError('x, y, z are incompatible') - - c = hermval(x, c) - c = hermval(y, c, tensor=False) - c = hermval(z, c, tensor=False) - return c - - -def hermgrid3d(x, y, z, c): - """ - Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. 
math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)
-
-    where the points `(a, b, c)` consist of all triples formed by taking
-    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
-    a grid with `x` in the first dimension, `y` in the second, and `z` in
-    the third.
-
-    The parameters `x`, `y`, and `z` are converted to arrays only if they
-    are tuples or lists; otherwise they are treated as scalars. In
-    either case, either `x`, `y`, and `z` or their elements must support
-    multiplication and addition both with themselves and with the elements
-    of `c`.
-
-    If `c` has fewer than three dimensions, ones are implicitly appended to
-    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
-    x.shape + y.shape + z.shape.
-
-    Parameters
-    ----------
-    x, y, z : array_like, compatible objects
-        The three dimensional series is evaluated at the points in the
-        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
-        list or tuple, it is first converted to an ndarray, otherwise it is
-        left unchanged and, if it isn't an ndarray, it is treated as a
-        scalar.
-    c : array_like
-        Array of coefficients ordered so that the coefficient of the term
-        of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
-        dimension greater than three the remaining indices enumerate
-        multiple sets of coefficients.
-
-    Returns
-    -------
-    values : ndarray, compatible object
-        The values of the three dimensional polynomial at points in the
-        Cartesian product of `x`, `y`, and `z`.
-
-    See Also
-    --------
-    hermval, hermval2d, hermgrid2d, hermval3d
-
-    Notes
-    -----
-
-    .. versionadded:: 1.7.0
-
-    """
-    c = hermval(x, c)
-    c = hermval(y, c)
-    c = hermval(z, c)
-    return c
-
-
-def hermvander(x, deg):
-    """Pseudo-Vandermonde matrix of given degree.
-
-    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
-    `x`. The pseudo-Vandermonde matrix is defined by
-
-    .. math:: V[..., i] = H_i(x),
-
-    where `0 <= i <= deg`. The leading indices of `V` index the elements of
-    `x` and the last index is the degree of the Hermite polynomial.
-
-    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
-    array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
-    ``hermval(x, c)`` are the same up to roundoff. This equivalence is
-    useful both for least squares fitting and for the evaluation of a large
-    number of Hermite series of the same degree and sample points.
-
-    Parameters
-    ----------
-    x : array_like
-        Array of points. The dtype is converted to float64 or complex128
-        depending on whether any of the elements are complex. If `x` is
-        scalar it is converted to a 1-D array.
-    deg : int
-        Degree of the resulting matrix.
-
-    Returns
-    -------
-    vander : ndarray
-        The pseudo-Vandermonde matrix. The shape of the returned matrix is
-        ``x.shape + (deg + 1,)``, where the last index is the degree of the
-        corresponding Hermite polynomial. The dtype will be the same as
-        the converted `x`.
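[Editor's note: the dot-product equivalence stated in the hermvander docstring above, np.dot(V, c) matching hermval(x, c) up to roundoff, can be exercised directly. An editorial sketch, not part of the original source, using only functions defined in this file:]

>>> import numpy as np
>>> from numpy.polynomial.hermite import hermvander, hermval
>>> x = np.linspace(-1.0, 1.0, 5)
>>> c = np.array([1.0, 2.0, 3.0])
>>> V = hermvander(x, 2)
>>> np.allclose(np.dot(V, c), hermval(x, c))
True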
- - Examples - -------- - >>> from numpy.polynomial.hermite import hermvander - >>> x = np.array([-1, 0, 1]) - >>> hermvander(x, 3) - array([[ 1., -2., 2., 4.], - [ 1., 0., -2., -0.], - [ 1., 2., 2., -4.]]) - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 - if ideg > 0: - x2 = x*2 - v[1] = x2 - for i in range(2, ideg + 1): - v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) - return np.rollaxis(v, 0, v.ndim) - - -def hermvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., deg[1]*i + j] = H_i(x) * H_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the Hermite polynomials. - - If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D Hermite - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - hermvander, hermvander3d. hermval2d, hermval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = hermvander(x, degx) - vy = hermvander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) - - -def hermvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the Hermite polynomials. - - If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the - same up to roundoff. 
This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D Hermite - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - hermvander, hermvander3d. hermval2d, hermval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = hermvander(x, degx) - vy = hermvander(y, degy) - vz = hermvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) - - -def hermfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Hermite series to data. - - Return the coefficients of a Hermite series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Hermite coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. 
- - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, legfit, lagfit, polyfit, hermefit - hermval : Evaluates a Hermite series. - hermvander : Vandermonde matrix of Hermite series. - hermweight : Hermite weight function - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the Hermite series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Hermite series are probably most useful when the data can be - approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite - weight. In that case the weight ``sqrt(w(x[i])`` should be used - together with data values ``y[i]/sqrt(w(x[i])``. The weight function is - available as `hermweight`. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.hermite import hermfit, hermval - >>> x = np.linspace(-10, 10) - >>> err = np.random.randn(len(x))/10 - >>> y = hermval(x, [1, 2, 3]) + err - >>> hermfit(x, y, 2) - array([ 0.97902637, 1.99849131, 3.00006 ]) - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. - if deg < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - # set up the least squares matrices in transposed form - lhs = hermvander(x, deg).T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. 
- lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full: - return c, [resids, rank, s, rcond] - else: - return c - - -def hermcompanion(c): - """Return the scaled companion matrix of c. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Hermite basis polynomial. This provides - better eigenvalue estimates than the unscaled case and for basis - polynomials the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded::1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-.5*c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., np.sqrt(2.*np.arange(1, n)))) - scl = np.multiply.accumulate(scl) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(.5*np.arange(1, n)) - bot[...] = top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 - return mat - - -def hermroots(c): - """ - Compute the roots of a Hermite series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * H_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, legroots, lagroots, chebroots, hermeroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - The Hermite series basis polynomials aren't powers of `x` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> from numpy.polynomial.hermite import hermroots, hermfromroots - >>> coef = hermfromroots([-1, 0, 1]) - >>> coef - array([ 0. , 0.25 , 0. , 0.125]) - >>> hermroots(coef) - array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) <= 1: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-.5*c[0]/c[1]]) - - m = hermcompanion(c) - r = la.eigvals(m) - r.sort() - return r - - -def hermgauss(deg): - """ - Gauss-Hermite quadrature. 
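The Newton polishing suggested in the ``hermroots`` notes above is easy to apply by hand; a minimal sketch of one refinement step (the starting estimates here are hypothetical perturbed roots, chosen for illustration):

>>> import numpy as np
>>> from numpy.polynomial.hermite import hermval, hermder, hermfromroots
>>> c = hermfromroots([-1, 0, 1])
>>> r = np.array([-1.0000001, 1e-17, 0.9999999])   # rough estimates
>>> r = r - hermval(r, c)/hermval(r, hermder(c))   # one Newton step
>>> np.allclose(r, [-1, 0, 1])
True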
- - Computes the sample points and weights for Gauss-Hermite quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[-\inf, \inf]` - with the weight function :math:`f(x) = \exp(-x^2)`. - - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. versionadded::1.7.0 - - The results have only been tested up to degree 100, higher degrees may - be problematic. The weights are determined by using the fact that - - .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) - - where :math:`c` is a constant independent of :math:`k` and :math:`x_k` - is the k'th root of :math:`H_n`, and then scaling the results to get - the right value when integrating 1. - - """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") - - # first approximation of roots. We use the fact that the companion - # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) - m = hermcompanion(c) - x = la.eigvals(m) - x.sort() - - # improve roots by one application of Newton - dy = hermval(x, c) - df = hermval(x, hermder(c)) - x -= dy/df - - # compute the weights. We scale the factor to avoid possible numerical - # overflow. - fm = hermval(x, c[1:]) - fm /= np.abs(fm).max() - df /= np.abs(df).max() - w = 1/(fm * df) - - # for Hermite we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 - - # scale w to get the right value - w *= np.sqrt(np.pi) / w.sum() - - return x, w - - -def hermweight(x): - """ - Weight function of the Hermite polynomials. - - The weight function is :math:`\exp(-x^2)` and the interval of - integration is :math:`[-\inf, \inf]`. the Hermite polynomials are - orthogonal, but not normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded::1.7.0 - - """ - w = np.exp(-x**2) - return w - - -# -# Hermite series class -# - -class Hermite(ABCPolyBase): - """An Hermite series class. - - The Hermite class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Laguerre coefficients in order of increasing degree, i.e, - ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(X) + 3*H_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. 
versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(hermadd) - _sub = staticmethod(hermsub) - _mul = staticmethod(hermmul) - _div = staticmethod(hermdiv) - _pow = staticmethod(hermpow) - _val = staticmethod(hermval) - _int = staticmethod(hermint) - _der = staticmethod(hermder) - _fit = staticmethod(hermfit) - _line = staticmethod(hermline) - _roots = staticmethod(hermroots) - _fromroots = staticmethod(hermfromroots) - - # Virtual properties - nickname = 'herm' - domain = np.array(hermdomain) - window = np.array(hermdomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite_e.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite_e.py deleted file mode 100644 index 6e33dc0bc31c5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/hermite_e.py +++ /dev/null @@ -1,1786 +0,0 @@ -""" -Objects for dealing with Hermite_e series. - -This module provides a number of objects (mostly functions) useful for -dealing with Hermite_e series, including a `HermiteE` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `hermedomain` -- Hermite_e series default domain, [-1,1]. -- `hermezero` -- Hermite_e series that evaluates identically to 0. -- `hermeone` -- Hermite_e series that evaluates identically to 1. -- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``. -- `hermeadd` -- add two Hermite_e series. -- `hermesub` -- subtract one Hermite_e series from another. -- `hermemul` -- multiply two Hermite_e series. -- `hermediv` -- divide one Hermite_e series by another. -- `hermeval` -- evaluate a Hermite_e series at given points. -- `hermeval2d` -- evaluate a 2D Hermite_e series at given points. -- `hermeval3d` -- evaluate a 3D Hermite_e series at given points. -- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product. -- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product. - -Calculus --------- -- `hermeder` -- differentiate a Hermite_e series. -- `hermeint` -- integrate a Hermite_e series. - -Misc Functions --------------- -- `hermefromroots` -- create a Hermite_e series with specified roots. -- `hermeroots` -- find the roots of a Hermite_e series. -- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials. -- `hermevander2d` -- Vandermonde-like matrix for 2D power series. -- `hermevander3d` -- Vandermonde-like matrix for 3D power series. -- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights. -- `hermeweight` -- Hermite_e weight function. -- `hermecompanion` -- symmetrized companion matrix in Hermite_e form. -- `hermefit` -- least-squares fit returning a Hermite_e series. -- `hermetrim` -- trim leading coefficients from a Hermite_e series. -- `hermeline` -- Hermite_e series of given straight line. -- `herme2poly` -- convert a Hermite_e series to a polynomial. -- `poly2herme` -- convert a polynomial to a Hermite_e series. - -Classes -------- -- `HermiteE` -- A Hermite_e series class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la - -from . 
import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', - 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', - 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', - 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', - 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', - 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', - 'hermegauss', 'hermeweight'] - -hermetrim = pu.trimcoef - - -def poly2herme(pol): - """ - poly2herme(pol) - - Convert a polynomial to a Hermite series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Hermite series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Hermite - series. - - See Also - -------- - herme2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import poly2herme - >>> poly2herme(np.arange(4)) - array([ 2., 10., 2., 3.]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = hermeadd(hermemulx(res), pol[i]) - return res - - -def herme2poly(c): - """ - Convert a Hermite series to a polynomial. - - Convert an array representing the coefficients of a Hermite series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Hermite series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2herme - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import herme2poly - >>> herme2poly([ 2., 10., 2., 3.]) - array([ 0., 1., 2., 3.]) - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n == 1: - return c - if n == 2: - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], c1*(i - 1)) - c1 = polyadd(tmp, polymulx(c1)) - return polyadd(c0, polymulx(c1)) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Hermite -hermedomain = np.array([-1, 1]) - -# Hermite coefficients representing zero. -hermezero = np.array([0]) - -# Hermite coefficients representing one. -hermeone = np.array([1]) - -# Hermite coefficients representing the identity x. -hermex = np.array([0, 1]) - - -def hermeline(off, scl): - """ - Hermite series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. 
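The two conversion helpers above, ``poly2herme`` and ``herme2poly``, are mutual inverses, so a round trip should reproduce the original coefficients; a minimal sketch:

>>> import numpy as np
>>> from numpy.polynomial.hermite_e import poly2herme, herme2poly
>>> np.allclose(herme2poly(poly2herme([0, 1, 2, 3])), [0, 1, 2, 3])
True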
- - Returns - ------- - y : ndarray - This module's representation of the Hermite series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeline - >>> from numpy.polynomial.hermite_e import hermeline, hermeval - >>> hermeval(0,hermeline(3, 2)) - 3.0 - >>> hermeval(1,hermeline(3, 2)) - 5.0 - - """ - if scl != 0: - return np.array([off, scl]) - else: - return np.array([off]) - - -def hermefromroots(roots): - """ - Generate a HermiteE series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in HermiteE form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in HermiteE form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, legfromroots, lagfromroots, hermfromroots, - chebfromroots. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval - >>> coef = hermefromroots((-1, 0, 1)) - >>> hermeval((-1, 0, 1), coef) - array([ 0., 0., 0.]) - >>> coef = hermefromroots((-1j, 1j)) - >>> hermeval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) - - """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [hermeline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [hermemul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = hermemul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def hermeadd(c1, c2): - """ - Add one Hermite series to another. - - Returns the sum of two Hermite series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Hermite series of their sum. - - See Also - -------- - hermesub, hermemul, hermediv, hermepow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Hermite series - is a Hermite series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeadd - >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def hermesub(c1, c2): - """ - Subtract one Hermite series from another. 
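Because addition and subtraction are component-wise, ``hermesub`` undoes ``hermeadd`` exactly, up to trimming of trailing zero coefficients; a minimal sketch:

>>> from numpy.polynomial.hermite_e import hermeadd, hermesub
>>> hermesub(hermeadd([1, 2, 3], [1, 2, 3, 4]), [1, 2, 3, 4])
array([ 1., 2., 3.])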
- - Returns the difference of two Hermite series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their difference. - - See Also - -------- - hermeadd, hermemul, hermediv, hermepow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Hermite - series is a Hermite series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermesub - >>> hermesub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def hermemulx(c): - """Multiply a Hermite series by x. - - Multiply the Hermite series `c` by x, where x is the independent - variable. - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Hermite - polynomials in the form - - .. math:: - - xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x)) - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermemulx - >>> hermemulx([1, 2, 3]) - array([ 2., 7., 2., 3.]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0] - for i in range(1, len(c)): - prd[i + 1] = c[i] - prd[i - 1] += c[i]*i - return prd - - -def hermemul(c1, c2): - """ - Multiply one Hermite series by another. - - Returns the product of two Hermite series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Hermite series coefficients representing their product. - - See Also - -------- - hermeadd, hermesub, hermediv, hermepow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Hermite polynomial basis set. Thus, to express - the product as a Hermite series, it is necessary to "reproject" the - product onto said basis set, which may produce "unintuitive" (but - correct) results; see Examples section below.
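One way to see that the reprojected product described above is nevertheless correct is to compare pointwise values of the product series against the product of pointwise values; a minimal sketch:

>>> import numpy as np
>>> from numpy.polynomial.hermite_e import hermemul, hermeval
>>> x = np.linspace(-2, 2, 7)
>>> p = hermemul([1, 2, 3], [0, 1, 2])
>>> np.allclose(hermeval(x, p), hermeval(x, [1, 2, 3])*hermeval(x, [0, 1, 2]))
True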
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermemul - >>> hermemul([1, 2, 3], [0, 1, 2]) - array([ 14., 15., 28., 7., 6.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - c = c2 - xs = c1 - else: - c = c1 - xs = c2 - - if len(c) == 1: - c0 = c[0]*xs - c1 = 0 - elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs - else: - nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = hermesub(c[-i]*xs, c1*(nd - 1)) - c1 = hermeadd(tmp, hermemulx(c1)) - return hermeadd(c0, hermemulx(c1)) - - -def hermediv(c1, c2): - """ - Divide one Hermite series by another. - - Returns the quotient-with-remainder of two Hermite series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Hermite series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Hermite series coefficients representing the quotient and - remainder. - - See Also - -------- - hermeadd, hermesub, hermemul, hermepow - - Notes - ----- - In general, the (polynomial) division of one Hermite series by another - results in quotient and remainder terms that are not in the Hermite - polynomial basis set. Thus, to express these results as a Hermite - series, it is necessary to "reproject" the results onto the Hermite - basis set, which may produce "unintuitive" (but correct) results; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermediv - >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) - >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 2.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = hermemul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) - - -def hermepow(c, pow, maxpower=16): - """Raise a Hermite series to a power. - - Returns the Hermite series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Hermite series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Hermite series of power. 
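The quotient and remainder returned by ``hermediv`` above satisfy the usual division identity ``c1 == quo*c2 + rem``; a minimal sketch:

>>> import numpy as np
>>> from numpy.polynomial.hermite_e import hermediv, hermemul, hermeadd
>>> c1, c2 = [15., 17., 28., 7., 6.], [0, 1, 2]
>>> quo, rem = hermediv(c1, c2)
>>> np.allclose(hermeadd(hermemul(quo, c2), rem), c1)
True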
- - See Also - -------- - hermeadd, hermesub, hermemul, hermediv - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermepow - >>> hermepow([1, 2, 3], 2) - array([ 23., 28., 46., 12., 9.]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - prd = c - for i in range(2, power + 1): - prd = hermemul(prd, c) - return prd - - -def hermeder(c, m=1, scl=1, axis=0): - """ - Differentiate a Hermite_e series. - - Returns the series coefficients `c` differentiated `m` times along - `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` - while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) - + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 - is ``y``. - - Parameters - ---------- - c : array_like - Array of Hermite_e series coefficients. If `c` is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Hermite series of the derivative. - - See Also - -------- - hermeint - - Notes - ----- - In general, the result of differentiating a Hermite series does not - resemble the same operation on a power series. Thus the result of this - function may be "unintuitive," albeit correct; see Examples section - below. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeder - >>> hermeder([ 1., 1., 1., 1.]) - array([ 1., 2., 3.]) - >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) - array([ 1., 2., 3.]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - n = len(c) - if cnt >= n: - return c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 0, -1): - der[j - 1] = j*c[j] - c = der - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Hermite_e series. - - Returns the Hermite_e series coefficients `c` integrated `m` times from - `lbnd` along `axis`. 
At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] - represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + - 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of Hermite_e series coefficients. If c is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Hermite_e series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - hermeder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - .. math::`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeint - >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. - array([ 1., 1., 1., 1.]) - >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 - array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) - >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. - array([ 2., 1., 1., 1.]) - >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 - array([-1., 1., 1., 1.]) - >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) - array([ 1.83333333, 0. 
, 0.5 , 0.33333333, 0.25 ]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0] - for j in range(1, n): - tmp[j + 1] = c[j]/(j + 1) - tmp[0] += k[i] - hermeval(lbnd, tmp) - c = tmp - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def hermeval(x, c, tensor=True): - """ - Evaluate an HermiteE series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - hermeval2d, hermegrid2d, hermeval3d, hermegrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. 
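That Clenshaw recursion can be written out in a few lines for the 1-D case; a minimal sketch using the recurrence ``He_{i+1}(x) = x*He_i(x) - i*He_{i-1}(x)``, assuming ``len(c) >= 2`` and skipping the scalar-coefficient and multidimensional handling the library code provides:

>>> def clenshaw_herme(x, c):
...     c0, c1 = c[-2], c[-1]
...     for i in range(3, len(c) + 1):
...         n = len(c) - i + 1                 # degree paired with c1
...         c0, c1 = c[-i] - c1*n, c0 + c1*x
...     return c0 + c1*x
>>> clenshaw_herme(1, [1, 2, 3])               # same value as hermeval(1, [1, 2, 3])
3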
- - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeval - >>> coef = [1,2,3] - >>> hermeval(1, coef) - 3.0 - >>> hermeval([[1,2],[3,4]], coef) - array([[ 3., 14.], - [ 31., 54.]]) - - """ - c = np.array(c, ndmin=1, copy=0) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - nd = len(c) - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = c[-i] - c1*(nd - 1) - c1 = tmp + c1*x - return c0 + c1*x - - -def hermeval2d(x, y, c): - """ - Evaluate a 2-D HermiteE series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points formed with - pairs of corresponding values from `x` and `y`. - - See Also - -------- - hermeval, hermegrid2d, hermeval3d, hermegrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y = np.array((x, y), copy=0) - except: - raise ValueError('x, y are incompatible') - - c = hermeval(x, c) - c = hermeval(y, c, tensor=False) - return c - - -def hermegrid2d(x, y, c): - """ - Evaluate a 2-D HermiteE series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \sum_{i,j} c_{i,j} * H_i(a) * H_j(b) - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. 
If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - hermeval, hermeval2d, hermeval3d, hermegrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = hermeval(x, c) - c = hermeval(y, c) - return c - - -def hermeval3d(x, y, z, c): - """ - Evaluate a 3-D Hermite_e series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - hermeval, hermeval2d, hermegrid2d, hermegrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y, z = np.array((x, y, z), copy=0) - except: - raise ValueError('x, y, z are incompatible') - - c = hermeval(x, c) - c = hermeval(y, c, tensor=False) - c = hermeval(z, c, tensor=False) - return c - - -def hermegrid3d(x, y, z, c): - """ - Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. - - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. 
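A quick way to confirm the shape rule just stated, ``c.shape[3:] + x.shape + y.shape + z.shape``; a minimal sketch:

>>> import numpy as np
>>> from numpy.polynomial.hermite_e import hermegrid3d
>>> x, y, z = np.ones(2), np.ones(3), np.ones(4)
>>> c = np.zeros((2, 2, 2))
>>> hermegrid3d(x, y, z, c).shape
(2, 3, 4)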
- - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - hermeval, hermeval2d, hermegrid2d, hermeval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = hermeval(x, c) - c = hermeval(y, c) - c = hermeval(z, c) - return c - - -def hermevander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., i] = He_i(x), - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the degree of the HermiteE polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and - ``hermeval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of HermiteE series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo-Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding HermiteE polynomial. The dtype will be the same as - the converted `x`. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermevander - >>> x = np.array([-1, 0, 1]) - >>> hermevander(x, 3) - array([[ 1., -1., 0., 2.], - [ 1., 0., -1., -0.], - [ 1., 1., 0., -2.]]) - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 - if ideg > 0: - v[1] = x - for i in range(2, ideg + 1): - v[i] = (v[i-1]*x - v[i-2]*(i - 1)) - return np.rollaxis(v, 0, v.ndim) - - -def hermevander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., deg[1]*i + j] = He_i(x) * He_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the HermiteE polynomials. - - If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... 
- - and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D HermiteE - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - hermevander, hermevander3d, hermeval2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = hermevander(x, degx) - vy = hermevander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) - - -def hermevander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then the pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the HermiteE polynomials. - - If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D HermiteE - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - hermevander, hermevander3d, hermeval2d, hermeval3d - - Notes - ----- - - ..
versionadded::1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = hermevander(x, degx) - vy = hermevander(y, degy) - vz = hermevander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) - - -def hermefit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Hermite series to data. - - Return the coefficients of a HermiteE series of degree `deg` that is - the least squares fit to the data values `y` given at points `x`. If - `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D - multiple fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Hermite coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, legfit, polyfit, hermfit, polyfit - hermeval : Evaluates a Hermite series. - hermevander : pseudo Vandermonde matrix of Hermite series. - hermeweight : HermiteE weight function. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. 
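The diagnostic list described under ``full`` can be unpacked directly; a minimal sketch of a noise-free fit, where the rank equals the full order ``deg + 1``:

>>> import numpy as np
>>> from numpy.polynomial.hermite_e import hermefit, hermeval
>>> x = np.linspace(-1, 1, 50)
>>> y = hermeval(x, [1, 2, 3])
>>> coef, [resid, rank, sv, rcond] = hermefit(x, y, 2, full=True)
>>> rank
3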
- - Notes - ----- - The solution is the coefficients of the HermiteE series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` - are the coefficients to be solved for, and the elements of `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using HermiteE series are probably most useful when the data can - be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE - weight. In that case the weight ``sqrt(w(x[i]))`` should be used - together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is - available as `hermeweight`. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermefit, hermeval - >>> x = np.linspace(-10, 10) - >>> err = np.random.randn(len(x))/10 - >>> y = hermeval(x, [1, 2, 3]) + err - >>> hermefit(x, y, 2) - array([ 1.01690445, 1.99951418, 2.99948696]) - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. - if deg < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - # set up the least squares matrices in transposed form - lhs = hermevander(x, deg).T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full: - return c, [resids, rank, s, rcond] - else: - return c - - -def hermecompanion(c): - """ - Return the scaled companion matrix of c. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an HermiteE basis polynomial.
This provides - better eigenvalue estimates than the unscaled case and for basis - polynomials the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - c : array_like - 1-D array of HermiteE series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded::1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., np.sqrt(np.arange(1, n)))) - scl = np.multiply.accumulate(scl) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(np.arange(1, n)) - bot[...] = top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1]) - return mat - - -def hermeroots(c): - """ - Compute the roots of a HermiteE series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * He_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, legroots, lagroots, hermroots, chebroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - The HermiteE series basis polynomials aren't powers of `x` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots - >>> coef = hermefromroots([-1, 0, 1]) - >>> coef - array([ 0., 2., 0., 1.]) - >>> hermeroots(coef) - array([-1., 0., 1.]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) <= 1: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-c[0]/c[1]]) - - m = hermecompanion(c) - r = la.eigvals(m) - r.sort() - return r - - -def hermegauss(deg): - """ - Gauss-HermiteE quadrature. - - Computes the sample points and weights for Gauss-HermiteE quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[-\inf, \inf]` - with the weight function :math:`f(x) = \exp(-x^2/2)`. - - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. versionadded::1.7.0 - - The results have only been tested up to degree 100, higher degrees may - be problematic. The weights are determined by using the fact that - - .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) - - where :math:`c` is a constant independent of :math:`k` and :math:`x_k` - is the k'th root of :math:`He_n`, and then scaling the results to get - the right value when integrating 1. 
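-
-     For example, the computed weights reproduce the integrals of 1 and
-     :math:`x^2` against the weight function, both equal to
-     :math:`\sqrt{2\pi}` (a quick check using only `hermegauss` itself):
-
-     >>> x, w = hermegauss(4)
-     >>> abs(w.sum() - np.sqrt(2*np.pi)) < 1e-10
-     True
-     >>> abs((w * x**2).sum() - np.sqrt(2*np.pi)) < 1e-10
-     True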
- - """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") - - # first approximation of roots. We use the fact that the companion - # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) - m = hermecompanion(c) - x = la.eigvals(m) - x.sort() - - # improve roots by one application of Newton - dy = hermeval(x, c) - df = hermeval(x, hermeder(c)) - x -= dy/df - - # compute the weights. We scale the factor to avoid possible numerical - # overflow. - fm = hermeval(x, c[1:]) - fm /= np.abs(fm).max() - df /= np.abs(df).max() - w = 1/(fm * df) - - # for Hermite_e we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 - - # scale w to get the right value - w *= np.sqrt(2*np.pi) / w.sum() - - return x, w - - -def hermeweight(x): - """Weight function of the Hermite_e polynomials. - - The weight function is :math:`\exp(-x^2/2)` and the interval of - integration is :math:`[-\inf, \inf]`. the HermiteE polynomials are - orthogonal, but not normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded::1.7.0 - - """ - w = np.exp(-.5*x**2) - return w - - -# -# HermiteE series class -# - -class HermiteE(ABCPolyBase): - """An HermiteE series class. - - The HermiteE class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Laguerre coefficients in order of increasing degree, i.e, - ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(hermeadd) - _sub = staticmethod(hermesub) - _mul = staticmethod(hermemul) - _div = staticmethod(hermediv) - _pow = staticmethod(hermepow) - _val = staticmethod(hermeval) - _int = staticmethod(hermeint) - _der = staticmethod(hermeder) - _fit = staticmethod(hermefit) - _line = staticmethod(hermeline) - _roots = staticmethod(hermeroots) - _fromroots = staticmethod(hermefromroots) - - # Virtual properties - nickname = 'herme' - domain = np.array(hermedomain) - window = np.array(hermedomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/laguerre.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/laguerre.py deleted file mode 100644 index 8d2705d5d3143..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/laguerre.py +++ /dev/null @@ -1,1781 +0,0 @@ -""" -Objects for dealing with Laguerre series. - -This module provides a number of objects (mostly functions) useful for -dealing with Laguerre series, including a `Laguerre` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `lagdomain` -- Laguerre series default domain, [-1,1]. -- `lagzero` -- Laguerre series that evaluates identically to 0. 
-- `lagone` -- Laguerre series that evaluates identically to 1. -- `lagx` -- Laguerre series for the identity map, ``f(x) = x``. - -Arithmetic ----------- -- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``. -- `lagadd` -- add two Laguerre series. -- `lagsub` -- subtract one Laguerre series from another. -- `lagmul` -- multiply two Laguerre series. -- `lagdiv` -- divide one Laguerre series by another. -- `lagval` -- evaluate a Laguerre series at given points. -- `lagval2d` -- evaluate a 2D Laguerre series at given points. -- `lagval3d` -- evaluate a 3D Laguerre series at given points. -- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product. -- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product. - -Calculus --------- -- `lagder` -- differentiate a Laguerre series. -- `lagint` -- integrate a Laguerre series. - -Misc Functions --------------- -- `lagfromroots` -- create a Laguerre series with specified roots. -- `lagroots` -- find the roots of a Laguerre series. -- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials. -- `lagvander2d` -- Vandermonde-like matrix for 2D power series. -- `lagvander3d` -- Vandermonde-like matrix for 3D power series. -- `laggauss` -- Gauss-Laguerre quadrature, points and weights. -- `lagweight` -- Laguerre weight function. -- `lagcompanion` -- symmetrized companion matrix in Laguerre form. -- `lagfit` -- least-squares fit returning a Laguerre series. -- `lagtrim` -- trim leading coefficients from a Laguerre series. -- `lagline` -- Laguerre series of given straight line. -- `lag2poly` -- convert a Laguerre series to a polynomial. -- `poly2lag` -- convert a polynomial to a Laguerre series. - -Classes -------- -- `Laguerre` -- A Laguerre series class. - -See also --------- -`numpy.polynomial` - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', - 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', - 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', - 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', - 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', - 'laggauss', 'lagweight'] - -lagtrim = pu.trimcoef - - -def poly2lag(pol): - """ - poly2lag(pol) - - Convert a polynomial to a Laguerre series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Laguerre series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Laguerre - series. - - See Also - -------- - lag2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.laguerre import poly2lag - >>> poly2lag(np.arange(4)) - array([ 23., -63., 58., -18.]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = lagadd(lagmulx(res), pol[i]) - return res - - -def lag2poly(c): - """ - Convert a Laguerre series to a polynomial. 
- - Convert an array representing the coefficients of a Laguerre series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Laguerre series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2lag - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lag2poly - >>> lag2poly([ 23., -63., 58., -18.]) - array([ 0., 1., 2., 3.]) - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n == 1: - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i) - return polyadd(c0, polysub(c1, polymulx(c1))) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Laguerre -lagdomain = np.array([0, 1]) - -# Laguerre coefficients representing zero. -lagzero = np.array([0]) - -# Laguerre coefficients representing one. -lagone = np.array([1]) - -# Laguerre coefficients representing the identity x. -lagx = np.array([1, -1]) - - -def lagline(off, scl): - """ - Laguerre series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Laguerre series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagline, lagval - >>> lagval(0,lagline(3, 2)) - 3.0 - >>> lagval(1,lagline(3, 2)) - 5.0 - - """ - if scl != 0: - return np.array([off + scl, -scl]) - else: - return np.array([off]) - - -def lagfromroots(roots): - """ - Generate a Laguerre series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in Laguerre form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in Laguerre form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, legfromroots, chebfromroots, hermfromroots, - hermefromroots. 
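-
-     For instance, the leading Laguerre coefficient of a monic cubic is
-     -6 rather than 1 (a quick illustration of the note above):
-
-     >>> lagfromroots((0, 1, 2))[-1]
-     -6.0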
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagfromroots, lagval - >>> coef = lagfromroots((-1, 0, 1)) - >>> lagval((-1, 0, 1), coef) - array([ 0., 0., 0.]) - >>> coef = lagfromroots((-1j, 1j)) - >>> lagval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) - - """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [lagline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [lagmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = lagmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def lagadd(c1, c2): - """ - Add one Laguerre series to another. - - Returns the sum of two Laguerre series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Laguerre series of their sum. - - See Also - -------- - lagsub, lagmul, lagdiv, lagpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Laguerre series - is a Laguerre series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagadd - >>> lagadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) - - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def lagsub(c1, c2): - """ - Subtract one Laguerre series from another. - - Returns the difference of two Laguerre series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Laguerre series coefficients representing their difference. - - See Also - -------- - lagadd, lagmul, lagdiv, lagpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Laguerre - series is a Laguerre series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagsub - >>> lagsub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def lagmulx(c): - """Multiply a Laguerre series by x. - - Multiply the Laguerre series `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Laguerre - polynomials in the form - - .. 
math:: - - xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagmulx - >>> lagmulx([1, 2, 3]) - array([ -1., -1., 11., -9.]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0] - prd[1] = -c[0] - for i in range(1, len(c)): - prd[i + 1] = -c[i]*(i + 1) - prd[i] += c[i]*(2*i + 1) - prd[i - 1] -= c[i]*i - return prd - - -def lagmul(c1, c2): - """ - Multiply one Laguerre series by another. - - Returns the product of two Laguerre series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Laguerre series coefficients representing their product. - - See Also - -------- - lagadd, lagsub, lagdiv, lagpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Laguerre polynomial basis set. Thus, to express - the product as a Laguerre series, it is necessary to "reproject" the - product onto said basis set, which may produce "unintuitive" (but - correct) results; see Examples section below. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagmul - >>> lagmul([1, 2, 3], [0, 1, 2]) - array([ 8., -13., 38., -51., 36.]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - c = c2 - xs = c1 - else: - c = c1 - xs = c2 - - if len(c) == 1: - c0 = c[0]*xs - c1 = 0 - elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs - else: - nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) - c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) - return lagadd(c0, lagsub(c1, lagmulx(c1))) - - -def lagdiv(c1, c2): - """ - Divide one Laguerre series by another. - - Returns the quotient-with-remainder of two Laguerre series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Laguerre series coefficients ordered from low to - high. - - Returns - ------- - [quo, rem] : ndarrays - Of Laguerre series coefficients representing the quotient and - remainder. - - See Also - -------- - lagadd, lagsub, lagmul, lagpow - - Notes - ----- - In general, the (polynomial) division of one Laguerre series by another - results in quotient and remainder terms that are not in the Laguerre - polynomial basis set. Thus, to express these results as a Laguerre - series, it is necessary to "reproject" the results onto the Laguerre - basis set, which may produce "unintuitive" (but correct) results; see - Examples section below. 
- - Examples - -------- - >>> from numpy.polynomial.laguerre import lagdiv - >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) - >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 1.])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = lagmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) - - -def lagpow(c, pow, maxpower=16): - """Raise a Laguerre series to a power. - - Returns the Laguerre series `c` raised to the power `pow`. The - argument `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Laguerre series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Laguerre series of power. - - See Also - -------- - lagadd, lagsub, lagmul, lagdiv - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagpow - >>> lagpow([1, 2, 3], 2) - array([ 14., -16., 56., -72., 54.]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - prd = c - for i in range(2, power + 1): - prd = lagmul(prd, c) - return prd - - -def lagder(c, m=1, scl=1, axis=0): - """ - Differentiate a Laguerre series. - - Returns the Laguerre series coefficients `c` differentiated `m` times - along `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` - while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + - 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is - ``y``. - - Parameters - ---------- - c : array_like - Array of Laguerre series coefficients. If `c` is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Laguerre series of the derivative. - - See Also - -------- - lagint - - Notes - ----- - In general, the result of differentiating a Laguerre series does not - resemble the same operation on a power series. 
Thus the result of this - function may be "unintuitive," albeit correct; see Examples section - below. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagder - >>> lagder([ 1., 1., 1., -3.]) - array([ 1., 2., 3.]) - >>> lagder([ 1., 0., 0., -4., 3.], m=2) - array([ 1., 2., 3.]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 1, -1): - der[j - 1] = -c[j] - c[j - 1] += c[j] - der[0] = -c[1] - c = der - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Laguerre series. - - Returns the Laguerre series coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] - represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + - 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - - Parameters - ---------- - c : array_like - Array of Laguerre series coefficients. If `c` is multidimensional - the different axis correspond to different variables with the - degree in each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Laguerre series coefficients of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - lagder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - .. 
math::`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagint - >>> lagint([1,2,3]) - array([ 1., 1., 1., -3.]) - >>> lagint([1,2,3], m=2) - array([ 1., 0., 0., -4., 3.]) - >>> lagint([1,2,3], k=1) - array([ 2., 1., 1., -3.]) - >>> lagint([1,2,3], lbnd=-1) - array([ 11.5, 1. , 1. , -3. ]) - >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) - array([ 11.16666667, -5. , -3. , 2. ]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0] - tmp[1] = -c[0] - for j in range(1, n): - tmp[j] += c[j] - tmp[j + 1] = -c[j] - tmp[0] += k[i] - lagval(lbnd, tmp) - c = tmp - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def lagval(x, c, tensor=True): - """ - Evaluate a Laguerre series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. 
The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - lagval2d, laggrid2d, lagval3d, laggrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagval - >>> coef = [1,2,3] - >>> lagval(1, coef) - -0.5 - >>> lagval([[1,2],[3,4]], coef) - array([[-0.5, -4. ], - [-4.5, -2. ]]) - - """ - c = np.array(c, ndmin=1, copy=0) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - nd = len(c) - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = c[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*((2*nd - 1) - x))/nd - return c0 + c1*(1 - x) - - -def lagval2d(x, y, c): - """ - Evaluate a 2-D Laguerre series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points formed with - pairs of corresponding values from `x` and `y`. - - See Also - -------- - lagval, laggrid2d, lagval3d, laggrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y = np.array((x, y), copy=0) - except: - raise ValueError('x, y are incompatible') - - c = lagval(x, c) - c = lagval(y, c, tensor=False) - return c - - -def laggrid2d(x, y, c): - """ - Evaluate a 2-D Laguerre series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b) - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. 
In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape + y.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j is contained in `c[i,j]`. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Chebyshev series at points in the - Cartesian product of `x` and `y`. - - See Also - -------- - lagval, lagval2d, lagval3d, laggrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = lagval(x, c) - c = lagval(y, c) - return c - - -def lagval3d(x, y, z, c): - """ - Evaluate a 3-D Laguerre series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimension polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - lagval, lagval2d, laggrid2d, laggrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y, z = np.array((x, y, z), copy=0) - except: - raise ValueError('x, y, z are incompatible') - - c = lagval(x, c) - c = lagval(y, c, tensor=False) - c = lagval(z, c, tensor=False) - return c - - -def laggrid3d(x, y, z, c): - """ - Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. 
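-
-     As a shape sketch (with arbitrary zero coefficients), the grid
-     evaluation returns ``x.shape + y.shape + z.shape``:
-
-     >>> x, y, z = np.ones(2), np.ones(3), np.ones(5)
-     >>> laggrid3d(x, y, z, np.zeros((2, 2, 2))).shape
-     (2, 3, 5)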
- - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - lagval, lagval2d, laggrid2d, lagval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = lagval(x, c) - c = lagval(y, c) - c = lagval(z, c) - return c - - -def lagvander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., i] = L_i(x) - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the degree of the Laguerre polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and - ``lagval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of Laguerre series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo-Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding Laguerre polynomial. The dtype will be the same as - the converted `x`. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagvander - >>> x = np.array([0, 1, 2]) - >>> lagvander(x, 3) - array([[ 1. , 1. , 1. , 1. ], - [ 1. , 0. , -0.5 , -0.66666667], - [ 1. , -1. , -1. , -0.33333333]]) - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 - if ideg > 0: - v[1] = 1 - x - for i in range(2, ideg + 1): - v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i - return np.rollaxis(v, 0, v.ndim) - - -def lagvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. 
math:: V[..., deg[1]*i + j] = L_i(x) * L_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the Laguerre polynomials. - - If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D Laguerre - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - lagvander, lagvander3d. lagval2d, lagval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = lagvander(x, degx) - vy = lagvander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) - - -def lagvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the Laguerre polynomials. - - If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D Laguerre - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - lagvander, lagvander3d. lagval2d, lagval3d - - Notes - ----- - - .. 
versionadded::1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = lagvander(x, degx) - vy = lagvander(y, degy) - vz = lagvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) - - -def lagfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Laguerre series to data. - - Return the coefficients of a Laguerre series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Laguerre coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, legfit, polyfit, hermfit, hermefit - lagval : Evaluates a Laguerre series. - lagvander : pseudo Vandermonde matrix of Laguerre series. - lagweight : Laguerre weight function. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. 
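-
-     A small sketch of the weighted fit recommended in the Notes below,
-     using ``sqrt(lagweight(x))`` as the weights (illustrative only):
-
-     >>> x = np.linspace(0, 10, 100)
-     >>> y = lagval(x, [1, 2, 3])
-     >>> coef = lagfit(x, y, 2, w=np.sqrt(lagweight(x)))
-     >>> np.allclose(coef, [1, 2, 3])
-     True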
- - Notes - ----- - The solution is the coefficients of the Laguerre series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up as the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Laguerre series are probably most useful when the data can - be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre - weight. In that case the weight ``sqrt(w(x[i])`` should be used - together with data values ``y[i]/sqrt(w(x[i])``. The weight function is - available as `lagweight`. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagfit, lagval - >>> x = np.linspace(0, 10) - >>> err = np.random.randn(len(x))/10 - >>> y = lagval(x, [1, 2, 3]) + err - >>> lagfit(x, y, 2) - array([ 0.96971004, 2.00193749, 3.00288744]) - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. - if deg < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - # set up the least squares matrices in transposed form - lhs = lagvander(x, deg).T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full: - return c, [resids, rank, s, rcond] - else: - return c - - -def lagcompanion(c): - """ - Return the companion matrix of c. - - The usual companion matrix of the Laguerre polynomials is already - symmetric when `c` is a basis Laguerre polynomial, so no scaling is - applied. - - Parameters - ---------- - c : array_like - 1-D array of Laguerre series coefficients ordered from low to high - degree. 
- - Returns - ------- - mat : ndarray - Companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded::1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[1 + c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - top = mat.reshape(-1)[1::n+1] - mid = mat.reshape(-1)[0::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = -np.arange(1, n) - mid[...] = 2.*np.arange(n) + 1. - bot[...] = top - mat[:, -1] += (c[:-1]/c[-1])*n - return mat - - -def lagroots(c): - """ - Compute the roots of a Laguerre series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * L_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, legroots, chebroots, hermroots, hermeroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - The Laguerre series basis polynomials aren't powers of `x` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> from numpy.polynomial.laguerre import lagroots, lagfromroots - >>> coef = lagfromroots([0, 1, 2]) - >>> coef - array([ 2., -8., 12., -6.]) - >>> lagroots(coef) - array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) <= 1: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([1 + c[0]/c[1]]) - - m = lagcompanion(c) - r = la.eigvals(m) - r.sort() - return r - - -def laggauss(deg): - """ - Gauss-Laguerre quadrature. - - Computes the sample points and weights for Gauss-Laguerre quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[0, \inf]` - with the weight function :math:`f(x) = \exp(-x)`. - - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. versionadded::1.7.0 - - The results have only been tested up to degree 100 higher degrees may - be problematic. The weights are determined by using the fact that - - .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) - - where :math:`c` is a constant independent of :math:`k` and :math:`x_k` - is the k'th root of :math:`L_n`, and then scaling the results to get - the right value when integrating 1. - - """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") - - # first approximation of roots. We use the fact that the companion - # matrix is symmetric in this case in order to obtain better zeros. 
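-     # c = [0]*deg + [1] selects the pure basis polynomial L_n, whose
-     # roots are the quadrature nodes; for this coefficient vector the
-     # companion matrix is symmetric tridiagonal, so the eigenvalues
-     # used as first root estimates are real and well conditioned.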
- c = np.array([0]*deg + [1]) - m = lagcompanion(c) - x = la.eigvals(m) - x.sort() - - # improve roots by one application of Newton - dy = lagval(x, c) - df = lagval(x, lagder(c)) - x -= dy/df - - # compute the weights. We scale the factor to avoid possible numerical - # overflow. - fm = lagval(x, c[1:]) - fm /= np.abs(fm).max() - df /= np.abs(df).max() - w = 1/(fm * df) - - # scale w to get the right value, 1 in this case - w /= w.sum() - - return x, w - - -def lagweight(x): - """Weight function of the Laguerre polynomials. - - The weight function is :math:`exp(-x)` and the interval of integration - is :math:`[0, \inf]`. The Laguerre polynomials are orthogonal, but not - normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded::1.7.0 - - """ - w = np.exp(-x) - return w - -# -# Laguerre series class -# - -class Laguerre(ABCPolyBase): - """A Laguerre series class. - - The Laguerre class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Laguerre coefficients in order of increasing degree, i.e, - ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(X) + 3*L_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [0, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [0, 1]. - - .. versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(lagadd) - _sub = staticmethod(lagsub) - _mul = staticmethod(lagmul) - _div = staticmethod(lagdiv) - _pow = staticmethod(lagpow) - _val = staticmethod(lagval) - _int = staticmethod(lagint) - _der = staticmethod(lagder) - _fit = staticmethod(lagfit) - _line = staticmethod(lagline) - _roots = staticmethod(lagroots) - _fromroots = staticmethod(lagfromroots) - - # Virtual properties - nickname = 'lag' - domain = np.array(lagdomain) - window = np.array(lagdomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/legendre.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/legendre.py deleted file mode 100644 index d2de282692d8d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/legendre.py +++ /dev/null @@ -1,1809 +0,0 @@ -""" -Legendre Series (:mod: `numpy.polynomial.legendre`) -=================================================== - -.. currentmodule:: numpy.polynomial.polynomial - -This module provides a number of objects (mostly functions) useful for -dealing with Legendre series, including a `Legendre` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with such polynomials is in the -docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- - -.. autosummary:: - :toctree: generated/ - - legdomain Legendre series default domain, [-1,1]. - legzero Legendre series that evaluates identically to 0. - legone Legendre series that evaluates identically to 1. - legx Legendre series for the identity map, ``f(x) = x``. - -Arithmetic ----------- - -.. autosummary:: - :toctree: generated/ - - legmulx multiply a Legendre series in P_i(x) by x. 
- legadd add two Legendre series. - legsub subtract one Legendre series from another. - legmul multiply two Legendre series. - legdiv divide one Legendre series by another. - legpow raise a Legendre series to an positive integer power - legval evaluate a Legendre series at given points. - legval2d evaluate a 2D Legendre series at given points. - legval3d evaluate a 3D Legendre series at given points. - leggrid2d evaluate a 2D Legendre series on a Cartesian product. - leggrid3d evaluate a 3D Legendre series on a Cartesian product. - -Calculus --------- - -.. autosummary:: - :toctree: generated/ - - legder differentiate a Legendre series. - legint integrate a Legendre series. - -Misc Functions --------------- - -.. autosummary:: - :toctree: generated/ - - legfromroots create a Legendre series with specified roots. - legroots find the roots of a Legendre series. - legvander Vandermonde-like matrix for Legendre polynomials. - legvander2d Vandermonde-like matrix for 2D power series. - legvander3d Vandermonde-like matrix for 3D power series. - leggauss Gauss-Legendre quadrature, points and weights. - legweight Legendre weight function. - legcompanion symmetrized companion matrix in Legendre form. - legfit least-squares fit returning a Legendre series. - legtrim trim leading coefficients from a Legendre series. - legline Legendre series representing given straight line. - leg2poly convert a Legendre series to a polynomial. - poly2leg convert a polynomial to a Legendre series. - -Classes -------- - Legendre A Legendre series class. - -See also --------- -numpy.polynomial.polynomial -numpy.polynomial.chebyshev -numpy.polynomial.laguerre -numpy.polynomial.hermite -numpy.polynomial.hermite_e - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import numpy as np -import numpy.linalg as la - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -__all__ = [ - 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', - 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', - 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', - 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', - 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', - 'leggauss', 'legweight'] - -legtrim = pu.trimcoef - - -def poly2leg(pol): - """ - Convert a polynomial to a Legendre series. - - Convert an array representing the coefficients of a polynomial (relative - to the "standard" basis) ordered from lowest degree to highest, to an - array of the coefficients of the equivalent Legendre series, ordered - from lowest to highest degree. - - Parameters - ---------- - pol : array_like - 1-D array containing the polynomial coefficients - - Returns - ------- - c : ndarray - 1-D array containing the coefficients of the equivalent Legendre - series. - - See Also - -------- - leg2poly - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> from numpy import polynomial as P - >>> p = P.Polynomial(np.arange(4)) - >>> p - Polynomial([ 0., 1., 2., 3.], [-1., 1.]) - >>> c = P.Legendre(P.poly2leg(p.coef)) - >>> c - Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.]) - - """ - [pol] = pu.as_series([pol]) - deg = len(pol) - 1 - res = 0 - for i in range(deg, -1, -1): - res = legadd(legmulx(res), pol[i]) - return res - - -def leg2poly(c): - """ - Convert a Legendre series to a polynomial. 
- - Convert an array representing the coefficients of a Legendre series, - ordered from lowest degree to highest, to an array of the coefficients - of the equivalent polynomial (relative to the "standard" basis) ordered - from lowest to highest degree. - - Parameters - ---------- - c : array_like - 1-D array containing the Legendre series coefficients, ordered - from lowest order term to highest. - - Returns - ------- - pol : ndarray - 1-D array containing the coefficients of the equivalent polynomial - (relative to the "standard" basis) ordered from lowest order term - to highest. - - See Also - -------- - poly2leg - - Notes - ----- - The easy way to do conversions between polynomial basis sets - is to use the convert method of a class instance. - - Examples - -------- - >>> c = P.Legendre(range(4)) - >>> c - Legendre([ 0., 1., 2., 3.], [-1., 1.]) - >>> p = c.convert(kind=P.Polynomial) - >>> p - Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.]) - >>> P.leg2poly(range(4)) - array([-1. , -3.5, 3. , 7.5]) - - - """ - from .polynomial import polyadd, polysub, polymulx - - [c] = pu.as_series([c]) - n = len(c) - if n < 3: - return c - else: - c0 = c[-2] - c1 = c[-1] - # i is the current degree of c1 - for i in range(n - 1, 1, -1): - tmp = c0 - c0 = polysub(c[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) - return polyadd(c0, polymulx(c1)) - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Legendre -legdomain = np.array([-1, 1]) - -# Legendre coefficients representing zero. -legzero = np.array([0]) - -# Legendre coefficients representing one. -legone = np.array([1]) - -# Legendre coefficients representing the identity x. -legx = np.array([0, 1]) - - -def legline(off, scl): - """ - Legendre series whose graph is a straight line. - - - - Parameters - ---------- - off, scl : scalars - The specified line is given by ``off + scl*x``. - - Returns - ------- - y : ndarray - This module's representation of the Legendre series for - ``off + scl*x``. - - See Also - -------- - polyline, chebline - - Examples - -------- - >>> import numpy.polynomial.legendre as L - >>> L.legline(3,2) - array([3, 2]) - >>> L.legval(-3, L.legline(3,2)) # should be -3 - -3.0 - - """ - if scl != 0: - return np.array([off, scl]) - else: - return np.array([off]) - - -def legfromroots(roots): - """ - Generate a Legendre series with given roots. - - The function returns the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - in Legendre form, where the `r_n` are the roots specified in `roots`. - If a zero has multiplicity n, then it must appear in `roots` n times. - For instance, if 2 is a root of multiplicity three and 3 is a root of - multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The - roots can appear in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) - - The coefficient of the last term is not generally 1 for monic - polynomials in Legendre form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of coefficients. If all roots are real then `out` is a - real array, if some of the roots are complex, then `out` is complex - even if all the coefficients in the result are real (see Examples - below). - - See Also - -------- - polyfromroots, chebfromroots, lagfromroots, hermfromroots, - hermefromroots. 
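The `poly2leg`/`leg2poly` pair removed above are exact inverses up to floating-point roundoff, which gives a quick sanity check on both. A minimal doctest-style sketch, assuming only that the public `numpy.polynomial.legendre` namespace is importable:

>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> c = (0., 1., 2., 3.)
>>> np.allclose(L.poly2leg(L.leg2poly(c)), c)  # round trip through the power basis
True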
- - Examples - -------- - >>> import numpy.polynomial.legendre as L - >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis - array([ 0. , -0.4, 0. , 0.4]) - >>> j = complex(0,1) - >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis - array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) - - """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [legline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [legmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = legmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def legadd(c1, c2): - """ - Add one Legendre series to another. - - Returns the sum of two Legendre series `c1` + `c2`. The arguments - are sequences of coefficients ordered from lowest order term to - highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the Legendre series of their sum. - - See Also - -------- - legsub, legmul, legdiv, legpow - - Notes - ----- - Unlike multiplication, division, etc., the sum of two Legendre series - is a Legendre series (without having to "reproject" the result onto - the basis set) so addition, just like that of "standard" polynomials, - is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legadd(c1,c2) - array([ 4., 4., 4.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def legsub(c1, c2): - """ - Subtract one Legendre series from another. - - Returns the difference of two Legendre series `c1` - `c2`. The - sequences of coefficients are from lowest order term to highest, i.e., - [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Legendre series coefficients representing their difference. - - See Also - -------- - legadd, legmul, legdiv, legpow - - Notes - ----- - Unlike multiplication, division, etc., the difference of two Legendre - series is a Legendre series (without having to "reproject" the result - onto the basis set) so subtraction, just like that of "standard" - polynomials, is simply "component-wise." - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legsub(c1,c2) - array([-2., 0., 2.]) - >>> L.legsub(c2,c1) # -C.legsub(c1,c2) - array([ 2., 0., -2.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def legmulx(c): - """Multiply a Legendre series by x. - - Multiply the Legendre series `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - The multiplication uses the recursion relationship for Legendre - polynomials in the form - - .. 
math:: - - xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0] - for i in range(1, len(c)): - j = i + 1 - k = i - 1 - s = i + j - prd[j] = (c[i]*j)/s - prd[k] += (c[i]*i)/s - return prd - - -def legmul(c1, c2): - """ - Multiply one Legendre series by another. - - Returns the product of two Legendre series `c1` * `c2`. The arguments - are sequences of coefficients, from lowest order "term" to highest, - e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of Legendre series coefficients representing their product. - - See Also - -------- - legadd, legsub, legdiv, legpow - - Notes - ----- - In general, the (polynomial) product of two C-series results in terms - that are not in the Legendre polynomial basis set. Thus, to express - the product as a Legendre series, it is necessary to "reproject" the - product onto said basis set, which may produce "unintuitive" (but - correct) results; see Examples section below. - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2) - >>> P.legmul(c1,c2) # multiplication requires "reprojection" - array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) - - """ - # s1, s2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - - if len(c1) > len(c2): - c = c2 - xs = c1 - else: - c = c1 - xs = c2 - - if len(c) == 1: - c0 = c[0]*xs - c1 = 0 - elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs - else: - nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) - c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) - return legadd(c0, legmulx(c1)) - - -def legdiv(c1, c2): - """ - Divide one Legendre series by another. - - Returns the quotient-with-remainder of two Legendre series - `c1` / `c2`. The arguments are sequences of coefficients from lowest - order "term" to highest, e.g., [1,2,3] represents the series - ``P_0 + 2*P_1 + 3*P_2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of Legendre series coefficients ordered from low to - high. - - Returns - ------- - quo, rem : ndarrays - Of Legendre series coefficients representing the quotient and - remainder. - - See Also - -------- - legadd, legsub, legmul, legpow - - Notes - ----- - In general, the (polynomial) division of one Legendre series by another - results in quotient and remainder terms that are not in the Legendre - polynomial basis set. Thus, to express these results as a Legendre - series, it is necessary to "reproject" the results onto the Legendre - basis set, which may produce "unintuitive" (but correct) results; see - Examples section below. 
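Although `legdiv` reprojects onto the Legendre basis, the quotient and remainder it returns still satisfy the usual identity ``c1 == quo*c2 + rem``, which can be verified with the sibling functions. A small sketch, under the same public-API assumption as above:

>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> c1, c2 = (1, 2, 3), (3, 2, 1)
>>> quo, rem = L.legdiv(c1, c2)
>>> np.allclose(L.legadd(L.legmul(quo, c2), rem), c1)  # c1 == quo*c2 + rem
True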
- - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not - (array([ 3.]), array([-8., -4.])) - >>> c2 = (0,1,2,3) - >>> L.legdiv(c2,c1) # neither "intuitive" - (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = legmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) - - -def legpow(c, pow, maxpower=16): - """Raise a Legendre series to a power. - - Returns the Legendre series `c` raised to the power `pow`. The - arguement `c` is a sequence of coefficients ordered from low to high. - i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` - - Parameters - ---------- - c : array_like - 1-D array of Legendre series coefficients ordered from low to - high. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Legendre series of power. - - See Also - -------- - legadd, legsub, legmul, legdiv - - Examples - -------- - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - prd = c - for i in range(2, power + 1): - prd = legmul(prd, c) - return prd - - -def legder(c, m=1, scl=1, axis=0): - """ - Differentiate a Legendre series. - - Returns the Legendre series coefficients `c` differentiated `m` times - along `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The argument - `c` is an array of coefficients from low to high degree along each - axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` - while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + - 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is - ``y``. - - Parameters - ---------- - c : array_like - Array of Legendre series coefficients. If c is multidimensional the - different axis correspond to different variables with the degree in - each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change of - variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Legendre series of the derivative. - - See Also - -------- - legint - - Notes - ----- - In general, the result of differentiating a Legendre series does not - resemble the same operation on a power series. 
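One way to see that the "unintuitive" coefficients from `legder` are nevertheless correct is to differentiate in the power basis instead and compare values; a doctest-style sketch using the companion `numpy.polynomial.polynomial` module:

>>> import numpy as np
>>> from numpy.polynomial import legendre as L, polynomial as P
>>> c = (1, 2, 3, 4)
>>> x = np.linspace(-1, 1, 7)
>>> # same derivative, computed once in the Legendre basis and once in the power basis
>>> np.allclose(L.legval(x, L.legder(c)), P.polyval(x, P.polyder(L.leg2poly(c))))
True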
Thus the result of this - function may be "unintuitive," albeit correct; see Examples section - below. - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c = (1,2,3,4) - >>> L.legder(c) - array([ 6., 9., 20.]) - >>> L.legder(c, 3) - array([ 60.]) - >>> L.legder(c, scl=-1) - array([ -6., -9., -20.]) - >>> L.legder(c, 2,-1) - array([ 9., 60.]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=c.dtype) - for j in range(n, 2, -1): - der[j - 1] = (2*j - 1)*c[j] - c[j - 2] += c[j] - if n > 1: - der[1] = 3*c[2] - der[0] = c[1] - c = der - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a Legendre series. - - Returns the Legendre series coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. ("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients from low to high degree along each axis, e.g., [1,2,3] - represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] - represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + - 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of Legendre series coefficients. If c is multidimensional the - different axis correspond to different variables with the degree in - each axis given by the corresponding index. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at - ``lbnd`` is the first value in the list, the value of the second - integral at ``lbnd`` is the second value, etc. If ``k == []`` (the - default), all constants are set to zero. If ``m == 1``, a single - scalar can be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Legendre series coefficient array of the integral. - - Raises - ------ - ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. - - See Also - -------- - legder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. - Why is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. 
Then - .. math::`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Also note that, in general, the result of integrating a C-series needs - to be "reprojected" onto the C-series basis set. Thus, typically, - the result of this function is "unintuitive," albeit correct; see - Examples section below. - - Examples - -------- - >>> from numpy.polynomial import legendre as L - >>> c = (1,2,3) - >>> L.legint(c) - array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) - >>> L.legint(c, 3) - array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, - -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) - >>> L.legint(c, k=3) - array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) - >>> L.legint(c, lbnd=-2) - array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) - >>> L.legint(c, scl=2) - array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if not np.iterable(k): - k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - k = list(k) + [0]*(cnt - len(k)) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0] - if n > 1: - tmp[2] = c[1]/3 - for j in range(2, n): - t = c[j]/(2*j + 1) - tmp[j + 1] = t - tmp[j - 1] -= t - tmp[0] += k[i] - legval(lbnd, tmp) - c = tmp - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def legval(x, c, tensor=True): - """ - Evaluate a Legendre series at points x. - - If `c` is of length `n + 1`, this function returns the value: - - .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. 
- tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, algebra_like - The shape of the return value is described above. - - See Also - -------- - legval2d, leggrid2d, legval3d, leggrid3d - - Notes - ----- - The evaluation uses Clenshaw recursion, aka synthetic division. - - Examples - -------- - - """ - c = np.array(c, ndmin=1, copy=0) - if c.dtype.char in '?bBhHiIlLqQpP': - c = c.astype(np.double) - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - if len(c) == 1: - c0 = c[0] - c1 = 0 - elif len(c) == 2: - c0 = c[0] - c1 = c[1] - else: - nd = len(c) - c0 = c[-2] - c1 = c[-1] - for i in range(3, len(c) + 1): - tmp = c0 - nd = nd - 1 - c0 = c[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*x*(2*nd - 1))/nd - return c0 + c1*x - - -def legval2d(x, y, c): - """ - Evaluate a 2-D Legendre series at points (x, y). - - This function returns the values: - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. - - If `c` is a 1-D array a one is implicitly appended to its shape to make - it 2-D. The shape of the result will be c.shape[2:] + x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and if it isn't an ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in ``c[i,j]``. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Legendre series at points formed - from pairs of corresponding values from `x` and `y`. - - See Also - -------- - legval, leggrid2d, legval3d, leggrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y = np.array((x, y), copy=0) - except: - raise ValueError('x, y are incompatible') - - c = legval(x, c) - c = legval(y, c, tensor=False) - return c - - -def leggrid2d(x, y, c): - """ - Evaluate a 2-D Legendre series on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b) - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. 
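The `tensor` keyword described above is easiest to see through result shapes; a small sketch with a hypothetical two-column coefficient array (the shapes, not the values, are the point here):

>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> c = np.ones((3, 2))                            # two sets of degree-2 coefficients
>>> L.legval(np.linspace(-1, 1, 5), c).shape       # tensor=True: c.shape[1:] + x.shape
(2, 5)
>>> L.legval(np.zeros(2), c, tensor=False).shape   # x is broadcast over the columns
(2,)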
In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape + y.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j is contained in `c[i,j]`. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional Chebyshev series at points in the - Cartesian product of `x` and `y`. - - See Also - -------- - legval, legval2d, legval3d, leggrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = legval(x, c) - c = legval(y, c) - return c - - -def legval3d(x, y, z, c): - """ - Evaluate a 3-D Legendre series at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. - - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - legval, legval2d, leggrid2d, leggrid3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - try: - x, y, z = np.array((x, y, z), copy=0) - except: - raise ValueError('x, y, z are incompatible') - - c = legval(x, c) - c = legval(y, c, tensor=False) - c = legval(z, c, tensor=False) - return c - - -def leggrid3d(x, y, z, c): - """ - Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. 
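These grid semantics can be cross-checked against the pointwise evaluators: evaluating on the Cartesian product should agree with `legval2d`/`legval3d` applied to explicitly meshed coordinates. A sketch in two dimensions for brevity:

>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> c = np.arange(6.0).reshape(2, 3)
>>> x, y = np.array([0.1, 0.2]), np.array([-0.3, 0.0, 0.4])
>>> X, Y = np.meshgrid(x, y, indexing='ij')        # x varies along the first axis
>>> np.allclose(L.leggrid2d(x, y, c), L.legval2d(X, Y, c))
True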
- - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - legval, legval2d, leggrid2d, legval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - c = legval(x, c) - c = legval(y, c) - c = legval(z, c) - return c - - -def legvander(x, deg): - """Pseudo-Vandermonde matrix of given degree. - - Returns the pseudo-Vandermonde matrix of degree `deg` and sample points - `x`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., i] = L_i(x) - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the degree of the Legendre polynomial. - - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and - ``legval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of Legendre series of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray - The pseudo-Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where The last index is the degree of the - corresponding Legendre polynomial. The dtype will be the same as - the converted `x`. - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - # Use forward recursion to generate the entries. This is not as accurate - # as reverse recursion in this application but it is more efficient. - v[0] = x*0 + 1 - if ideg > 0: - v[1] = x - for i in range(2, ideg + 1): - v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i - return np.rollaxis(v, 0, v.ndim) - - -def legvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. 
math:: V[..., deg[1]*i + j] = L_i(x) * L_j(y), - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the degrees of - the Legendre polynomials. - - If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D Legendre - series of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - legvander, legvander3d. legval2d, legval3d - - Notes - ----- - - .. versionadded::1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = legvander(x, degx) - vy = legvander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) - - -def legvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the degrees of the Legendre polynomials. - - If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D Legendre - series of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - legvander, legvander3d. legval2d, legval3d - - Notes - ----- - - .. 
versionadded::1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = legvander(x, degx) - vy = legvander(y, degy) - vz = legvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) - - -def legfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least squares fit of Legendre series to data. - - Return the coefficients of a Legendre series of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial - rcond : float, optional - Relative condition number of the fit. Singular values smaller than - this relative to the largest singular value will be ignored. The - default value is len(x)*eps, where eps is the relative precision of - the float type, about 2e-16 in most cases. - full : bool, optional - Switch determining nature of return value. When it is False (the - default) just the coefficients are returned, when True diagnostic - information from the singular value decomposition is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - .. versionadded:: 1.5.0 - - Returns - ------- - coef : ndarray, shape (M,) or (M, K) - Legendre coefficients ordered from low to high. If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Warns - ----- - RankWarning - The rank of the coefficient matrix in the least-squares fit is - deficient. The warning is only raised if `full` = False. The - warnings can be turned off by - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, polyfit, lagfit, hermfit, hermefit - legval : Evaluates a Legendre series. - legvander : Vandermonde matrix of Legendre series. - legweight : Legendre weight function (= 1). - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. 
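As a smoke test of the fitter being removed: data that are themselves a low-degree polynomial should be reproduced to roundoff once `deg` is large enough. A minimal sketch:

>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> x = np.linspace(-1, 1, 51)
>>> y = 1 - x + x**3                 # exact cubic, no noise
>>> c = L.legfit(x, y, 3)
>>> np.allclose(L.legval(x, c), y)   # the fit recovers the data exactly
True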
- - Notes - ----- - The solution is the coefficients of the Legendre series `p` that - minimizes the sum of the weighted squared errors - - .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where :math:`w_j` are the weights. This problem is solved by setting up - as the (typically) overdetermined matrix equation - - .. math:: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected, then a `RankWarning` will be issued. This means that the - coefficient values may be poorly determined. Using a lower order fit - will usually get rid of the warning. The `rcond` parameter can also be - set to a value smaller than its default, but the resulting fit may be - spurious and have large contributions from roundoff error. - - Fits using Legendre series are usually better conditioned than fits - using power series, but much can depend on the distribution of the - sample points and the smoothness of the data. If the quality of the fit - is inadequate splines may be a good alternative. - - References - ---------- - .. [1] Wikipedia, "Curve fitting", - http://en.wikipedia.org/wiki/Curve_fitting - - Examples - -------- - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. - if deg < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - # set up the least squares matrices in transposed form - lhs = legvander(x, deg).T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full: - return c, [resids, rank, s, rcond] - else: - return c - - -def legcompanion(c): - """Return the scaled companion matrix of c. - - The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Legendre basis polynomial. This provides - better eigenvalue estimates than the unscaled case and for basis - polynomials the eigenvalues are guaranteed to be real if - `numpy.linalg.eigvalsh` is used to obtain them. - - Parameters - ---------- - c : array_like - 1-D array of Legendre series coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. 
versionadded::1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - scl = 1./np.sqrt(2*np.arange(n) + 1) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] - bot[...] = top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) - return mat - - -def legroots(c): - """ - Compute the roots of a Legendre series. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * L_i(x). - - Parameters - ---------- - c : 1-D array_like - 1-D array of coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the series. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - polyroots, chebroots, lagroots, hermroots, hermeroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the series for such values. - Roots with multiplicity greater than 1 will also show larger errors as - the value of the series near such points is relatively insensitive to - errors in the roots. Isolated roots near the origin can be improved by - a few iterations of Newton's method. - - The Legendre series basis polynomials aren't powers of ``x`` so the - results of this function may seem unintuitive. - - Examples - -------- - >>> import numpy.polynomial.legendre as leg - >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots - array([-0.85099543, -0.11407192, 0.51506735]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-c[0]/c[1]]) - - m = legcompanion(c) - r = la.eigvals(m) - r.sort() - return r - - -def leggauss(deg): - """ - Gauss-Legendre quadrature. - - Computes the sample points and weights for Gauss-Legendre quadrature. - These sample points and weights will correctly integrate polynomials of - degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with - the weight function :math:`f(x) = 1`. - - Parameters - ---------- - deg : int - Number of sample points and weights. It must be >= 1. - - Returns - ------- - x : ndarray - 1-D ndarray containing the sample points. - y : ndarray - 1-D ndarray containing the weights. - - Notes - ----- - - .. versionadded::1.7.0 - - The results have only been tested up to degree 100, higher degrees may - be problematic. The weights are determined by using the fact that - - .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) - - where :math:`c` is a constant independent of :math:`k` and :math:`x_k` - is the k'th root of :math:`L_n`, and then scaling the results to get - the right value when integrating 1. - - """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") - - # first approximation of roots. We use the fact that the companion - # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) - m = legcompanion(c) - x = la.eigvals(m) - x.sort() - - # improve roots by one application of Newton - dy = legval(x, c) - df = legval(x, legder(c)) - x -= dy/df - - # compute the weights. We scale the factor to avoid possible numerical - # overflow. 
- fm = legval(x, c[1:]) - fm /= np.abs(fm).max() - df /= np.abs(df).max() - w = 1/(fm * df) - - # for Legendre we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 - - # scale w to get the right value - w *= 2. / w.sum() - - return x, w - - -def legweight(x): - """ - Weight function of the Legendre polynomials. - - The weight function is :math:`1` and the interval of integration is - :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not - normalized, with respect to this weight function. - - Parameters - ---------- - x : array_like - Values at which the weight function will be computed. - - Returns - ------- - w : ndarray - The weight function at `x`. - - Notes - ----- - - .. versionadded::1.7.0 - - """ - w = x*0.0 + 1.0 - return w - -# -# Legendre series class -# - -class Legendre(ABCPolyBase): - """A Legendre series class. - - The Legendre class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Legendre coefficients in order of increasing degree, i.e., - ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(legadd) - _sub = staticmethod(legsub) - _mul = staticmethod(legmul) - _div = staticmethod(legdiv) - _pow = staticmethod(legpow) - _val = staticmethod(legval) - _int = staticmethod(legint) - _der = staticmethod(legder) - _fit = staticmethod(legfit) - _line = staticmethod(legline) - _roots = staticmethod(legroots) - _fromroots = staticmethod(legfromroots) - - # Virtual properties - nickname = 'leg' - domain = np.array(legdomain) - window = np.array(legdomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polynomial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polynomial.py deleted file mode 100644 index 60e339a1d2ca3..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polynomial.py +++ /dev/null @@ -1,1532 +0,0 @@ -""" -Objects for dealing with polynomials. - -This module provides a number of objects (mostly functions) useful for -dealing with polynomials, including a `Polynomial` class that -encapsulates the usual arithmetic operations. (General information -on how this module represents and works with polynomial objects is in -the docstring for its "parent" sub-package, `numpy.polynomial`). - -Constants ---------- -- `polydomain` -- Polynomial default domain, [-1,1]. -- `polyzero` -- (Coefficients of the) "zero polynomial." -- `polyone` -- (Coefficients of the) constant polynomial 1. -- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``. - -Arithmetic ----------- -- `polyadd` -- add two polynomials. -- `polysub` -- subtract one polynomial from another. -- `polymul` -- multiply two polynomials. -- `polydiv` -- divide one polynomial by another. -- `polypow` -- raise a polynomial to an positive integer power -- `polyval` -- evaluate a polynomial at given points. -- `polyval2d` -- evaluate a 2D polynomial at given points. -- `polyval3d` -- evaluate a 3D polynomial at given points. 
-- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product. -- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product. - -Calculus --------- -- `polyder` -- differentiate a polynomial. -- `polyint` -- integrate a polynomial. - -Misc Functions --------------- -- `polyfromroots` -- create a polynomial with specified roots. -- `polyroots` -- find the roots of a polynomial. -- `polyvander` -- Vandermonde-like matrix for powers. -- `polyvander2d` -- Vandermonde-like matrix for 2D power series. -- `polyvander3d` -- Vandermonde-like matrix for 3D power series. -- `polycompanion` -- companion matrix in power series form. -- `polyfit` -- least-squares fit returning a polynomial. -- `polytrim` -- trim leading coefficients from a polynomial. -- `polyline` -- polynomial representing given straight line. - -Classes -------- -- `Polynomial` -- polynomial class. - -See Also --------- -`numpy.polynomial` - -""" -from __future__ import division, absolute_import, print_function - -__all__ = [ - 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', - 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', - 'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit', - 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', - 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] - -import warnings -import numpy as np -import numpy.linalg as la - -from . import polyutils as pu -from ._polybase import ABCPolyBase - -polytrim = pu.trimcoef - -# -# These are constant arrays are of integer type so as to be compatible -# with the widest range of other types, such as Decimal. -# - -# Polynomial default domain. -polydomain = np.array([-1, 1]) - -# Polynomial coefficients representing zero. -polyzero = np.array([0]) - -# Polynomial coefficients representing one. -polyone = np.array([1]) - -# Polynomial coefficients representing the identity x. -polyx = np.array([0, 1]) - -# -# Polynomial series functions -# - - -def polyline(off, scl): - """ - Returns an array representing a linear polynomial. - - Parameters - ---------- - off, scl : scalars - The "y-intercept" and "slope" of the line, respectively. - - Returns - ------- - y : ndarray - This module's representation of the linear polynomial ``off + - scl*x``. - - See Also - -------- - chebline - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> P.polyline(1,-1) - array([ 1, -1]) - >>> P.polyval(1, P.polyline(1,-1)) # should be 0 - 0.0 - - """ - if scl != 0: - return np.array([off, scl]) - else: - return np.array([off]) - - -def polyfromroots(roots): - """ - Generate a monic polynomial with given roots. - - Return the coefficients of the polynomial - - .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), - - where the `r_n` are the roots specified in `roots`. If a zero has - multiplicity n, then it must appear in `roots` n times. For instance, - if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, - then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear - in any order. - - If the returned coefficients are `c`, then - - .. math:: p(x) = c_0 + c_1 * x + ... + x^n - - The coefficient of the last term is 1 for monic polynomials in this - form. - - Parameters - ---------- - roots : array_like - Sequence containing the roots. - - Returns - ------- - out : ndarray - 1-D array of the polynomial's coefficients If all the roots are - real, then `out` is also real, otherwise it is complex. (see - Examples below). 
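The monic property promised by `polyfromroots` is easy to confirm by hand for a small case: the highest-order coefficient is always 1. A short doctest-style sketch:

>>> from numpy.polynomial import polynomial as P
>>> c = P.polyfromroots((1, 2))      # (x - 1)*(x - 2) = 2 - 3*x + x**2
>>> c
array([ 2., -3.,  1.])
>>> c[-1]                            # leading coefficient: the result is monic
1.0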
- - See Also - -------- - chebfromroots, legfromroots, lagfromroots, hermfromroots - hermefromroots - - Notes - ----- - The coefficients are determined by multiplying together linear factors - of the form `(x - r_i)`, i.e. - - .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) - - where ``n == len(roots) - 1``; note that this implies that `1` is always - returned for :math:`a_n`. - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x - array([ 0., -1., 0., 1.]) - >>> j = complex(0,1) - >>> P.polyfromroots((-j,j)) # complex returned, though values are real - array([ 1.+0.j, 0.+0.j, 1.+0.j]) - - """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [polyline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [polymul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = polymul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] - - -def polyadd(c1, c2): - """ - Add one polynomial to another. - - Returns the sum of two polynomials `c1` + `c2`. The arguments are - sequences of coefficients from lowest order term to highest, i.e., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of polynomial coefficients ordered from low to high. - - Returns - ------- - out : ndarray - The coefficient array representing their sum. - - See Also - -------- - polysub, polymul, polydiv, polypow - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> sum = P.polyadd(c1,c2); sum - array([ 4., 4., 4.]) - >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) - 28.0 - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def polysub(c1, c2): - """ - Subtract one polynomial from another. - - Returns the difference of two polynomials `c1` - `c2`. The arguments - are sequences of coefficients from lowest order term to highest, i.e., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of polynomial coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Of coefficients representing their difference. - - See Also - -------- - polyadd, polymul, polydiv, polypow - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polysub(c1,c2) - array([-2., 0., 2.]) - >>> P.polysub(c2,c1) # -P.polysub(c1,c2) - array([ 2., 0., -2.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) - - -def polymulx(c): - """Multiply a polynomial by x. - - Multiply the polynomial `c` by x, where x is the independent - variable. - - - Parameters - ---------- - c : array_like - 1-D array of polynomial coefficients ordered from low to - high. - - Returns - ------- - out : ndarray - Array representing the result of the multiplication. - - Notes - ----- - - .. 
versionadded:: 1.5.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - # The zero series needs special treatment - if len(c) == 1 and c[0] == 0: - return c - - prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1:] = c - return prd - - -def polymul(c1, c2): - """ - Multiply one polynomial by another. - - Returns the product of two polynomials `c1` * `c2`. The arguments are - sequences of coefficients, from lowest order term to highest, e.g., - [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of coefficients representing a polynomial, relative to the - "standard" basis, and ordered from lowest order term to highest. - - Returns - ------- - out : ndarray - Of the coefficients of their product. - - See Also - -------- - polyadd, polysub, polydiv, polypow - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polymul(c1,c2) - array([ 3., 8., 14., 8., 3.]) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - ret = np.convolve(c1, c2) - return pu.trimseq(ret) - - -def polydiv(c1, c2): - """ - Divide one polynomial by another. - - Returns the quotient-with-remainder of two polynomials `c1` / `c2`. - The arguments are sequences of coefficients, from lowest order term - to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. - - Parameters - ---------- - c1, c2 : array_like - 1-D arrays of polynomial coefficients ordered from low to high. - - Returns - ------- - [quo, rem] : ndarrays - Of coefficient series representing the quotient and remainder. - - See Also - -------- - polyadd, polysub, polymul, polypow - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c1 = (1,2,3) - >>> c2 = (3,2,1) - >>> P.polydiv(c1,c2) - (array([ 3.]), array([-8., -4.])) - >>> P.polydiv(c2,c1) - (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) - - """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - len1 = len(c1) - len2 = len(c2) - if len2 == 1: - return c1/c2[-1], c1[:1]*0 - elif len1 < len2: - return c1[:1]*0, c1 - else: - dlen = len1 - len2 - scl = c2[-1] - c2 = c2[:-1]/scl - i = dlen - j = len1 - 1 - while i >= 0: - c1[i:j] -= c2*c1[j] - i -= 1 - j -= 1 - return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) - - -def polypow(c, pow, maxpower=None): - """Raise a polynomial to a power. - - Returns the polynomial `c` raised to the power `pow`. The argument - `c` is a sequence of coefficients ordered from low to high. i.e., - [1,2,3] is the series ``1 + 2*x + 3*x**2.`` - - Parameters - ---------- - c : array_like - 1-D array of array of series coefficients ordered from low to - high degree. - pow : integer - Power to which the series will be raised - maxpower : integer, optional - Maximum power allowed. This is mainly to limit growth of the series - to unmanageable size. Default is 16 - - Returns - ------- - coef : ndarray - Power series of power. 
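# [editorial aside -- illustrative only, not part of this patch]
# polypow's docstring ships with an empty Examples section; a minimal
# sample, assuming the public API:
>>> from numpy.polynomial import polynomial as P
>>> P.polypow([1, 2], 3)               # (1 + 2x)**3
array([  1.,   6.,  12.,   8.])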
- - See Also - -------- - polyadd, polysub, polymul, polydiv - - Examples - -------- - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - prd = c - for i in range(2, power + 1): - prd = np.convolve(prd, c) - return prd - - -def polyder(c, m=1, scl=1, axis=0): - """ - Differentiate a polynomial. - - Returns the polynomial coefficients `c` differentiated `m` times along - `axis`. At each iteration the result is multiplied by `scl` (the - scaling factor is for use in a linear change of variable). The - argument `c` is an array of coefficients from low to high degree along - each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` - while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is - ``x`` and axis=1 is ``y``. - - Parameters - ---------- - c : array_like - Array of polynomial coefficients. If c is multidimensional the - different axis correspond to different variables with the degree - in each axis given by the corresponding index. - m : int, optional - Number of derivatives taken, must be non-negative. (Default: 1) - scl : scalar, optional - Each differentiation is multiplied by `scl`. The end result is - multiplication by ``scl**m``. This is for use in a linear change - of variable. (Default: 1) - axis : int, optional - Axis over which the derivative is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - der : ndarray - Polynomial coefficients of the derivative. - - See Also - -------- - polyint - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 - >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 - array([ 2., 6., 12.]) - >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24 - array([ 24.]) - >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2 - array([ -2., -6., -12.]) - >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x - array([ 6., 24.]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - # astype fails with NA - c = c + 0.0 - cdt = c.dtype - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") - if cnt < 0: - raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - c = np.rollaxis(c, iaxis) - n = len(c) - if cnt >= n: - c = c[:1]*0 - else: - for i in range(cnt): - n = n - 1 - c *= scl - der = np.empty((n,) + c.shape[1:], dtype=cdt) - for j in range(n, 0, -1): - der[j - 1] = j*c[j] - c = der - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): - """ - Integrate a polynomial. - - Returns the polynomial coefficients `c` integrated `m` times from - `lbnd` along `axis`. At each iteration the resulting series is - **multiplied** by `scl` and an integration constant, `k`, is added. - The scaling factor is for use in a linear change of variable. 
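# [editorial aside -- illustrative only, not part of this patch]
# polyder undoes polyint (up to the integration constants), which ties
# the two functions documented here together:
>>> from numpy.polynomial import polynomial as P
>>> P.polyder(P.polyint((1, 2, 3)))    # d/dx of the antiderivative
array([ 1.,  2.,  3.])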
("Buyer - beware": note that, depending on what one is doing, one may want `scl` - to be the reciprocal of what one might expect; for more information, - see the Notes section below.) The argument `c` is an array of - coefficients, from low to high degree along each axis, e.g., [1,2,3] - represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] - represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is - ``y``. - - Parameters - ---------- - c : array_like - 1-D array of polynomial coefficients, ordered from low to high. - m : int, optional - Order of integration, must be positive. (Default: 1) - k : {[], list, scalar}, optional - Integration constant(s). The value of the first integral at zero - is the first value in the list, the value of the second integral - at zero is the second value, etc. If ``k == []`` (the default), - all constants are set to zero. If ``m == 1``, a single scalar can - be given instead of a list. - lbnd : scalar, optional - The lower bound of the integral. (Default: 0) - scl : scalar, optional - Following each integration the result is *multiplied* by `scl` - before the integration constant is added. (Default: 1) - axis : int, optional - Axis over which the integral is taken. (Default: 0). - - .. versionadded:: 1.7.0 - - Returns - ------- - S : ndarray - Coefficient array of the integral. - - Raises - ------ - ValueError - If ``m < 1``, ``len(k) > m``. - - See Also - -------- - polyder - - Notes - ----- - Note that the result of each integration is *multiplied* by `scl`. Why - is this important to note? Say one is making a linear change of - variable :math:`u = ax + b` in an integral relative to `x`. Then - .. math::`dx = du/a`, so one will need to set `scl` equal to - :math:`1/a` - perhaps not what one would have first thought. - - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> c = (1,2,3) - >>> P.polyint(c) # should return array([0, 1, 1, 1]) - array([ 0., 1., 1., 1.]) - >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) - array([ 0. , 0. , 0. , 0.16666667, 0.08333333, - 0.05 ]) - >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1]) - array([ 3., 1., 1., 1.]) - >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) - array([ 6., 1., 1., 1.]) - >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) - array([ 0., -2., -2., -2.]) - - """ - c = np.array(c, ndmin=1, copy=1) - if c.dtype.char in '?bBhHiIlLqQpP': - # astype doesn't preserve mask attribute. - c = c + 0.0 - cdt = c.dtype - if not np.iterable(k): - k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") - if cnt < 0: - raise ValueError("The order of integration must be non-negative") - if len(k) > cnt: - raise ValueError("Too many integration constants") - if iaxis != axis: - raise ValueError("The axis must be integer") - if not -c.ndim <= iaxis < c.ndim: - raise ValueError("The axis is out of range") - if iaxis < 0: - iaxis += c.ndim - - if cnt == 0: - return c - - k = list(k) + [0]*(cnt - len(k)) - c = np.rollaxis(c, iaxis) - for i in range(cnt): - n = len(c) - c *= scl - if n == 1 and np.all(c[0] == 0): - c[0] += k[i] - else: - tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) - tmp[0] = c[0]*0 - tmp[1] = c[0] - for j in range(1, n): - tmp[j + 1] = c[j]/(j + 1) - tmp[0] += k[i] - polyval(lbnd, tmp) - c = tmp - c = np.rollaxis(c, 0, iaxis + 1) - return c - - -def polyval(x, c, tensor=True): - """ - Evaluate a polynomial at points x. 
- - If `c` is of length `n + 1`, this function returns the value - - .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n - - The parameter `x` is converted to an array only if it is a tuple or a - list, otherwise it is treated as a scalar. In either case, either `x` - or its elements must support multiplication and addition both with - themselves and with the elements of `c`. - - If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If - `c` is multidimensional, then the shape of the result depends on the - value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + - x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that - scalars have shape (,). - - Trailing zeros in the coefficients will be used in the evaluation, so - they should be avoided if efficiency is a concern. - - Parameters - ---------- - x : array_like, compatible object - If `x` is a list or tuple, it is converted to an ndarray, otherwise - it is left unchanged and treated as a scalar. In either case, `x` - or its elements must support addition and multiplication with - with themselves and with the elements of `c`. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree n are contained in c[n]. If `c` is multidimensional the - remaining indices enumerate multiple polynomials. In the two - dimensional case the coefficients may be thought of as stored in - the columns of `c`. - tensor : boolean, optional - If True, the shape of the coefficient array is extended with ones - on the right, one for each dimension of `x`. Scalars have dimension 0 - for this action. The result is that every column of coefficients in - `c` is evaluated for every element of `x`. If False, `x` is broadcast - over the columns of `c` for the evaluation. This keyword is useful - when `c` is multidimensional. The default value is True. - - .. versionadded:: 1.7.0 - - Returns - ------- - values : ndarray, compatible object - The shape of the returned array is described above. - - See Also - -------- - polyval2d, polygrid2d, polyval3d, polygrid3d - - Notes - ----- - The evaluation uses Horner's method. - - Examples - -------- - >>> from numpy.polynomial.polynomial import polyval - >>> polyval(1, [1,2,3]) - 6.0 - >>> a = np.arange(4).reshape(2,2) - >>> a - array([[0, 1], - [2, 3]]) - >>> polyval(a, [1,2,3]) - array([[ 1., 6.], - [ 17., 34.]]) - >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients - >>> coef - array([[0, 1], - [2, 3]]) - >>> polyval([1,2], coef, tensor=True) - array([[ 2., 4.], - [ 4., 7.]]) - >>> polyval([1,2], coef, tensor=False) - array([ 2., 7.]) - - """ - c = np.array(c, ndmin=1, copy=0) - if c.dtype.char in '?bBhHiIlLqQpP': - # astype fails with NA - c = c + 0.0 - if isinstance(x, (tuple, list)): - x = np.asarray(x) - if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) - - c0 = c[-1] + x*0 - for i in range(2, len(c) + 1): - c0 = c[-i] + c0*x - return c0 - - -def polyval2d(x, y, c): - """ - Evaluate a 2-D polynomial at points (x, y). - - This function returns the value - - .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars and they - must have the same shape after conversion. In either case, either `x` - and `y` or their elements must support multiplication and addition both - with themselves and with the elements of `c`. 
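# [editorial aside -- illustrative only, not part of this patch]
# The c[i, j] <-> x**i * y**j pairing, checked at a single point:
>>> from numpy.polynomial import polynomial as P
>>> P.polyval2d(1, 2, [[1, 2], [3, 4]])   # 1 + 2y + 3x + 4xy at (1, 2)
16.0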
- - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points `(x, y)`, - where `x` and `y` must have the same shape. If `x` or `y` is a list - or tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term - of multi-degree i,j is contained in `c[i,j]`. If `c` has - dimension greater than two the remaining indices enumerate multiple - sets of coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points formed with - pairs of corresponding values from `x` and `y`. - - See Also - -------- - polyval, polygrid2d, polyval3d, polygrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - try: - x, y = np.array((x, y), copy=0) - except: - raise ValueError('x, y are incompatible') - - c = polyval(x, c) - c = polyval(y, c, tensor=False) - return c - - -def polygrid2d(x, y, c): - """ - Evaluate a 2-D polynomial on the Cartesian product of x and y. - - This function returns the values: - - .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j - - where the points `(a, b)` consist of all pairs formed by taking - `a` from `x` and `b` from `y`. The resulting points form a grid with - `x` in the first dimension and `y` in the second. - - The parameters `x` and `y` are converted to arrays only if they are - tuples or a lists, otherwise they are treated as a scalars. In either - case, either `x` and `y` or their elements must support multiplication - and addition both with themselves and with the elements of `c`. - - If `c` has fewer than two dimensions, ones are implicitly appended to - its shape to make it 2-D. The shape of the result will be c.shape[2:] + - x.shape + y.shape. - - Parameters - ---------- - x, y : array_like, compatible objects - The two dimensional series is evaluated at the points in the - Cartesian product of `x` and `y`. If `x` or `y` is a list or - tuple, it is first converted to an ndarray, otherwise it is left - unchanged and, if it isn't an ndarray, it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - polyval, polyval2d, polyval3d, polygrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - c = polyval(x, c) - c = polyval(y, c) - return c - - -def polyval3d(x, y, z, c): - """ - Evaluate a 3-D polynomial at points (x, y, z). - - This function returns the values: - - .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k - - The parameters `x`, `y`, and `z` are converted to arrays only if - they are tuples or a lists, otherwise they are treated as a scalars and - they must have the same shape after conversion. In either case, either - `x`, `y`, and `z` or their elements must support multiplication and - addition both with themselves and with the elements of `c`. 
- - If `c` has fewer than 3 dimensions, ones are implicitly appended to its - shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape. - - Parameters - ---------- - x, y, z : array_like, compatible object - The three dimensional series is evaluated at the points - `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If - any of `x`, `y`, or `z` is a list or tuple, it is first converted - to an ndarray, otherwise it is left unchanged and if it isn't an - ndarray it is treated as a scalar. - c : array_like - Array of coefficients ordered so that the coefficient of the term of - multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension - greater than 3 the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the multidimensional polynomial on points formed with - triples of corresponding values from `x`, `y`, and `z`. - - See Also - -------- - polyval, polyval2d, polygrid2d, polygrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - try: - x, y, z = np.array((x, y, z), copy=0) - except: - raise ValueError('x, y, z are incompatible') - - c = polyval(x, c) - c = polyval(y, c, tensor=False) - c = polyval(z, c, tensor=False) - return c - - -def polygrid3d(x, y, z, c): - """ - Evaluate a 3-D polynomial on the Cartesian product of x, y and z. - - This function returns the values: - - .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k - - where the points `(a, b, c)` consist of all triples formed by taking - `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form - a grid with `x` in the first dimension, `y` in the second, and `z` in - the third. - - The parameters `x`, `y`, and `z` are converted to arrays only if they - are tuples or a lists, otherwise they are treated as a scalars. In - either case, either `x`, `y`, and `z` or their elements must support - multiplication and addition both with themselves and with the elements - of `c`. - - If `c` has fewer than three dimensions, ones are implicitly appended to - its shape to make it 3-D. The shape of the result will be c.shape[3:] + - x.shape + y.shape + z.shape. - - Parameters - ---------- - x, y, z : array_like, compatible objects - The three dimensional series is evaluated at the points in the - Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a - list or tuple, it is first converted to an ndarray, otherwise it is - left unchanged and, if it isn't an ndarray, it is treated as a - scalar. - c : array_like - Array of coefficients ordered so that the coefficients for terms of - degree i,j are contained in ``c[i,j]``. If `c` has dimension - greater than two the remaining indices enumerate multiple sets of - coefficients. - - Returns - ------- - values : ndarray, compatible object - The values of the two dimensional polynomial at points in the Cartesian - product of `x` and `y`. - - See Also - -------- - polyval, polyval2d, polygrid2d, polyval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - c = polyval(x, c) - c = polyval(y, c) - c = polyval(z, c) - return c - - -def polyvander(x, deg): - """Vandermonde matrix of given degree. - - Returns the Vandermonde matrix of degree `deg` and sample points - `x`. The Vandermonde matrix is defined by - - .. math:: V[..., i] = x^i, - - where `0 <= i <= deg`. The leading indices of `V` index the elements of - `x` and the last index is the power of `x`. 
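# [editorial aside -- illustrative only, not part of this patch]
# Rows of the Vandermonde matrix index the sample points, columns the
# powers x**0 .. x**deg:
>>> from numpy.polynomial import polynomial as P
>>> P.polyvander([1., 2., 3.], 2)
array([[ 1.,  1.,  1.],
       [ 1.,  2.,  4.],
       [ 1.,  3.,  9.]])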
- - If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the - matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and - ``polyval(x, c)`` are the same up to roundoff. This equivalence is - useful both for least squares fitting and for the evaluation of a large - number of polynomials of the same degree and sample points. - - Parameters - ---------- - x : array_like - Array of points. The dtype is converted to float64 or complex128 - depending on whether any of the elements are complex. If `x` is - scalar it is converted to a 1-D array. - deg : int - Degree of the resulting matrix. - - Returns - ------- - vander : ndarray. - The Vandermonde matrix. The shape of the returned matrix is - ``x.shape + (deg + 1,)``, where the last index is the power of `x`. - The dtype will be the same as the converted `x`. - - See Also - -------- - polyvander2d, polyvander3d - - """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") - if ideg < 0: - raise ValueError("deg must be non-negative") - - x = np.array(x, copy=0, ndmin=1) + 0.0 - dims = (ideg + 1,) + x.shape - dtyp = x.dtype - v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 - if ideg > 0: - v[1] = x - for i in range(2, ideg + 1): - v[i] = v[i-1]*x - return np.rollaxis(v, 0, v.ndim) - - -def polyvander2d(x, y, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y)`. The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., deg[1]*i + j] = x^i * y^j, - - where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of - `V` index the points `(x, y)` and the last index encodes the powers of - `x` and `y`. - - If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` - correspond to the elements of a 2-D coefficient array `c` of shape - (xdeg + 1, ydeg + 1) in the order - - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... - - and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same - up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 2-D polynomials - of the same degrees and sample points. - - Parameters - ---------- - x, y : array_like - Arrays of point coordinates, all of the same shape. The dtypes - will be converted to either float64 or complex128 depending on - whether any of the elements are complex. Scalars are converted to - 1-D arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg]. - - Returns - ------- - vander2d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same - as the converted `x` and `y`. - - See Also - -------- - polyvander, polyvander3d. polyval2d, polyval3d - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = polyvander(x, degx) - vy = polyvander(y, degy) - v = vx[..., None]*vy[..., None,:] - # einsum bug - #v = np.einsum("...i,...j->...ij", vx, vy) - return v.reshape(v.shape[:-2] + (-1,)) - - -def polyvander3d(x, y, z, deg): - """Pseudo-Vandermonde matrix of given degrees. - - Returns the pseudo-Vandermonde matrix of degrees `deg` and sample - points `(x, y, z)`. 
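# [editorial aside -- illustrative only, not part of this patch]
# The np.dot(V, c.flat) == polyval2d(x, y, c) equivalence claimed for
# polyvander2d above, verified numerically:
>>> import numpy as np
>>> from numpy.polynomial import polynomial as P
>>> x, y = np.array([0.5]), np.array([2.0])
>>> c = np.array([[1., 2.], [3., 4.]])
>>> V = P.polyvander2d(x, y, [1, 1])
>>> np.allclose(np.dot(V, c.flat), P.polyval2d(x, y, c))
True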
If `l, m, n` are the given degrees in `x, y, z`, - then The pseudo-Vandermonde matrix is defined by - - .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k, - - where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading - indices of `V` index the points `(x, y, z)` and the last index encodes - the powers of `x`, `y`, and `z`. - - If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns - of `V` correspond to the elements of a 3-D coefficient array `c` of - shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order - - .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... - - and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the - same up to roundoff. This equivalence is useful both for least squares - fitting and for the evaluation of a large number of 3-D polynomials - of the same degrees and sample points. - - Parameters - ---------- - x, y, z : array_like - Arrays of point coordinates, all of the same shape. The dtypes will - be converted to either float64 or complex128 depending on whether - any of the elements are complex. Scalars are converted to 1-D - arrays. - deg : list of ints - List of maximum degrees of the form [x_deg, y_deg, z_deg]. - - Returns - ------- - vander3d : ndarray - The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will - be the same as the converted `x`, `y`, and `z`. - - See Also - -------- - polyvander, polyvander3d. polyval2d, polyval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = polyvander(x, degx) - vy = polyvander(y, degy) - vz = polyvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - # einsum bug - #v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz) - return v.reshape(v.shape[:-3] + (-1,)) - - -def polyfit(x, y, deg, rcond=None, full=False, w=None): - """ - Least-squares fit of a polynomial to data. - - Return the coefficients of a polynomial of degree `deg` that is the - least squares fit to the data values `y` given at points `x`. If `y` is - 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple - fits are done, one for each column of `y`, and the resulting - coefficients are stored in the corresponding columns of a 2-D return. - The fitted polynomial(s) are in the form - - .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n, - - where `n` is `deg`. - - Parameters - ---------- - x : array_like, shape (`M`,) - x-coordinates of the `M` sample (data) points ``(x[i], y[i])``. - y : array_like, shape (`M`,) or (`M`, `K`) - y-coordinates of the sample points. Several sets of sample points - sharing the same x-coordinates can be (independently) fit with one - call to `polyfit` by passing in for `y` a 2-D array that contains - one data set per column. - deg : int - Degree of the polynomial(s) to be fit. - rcond : float, optional - Relative condition number of the fit. Singular values smaller - than `rcond`, relative to the largest singular value, will be - ignored. The default value is ``len(x)*eps``, where `eps` is the - relative precision of the platform's float type, about 2e-16 in - most cases. - full : bool, optional - Switch determining the nature of the return value. 
When ``False`` - (the default) just the coefficients are returned; when ``True``, - diagnostic information from the singular value decomposition (used - to solve the fit's matrix equation) is also returned. - w : array_like, shape (`M`,), optional - Weights. If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products ``w[i]*y[i]`` - all have the same variance. The default value is None. - - .. versionadded:: 1.5.0 - - Returns - ------- - coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) - Polynomial coefficients ordered from low to high. If `y` was 2-D, - the coefficients in column `k` of `coef` represent the polynomial - fit to the data in `y`'s `k`-th column. - - [residuals, rank, singular_values, rcond] : list - These values are only returned if `full` = True - - resid -- sum of squared residuals of the least squares fit - rank -- the numerical rank of the scaled Vandermonde matrix - sv -- singular values of the scaled Vandermonde matrix - rcond -- value of `rcond`. - - For more details, see `linalg.lstsq`. - - Raises - ------ - RankWarning - Raised if the matrix in the least-squares fit is rank deficient. - The warning is only raised if `full` == False. The warnings can - be turned off by: - - >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) - - See Also - -------- - chebfit, legfit, lagfit, hermfit, hermefit - polyval : Evaluates a polynomial. - polyvander : Vandermonde matrix for powers. - linalg.lstsq : Computes a least-squares fit from the matrix. - scipy.interpolate.UnivariateSpline : Computes spline fits. - - Notes - ----- - The solution is the coefficients of the polynomial `p` that minimizes - the sum of the weighted squared errors - - .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, - - where the :math:`w_j` are the weights. This problem is solved by - setting up the (typically) over-determined matrix equation: - - .. math :: V(x) * c = w * y, - - where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the - coefficients to be solved for, `w` are the weights, and `y` are the - observed values. This equation is then solved using the singular value - decomposition of `V`. - - If some of the singular values of `V` are so small that they are - neglected (and `full` == ``False``), a `RankWarning` will be raised. - This means that the coefficient values may be poorly determined. - Fitting to a lower order polynomial will usually get rid of the warning - (but may not be what you want, of course; if you have independent - reason(s) for choosing the degree which isn't working, you may have to: - a) reconsider those reasons, and/or b) reconsider the quality of your - data). The `rcond` parameter can also be set to a value smaller than - its default, but the resulting fit may be spurious and have large - contributions from roundoff error. - - Polynomial fits using double precision tend to "fail" at about - (polynomial) degree 20. Fits using Chebyshev or Legendre series are - generally better conditioned, but much can still depend on the - distribution of the sample points and the smoothness of the data. If - the quality of the fit is inadequate, splines may be a good - alternative. 
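# [editorial aside -- illustrative only, not part of this patch]
# The w parameter in action, on synthetic exact data (values are ours,
# for illustration only):
>>> import numpy as np
>>> from numpy.polynomial import polynomial as P
>>> x = np.linspace(0, 1, 9)
>>> w = np.ones_like(x); w[0] = 1e-3   # nearly ignore the first sample
>>> np.allclose(P.polyfit(x, 1 + 2*x, 1, w=w), [1., 2.])
True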
- - Examples - -------- - >>> from numpy.polynomial import polynomial as P - >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] - >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" - >>> c, stats = P.polyfit(x,y,3,full=True) - >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 - array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) - >>> stats # note the large SSR, explaining the rather poor results - [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, - 0.28853036]), 1.1324274851176597e-014] - - Same thing without the added noise - - >>> y = x**3 - x - >>> c, stats = P.polyfit(x,y,3,full=True) - >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 - array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16, - 1.00000000e+00]) - >>> stats # note the minuscule SSR - [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, - 0.50443316, 0.28853036]), 1.1324274851176597e-014] - - """ - order = int(deg) + 1 - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - - # check arguments. - if deg < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - # set up the least squares matrices in transposed form - lhs = polyvander(x, deg).T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning) - - if full: - return c, [resids, rank, s, rcond] - else: - return c - - -def polycompanion(c): - """ - Return the companion matrix of c. - - The companion matrix for power series cannot be made symmetric by - scaling the basis, so this function differs from those for the - orthogonal polynomials. - - Parameters - ---------- - c : array_like - 1-D array of polynomial coefficients ordered from low to high - degree. - - Returns - ------- - mat : ndarray - Companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - raise ValueError('Series must have maximum degree of at least 1.') - if len(c) == 2: - return np.array([[-c[0]/c[1]]]) - - n = len(c) - 1 - mat = np.zeros((n, n), dtype=c.dtype) - bot = mat.reshape(-1)[n::n+1] - bot[...] = 1 - mat[:, -1] -= c[:-1]/c[-1] - return mat - - -def polyroots(c): - """ - Compute the roots of a polynomial. - - Return the roots (a.k.a. "zeros") of the polynomial - - .. math:: p(x) = \\sum_i c[i] * x^i. 
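# [editorial aside -- illustrative only, not part of this patch]
# polycompanion for x**3 - 6x**2 + 11x - 6 = (x-1)(x-2)(x-3); its
# eigenvalues are the roots 1, 2, 3 that polyroots will return:
>>> from numpy.polynomial import polynomial as P
>>> P.polycompanion([-6., 11., -6., 1.])
array([[  0.,   0.,   6.],
       [  1.,   0., -11.],
       [  0.,   1.,   6.]])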
- - Parameters - ---------- - c : 1-D array_like - 1-D array of polynomial coefficients. - - Returns - ------- - out : ndarray - Array of the roots of the polynomial. If all the roots are real, - then `out` is also real, otherwise it is complex. - - See Also - -------- - chebroots - - Notes - ----- - The root estimates are obtained as the eigenvalues of the companion - matrix, Roots far from the origin of the complex plane may have large - errors due to the numerical instability of the power series for such - values. Roots with multiplicity greater than 1 will also show larger - errors as the value of the series near such points is relatively - insensitive to errors in the roots. Isolated roots near the origin can - be improved by a few iterations of Newton's method. - - Examples - -------- - >>> import numpy.polynomial.polynomial as poly - >>> poly.polyroots(poly.polyfromroots((-1,0,1))) - array([-1., 0., 1.]) - >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype - dtype('float64') - >>> j = complex(0,1) - >>> poly.polyroots(poly.polyfromroots((-j,0,j))) - array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) - - """ - # c is a trimmed copy - [c] = pu.as_series([c]) - if len(c) < 2: - return np.array([], dtype=c.dtype) - if len(c) == 2: - return np.array([-c[0]/c[1]]) - - m = polycompanion(c) - r = la.eigvals(m) - r.sort() - return r - - -# -# polynomial class -# - -class Polynomial(ABCPolyBase): - """A power series class. - - The Polynomial class provides the standard Python numerical methods - '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the - attributes and methods listed in the `ABCPolyBase` documentation. - - Parameters - ---------- - coef : array_like - Polynomial coefficients in order of increasing degree, i.e., - ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped - to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. - window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 - - """ - # Virtual Functions - _add = staticmethod(polyadd) - _sub = staticmethod(polysub) - _mul = staticmethod(polymul) - _div = staticmethod(polydiv) - _pow = staticmethod(polypow) - _val = staticmethod(polyval) - _int = staticmethod(polyint) - _der = staticmethod(polyder) - _fit = staticmethod(polyfit) - _line = staticmethod(polyline) - _roots = staticmethod(polyroots) - _fromroots = staticmethod(polyfromroots) - - # Virtual properties - nickname = 'poly' - domain = np.array(polydomain) - window = np.array(polydomain) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polytemplate.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polytemplate.py deleted file mode 100644 index e68dd18ef7bbe..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polytemplate.py +++ /dev/null @@ -1,927 +0,0 @@ -""" -Template for the Chebyshev and Polynomial classes. - -This module houses a Python string module Template object (see, e.g., -http://docs.python.org/library/string.html#template-strings) used by -the `polynomial` and `chebyshev` modules to implement their respective -`Polynomial` and `Chebyshev` classes. It provides a mechanism for easily -creating additional specific polynomial classes (e.g., Legendre, Jacobi, -etc.) in the future, such that all these classes will have a common API. 
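# [editorial aside -- illustrative only, not part of this patch]
# The Polynomial class wires the poly* functions above into operators:
>>> from numpy.polynomial import Polynomial
>>> p = Polynomial([1, 2, 3])          # 1 + 2x + 3x**2 on domain [-1, 1]
>>> p(1)
6.0
>>> (p + p).coef
array([ 2.,  4.,  6.])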
- -""" -from __future__ import division, absolute_import, print_function - -import string -import sys -import warnings -from number import Number - -from numpy import ModuleDeprecationWarning - -warnings.warn("The polytemplate module will be removed in Numpy 1.10.0.", - ModuleDeprecationWarning) - -polytemplate = string.Template(''' -from __future__ import division, absolute_import, print_function -import numpy as np -import warnings -from . import polyutils as pu - -class $name(pu.PolyBase) : - """A $name series class. - - $name instances provide the standard Python numerical methods '+', - '-', '*', '//', '%', 'divmod', '**', and '()' as well as the listed - methods. - - Parameters - ---------- - coef : array_like - $name coefficients, in increasing order. For example, - ``(1, 2, 3)`` implies ``P_0 + 2P_1 + 3P_2`` where the - ``P_i`` are a graded polynomial basis. - domain : (2,) array_like, optional - Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to - the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is $domain. - window : (2,) array_like, optional - Window, see ``domain`` for its use. The default value is $domain. - .. versionadded:: 1.6.0 - - Attributes - ---------- - coef : (N,) ndarray - $name coefficients, from low to high. - domain : (2,) ndarray - Domain that is mapped to ``window``. - window : (2,) ndarray - Window that ``domain`` is mapped to. - - Class Attributes - ---------------- - maxpower : int - Maximum power allowed, i.e., the largest number ``n`` such that - ``p(x)**n`` is allowed. This is to limit runaway polynomial size. - domain : (2,) ndarray - Default domain of the class. - window : (2,) ndarray - Default window of the class. - - Notes - ----- - It is important to specify the domain in many cases, for instance in - fitting data, because many of the important properties of the - polynomial basis only hold in a specified interval and consequently - the data must be mapped into that interval in order to benefit. - - Examples - -------- - - """ - # Limit runaway size. T_n^m has degree n*2^m - maxpower = 16 - # Default domain - domain = np.array($domain) - # Default window - window = np.array($domain) - # Don't let participate in array operations. Value doesn't matter. - __array_priority__ = 1000 - # Not hashable - __hash__ = None - - def has_samecoef(self, other): - """Check if coefficients match. - - Parameters - ---------- - other : class instance - The other class must have the ``coef`` attribute. - - Returns - ------- - bool : boolean - True if the coefficients are the same, False otherwise. - - Notes - ----- - .. versionadded:: 1.6.0 - - """ - if len(self.coef) != len(other.coef): - return False - elif not np.all(self.coef == other.coef): - return False - else: - return True - - def has_samedomain(self, other): - """Check if domains match. - - Parameters - ---------- - other : class instance - The other class must have the ``domain`` attribute. - - Returns - ------- - bool : boolean - True if the domains are the same, False otherwise. - - Notes - ----- - .. versionadded:: 1.6.0 - - """ - return np.all(self.domain == other.domain) - - def has_samewindow(self, other): - """Check if windows match. - - Parameters - ---------- - other : class instance - The other class must have the ``window`` attribute. - - Returns - ------- - bool : boolean - True if the windows are the same, False otherwise. - - Notes - ----- - .. 
versionadded:: 1.6.0 - - """ - return np.all(self.window == other.window) - - def has_sametype(self, other): - """Check if types match. - - Parameters - ---------- - other : object - Class instance. - - Returns - ------- - bool : boolean - True if other is same class as self - - Notes - ----- - .. versionadded:: 1.7.0 - - """ - return isinstance(other, self.__class__) - - def __init__(self, coef, domain=$domain, window=$domain) : - [coef, dom, win] = pu.as_series([coef, domain, window], trim=False) - if len(dom) != 2 : - raise ValueError("Domain has wrong number of elements.") - if len(win) != 2 : - raise ValueError("Window has wrong number of elements.") - self.coef = coef - self.domain = dom - self.window = win - - def __repr__(self): - format = "%s(%s, %s, %s)" - coef = repr(self.coef)[6:-1] - domain = repr(self.domain)[6:-1] - window = repr(self.window)[6:-1] - return format % ('$name', coef, domain, window) - - def __str__(self) : - format = "%s(%s)" - coef = str(self.coef) - return format % ('$nick', coef) - - # Pickle and copy - - def __getstate__(self) : - ret = self.__dict__.copy() - ret['coef'] = self.coef.copy() - ret['domain'] = self.domain.copy() - ret['window'] = self.window.copy() - return ret - - def __setstate__(self, dict) : - self.__dict__ = dict - - # Call - - def __call__(self, arg) : - off, scl = pu.mapparms(self.domain, self.window) - arg = off + scl*arg - return ${nick}val(arg, self.coef) - - def __iter__(self) : - return iter(self.coef) - - def __len__(self) : - return len(self.coef) - - # Numeric properties. - - def __neg__(self) : - return self.__class__(-self.coef, self.domain, self.window) - - def __pos__(self) : - return self - - def __add__(self, other) : - """Returns sum""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}add(self.coef, other.coef) - else : - try : - coef = ${nick}add(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __sub__(self, other) : - """Returns difference""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}sub(self.coef, other.coef) - else : - try : - coef = ${nick}sub(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __mul__(self, other) : - """Returns product""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - coef = ${nick}mul(self.coef, other.coef) - else : - try : - coef = ${nick}mul(self.coef, other) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __div__(self, other): - # set to __floordiv__, /, for now. - return self.__floordiv__(other) - - def __truediv__(self, other) : - # there is no true divide if the rhs is not a Number, although it - # could return the first n elements of an infinite series. 
- # It is hard to see where n would come from, though. - if not isinstance(other, Number) or isinstance(other, bool): - form = "unsupported types for true division: '%s', '%s'" - raise TypeError(form % (type(self), type(other))) - return self.__floordiv__(other) - - def __floordiv__(self, other) : - """Returns the quotient.""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - return self.__class__(quo, self.domain, self.window) - - def __mod__(self, other) : - """Returns the remainder.""" - if isinstance(other, pu.PolyBase): - if not self.has_sametype(other): - raise TypeError("Polynomial types differ") - elif not self.has_samedomain(other): - raise TypeError("Domains differ") - elif not self.has_samewindow(other): - raise TypeError("Windows differ") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - return self.__class__(rem, self.domain, self.window) - - def __divmod__(self, other) : - """Returns quo, remainder""" - if isinstance(other, self.__class__) : - if not self.has_samedomain(other): - raise TypeError("Domains are not equal") - elif not self.has_samewindow(other): - raise TypeError("Windows are not equal") - else: - quo, rem = ${nick}div(self.coef, other.coef) - else : - try : - quo, rem = ${nick}div(self.coef, other) - except : - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - def __pow__(self, other) : - try : - coef = ${nick}pow(self.coef, other, maxpower = self.maxpower) - except : - raise - return self.__class__(coef, self.domain, self.window) - - def __radd__(self, other) : - try : - coef = ${nick}add(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rsub__(self, other): - try : - coef = ${nick}sub(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rmul__(self, other) : - try : - coef = ${nick}mul(other, self.coef) - except : - return NotImplemented - return self.__class__(coef, self.domain, self.window) - - def __rdiv__(self, other): - # set to __floordiv__ /. - return self.__rfloordiv__(other) - - def __rtruediv__(self, other) : - # An instance of PolyBase is not considered a - # Number. 
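# [editorial aside -- illustrative only, not part of this patch]
# The quotient/remainder contract behind __floordiv__, __mod__ and
# __divmod__, restated with the module-level functions:
>>> from numpy.polynomial import polynomial as P
>>> quo, rem = P.polydiv([1, 2, 3], [1, 1])
>>> P.polyadd(P.polymul(quo, [1, 1]), rem)   # quo*(1 + x) + rem
array([ 1.,  2.,  3.])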
- return NotImplemented - - def __rfloordiv__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except: - return NotImplemented - return self.__class__(quo, self.domain, self.window) - - def __rmod__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except : - return NotImplemented - return self.__class__(rem, self.domain, self.window) - - def __rdivmod__(self, other) : - try : - quo, rem = ${nick}div(other, self.coef) - except : - return NotImplemented - quo = self.__class__(quo, self.domain, self.window) - rem = self.__class__(rem, self.domain, self.window) - return quo, rem - - # Enhance me - # some augmented arithmetic operations could be added here - - def __eq__(self, other) : - res = isinstance(other, self.__class__) \ - and self.has_samecoef(other) \ - and self.has_samedomain(other) \ - and self.has_samewindow(other) - return res - - def __ne__(self, other) : - return not self.__eq__(other) - - # - # Extra methods. - # - - def copy(self) : - """Return a copy. - - Return a copy of the current $name instance. - - Returns - ------- - new_instance : $name - Copy of current instance. - - """ - return self.__class__(self.coef, self.domain, self.window) - - def degree(self) : - """The degree of the series. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - return len(self) - 1 - - def cutdeg(self, deg) : - """Truncate series to the given degree. - - Reduce the degree of the $name series to `deg` by discarding the - high order terms. If `deg` is greater than the current degree a - copy of the current series is returned. This can be useful in least - squares where the coefficients of the high degree terms may be very - small. - - Parameters - ---------- - deg : non-negative int - The series is reduced to degree `deg` by discarding the high - order terms. The value of `deg` must be a non-negative integer. - - Returns - ------- - new_instance : $name - New instance of $name with reduced degree. - - Notes - ----- - .. versionadded:: 1.5.0 - - """ - return self.truncate(deg + 1) - - def trim(self, tol=0) : - """Remove small leading coefficients - - Remove leading coefficients until a coefficient is reached whose - absolute value greater than `tol` or the beginning of the series is - reached. If all the coefficients would be removed the series is set to - ``[0]``. A new $name instance is returned with the new coefficients. - The current instance remains unchanged. - - Parameters - ---------- - tol : non-negative number. - All trailing coefficients less than `tol` will be removed. - - Returns - ------- - new_instance : $name - Contains the new set of coefficients. - - """ - coef = pu.trimcoef(self.coef, tol) - return self.__class__(coef, self.domain, self.window) - - def truncate(self, size) : - """Truncate series to length `size`. - - Reduce the $name series to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. This - can be useful in least squares where the coefficients of the - high degree terms may be very small. - - Parameters - ---------- - size : positive int - The series is reduced to length `size` by discarding the high - degree terms. The value of `size` must be a positive integer. - - Returns - ------- - new_instance : $name - New instance of $name with truncated coefficients. 
- - """ - isize = int(size) - if isize != size or isize < 1 : - raise ValueError("size must be a positive integer") - if isize >= len(self.coef) : - coef = self.coef - else : - coef = self.coef[:isize] - return self.__class__(coef, self.domain, self.window) - - def convert(self, domain=None, kind=None, window=None) : - """Convert to different class and/or domain. - - Parameters - ---------- - domain : array_like, optional - The domain of the converted series. If the value is None, - the default domain of `kind` is used. - kind : class, optional - The polynomial series type class to which the current instance - should be converted. If kind is None, then the class of the - current instance is used. - window : array_like, optional - The window of the converted series. If the value is None, - the default window of `kind` is used. - - Returns - ------- - new_series_instance : `kind` - The returned class can be of different type than the current - instance and/or have a different domain. - - Notes - ----- - Conversion between domains and class types can result in - numerically ill defined series. - - Examples - -------- - - """ - if kind is None: - kind = $name - if domain is None: - domain = kind.domain - if window is None: - window = kind.window - return self(kind.identity(domain, window=window)) - - def mapparms(self) : - """Return the mapping parameters. - - The returned values define a linear map ``off + scl*x`` that is - applied to the input arguments before the series is evaluated. The - map depends on the ``domain`` and ``window``; if the current - ``domain`` is equal to the ``window`` the resulting map is the - identity. If the coefficients of the ``$name`` instance are to be - used by themselves outside this class, then the linear function - must be substituted for the ``x`` in the standard representation of - the base polynomials. - - Returns - ------- - off, scl : floats or complex - The mapping function is defined by ``off + scl*x``. - - Notes - ----- - If the current domain is the interval ``[l_1, r_1]`` and the window - is ``[l_2, r_2]``, then the linear mapping function ``L`` is - defined by the equations:: - - L(l_1) = l_2 - L(r_1) = r_2 - - """ - return pu.mapparms(self.domain, self.window) - - def integ(self, m=1, k=[], lbnd=None) : - """Integrate. - - Return an instance of $name that is the definite integral of the - current series. Refer to `${nick}int` for full documentation. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. - k : array_like - Integration constants. The first constant is applied to the - first integration, the second to the second, and so on. The - list of values must less than or equal to `m` in length and any - missing values are set to zero. - lbnd : Scalar - The lower bound of the definite integral. - - Returns - ------- - integral : $name - The integral of the series using the same domain. - - See Also - -------- - ${nick}int : similar function. - ${nick}der : similar function for derivative. - - """ - off, scl = self.mapparms() - if lbnd is None : - lbnd = 0 - else : - lbnd = off + scl*lbnd - coef = ${nick}int(self.coef, m, k, lbnd, 1./scl) - return self.__class__(coef, self.domain, self.window) - - def deriv(self, m=1): - """Differentiate. - - Return an instance of $name that is the derivative of the current - series. Refer to `${nick}der` for full documentation. - - Parameters - ---------- - m : non-negative int - The number of integrations to perform. 
- - Returns - ------- - derivative : $name - The derivative of the series using the same domain. - - See Also - -------- - ${nick}der : similar function. - ${nick}int : similar function for integration. - - """ - off, scl = self.mapparms() - coef = ${nick}der(self.coef, m, scl) - return self.__class__(coef, self.domain, self.window) - - def roots(self) : - """Return list of roots. - - Return ndarray of roots for this series. See `${nick}roots` for - full documentation. Note that the accuracy of the roots is likely to - decrease the further outside the domain they lie. - - See Also - -------- - ${nick}roots : similar function - ${nick}fromroots : function to go generate series from roots. - - """ - roots = ${nick}roots(self.coef) - return pu.mapdomain(roots, self.window, self.domain) - - def linspace(self, n=100, domain=None): - """Return x,y values at equally spaced points in domain. - - Returns x, y values at `n` linearly spaced points across domain. - Here y is the value of the polynomial at the points x. By default - the domain is the same as that of the $name instance. This method - is intended mostly as a plotting aid. - - Parameters - ---------- - n : int, optional - Number of point pairs to return. The default value is 100. - domain : {None, array_like} - If not None, the specified domain is used instead of that of - the calling instance. It should be of the form ``[beg,end]``. - The default is None. - - Returns - ------- - x, y : ndarrays - ``x`` is equal to linspace(self.domain[0], self.domain[1], n) - ``y`` is the polynomial evaluated at ``x``. - - .. versionadded:: 1.5.0 - - """ - if domain is None: - domain = self.domain - x = np.linspace(domain[0], domain[1], n) - y = self(x) - return x, y - - - - @staticmethod - def fit(x, y, deg, domain=None, rcond=None, full=False, w=None, - window=$domain): - """Least squares fit to data. - - Return a `$name` instance that is the least squares fit to the data - `y` sampled at `x`. Unlike `${nick}fit`, the domain of the returned - instance can be specified and this will often result in a superior - fit with less chance of ill conditioning. Support for NA was added - in version 1.7.0. See `${nick}fit` for full documentation of the - implementation. - - Parameters - ---------- - x : array_like, shape (M,) - x-coordinates of the M sample points ``(x[i], y[i])``. - y : array_like, shape (M,) or (M, K) - y-coordinates of the sample points. Several data sets of sample - points sharing the same x-coordinates can be fitted at once by - passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial. - domain : {None, [beg, end], []}, optional - Domain to use for the returned $name instance. If ``None``, - then a minimal domain that covers the points `x` is chosen. If - ``[]`` the default domain ``$domain`` is used. The default - value is $domain in numpy 1.4.x and ``None`` in later versions. - The ``[]`` value was added in numpy 1.5.0. - rcond : float, optional - Relative condition number of the fit. Singular values smaller - than this relative to the largest singular value will be - ignored. The default value is len(x)*eps, where eps is the - relative precision of the float type, about 2e-16 in most - cases. - full : bool, optional - Switch determining nature of return value. When it is False - (the default) just the coefficients are returned, when True - diagnostic information from the singular value decomposition is - also returned. - w : array_like, shape (M,), optional - Weights. 
If not None, the contribution of each point - ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the - weights are chosen so that the errors of the products - ``w[i]*y[i]`` all have the same variance. The default value is - None. - .. versionadded:: 1.5.0 - window : {[beg, end]}, optional - Window to use for the returned $name instance. The default - value is ``$domain``. - .. versionadded:: 1.6.0 - - Returns - ------- - least_squares_fit : instance of $name - The $name instance is the least squares fit to the data and - has the domain specified in the call. - - [residuals, rank, singular_values, rcond] : only if `full` = True - Residuals of the least squares fit, the effective rank of the - scaled Vandermonde matrix and its singular values, and the - specified value of `rcond`. For more details, see - `linalg.lstsq`. - - See Also - -------- - ${nick}fit : similar function - - """ - if domain is None: - domain = pu.getdomain(x) - elif type(domain) is list and len(domain) == 0: - domain = $domain - - if type(window) is list and len(window) == 0: - window = $domain - - xnew = pu.mapdomain(x, domain, window) - res = ${nick}fit(xnew, y, deg, w=w, rcond=rcond, full=full) - if full : - [coef, status] = res - return $name(coef, domain=domain, window=window), status - else : - coef = res - return $name(coef, domain=domain, window=window) - - @staticmethod - def fromroots(roots, domain=$domain, window=$domain) : - """Return $name instance with specified roots. - - Returns an instance of $name representing the product - ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is the - list of roots. - - Parameters - ---------- - roots : array_like - List of roots. - domain : {array_like, None}, optional - Domain for the resulting instance of $name. If None, the domain - is the interval from the smallest root to the largest. The - default is $domain. - window : array_like, optional - Window for the resulting instance of $name. The default value - is $domain. - - Returns - ------- - object : $name instance - Series with the specified roots. - - See Also - -------- - ${nick}fromroots : equivalent function - - """ - [roots] = pu.as_series([roots], trim=False) - if domain is None : - domain = pu.getdomain(roots) - deg = len(roots) - off, scl = pu.mapparms(domain, window) - rnew = off + scl*roots - coef = ${nick}fromroots(rnew) / scl**deg - return $name(coef, domain=domain, window=window) - - @staticmethod - def identity(domain=$domain, window=$domain) : - """Identity function. - - If ``p`` is the returned $name object, then ``p(x) == x`` for all - values of x. - - Parameters - ---------- - domain : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. - window : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the window. - - Returns - ------- - identity : $name instance - - """ - off, scl = pu.mapparms(window, domain) - coef = ${nick}line(off, scl) - return $name(coef, domain, window) - - @staticmethod - def basis(deg, domain=$domain, window=$domain): - """$name polynomial of degree `deg`. - - Returns an instance of the $name polynomial of degree `deg`. - - Parameters - ---------- - deg : int - Degree of the $name polynomial. Must be >= 0. - domain : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain.
- window : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the window. - - Returns - ------- - p : $name instance - - Notes - ----- - .. versionadded:: 1.7.0 - - """ - ideg = int(deg) - if ideg != deg or ideg < 0: - raise ValueError("deg must be non-negative integer") - return $name([0]*ideg + [1], domain, window) - - @staticmethod - def cast(series, domain=$domain, window=$domain): - """Convert instance to equivalent $name series. - - The `series` is expected to be an instance of some polynomial - series of one of the types supported by the numpy.polynomial - module, but could be some other class that supports the convert - method. - - Parameters - ---------- - series : series - The instance series to be converted. - domain : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the domain. - window : array_like - The resulting array must be of the form ``[beg, end]``, where - ``beg`` and ``end`` are the endpoints of the window. - - Returns - ------- - p : $name instance - A $name series equal to the given `series`. - - See Also - -------- - convert -- similar instance method - - Notes - ----- - .. versionadded:: 1.7.0 - - """ - return series.convert(domain, $name, window) - -''') diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polyutils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polyutils.py deleted file mode 100644 index 9348559edb97a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/polyutils.py +++ /dev/null @@ -1,403 +0,0 @@ -""" -Utility classes and functions for the polynomial modules. - -This module provides: error and warning objects; a polynomial base class; -and some routines used in both the `polynomial` and `chebyshev` modules. - -Error objects ------------- - -.. autosummary:: - :toctree: generated/ - - PolyError base class for this sub-package's errors. - PolyDomainError raised when domains are mismatched. - -Warning objects ---------------- - -.. autosummary:: - :toctree: generated/ - - RankWarning raised in least-squares fit for rank-deficient matrix. - -Base class ---------- - -.. autosummary:: - :toctree: generated/ - - PolyBase Obsolete base class for the polynomial classes. Do not use. - -Functions --------- - -.. autosummary:: - :toctree: generated/ - - as_series convert list of array_likes into 1-D arrays of common type. - trimseq remove trailing zeros. - trimcoef remove small trailing coefficients. - getdomain return the domain appropriate for a given set of abscissae. - mapdomain map points between domains. - mapparms parameters of the linear map between domains. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np - -__all__ = [ - 'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq', - 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase'] - -# -# Warnings and Exceptions -# - -class RankWarning(UserWarning): - """Issued by chebfit when the design matrix is rank deficient.""" - pass - -class PolyError(Exception): - """Base class for errors in this module.""" - pass - -class PolyDomainError(PolyError): - """Issued by the generic Poly class when two domains don't match. - - This is raised when a binary operation is passed Poly objects with - different domains. - - """ - pass - -# -# Base class for all polynomial types -# - -class PolyBase(object): - """ - Base class for all polynomial types.
- - Deprecated in numpy 1.9.0, use the abstract - ABCPolyBase class instead. Note that the latter - requires a number of virtual functions to be - implemented. - - """ - pass - -# -# Helper functions to convert inputs to 1-D arrays -# -def trimseq(seq): - """Remove trailing zeros from a sequence of Poly series coefficients. - - Parameters - ---------- - seq : sequence - Sequence of Poly series coefficients. Empty sequences are - returned unchanged. - - Returns - ------- - series : sequence - Subsequence with trailing zeros removed. If the resulting sequence - would be empty, the first element is kept. The returned sequence may - or may not be a view. - - Notes - ----- - Do not lose the type info if the sequence contains unknown objects. - - """ - if len(seq) == 0: - return seq - else: - for i in range(len(seq) - 1, -1, -1): - if seq[i] != 0: - break - return seq[:i+1] - - -def as_series(alist, trim=True): - """ - Return argument as a list of 1-d arrays. - - The returned list contains array(s) of dtype double, complex double, or - object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of - size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays - of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array - raises a ValueError if it is not first reshaped into either a 1-d or 2-d - array. - - Parameters - ---------- - alist : array_like - A 1- or 2-d array_like - trim : boolean, optional - When True, trailing zeros are removed from the inputs. - When False, the inputs are passed through intact. - - Returns - ------- - [a1, a2,...] : list of 1-D arrays - A copy of the input data as a list of 1-d arrays. - - Raises - ------ - ValueError - Raised when `as_series` cannot convert its input to 1-d arrays, or at - least one of the resulting arrays is empty. - - Examples - -------- - >>> from numpy import polynomial as P - >>> a = np.arange(4) - >>> P.as_series(a) - [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])] - >>> b = np.arange(6).reshape((2,3)) - >>> P.as_series(b) - [array([ 0., 1., 2.]), array([ 3., 4., 5.])] - - """ - arrays = [np.array(a, ndmin=1, copy=0) for a in alist] - if min([a.size for a in arrays]) == 0: - raise ValueError("Coefficient array is empty") - if any([a.ndim != 1 for a in arrays]): - raise ValueError("Coefficient array is not 1-d") - if trim: - arrays = [trimseq(a) for a in arrays] - - if any([a.dtype == np.dtype(object) for a in arrays]): - ret = [] - for a in arrays: - if a.dtype != np.dtype(object): - tmp = np.empty(len(a), dtype=np.dtype(object)) - tmp[:] = a[:] - ret.append(tmp) - else: - ret.append(a.copy()) - else: - try: - dtype = np.common_type(*arrays) - except: - raise ValueError("Coefficient arrays have no common type") - ret = [np.array(a, copy=1, dtype=dtype) for a in arrays] - return ret - - -def trimcoef(c, tol=0): - """ - Remove "small" "trailing" coefficients from a polynomial. - - "Small" means "small in absolute value" and is controlled by the - parameter `tol`; "trailing" means highest order coefficient(s), e.g., in - ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) - both the 3rd and 4th order coefficients would be "trimmed." - - Parameters - ---------- - c : array_like - 1-d array of coefficients, ordered from lowest order to highest. - tol : number, optional - Trailing (i.e., highest order) elements with absolute value less - than or equal to `tol` (default value is zero) are removed. - - Returns - ------- - trimmed : ndarray - 1-d array with trailing "small" coefficients removed.
If the resulting series - would be empty, a series containing a single zero is returned. - - Raises - ------ - ValueError - If `tol` < 0 - - See Also - -------- - trimseq - - Examples - -------- - >>> from numpy import polynomial as P - >>> P.trimcoef((0,0,3,0,5,0,0)) - array([ 0., 0., 3., 0., 5.]) - >>> P.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed - array([ 0.]) - >>> i = complex(0,1) # works for complex - >>> P.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) - array([ 0.0003+0.j , 0.0010-0.001j]) - - """ - if tol < 0: - raise ValueError("tol must be non-negative") - - [c] = as_series([c]) - [ind] = np.where(np.abs(c) > tol) - if len(ind) == 0: - return c[:1]*0 - else: - return c[:ind[-1] + 1].copy() - -def getdomain(x): - """ - Return a domain suitable for given abscissae. - - Find a domain suitable for a polynomial or Chebyshev series - defined at the values supplied. - - Parameters - ---------- - x : array_like - 1-d array of abscissae whose domain will be determined. - - Returns - ------- - domain : ndarray - 1-d array containing two values. If the inputs are complex, then - the two returned points are the lower left and upper right corners - of the smallest rectangle (aligned with the axes) in the complex - plane containing the points `x`. If the inputs are real, then the - two points are the ends of the smallest interval containing the - points `x`. - - See Also - -------- - mapparms, mapdomain - - Examples - -------- - >>> from numpy.polynomial import polyutils as pu - >>> points = np.arange(4)**2 - 5; points - array([-5, -4, -1, 4]) - >>> pu.getdomain(points) - array([-5., 4.]) - >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle - >>> pu.getdomain(c) - array([-1.-1.j, 1.+1.j]) - - """ - [x] = as_series([x], trim=False) - if x.dtype.char in np.typecodes['Complex']: - rmin, rmax = x.real.min(), x.real.max() - imin, imax = x.imag.min(), x.imag.max() - return np.array((complex(rmin, imin), complex(rmax, imax))) - else: - return np.array((x.min(), x.max())) - -def mapparms(old, new): - """ - Linear map parameters between domains. - - Return the parameters of the linear map ``offset + scale*x`` that maps - `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. - - Parameters - ---------- - old, new : array_like - Domains. Each domain must (successfully) convert to a 1-d array - containing precisely two values. - - Returns - ------- - offset, scale : scalars - The map ``L(x) = offset + scale*x`` maps the first domain to the - second. - - See Also - -------- - getdomain, mapdomain - - Notes - ----- - Also works for complex numbers, and thus can be used to calculate the - parameters required to map any line in the complex plane to any other - line therein. - - Examples - -------- - >>> from numpy import polynomial as P - >>> P.mapparms((-1,1),(-1,1)) - (0.0, 1.0) - >>> P.mapparms((1,-1),(-1,1)) - (0.0, -1.0) - >>> i = complex(0,1) - >>> P.mapparms((-i,-1),(1,i)) - ((1+1j), (1+0j)) - - """ - oldlen = old[1] - old[0] - newlen = new[1] - new[0] - off = (old[1]*new[0] - old[0]*new[1])/oldlen - scl = newlen/oldlen - return off, scl - -def mapdomain(x, old, new): - """ - Apply linear map to input points. - - The linear map ``offset + scale*x`` that maps the domain `old` to - the domain `new` is applied to the points `x`. - - Parameters - ---------- - x : array_like - Points to be mapped. If `x` is a subtype of ndarray the subtype - will be preserved. - old, new : array_like - The two domains that determine the map. 
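
# A minimal sketch (not from the original source) of how the two helpers
# above cooperate; the domains [-1, 1] -> [0, 4] are illustrative values.
# mapparms(old, new) returns (off, scl) with off + scl*old[i] == new[i],
# and mapdomain applies that same linear map to arbitrary points.
import numpy as np
from numpy.polynomial import polyutils as pu

off, scl = pu.mapparms([-1, 1], [0, 4])   # here off == 2.0, scl == 2.0
x = np.linspace(-1, 1, 5)
assert np.allclose(pu.mapdomain(x, [-1, 1], [0, 4]), off + scl*x)
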
Each must (successfully) - convert to 1-d arrays containing precisely two values. - - Returns - ------- - x_out : ndarray - Array of points of the same shape as `x`, after application of the - linear map between the two domains. - - See Also - -------- - getdomain, mapparms - - Notes - ----- - Effectively, this implements: - - .. math :: - x\\_out = new[0] + m(x - old[0]) - - where - - .. math :: - m = \\frac{new[1]-new[0]}{old[1]-old[0]} - - Examples - -------- - >>> from numpy import polynomial as P - >>> old_domain = (-1,1) - >>> new_domain = (0,2*np.pi) - >>> x = np.linspace(-1,1,6); x - array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) - >>> x_out = P.mapdomain(x, old_domain, new_domain); x_out - array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, - 6.28318531]) - >>> x - P.mapdomain(x_out, new_domain, old_domain) - array([ 0., 0., 0., 0., 0., 0.]) - - Also works for complex numbers (and thus can be used to map any line in - the complex plane to any other line therein). - - >>> i = complex(0,1) - >>> old = (-1 - i, 1 + i) - >>> new = (-1 + i, 1 - i) - >>> z = np.linspace(old[0], old[1], 6); z - array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1.0+1.j ]) - >>> new_z = P.mapdomain(z, old, new); new_z - array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) - - """ - x = np.asanyarray(x) - off, scl = mapparms(old, new) - return off + scl*x diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/setup.py deleted file mode 100644 index cb59ee1e56d9c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/setup.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('polynomial', parent_package, top_path) - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_chebyshev.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_chebyshev.py deleted file mode 100644 index a596905f6771d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_chebyshev.py +++ /dev/null @@ -1,554 +0,0 @@ -"""Tests for chebyshev module. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.polynomial.chebyshev as cheb -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) - - -def trim(x): - return cheb.chebtrim(x, tol=1e-6) - -T0 = [1] -T1 = [0, 1] -T2 = [-1, 0, 2] -T3 = [0, -3, 0, 4] -T4 = [1, 0, -8, 0, 8] -T5 = [0, 5, 0, -20, 0, 16] -T6 = [-1, 0, 18, 0, -48, 0, 32] -T7 = [0, -7, 0, 56, 0, -112, 0, 64] -T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] -T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] - -Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] - - -class TestPrivate(TestCase): - - def test__cseries_to_zseries(self): - for i in range(5): - inp = np.array([2] + [1]*i, np.double) - tgt = np.array([.5]*i + [2] + [.5]*i, np.double) - res = cheb._cseries_to_zseries(inp) - assert_equal(res, tgt) - - def test__zseries_to_cseries(self): - for i in range(5): - inp = np.array([.5]*i + [2] + [.5]*i, np.double) - tgt = np.array([2] + [1]*i, np.double) - res = cheb._zseries_to_cseries(inp) - assert_equal(res, tgt) - - -class TestConstants(TestCase): - - def test_chebdomain(self): - assert_equal(cheb.chebdomain, [-1, 1]) - - def test_chebzero(self): - assert_equal(cheb.chebzero, [0]) - - def test_chebone(self): - assert_equal(cheb.chebone, [1]) - - def test_chebx(self): - assert_equal(cheb.chebx, [0, 1]) - - -class TestArithmetic(TestCase): - - def test_chebadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = cheb.chebadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebsub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = cheb.chebsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebmulx(self): - assert_equal(cheb.chebmulx([0]), [0]) - assert_equal(cheb.chebmulx([1]), [0, 1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [.5, 0, .5] - assert_equal(cheb.chebmulx(ser), tgt) - - def test_chebmul(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(i + j + 1) - tgt[i + j] += .5 - tgt[abs(i - j)] += .5 - res = cheb.chebmul([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_chebdiv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = cheb.chebadd(ci, cj) - quo, rem = cheb.chebdiv(tgt, ci) - res = cheb.chebadd(cheb.chebmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(TestCase): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([2.5, 2., 1.5]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_chebval(self): - #check empty input - assert_equal(cheb.chebval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Tlist] - for i in range(10): - msg = "At i=%d" % i - tgt = y[i] - res = cheb.chebval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(cheb.chebval(x, 
[1]).shape, dims) - assert_equal(cheb.chebval(x, [1, 0]).shape, dims) - assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) - - def test_chebval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = cheb.chebval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = cheb.chebval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_chebval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = cheb.chebval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = cheb.chebval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_chebgrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = cheb.chebgrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = cheb.chebgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_chebgrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = cheb.chebgrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = cheb.chebgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(TestCase): - - def test_chebint(self): - # check exceptions - assert_raises(ValueError, cheb.chebint, [0], .5) - assert_raises(ValueError, cheb.chebint, [0], -1) - assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = cheb.chebint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - chebpol = cheb.poly2cheb(pol) - chebint = cheb.chebint(chebpol, m=1, k=[i]) - res = cheb.cheb2poly(chebint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - chebpol = cheb.poly2cheb(pol) - chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(cheb.chebval(-1, chebint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - chebpol = cheb.poly2cheb(pol) - chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) - res = cheb.cheb2poly(chebint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = cheb.chebint(tgt, m=1) - res = cheb.chebint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = cheb.chebint(tgt, m=1, k=[k]) - res = cheb.chebint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) - res = 
cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) - res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T - res = cheb.chebint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([cheb.chebint(c) for c in c2d]) - res = cheb.chebint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) - res = cheb.chebint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(TestCase): - - def test_chebder(self): - # check exceptions - assert_raises(ValueError, cheb.chebder, [0], .5) - assert_raises(ValueError, cheb.chebder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = cheb.chebder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T - res = cheb.chebder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([cheb.chebder(c) for c in c2d]) - res = cheb.chebder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(TestCase): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_chebvander(self): - # check for 1d x - x = np.arange(3) - v = cheb.chebvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], cheb.chebval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = cheb.chebvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], cheb.chebval(x, coef)) - - def test_chebvander2d(self): - # also tests chebval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = cheb.chebvander2d(x1, x2, [1, 2]) - tgt = cheb.chebval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = cheb.chebvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_chebvander3d(self): - # also tests chebval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) - tgt = cheb.chebval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(TestCase): - - def test_chebfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, cheb.chebfit, [1], [1], -1) - assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) - 
assert_raises(TypeError, cheb.chebfit, [], [1], 0) - assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) - assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) - assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) - assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = cheb.chebfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(cheb.chebval(x, coef3), y) - # - coef4 = cheb.chebfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(cheb.chebval(x, coef4), y) - # - coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = cheb.chebfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) - - -class TestCompanion(TestCase): - - def test_raises(self): - assert_raises(ValueError, cheb.chebcompanion, []) - assert_raises(ValueError, cheb.chebcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(cheb.chebcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) - - -class TestGauss(TestCase): - - def test_100(self): - x, w = cheb.chebgauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
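
# In miniature, the quadrature rule this test relies on (a sketch, with an
# illustrative degree-5 rule): chebgauss(n) integrates f(x)/sqrt(1 - x**2)
# over [-1, 1] exactly for polynomial f of degree <= 2*n - 1.
import numpy as np
import numpy.polynomial.chebyshev as cheb

x, w = cheb.chebgauss(5)
assert np.allclose(w.sum(), np.pi)            # integral of 1/sqrt(1-x**2)
assert np.allclose((w*x**2).sum(), np.pi/2)   # integral of x**2/sqrt(1-x**2)
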
- v = cheb.chebvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = np.pi - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(TestCase): - - def test_chebfromroots(self): - res = cheb.chebfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = [0]*i + [1] - res = cheb.chebfromroots(roots)*2**(i-1) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebroots(self): - assert_almost_equal(cheb.chebroots([1]), []) - assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = cheb.chebroots(cheb.chebfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_chebtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, cheb.chebtrim, coef, -1) - - # Test results - assert_equal(cheb.chebtrim(coef), coef[:-1]) - assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) - assert_equal(cheb.chebtrim(coef, 2), [0]) - - def test_chebline(self): - assert_equal(cheb.chebline(3, 4), [3, 4]) - - def test_cheb2poly(self): - for i in range(10): - assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) - - def test_poly2cheb(self): - for i in range(10): - assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-1, 1, 11)[1:-1] - tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) - res = cheb.chebweight(x) - assert_almost_equal(res, tgt) - - def test_chebpts1(self): - #test exceptions - assert_raises(ValueError, cheb.chebpts1, 1.5) - assert_raises(ValueError, cheb.chebpts1, 0) - - #test points - tgt = [0] - assert_almost_equal(cheb.chebpts1(1), tgt) - tgt = [-0.70710678118654746, 0.70710678118654746] - assert_almost_equal(cheb.chebpts1(2), tgt) - tgt = [-0.86602540378443871, 0, 0.86602540378443871] - assert_almost_equal(cheb.chebpts1(3), tgt) - tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] - assert_almost_equal(cheb.chebpts1(4), tgt) - - def test_chebpts2(self): - #test exceptions - assert_raises(ValueError, cheb.chebpts2, 1.5) - assert_raises(ValueError, cheb.chebpts2, 1) - - #test points - tgt = [-1, 1] - assert_almost_equal(cheb.chebpts2(2), tgt) - tgt = [-1, 0, 1] - assert_almost_equal(cheb.chebpts2(3), tgt) - tgt = [-1, -0.5, .5, 1] - assert_almost_equal(cheb.chebpts2(4), tgt) - tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] - assert_almost_equal(cheb.chebpts2(5), tgt) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_classes.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_classes.py deleted file mode 100644 index cd5a54687939d..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_classes.py +++ /dev/null @@ -1,570 +0,0 @@ -"""Test inter-conversion of different polynomial classes. - -This tests the convert and cast methods of all the polynomial classes. 
- -""" -from __future__ import division, absolute_import, print_function - -import operator as op -from numbers import Number - -import numpy as np -from numpy.polynomial import ( - Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - run_module_suite) -from numpy.compat import long - - -classes = ( - Polynomial, Legendre, Chebyshev, Laguerre, - Hermite, HermiteE) - - -def test_class_methods(): - for Poly1 in classes: - for Poly2 in classes: - yield check_conversion, Poly1, Poly2 - yield check_cast, Poly1, Poly2 - for Poly in classes: - yield check_call, Poly - yield check_identity, Poly - yield check_basis, Poly - yield check_fromroots, Poly - yield check_fit, Poly - yield check_equal, Poly - yield check_not_equal, Poly - yield check_add, Poly - yield check_sub, Poly - yield check_mul, Poly - yield check_floordiv, Poly - yield check_truediv, Poly - yield check_mod, Poly - yield check_divmod, Poly - yield check_pow, Poly - yield check_integ, Poly - yield check_deriv, Poly - yield check_roots, Poly - yield check_linspace, Poly - yield check_mapparms, Poly - yield check_degree, Poly - yield check_copy, Poly - yield check_cutdeg, Poly - yield check_truncate, Poly - yield check_trim, Poly - - -# -# helper functions -# -random = np.random.random - - -def assert_poly_almost_equal(p1, p2, msg=""): - try: - assert_(np.all(p1.domain == p2.domain)) - assert_(np.all(p1.window == p2.window)) - assert_almost_equal(p1.coef, p2.coef) - except AssertionError: - msg = "Result: %s\nTarget: %s", (p1, p2) - raise AssertionError(msg) - - -# -# conversion methods that depend on two classes -# - - -def check_conversion(Poly1, Poly2): - x = np.linspace(0, 1, 10) - coef = random((3,)) - - d1 = Poly1.domain + random((2,))*.25 - w1 = Poly1.window + random((2,))*.25 - p1 = Poly1(coef, domain=d1, window=w1) - - d2 = Poly2.domain + random((2,))*.25 - w2 = Poly2.window + random((2,))*.25 - p2 = p1.convert(kind=Poly2, domain=d2, window=w2) - - assert_almost_equal(p2.domain, d2) - assert_almost_equal(p2.window, w2) - assert_almost_equal(p2(x), p1(x)) - - -def check_cast(Poly1, Poly2): - x = np.linspace(0, 1, 10) - coef = random((3,)) - - d1 = Poly1.domain + random((2,))*.25 - w1 = Poly1.window + random((2,))*.25 - p1 = Poly1(coef, domain=d1, window=w1) - - d2 = Poly2.domain + random((2,))*.25 - w2 = Poly2.window + random((2,))*.25 - p2 = Poly2.cast(p1, domain=d2, window=w2) - - assert_almost_equal(p2.domain, d2) - assert_almost_equal(p2.window, w2) - assert_almost_equal(p2(x), p1(x)) - - -# -# methods that depend on one class -# - - -def check_identity(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - x = np.linspace(d[0], d[1], 11) - p = Poly.identity(domain=d, window=w) - assert_equal(p.domain, d) - assert_equal(p.window, w) - assert_almost_equal(p(x), x) - - -def check_basis(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - p = Poly.basis(5, domain=d, window=w) - assert_equal(p.domain, d) - assert_equal(p.window, w) - assert_equal(p.coef, [0]*5 + [1]) - - -def check_fromroots(Poly): - # check that requested roots are zeros of a polynomial - # of correct degree, domain, and window. 
- d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - r = random((5,)) - p1 = Poly.fromroots(r, domain=d, window=w) - assert_equal(p1.degree(), len(r)) - assert_equal(p1.domain, d) - assert_equal(p1.window, w) - assert_almost_equal(p1(r), 0) - - # check that polynomial is monic - pdom = Polynomial.domain - pwin = Polynomial.window - p2 = Polynomial.cast(p1, domain=pdom, window=pwin) - assert_almost_equal(p2.coef[-1], 1) - - -def check_fit(Poly): - - def f(x): - return x*(x - 1)*(x - 2) - x = np.linspace(0, 3) - y = f(x) - - # check default value of domain and window - p = Poly.fit(x, y, 3) - assert_almost_equal(p.domain, [0, 3]) - assert_almost_equal(p(x), y) - assert_equal(p.degree(), 3) - - # check with given domains and window - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - p = Poly.fit(x, y, 3, domain=d, window=w) - assert_almost_equal(p(x), y) - assert_almost_equal(p.domain, d) - assert_almost_equal(p.window, w) - - # check with class domain default - p = Poly.fit(x, y, 3, []) - assert_equal(p.domain, Poly.domain) - assert_equal(p.window, Poly.window) - - # check that fit accepts weights. - w = np.zeros_like(x) - z = y + random(y.shape)*.25 - w[::2] = 1 - p1 = Poly.fit(x[::2], z[::2], 3) - p2 = Poly.fit(x, z, 3, w=w) - assert_almost_equal(p1(x), p2(x)) - - -def check_equal(Poly): - p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) - p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) - p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) - p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) - assert_(p1 == p1) - assert_(not p1 == p2) - assert_(not p1 == p3) - assert_(not p1 == p4) - - -def check_not_equal(Poly): - p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) - p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) - p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) - p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) - assert_(not p1 != p1) - assert_(p1 != p2) - assert_(p1 != p3) - assert_(p1 != p4) - - -def check_add(Poly): - # This checks commutation, not numerical correctness - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = p1 + p2 - assert_poly_almost_equal(p2 + p1, p3) - assert_poly_almost_equal(p1 + c2, p3) - assert_poly_almost_equal(c2 + p1, p3) - assert_poly_almost_equal(p1 + tuple(c2), p3) - assert_poly_almost_equal(tuple(c2) + p1, p3) - assert_poly_almost_equal(p1 + np.array(c2), p3) - assert_poly_almost_equal(np.array(c2) + p1, p3) - assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.add, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.add, p1, Polynomial([0])) - - -def check_sub(Poly): - # This checks commutation, not numerical correctness - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = p1 - p2 - assert_poly_almost_equal(p2 - p1, -p3) - assert_poly_almost_equal(p1 - c2, p3) - assert_poly_almost_equal(c2 - p1, -p3) - assert_poly_almost_equal(p1 - tuple(c2), p3) - assert_poly_almost_equal(tuple(c2) - p1, -p3) - assert_poly_almost_equal(p1 - np.array(c2), p3) - assert_poly_almost_equal(np.array(c2) - p1, -p3) - assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.sub, p1, Chebyshev([0])) - else: - assert_raises(TypeError, 
op.sub, p1, Polynomial([0])) - - -def check_mul(Poly): - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = p1 * p2 - assert_poly_almost_equal(p2 * p1, p3) - assert_poly_almost_equal(p1 * c2, p3) - assert_poly_almost_equal(c2 * p1, p3) - assert_poly_almost_equal(p1 * tuple(c2), p3) - assert_poly_almost_equal(tuple(c2) * p1, p3) - assert_poly_almost_equal(p1 * np.array(c2), p3) - assert_poly_almost_equal(np.array(c2) * p1, p3) - assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) - assert_poly_almost_equal(2 * p1, p1 * Poly([2])) - assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.mul, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.mul, p1, Polynomial([0])) - - -def check_floordiv(Poly): - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - c3 = list(random((2,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = Poly(c3) - p4 = p1 * p2 + p3 - c4 = list(p4.coef) - assert_poly_almost_equal(p4 // p2, p1) - assert_poly_almost_equal(p4 // c2, p1) - assert_poly_almost_equal(c4 // p2, p1) - assert_poly_almost_equal(p4 // tuple(c2), p1) - assert_poly_almost_equal(tuple(c4) // p2, p1) - assert_poly_almost_equal(p4 // np.array(c2), p1) - assert_poly_almost_equal(np.array(c4) // p2, p1) - assert_poly_almost_equal(2 // p2, Poly([0])) - assert_poly_almost_equal(p2 // 2, 0.5*p2) - assert_raises( - TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises( - TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) - - -def check_truediv(Poly): - # true division is valid only if the denominator is a Number and - # not a python bool. 
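
# The division rule encoded in the next test, sketched directly: true
# division by a plain scalar is allowed, while / between two polynomials
# raises TypeError (// and % are the polynomial-division operators).
import numpy as np
from numpy.polynomial import Polynomial

_p = Polynomial([2.0, 4.0, 6.0])
assert np.allclose((_p / 2).coef, [1.0, 2.0, 3.0])  # scalar denominator: ok
try:
    _p / Polynomial([1.0, 1.0])                     # polynomial denominator
except TypeError:
    pass                                            # rejected, as tested below
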
- p1 = Poly([1,2,3]) - p2 = p1 * 5 - - for stype in np.ScalarType: - if not issubclass(stype, Number) or issubclass(stype, bool): - continue - s = stype(5) - assert_poly_almost_equal(op.truediv(p2, s), p1) - assert_raises(TypeError, op.truediv, s, p2) - for stype in (int, long, float): - s = stype(5) - assert_poly_almost_equal(op.truediv(p2, s), p1) - assert_raises(TypeError, op.truediv, s, p2) - for stype in [complex]: - s = stype(5, 0) - assert_poly_almost_equal(op.truediv(p2, s), p1) - assert_raises(TypeError, op.truediv, s, p2) - for s in [tuple(), list(), dict(), bool(), np.array([1])]: - assert_raises(TypeError, op.truediv, p2, s) - assert_raises(TypeError, op.truediv, s, p2) - for ptype in classes: - assert_raises(TypeError, op.truediv, p2, ptype(1)) - - -def check_mod(Poly): - # This checks commutation, not numerical correctness - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - c3 = list(random((2,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = Poly(c3) - p4 = p1 * p2 + p3 - c4 = list(p4.coef) - assert_poly_almost_equal(p4 % p2, p3) - assert_poly_almost_equal(p4 % c2, p3) - assert_poly_almost_equal(c4 % p2, p3) - assert_poly_almost_equal(p4 % tuple(c2), p3) - assert_poly_almost_equal(tuple(c4) % p2, p3) - assert_poly_almost_equal(p4 % np.array(c2), p3) - assert_poly_almost_equal(np.array(c4) % p2, p3) - assert_poly_almost_equal(2 % p2, Poly([2])) - assert_poly_almost_equal(p2 % 2, Poly([0])) - assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, op.mod, p1, Chebyshev([0])) - else: - assert_raises(TypeError, op.mod, p1, Polynomial([0])) - - -def check_divmod(Poly): - # This checks commutation, not numerical correctness - c1 = list(random((4,)) + .5) - c2 = list(random((3,)) + .5) - c3 = list(random((2,)) + .5) - p1 = Poly(c1) - p2 = Poly(c2) - p3 = Poly(c3) - p4 = p1 * p2 + p3 - c4 = list(p4.coef) - quo, rem = divmod(p4, p2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(p4, c2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(c4, p2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(p4, tuple(c2)) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(tuple(c4), p2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(p4, np.array(c2)) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(np.array(c4), p2) - assert_poly_almost_equal(quo, p1) - assert_poly_almost_equal(rem, p3) - quo, rem = divmod(p2, 2) - assert_poly_almost_equal(quo, 0.5*p2) - assert_poly_almost_equal(rem, Poly([0])) - quo, rem = divmod(2, p2) - assert_poly_almost_equal(quo, Poly([0])) - assert_poly_almost_equal(rem, Poly([2])) - assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) - assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) - if Poly is Polynomial: - assert_raises(TypeError, divmod, p1, Chebyshev([0])) - else: - assert_raises(TypeError, divmod, p1, Polynomial([0])) - - -def check_roots(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - tgt = np.sort(random((5,))) - res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) - assert_almost_equal(res, tgt) - # default domain and window - res = np.sort(Poly.fromroots(tgt).roots()) - 
assert_almost_equal(res, tgt) - - -def check_degree(Poly): - p = Poly.basis(5) - assert_equal(p.degree(), 5) - - -def check_copy(Poly): - p1 = Poly.basis(5) - p2 = p1.copy() - assert_(p1 == p2) - assert_(p1 is not p2) - assert_(p1.coef is not p2.coef) - assert_(p1.domain is not p2.domain) - assert_(p1.window is not p2.window) - - -def check_integ(Poly): - P = Polynomial - # Check defaults - p0 = Poly.cast(P([1*2, 2*3, 3*4])) - p1 = P.cast(p0.integ()) - p2 = P.cast(p0.integ(2)) - assert_poly_almost_equal(p1, P([0, 2, 3, 4])) - assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) - # Check with k - p0 = Poly.cast(P([1*2, 2*3, 3*4])) - p1 = P.cast(p0.integ(k=1)) - p2 = P.cast(p0.integ(2, k=[1, 1])) - assert_poly_almost_equal(p1, P([1, 2, 3, 4])) - assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) - # Check with lbnd - p0 = Poly.cast(P([1*2, 2*3, 3*4])) - p1 = P.cast(p0.integ(lbnd=1)) - p2 = P.cast(p0.integ(2, lbnd=1)) - assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) - assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) - # Check scaling - d = 2*Poly.domain - p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d) - p1 = P.cast(p0.integ()) - p2 = P.cast(p0.integ(2)) - assert_poly_almost_equal(p1, P([0, 2, 3, 4])) - assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) - - -def check_deriv(Poly): - # Check that the derivative is the inverse of integration. It is - # assumes that the integration has been checked elsewhere. - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - p1 = Poly([1, 2, 3], domain=d, window=w) - p2 = p1.integ(2, k=[1, 2]) - p3 = p1.integ(1, k=[1]) - assert_almost_equal(p2.deriv(1).coef, p3.coef) - assert_almost_equal(p2.deriv(2).coef, p1.coef) - # default domain and window - p1 = Poly([1, 2, 3]) - p2 = p1.integ(2, k=[1, 2]) - p3 = p1.integ(1, k=[1]) - assert_almost_equal(p2.deriv(1).coef, p3.coef) - assert_almost_equal(p2.deriv(2).coef, p1.coef) - - -def check_linspace(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - p = Poly([1, 2, 3], domain=d, window=w) - # check default domain - xtgt = np.linspace(d[0], d[1], 20) - ytgt = p(xtgt) - xres, yres = p.linspace(20) - assert_almost_equal(xres, xtgt) - assert_almost_equal(yres, ytgt) - # check specified domain - xtgt = np.linspace(0, 2, 20) - ytgt = p(xtgt) - xres, yres = p.linspace(20, domain=[0, 2]) - assert_almost_equal(xres, xtgt) - assert_almost_equal(yres, ytgt) - - -def check_pow(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 - tgt = Poly([1], domain=d, window=w) - tst = Poly([1, 2, 3], domain=d, window=w) - for i in range(5): - assert_poly_almost_equal(tst**i, tgt) - tgt = tgt * tst - # default domain and window - tgt = Poly([1]) - tst = Poly([1, 2, 3]) - for i in range(5): - assert_poly_almost_equal(tst**i, tgt) - tgt = tgt * tst - # check error for invalid powers - assert_raises(ValueError, op.pow, tgt, 1.5) - assert_raises(ValueError, op.pow, tgt, -1) - - -def check_call(Poly): - P = Polynomial - d = Poly.domain - x = np.linspace(d[0], d[1], 11) - - # Check defaults - p = Poly.cast(P([1, 2, 3])) - tgt = 1 + x*(2 + 3*x) - res = p(x) - assert_almost_equal(res, tgt) - - -def check_cutdeg(Poly): - p = Poly([1, 2, 3]) - assert_raises(ValueError, p.cutdeg, .5) - assert_raises(ValueError, p.cutdeg, -1) - assert_equal(len(p.cutdeg(3)), 3) - assert_equal(len(p.cutdeg(2)), 3) - assert_equal(len(p.cutdeg(1)), 2) - assert_equal(len(p.cutdeg(0)), 1) - - -def check_truncate(Poly): - p = Poly([1, 2, 3]) - assert_raises(ValueError, p.truncate, .5) - 
assert_raises(ValueError, p.truncate, 0) - assert_equal(len(p.truncate(4)), 3) - assert_equal(len(p.truncate(3)), 3) - assert_equal(len(p.truncate(2)), 2) - assert_equal(len(p.truncate(1)), 1) - - -def check_trim(Poly): - c = [1, 1e-6, 1e-12, 0] - p = Poly(c) - assert_equal(p.trim().coef, c[:3]) - assert_equal(p.trim(1e-10).coef, c[:2]) - assert_equal(p.trim(1e-5).coef, c[:1]) - - -def check_mapparms(Poly): - # check with defaults. Should be identity. - d = Poly.domain - w = Poly.window - p = Poly([1], domain=d, window=w) - assert_almost_equal([0, 1], p.mapparms()) - # - w = 2*d + 1 - p = Poly([1], domain=d, window=w) - assert_almost_equal([1, 2], p.mapparms()) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite.py deleted file mode 100644 index e67625a881395..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite.py +++ /dev/null @@ -1,516 +0,0 @@ -"""Tests for hermite module. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.polynomial.hermite as herm -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) - -H0 = np.array([1]) -H1 = np.array([0, 2]) -H2 = np.array([-2, 0, 4]) -H3 = np.array([0, -12, 0, 8]) -H4 = np.array([12, 0, -48, 0, 16]) -H5 = np.array([0, 120, 0, -160, 0, 32]) -H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) -H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) -H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) -H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) - -Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] - - -def trim(x): - return herm.hermtrim(x, tol=1e-6) - - -class TestConstants(TestCase): - - def test_hermdomain(self): - assert_equal(herm.hermdomain, [-1, 1]) - - def test_hermzero(self): - assert_equal(herm.hermzero, [0]) - - def test_hermone(self): - assert_equal(herm.hermone, [1]) - - def test_hermx(self): - assert_equal(herm.hermx, [0, .5]) - - -class TestArithmetic(TestCase): - x = np.linspace(-3, 3, 100) - - def test_hermadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = herm.hermadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermsub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = herm.hermsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermmulx(self): - assert_equal(herm.hermmulx([0]), [0]) - assert_equal(herm.hermmulx([1]), [0, .5]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, .5] - assert_equal(herm.hermmulx(ser), tgt) - - def test_hermmul(self): - # check values of result - for i in range(5): - pol1 = [0]*i + [1] - val1 = herm.hermval(self.x, pol1) - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - pol2 = [0]*j + [1] - val2 = herm.hermval(self.x, pol2) - pol3 = herm.hermmul(pol1, pol2) - val3 = herm.hermval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_hermdiv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = 
[0]*j + [1] - tgt = herm.hermadd(ci, cj) - quo, rem = herm.hermdiv(tgt, ci) - res = herm.hermadd(herm.hermmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(TestCase): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([2.5, 1., .75]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_hermval(self): - #check empty input - assert_equal(herm.hermval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Hlist] - for i in range(10): - msg = "At i=%d" % i - tgt = y[i] - res = herm.hermval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(herm.hermval(x, [1]).shape, dims) - assert_equal(herm.hermval(x, [1, 0]).shape, dims) - assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) - - def test_hermval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = herm.hermval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_hermval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = herm.hermval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_hermgrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = herm.hermgrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_hermgrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = herm.hermgrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herm.hermgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(TestCase): - - def test_hermint(self): - # check exceptions - assert_raises(ValueError, herm.hermint, [0], .5) - assert_raises(ValueError, herm.hermint, [0], -1) - assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = herm.hermint([0], m=i, k=k) - assert_almost_equal(res, [0, .5]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i]) - res = herm.herm2poly(hermint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(herm.hermval(-1, hermint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + 
[2/scl] - hermpol = herm.poly2herm(pol) - hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) - res = herm.herm2poly(hermint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1) - res = herm.hermint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k]) - res = herm.hermint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) - res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herm.hermint(tgt, m=1, k=[k], scl=2) - res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T - res = herm.hermint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermint(c) for c in c2d]) - res = herm.hermint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) - res = herm.hermint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(TestCase): - - def test_hermder(self): - # check exceptions - assert_raises(ValueError, herm.hermder, [0], .5) - assert_raises(ValueError, herm.hermder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = herm.hermder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herm.hermder(herm.hermint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T - res = herm.hermder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herm.hermder(c) for c in c2d]) - res = herm.hermder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(TestCase): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_hermvander(self): - # check for 1d x - x = np.arange(3) - v = herm.hermvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herm.hermval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = herm.hermvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herm.hermval(x, coef)) - - def test_hermvander2d(self): - # also tests hermval2d for non-square coefficient array - x1, x2, x3 = self.x - c = 
np.random.random((2, 3)) - van = herm.hermvander2d(x1, x2, [1, 2]) - tgt = herm.hermval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herm.hermvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_hermvander3d(self): - # also tests hermval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) - tgt = herm.hermval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(TestCase): - - def test_hermfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, herm.hermfit, [1], [1], -1) - assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) - assert_raises(TypeError, herm.hermfit, [], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) - assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) - assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = herm.hermfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(herm.hermval(x, coef3), y) - # - coef4 = herm.hermfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(herm.hermval(x, coef4), y) - # - coef2d = herm.hermfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = herm.hermfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) - - -class TestCompanion(TestCase): - - def test_raises(self): - assert_raises(ValueError, herm.hermcompanion, []) - assert_raises(ValueError, herm.hermcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(herm.hermcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) - - -class TestGauss(TestCase): - - def test_100(self): - x, w = herm.hermgauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
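The orthogonality check that follows normalizes V^T diag(w) V to the identity because raw Hermite values grow quickly. The same nodes and weights also integrate directly against the weight exp(-x**2); a minimal sketch of that rule, assuming only numpy:

    import numpy as np
    import numpy.polynomial.hermite as herm

    # Gauss-Hermite: sum(w*f(x)) ~ integral of f(x)*exp(-x**2) over the
    # real line, exact for polynomial f of degree < 2*deg.
    x, w = herm.hermgauss(50)
    print(w.sum())             # ~sqrt(pi), the integral of 1
    print(np.sum(w * x**2))    # ~sqrt(pi)/2, the integral of x**2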
- v = herm.hermvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = np.sqrt(np.pi) - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(TestCase): - - def test_hermfromroots(self): - res = herm.hermfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = herm.hermfromroots(roots) - res = herm.hermval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(herm.herm2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_hermroots(self): - assert_almost_equal(herm.hermroots([1]), []) - assert_almost_equal(herm.hermroots([1, 1]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = herm.hermroots(herm.hermfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, herm.hermtrim, coef, -1) - - # Test results - assert_equal(herm.hermtrim(coef), coef[:-1]) - assert_equal(herm.hermtrim(coef, 1), coef[:-3]) - assert_equal(herm.hermtrim(coef, 2), [0]) - - def test_hermline(self): - assert_equal(herm.hermline(3, 4), [3, 2]) - - def test_herm2poly(self): - for i in range(10): - assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) - - def test_poly2herm(self): - for i in range(10): - assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-5, 5, 11) - tgt = np.exp(-x**2) - res = herm.hermweight(x) - assert_almost_equal(res, tgt) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite_e.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite_e.py deleted file mode 100644 index f8601a82846a5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_hermite_e.py +++ /dev/null @@ -1,517 +0,0 @@ -"""Tests for hermite_e module. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.polynomial.hermite_e as herme -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) - -He0 = np.array([1]) -He1 = np.array([0, 1]) -He2 = np.array([-1, 0, 1]) -He3 = np.array([0, -3, 0, 1]) -He4 = np.array([3, 0, -6, 0, 1]) -He5 = np.array([0, 15, 0, -10, 0, 1]) -He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) -He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) -He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) -He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) - -Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] - - -def trim(x): - return herme.hermetrim(x, tol=1e-6) - - -class TestConstants(TestCase): - - def test_hermedomain(self): - assert_equal(herme.hermedomain, [-1, 1]) - - def test_hermezero(self): - assert_equal(herme.hermezero, [0]) - - def test_hermeone(self): - assert_equal(herme.hermeone, [1]) - - def test_hermex(self): - assert_equal(herme.hermex, [0, 1]) - - -class TestArithmetic(TestCase): - x = np.linspace(-3, 3, 100) - - def test_hermeadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = herme.hermeadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermesub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = herme.hermesub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_hermemulx(self): - assert_equal(herme.hermemulx([0]), [0]) - assert_equal(herme.hermemulx([1]), [0, 1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, 1] - assert_equal(herme.hermemulx(ser), tgt) - - def test_hermemul(self): - # check values of result - for i in range(5): - pol1 = [0]*i + [1] - val1 = herme.hermeval(self.x, pol1) - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - pol2 = [0]*j + [1] - val2 = herme.hermeval(self.x, pol2) - pol3 = herme.hermemul(pol1, pol2) - val3 = herme.hermeval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_hermediv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = herme.hermeadd(ci, cj) - quo, rem = herme.hermediv(tgt, ci) - res = herme.hermeadd(herme.hermemul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(TestCase): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([4., 2., 3.]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_hermeval(self): - #check empty input - assert_equal(herme.hermeval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Helist] - for i in range(10): - msg = "At i=%d" % i - tgt = y[i] - res = herme.hermeval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(herme.hermeval(x, [1]).shape, dims) - assert_equal(herme.hermeval(x, [1, 0]).shape, dims) - assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) - - 
def test_hermeval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = herme.hermeval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herme.hermeval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_hermeval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = herme.hermeval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herme.hermeval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_hermegrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = herme.hermegrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herme.hermegrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_hermegrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = herme.hermegrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = herme.hermegrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(TestCase): - - def test_hermeint(self): - # check exceptions - assert_raises(ValueError, herme.hermeint, [0], .5) - assert_raises(ValueError, herme.hermeint, [0], -1) - assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = herme.hermeint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - hermepol = herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i]) - res = herme.herme2poly(hermeint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - hermepol = herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) - assert_almost_equal(herme.hermeval(-1, hermeint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - hermepol = herme.poly2herme(pol) - hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) - res = herme.herme2poly(hermeint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herme.hermeint(tgt, m=1) - res = herme.hermeint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herme.hermeint(tgt, m=1, k=[k]) - res = herme.hermeint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) - res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) - 
assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) - res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermeint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T - res = herme.hermeint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herme.hermeint(c) for c in c2d]) - res = herme.hermeint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) - res = herme.hermeint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(TestCase): - - def test_hermeder(self): - # check exceptions - assert_raises(ValueError, herme.hermeder, [0], .5) - assert_raises(ValueError, herme.hermeder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = herme.hermeder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = herme.hermeder( - herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermeder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T - res = herme.hermeder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([herme.hermeder(c) for c in c2d]) - res = herme.hermeder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(TestCase): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_hermevander(self): - # check for 1d x - x = np.arange(3) - v = herme.hermevander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herme.hermeval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = herme.hermevander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], herme.hermeval(x, coef)) - - def test_hermevander2d(self): - # also tests hermeval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = herme.hermevander2d(x1, x2, [1, 2]) - tgt = herme.hermeval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herme.hermevander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_hermevander3d(self): - # also tests hermeval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) - tgt = herme.hermeval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(TestCase): - - def test_hermefit(self): - def f(x): - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, herme.hermefit, [1], [1], -1) - assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) - 
assert_raises(TypeError, herme.hermefit, [], [1], 0) - assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) - assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) - assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = herme.hermefit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(herme.hermeval(x, coef3), y) - # - coef4 = herme.hermefit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(herme.hermeval(x, coef4), y) - # - coef2d = herme.hermefit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = herme.hermefit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) - - -class TestCompanion(TestCase): - - def test_raises(self): - assert_raises(ValueError, herme.hermecompanion, []) - assert_raises(ValueError, herme.hermecompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(herme.hermecompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) - - -class TestGauss(TestCase): - - def test_100(self): - x, w = herme.hermegauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. 
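The linear-root assertion above follows from He1(x) = x: the series 1 + 2*He1(x) vanishes at -1/2, and for a degree-1 series the 1x1 companion matrix is the root itself. A small sketch, assuming only numpy:

    import numpy.polynomial.hermite_e as herme

    # eigenvalues of the companion matrix are the series' roots
    print(herme.hermecompanion([1, 2]))  # [[-0.5]]
    print(herme.hermeroots([1, 2]))      # [-0.5]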
- v = herme.hermevander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = np.sqrt(2*np.pi) - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(TestCase): - - def test_hermefromroots(self): - res = herme.hermefromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = herme.hermefromroots(roots) - res = herme.hermeval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(herme.herme2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_hermeroots(self): - assert_almost_equal(herme.hermeroots([1]), []) - assert_almost_equal(herme.hermeroots([1, 1]), [-1]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = herme.hermeroots(herme.hermefromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_hermetrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, herme.hermetrim, coef, -1) - - # Test results - assert_equal(herme.hermetrim(coef), coef[:-1]) - assert_equal(herme.hermetrim(coef, 1), coef[:-3]) - assert_equal(herme.hermetrim(coef, 2), [0]) - - def test_hermeline(self): - assert_equal(herme.hermeline(3, 4), [3, 4]) - - def test_herme2poly(self): - for i in range(10): - assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) - - def test_poly2herme(self): - for i in range(10): - assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-5, 5, 11) - tgt = np.exp(-.5*x**2) - res = herme.hermeweight(x) - assert_almost_equal(res, tgt) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_laguerre.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_laguerre.py deleted file mode 100644 index 1dc57a9602945..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_laguerre.py +++ /dev/null @@ -1,513 +0,0 @@ -"""Tests for laguerre module. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.polynomial.laguerre as lag -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) - -L0 = np.array([1])/1 -L1 = np.array([1, -1])/1 -L2 = np.array([2, -4, 1])/2 -L3 = np.array([6, -18, 9, -1])/6 -L4 = np.array([24, -96, 72, -16, 1])/24 -L5 = np.array([120, -600, 600, -200, 25, -1])/120 -L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 - -Llist = [L0, L1, L2, L3, L4, L5, L6] - - -def trim(x): - return lag.lagtrim(x, tol=1e-6) - - -class TestConstants(TestCase): - - def test_lagdomain(self): - assert_equal(lag.lagdomain, [0, 1]) - - def test_lagzero(self): - assert_equal(lag.lagzero, [0]) - - def test_lagone(self): - assert_equal(lag.lagone, [1]) - - def test_lagx(self): - assert_equal(lag.lagx, [1, -1]) - - -class TestArithmetic(TestCase): - x = np.linspace(-3, 3, 100) - - def test_lagadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = lag.lagadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_lagsub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = lag.lagsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_lagmulx(self): - assert_equal(lag.lagmulx([0]), [0]) - assert_equal(lag.lagmulx([1]), [1, -1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] - assert_almost_equal(lag.lagmulx(ser), tgt) - - def test_lagmul(self): - # check values of result - for i in range(5): - pol1 = [0]*i + [1] - val1 = lag.lagval(self.x, pol1) - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - pol2 = [0]*j + [1] - val2 = lag.lagval(self.x, pol2) - pol3 = lag.lagmul(pol1, pol2) - val3 = lag.lagval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_lagdiv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = lag.lagadd(ci, cj) - quo, rem = lag.lagdiv(tgt, ci) - res = lag.lagadd(lag.lagmul(quo, ci), rem) - assert_almost_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(TestCase): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([9., -14., 6.]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_lagval(self): - #check empty input - assert_equal(lag.lagval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Llist] - for i in range(7): - msg = "At i=%d" % i - tgt = y[i] - res = lag.lagval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(lag.lagval(x, [1]).shape, dims) - assert_equal(lag.lagval(x, [1, 0]).shape, dims) - assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) - - def test_lagval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = lag.lagval2d(x1, x2, self.c2d) - 
assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = lag.lagval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_lagval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = lag.lagval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = lag.lagval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_laggrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = lag.laggrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = lag.laggrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_laggrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = lag.laggrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = lag.laggrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(TestCase): - - def test_lagint(self): - # check exceptions - assert_raises(ValueError, lag.lagint, [0], .5) - assert_raises(ValueError, lag.lagint, [0], -1) - assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = lag.lagint([0], m=i, k=k) - assert_almost_equal(res, [1, -1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - lagpol = lag.poly2lag(pol) - lagint = lag.lagint(lagpol, m=1, k=[i]) - res = lag.lag2poly(lagint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - lagpol = lag.poly2lag(pol) - lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(lag.lagval(-1, lagint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - lagpol = lag.poly2lag(pol) - lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) - res = lag.lag2poly(lagint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = lag.lagint(tgt, m=1) - res = lag.lagint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = lag.lagint(tgt, m=1, k=[k]) - res = lag.lagint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) - res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = lag.lagint(tgt, m=1, k=[k], scl=2) - res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_lagint_axis(self): - # check that axis keyword 
works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T - res = lag.lagint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([lag.lagint(c) for c in c2d]) - res = lag.lagint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) - res = lag.lagint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(TestCase): - - def test_lagder(self): - # check exceptions - assert_raises(ValueError, lag.lagder, [0], .5) - assert_raises(ValueError, lag.lagder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = lag.lagder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = lag.lagder(lag.lagint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_lagder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T - res = lag.lagder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([lag.lagder(c) for c in c2d]) - res = lag.lagder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(TestCase): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_lagvander(self): - # check for 1d x - x = np.arange(3) - v = lag.lagvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], lag.lagval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = lag.lagvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], lag.lagval(x, coef)) - - def test_lagvander2d(self): - # also tests lagval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = lag.lagvander2d(x1, x2, [1, 2]) - tgt = lag.lagval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = lag.lagvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_lagvander3d(self): - # also tests lagval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) - tgt = lag.lagval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(TestCase): - - def test_lagfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, lag.lagfit, [1], [1], -1) - assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) - assert_raises(TypeError, lag.lagfit, [], [1], 0) - assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) - assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) - assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = lag.lagfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(lag.lagval(x, coef3), y) - # - coef4 = lag.lagfit(x, y, 4) - 
assert_equal(len(coef4), 5) - assert_almost_equal(lag.lagval(x, coef4), y) - # - coef2d = lag.lagfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = lag.lagfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) - - -class TestCompanion(TestCase): - - def test_raises(self): - assert_raises(ValueError, lag.lagcompanion, []) - assert_raises(ValueError, lag.lagcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(lag.lagcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) - - -class TestGauss(TestCase): - - def test_100(self): - x, w = lag.laggauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. - v = lag.lagvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = 1.0 - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(TestCase): - - def test_lagfromroots(self): - res = lag.lagfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = lag.lagfromroots(roots) - res = lag.lagval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(lag.lag2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_lagroots(self): - assert_almost_equal(lag.lagroots([1]), []) - assert_almost_equal(lag.lagroots([0, 1]), [1]) - for i in range(2, 5): - tgt = np.linspace(0, 3, i) - res = lag.lagroots(lag.lagfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_lagtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, lag.lagtrim, coef, -1) - - # Test results - assert_equal(lag.lagtrim(coef), coef[:-1]) - assert_equal(lag.lagtrim(coef, 1), coef[:-3]) - assert_equal(lag.lagtrim(coef, 2), [0]) - - def test_lagline(self): - assert_equal(lag.lagline(3, 4), [7, -4]) - - def test_lag2poly(self): - for i in range(7): - assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) - - def test_poly2lag(self): - for i in range(7): - assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(0, 10, 11) - tgt = np.exp(-x) - res = lag.lagweight(x) - assert_almost_equal(res, tgt) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_legendre.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_legendre.py deleted file mode 100644 index 8ac1feb589d40..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_legendre.py +++ /dev/null @@ -1,517 +0,0 @@ -"""Tests for legendre module. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.polynomial.legendre as leg -from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) - -L0 = np.array([1]) -L1 = np.array([0, 1]) -L2 = np.array([-1, 0, 3])/2 -L3 = np.array([0, -3, 0, 5])/2 -L4 = np.array([3, 0, -30, 0, 35])/8 -L5 = np.array([0, 15, 0, -70, 0, 63])/8 -L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 -L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 -L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 -L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 - -Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] - - -def trim(x): - return leg.legtrim(x, tol=1e-6) - - -class TestConstants(TestCase): - - def test_legdomain(self): - assert_equal(leg.legdomain, [-1, 1]) - - def test_legzero(self): - assert_equal(leg.legzero, [0]) - - def test_legone(self): - assert_equal(leg.legone, [1]) - - def test_legx(self): - assert_equal(leg.legx, [0, 1]) - - -class TestArithmetic(TestCase): - x = np.linspace(-1, 1, 100) - - def test_legadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = leg.legadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_legsub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = leg.legsub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_legmulx(self): - assert_equal(leg.legmulx([0]), [0]) - assert_equal(leg.legmulx([1]), [0, 1]) - for i in range(1, 5): - tmp = 2*i + 1 - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] - assert_equal(leg.legmulx(ser), tgt) - - def test_legmul(self): - # check values of result - for i in range(5): - pol1 = [0]*i + [1] - val1 = leg.legval(self.x, pol1) - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - pol2 = [0]*j + [1] - val2 = leg.legval(self.x, pol2) - pol3 = leg.legmul(pol1, pol2) - val3 = leg.legval(self.x, pol3) - assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) - - def test_legdiv(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1] - cj = [0]*j + [1] - tgt = leg.legadd(ci, cj) - quo, rem = leg.legdiv(tgt, ci) - res = leg.legadd(leg.legmul(quo, ci), rem) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - -class TestEvaluation(TestCase): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([2., 2., 2.]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = polyval(x, [1., 2., 3.]) - - def test_legval(self): - #check empty input - assert_equal(leg.legval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [polyval(x, c) for c in Llist] - for i in range(10): - msg = "At i=%d" % i - tgt = y[i] - res = leg.legval(x, [0]*i + [1]) - assert_almost_equal(res, tgt, err_msg=msg) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(leg.legval(x, [1]).shape, dims) - assert_equal(leg.legval(x, [1, 0]).shape, dims) - assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) - - def test_legval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = 
self.y - - #test exceptions - assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = leg.legval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = leg.legval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_legval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = leg.legval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = leg.legval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_leggrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = leg.leggrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = leg.leggrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_leggrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = leg.leggrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = leg.leggrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(TestCase): - - def test_legint(self): - # check exceptions - assert_raises(ValueError, leg.legint, [0], .5) - assert_raises(ValueError, leg.legint, [0], -1) - assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = leg.legint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i]) - res = leg.leg2poly(legint) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) - assert_almost_equal(leg.legval(-1, legint), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] - legpol = leg.poly2leg(pol) - legint = leg.legint(legpol, m=1, k=[i], scl=2) - res = leg.leg2poly(legint) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = leg.legint(tgt, m=1) - res = leg.legint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = leg.legint(tgt, m=1, k=[k]) - res = leg.legint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) - res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = leg.legint(tgt, m=1, k=[k], 
scl=2) - res = leg.legint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_legint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([leg.legint(c) for c in c2d.T]).T - res = leg.legint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([leg.legint(c) for c in c2d]) - res = leg.legint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) - res = leg.legint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(TestCase): - - def test_legder(self): - # check exceptions - assert_raises(ValueError, leg.legder, [0], .5) - assert_raises(ValueError, leg.legder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = leg.legder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = leg.legder(leg.legint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_legder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([leg.legder(c) for c in c2d.T]).T - res = leg.legder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([leg.legder(c) for c in c2d]) - res = leg.legder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(TestCase): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_legvander(self): - # check for 1d x - x = np.arange(3) - v = leg.legvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], leg.legval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = leg.legvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], leg.legval(x, coef)) - - def test_legvander2d(self): - # also tests polyval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = leg.legvander2d(x1, x2, [1, 2]) - tgt = leg.legval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = leg.legvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_legvander3d(self): - # also tests polyval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) - tgt = leg.legval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestFitting(TestCase): - - def test_legfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, leg.legfit, [1], [1], -1) - assert_raises(TypeError, leg.legfit, [[1]], [1], 0) - assert_raises(TypeError, leg.legfit, [], [1], 0) - assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) - assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) - assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) - - # Test fit - x = 
np.linspace(0, 2) - y = f(x) - # - coef3 = leg.legfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(leg.legval(x, coef3), y) - # - coef4 = leg.legfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(leg.legval(x, coef4), y) - # - coef2d = leg.legfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - y[0::2] = 0 - wcoef3 = leg.legfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. - x = [1, 1j, -1, -1j] - assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) - - -class TestCompanion(TestCase): - - def test_raises(self): - assert_raises(ValueError, leg.legcompanion, []) - assert_raises(ValueError, leg.legcompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(leg.legcompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(leg.legcompanion([1, 2])[0, 0] == -.5) - - -class TestGauss(TestCase): - - def test_100(self): - x, w = leg.leggauss(100) - - # test orthogonality. Note that the results need to be normalized, - # otherwise the huge values that can arise from fast growing - # functions like Laguerre can be very confusing. - v = leg.legvander(x, 99) - vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:, None] * vv * vd - assert_almost_equal(vv, np.eye(100)) - - # check that the integral of 1 is correct - tgt = 2.0 - assert_almost_equal(w.sum(), tgt) - - -class TestMisc(TestCase): - - def test_legfromroots(self): - res = leg.legfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - pol = leg.legfromroots(roots) - res = leg.legval(roots, pol) - tgt = 0 - assert_(len(pol) == i + 1) - assert_almost_equal(leg.leg2poly(pol)[-1], 1) - assert_almost_equal(res, tgt) - - def test_legroots(self): - assert_almost_equal(leg.legroots([1]), []) - assert_almost_equal(leg.legroots([1, 2]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = leg.legroots(leg.legfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_legtrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, leg.legtrim, coef, -1) - - # Test results - assert_equal(leg.legtrim(coef), coef[:-1]) - assert_equal(leg.legtrim(coef, 1), coef[:-3]) - assert_equal(leg.legtrim(coef, 2), [0]) - - def test_legline(self): - assert_equal(leg.legline(3, 4), [3, 4]) - - def test_leg2poly(self): - for i in range(10): - assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) - - def test_poly2leg(self): - for i in range(10): - assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) - - def test_weight(self): - x = np.linspace(-1, 1, 11) - tgt = 1. - res = leg.legweight(x) - assert_almost_equal(res, tgt) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polynomial.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polynomial.py deleted file mode 100644 index c806a8497492f..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polynomial.py +++ /dev/null @@ -1,477 +0,0 @@ -"""Tests for polynomial module. 
- -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.polynomial.polynomial as poly -from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) - - -def trim(x): - return poly.polytrim(x, tol=1e-6) - -T0 = [1] -T1 = [0, 1] -T2 = [-1, 0, 2] -T3 = [0, -3, 0, 4] -T4 = [1, 0, -8, 0, 8] -T5 = [0, 5, 0, -20, 0, 16] -T6 = [-1, 0, 18, 0, -48, 0, 32] -T7 = [0, -7, 0, 56, 0, -112, 0, 64] -T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] -T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] - -Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] - - -class TestConstants(TestCase): - - def test_polydomain(self): - assert_equal(poly.polydomain, [-1, 1]) - - def test_polyzero(self): - assert_equal(poly.polyzero, [0]) - - def test_polyone(self): - assert_equal(poly.polyone, [1]) - - def test_polyx(self): - assert_equal(poly.polyx, [0, 1]) - - -class TestArithmetic(TestCase): - - def test_polyadd(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] += 1 - res = poly.polyadd([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_polysub(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(max(i, j) + 1) - tgt[i] += 1 - tgt[j] -= 1 - res = poly.polysub([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_polymulx(self): - assert_equal(poly.polymulx([0]), [0]) - assert_equal(poly.polymulx([1]), [0, 1]) - for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i + 1) + [1] - assert_equal(poly.polymulx(ser), tgt) - - def test_polymul(self): - for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - tgt = np.zeros(i + j + 1) - tgt[i + j] += 1 - res = poly.polymul([0]*i + [1], [0]*j + [1]) - assert_equal(trim(res), trim(tgt), err_msg=msg) - - def test_polydiv(self): - # check zero division - assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) - - # check scalar division - quo, rem = poly.polydiv([2], [2]) - assert_equal((quo, rem), (1, 0)) - quo, rem = poly.polydiv([2, 2], [2]) - assert_equal((quo, rem), ((1, 1), 0)) - - # check rest. 
- for i in range(5): - for j in range(5): - msg = "At i=%d, j=%d" % (i, j) - ci = [0]*i + [1, 2] - cj = [0]*j + [1, 2] - tgt = poly.polyadd(ci, cj) - quo, rem = poly.polydiv(tgt, ci) - res = poly.polyadd(poly.polymul(quo, ci), rem) - assert_equal(res, tgt, err_msg=msg) - - -class TestEvaluation(TestCase): - # coefficients of 1 + 2*x + 3*x**2 - c1d = np.array([1., 2., 3.]) - c2d = np.einsum('i,j->ij', c1d, c1d) - c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) - - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - y = poly.polyval(x, [1., 2., 3.]) - - def test_polyval(self): - #check empty input - assert_equal(poly.polyval([], [1]).size, 0) - - #check normal input) - x = np.linspace(-1, 1) - y = [x**i for i in range(5)] - for i in range(5): - tgt = y[i] - res = poly.polyval(x, [0]*i + [1]) - assert_almost_equal(res, tgt) - tgt = x*(x**2 - 1) - res = poly.polyval(x, [0, -1, 0, 1]) - assert_almost_equal(res, tgt) - - #check that shape is preserved - for i in range(3): - dims = [2]*i - x = np.zeros(dims) - assert_equal(poly.polyval(x, [1]).shape, dims) - assert_equal(poly.polyval(x, [1, 0]).shape, dims) - assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) - - def test_polyval2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d) - - #test values - tgt = y1*y2 - res = poly.polyval2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = poly.polyval2d(z, z, self.c2d) - assert_(res.shape == (2, 3)) - - def test_polyval3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test exceptions - assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d) - - #test values - tgt = y1*y2*y3 - res = poly.polyval3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = poly.polyval3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)) - - def test_polygrid2d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j->ij', y1, y2) - res = poly.polygrid2d(x1, x2, self.c2d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = poly.polygrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) - - def test_polygrid3d(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - - #test values - tgt = np.einsum('i,j,k->ijk', y1, y2, y3) - res = poly.polygrid3d(x1, x2, x3, self.c3d) - assert_almost_equal(res, tgt) - - #test shape - z = np.ones((2, 3)) - res = poly.polygrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) - - -class TestIntegral(TestCase): - - def test_polyint(self): - # check exceptions - assert_raises(ValueError, poly.polyint, [0], .5) - assert_raises(ValueError, poly.polyint, [0], -1) - assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) - - # test integration of zero polynomial - for i in range(2, 5): - k = [0]*(i - 2) + [1] - res = poly.polyint([0], m=i, k=k) - assert_almost_equal(res, [0, 1]) - - # check single integration with integration constant - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] - res = poly.polyint(pol, m=1, k=[i]) - assert_almost_equal(trim(res), trim(tgt)) - - # check single integration with integration constant and lbnd - for i in range(5): - scl = i + 1 - pol = [0]*i + [1] - res = poly.polyint(pol, m=1, k=[i], lbnd=-1) - assert_almost_equal(poly.polyval(-1, res), i) - - # check single integration with integration constant and scaling - for i in range(5): - scl = i + 1 - pol = [0]*i 
+ [1] - tgt = [i] + [0]*i + [2/scl] - res = poly.polyint(pol, m=1, k=[i], scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with default k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = poly.polyint(tgt, m=1) - res = poly.polyint(pol, m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with defined k - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = poly.polyint(tgt, m=1, k=[k]) - res = poly.polyint(pol, m=j, k=list(range(j))) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with lbnd - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) - res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) - assert_almost_equal(trim(res), trim(tgt)) - - # check multiple integrations with scaling - for i in range(5): - for j in range(2, 5): - pol = [0]*i + [1] - tgt = pol[:] - for k in range(j): - tgt = poly.polyint(tgt, m=1, k=[k], scl=2) - res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyint_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T - res = poly.polyint(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([poly.polyint(c) for c in c2d]) - res = poly.polyint(c2d, axis=1) - assert_almost_equal(res, tgt) - - tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) - res = poly.polyint(c2d, k=3, axis=1) - assert_almost_equal(res, tgt) - - -class TestDerivative(TestCase): - - def test_polyder(self): - # check exceptions - assert_raises(ValueError, poly.polyder, [0], .5) - assert_raises(ValueError, poly.polyder, [0], -1) - - # check that zeroth deriviative does nothing - for i in range(5): - tgt = [0]*i + [1] - res = poly.polyder(tgt, m=0) - assert_equal(trim(res), trim(tgt)) - - # check that derivation is the inverse of integration - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = poly.polyder(poly.polyint(tgt, m=j), m=j) - assert_almost_equal(trim(res), trim(tgt)) - - # check derivation with scaling - for i in range(5): - for j in range(2, 5): - tgt = [0]*i + [1] - res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyder_axis(self): - # check that axis keyword works - c2d = np.random.random((3, 4)) - - tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T - res = poly.polyder(c2d, axis=0) - assert_almost_equal(res, tgt) - - tgt = np.vstack([poly.polyder(c) for c in c2d]) - res = poly.polyder(c2d, axis=1) - assert_almost_equal(res, tgt) - - -class TestVander(TestCase): - # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 - - def test_polyvander(self): - # check for 1d x - x = np.arange(3) - v = poly.polyvander(x, 3) - assert_(v.shape == (3, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], poly.polyval(x, coef)) - - # check for 2d x - x = np.array([[1, 2], [3, 4], [5, 6]]) - v = poly.polyvander(x, 3) - assert_(v.shape == (3, 2, 4)) - for i in range(4): - coef = [0]*i + [1] - assert_almost_equal(v[..., i], poly.polyval(x, coef)) - - def test_polyvander2d(self): - # also tests polyval2d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3)) - van = poly.polyvander2d(x1, 
x2, [1, 2]) - tgt = poly.polyval2d(x1, x2, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = poly.polyvander2d([x1], [x2], [1, 2]) - assert_(van.shape == (1, 5, 6)) - - def test_polyvander3d(self): - # also tests polyval3d for non-square coefficient array - x1, x2, x3 = self.x - c = np.random.random((2, 3, 4)) - van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) - tgt = poly.polyval3d(x1, x2, x3, c) - res = np.dot(van, c.flat) - assert_almost_equal(res, tgt) - - # check shape - van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) - assert_(van.shape == (1, 5, 24)) - - -class TestCompanion(TestCase): - - def test_raises(self): - assert_raises(ValueError, poly.polycompanion, []) - assert_raises(ValueError, poly.polycompanion, [1]) - - def test_dimensions(self): - for i in range(1, 5): - coef = [0]*i + [1] - assert_(poly.polycompanion(coef).shape == (i, i)) - - def test_linear_root(self): - assert_(poly.polycompanion([1, 2])[0, 0] == -.5) - - -class TestMisc(TestCase): - - def test_polyfromroots(self): - res = poly.polyfromroots([]) - assert_almost_equal(trim(res), [1]) - for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = Tlist[i] - res = poly.polyfromroots(roots)*2**(i-1) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyroots(self): - assert_almost_equal(poly.polyroots([1]), []) - assert_almost_equal(poly.polyroots([1, 2]), [-.5]) - for i in range(2, 5): - tgt = np.linspace(-1, 1, i) - res = poly.polyroots(poly.polyfromroots(tgt)) - assert_almost_equal(trim(res), trim(tgt)) - - def test_polyfit(self): - def f(x): - return x*(x - 1)*(x - 2) - - # Test exceptions - assert_raises(ValueError, poly.polyfit, [1], [1], -1) - assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) - assert_raises(TypeError, poly.polyfit, [], [1], 0) - assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) - assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) - assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) - - # Test fit - x = np.linspace(0, 2) - y = f(x) - # - coef3 = poly.polyfit(x, y, 3) - assert_equal(len(coef3), 4) - assert_almost_equal(poly.polyval(x, coef3), y) - # - coef4 = poly.polyfit(x, y, 4) - assert_equal(len(coef4), 5) - assert_almost_equal(poly.polyval(x, coef4), y) - # - coef2d = poly.polyfit(x, np.array([y, y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3, coef3]).T) - # test weighting - w = np.zeros_like(x) - yw = y.copy() - w[1::2] = 1 - yw[0::2] = 0 - wcoef3 = poly.polyfit(x, yw, 3, w=w) - assert_almost_equal(wcoef3, coef3) - # - wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) - # test scaling with complex values x points whose square - # is zero when summed. 
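
The integral and derivative tests above all reduce to one identity: polyder inverts polyint, scale factors included. A minimal self-contained sketch of that round trip, using the standard numpy.polynomial API (the coefficient values are illustrative only)::

    import numpy as np
    from numpy.polynomial import polynomial as P

    c = np.array([1.0, 2.0, 3.0])      # coefficients of 1 + 2x + 3x**2
    ci = P.polyint(c, m=2)             # integrate twice; constants k default to 0
    assert np.allclose(P.polyder(ci, m=2), c)   # differentiation undoes integration

    # The inverse pair also holds with scaling, mirroring the tests above:
    ci = P.polyint(c, m=1, scl=2)
    assert np.allclose(P.polyder(ci, m=1, scl=0.5), c)
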
- x = [1, 1j, -1, -1j] - assert_almost_equal(poly.polyfit(x, x, 1), [0, 1]) - - def test_polytrim(self): - coef = [2, -1, 1, 0] - - # Test exceptions - assert_raises(ValueError, poly.polytrim, coef, -1) - - # Test results - assert_equal(poly.polytrim(coef), coef[:-1]) - assert_equal(poly.polytrim(coef, 1), coef[:-3]) - assert_equal(poly.polytrim(coef, 2), [0]) - - def test_polyline(self): - assert_equal(poly.polyline(3, 4), [3, 4]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polyutils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polyutils.py deleted file mode 100644 index 974e2e09a3886..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_polyutils.py +++ /dev/null @@ -1,109 +0,0 @@ -"""Tests for polyutils module. - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.polynomial.polyutils as pu -from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) - - -class TestMisc(TestCase): - - def test_trimseq(self): - for i in range(5): - tgt = [1] - res = pu.trimseq([1] + [0]*5) - assert_equal(res, tgt) - - def test_as_series(self): - # check exceptions - assert_raises(ValueError, pu.as_series, [[]]) - assert_raises(ValueError, pu.as_series, [[[1, 2]]]) - assert_raises(ValueError, pu.as_series, [[1], ['a']]) - # check common types - types = ['i', 'd', 'O'] - for i in range(len(types)): - for j in range(i): - ci = np.ones(1, types[i]) - cj = np.ones(1, types[j]) - [resi, resj] = pu.as_series([ci, cj]) - assert_(resi.dtype.char == resj.dtype.char) - assert_(resj.dtype.char == types[i]) - - def test_trimcoef(self): - coef = [2, -1, 1, 0] - # Test exceptions - assert_raises(ValueError, pu.trimcoef, coef, -1) - # Test results - assert_equal(pu.trimcoef(coef), coef[:-1]) - assert_equal(pu.trimcoef(coef, 1), coef[:-3]) - assert_equal(pu.trimcoef(coef, 2), [0]) - - -class TestDomain(TestCase): - - def test_getdomain(self): - # test for real values - x = [1, 10, 3, -1] - tgt = [-1, 10] - res = pu.getdomain(x) - assert_almost_equal(res, tgt) - - # test for complex values - x = [1 + 1j, 1 - 1j, 0, 2] - tgt = [-1j, 2 + 1j] - res = pu.getdomain(x) - assert_almost_equal(res, tgt) - - def test_mapdomain(self): - # test for real values - dom1 = [0, 4] - dom2 = [1, 3] - tgt = dom2 - res = pu. mapdomain(dom1, dom1, dom2) - assert_almost_equal(res, tgt) - - # test for complex values - dom1 = [0 - 1j, 2 + 1j] - dom2 = [-2, 2] - tgt = dom2 - x = dom1 - res = pu.mapdomain(x, dom1, dom2) - assert_almost_equal(res, tgt) - - # test for multidimensional arrays - dom1 = [0, 4] - dom2 = [1, 3] - tgt = np.array([dom2, dom2]) - x = np.array([dom1, dom1]) - res = pu.mapdomain(x, dom1, dom2) - assert_almost_equal(res, tgt) - - # test that subtypes are preserved. - dom1 = [0, 4] - dom2 = [1, 3] - x = np.matrix([dom1, dom1]) - res = pu.mapdomain(x, dom1, dom2) - assert_(isinstance(res, np.matrix)) - - def test_mapparms(self): - # test for real values - dom1 = [0, 4] - dom2 = [1, 3] - tgt = [1, .5] - res = pu. 
mapparms(dom1, dom2) - assert_almost_equal(res, tgt) - - # test for complex values - dom1 = [0 - 1j, 2 + 1j] - dom2 = [-2, 2] - tgt = [-1 + 1j, 1 - 1j] - res = pu.mapparms(dom1, dom2) - assert_almost_equal(res, tgt) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_printing.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_printing.py deleted file mode 100644 index 86cd257328bb4..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/polynomial/tests/test_printing.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy.polynomial as poly -from numpy.testing import TestCase, run_module_suite, assert_ - - -class test_str(TestCase): - def test_polynomial_str(self): - res = str(poly.Polynomial([0, 1])) - tgt = 'poly([0., 1.])' - assert_(res, tgt) - - def test_chebyshev_str(self): - res = str(poly.Chebyshev([0, 1])) - tgt = 'leg([0., 1.])' - assert_(res, tgt) - - def test_legendre_str(self): - res = str(poly.Legendre([0, 1])) - tgt = 'leg([0., 1.])' - assert_(res, tgt) - - def test_hermite_str(self): - res = str(poly.Hermite([0, 1])) - tgt = 'herm([0., 1.])' - assert_(res, tgt) - - def test_hermiteE_str(self): - res = str(poly.HermiteE([0, 1])) - tgt = 'herme([0., 1.])' - assert_(res, tgt) - - def test_laguerre_str(self): - res = str(poly.Laguerre([0, 1])) - tgt = 'lag([0., 1.])' - assert_(res, tgt) - - -class test_repr(TestCase): - def test_polynomial_str(self): - res = repr(poly.Polynomial([0, 1])) - tgt = 'Polynomial([0., 1.])' - assert_(res, tgt) - - def test_chebyshev_str(self): - res = repr(poly.Chebyshev([0, 1])) - tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) - - def test_legendre_repr(self): - res = repr(poly.Legendre([0, 1])) - tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) - - def test_hermite_repr(self): - res = repr(poly.Hermite([0, 1])) - tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) - - def test_hermiteE_repr(self): - res = repr(poly.HermiteE([0, 1])) - tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) - - def test_laguerre_repr(self): - res = repr(poly.Laguerre([0, 1])) - tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])' - assert_(res, tgt) - - -# - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/__init__.py deleted file mode 100644 index 388267c97532c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/__init__.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -======================== -Random Number Generation -======================== - -==================== ========================================================= -Utility functions -============================================================================== -random Uniformly distributed values of a given shape. -bytes Uniformly distributed random bytes. -random_integers Uniformly distributed integers in a given range. -random_sample Uniformly distributed floats in a given range. -random Alias for random_sample -ranf Alias for random_sample -sample Alias for random_sample -choice Generate a weighted random sample from a given array-like -permutation Randomly permute a sequence / generate a random sequence. -shuffle Randomly permute a sequence in place. -seed Seed the random number generator. 
-==================== ========================================================= - -==================== ========================================================= -Compatibility functions -============================================================================== -rand Uniformly distributed values. -randn Normally distributed values. -ranf Uniformly distributed floating point numbers. -randint Uniformly distributed integers in a given range. -==================== ========================================================= - -==================== ========================================================= -Univariate distributions -============================================================================== -beta Beta distribution over ``[0, 1]``. -binomial Binomial distribution. -chisquare :math:`\\chi^2` distribution. -exponential Exponential distribution. -f F (Fisher-Snedecor) distribution. -gamma Gamma distribution. -geometric Geometric distribution. -gumbel Gumbel distribution. -hypergeometric Hypergeometric distribution. -laplace Laplace distribution. -logistic Logistic distribution. -lognormal Log-normal distribution. -logseries Logarithmic series distribution. -negative_binomial Negative binomial distribution. -noncentral_chisquare Non-central chi-square distribution. -noncentral_f Non-central F distribution. -normal Normal / Gaussian distribution. -pareto Pareto distribution. -poisson Poisson distribution. -power Power distribution. -rayleigh Rayleigh distribution. -triangular Triangular distribution. -uniform Uniform distribution. -vonmises Von Mises circular distribution. -wald Wald (inverse Gaussian) distribution. -weibull Weibull distribution. -zipf Zipf's distribution over ranked data. -==================== ========================================================= - -==================== ========================================================= -Multivariate distributions -============================================================================== -dirichlet Multivariate generalization of Beta distribution. -multinomial Multivariate generalization of the binomial distribution. -multivariate_normal Multivariate generalization of the normal distribution. -==================== ========================================================= - -==================== ========================================================= -Standard distributions -============================================================================== -standard_cauchy Standard Cauchy-Lorentz distribution. -standard_exponential Standard exponential distribution. -standard_gamma Standard Gamma distribution. -standard_normal Standard normal distribution. -standard_t Standard Student's t-distribution. -==================== ========================================================= - -==================== ========================================================= -Internal functions -============================================================================== -get_state Get tuple representing internal state of generator. -set_state Set state of generator. 
-==================== ========================================================= - -""" -from __future__ import division, absolute_import, print_function - -import warnings - -# To get sub-modules -from .info import __doc__, __all__ - - -with warnings.catch_warnings(): - warnings.filterwarnings("ignore", message="numpy.ndarray size changed") - from .mtrand import * - -# Some aliases: -ranf = random = sample = random_sample -__all__.extend(['ranf', 'random', 'sample']) - -def __RandomState_ctor(): - """Return a RandomState instance. - - This function exists solely to assist (un)pickling. - - Note that the state of the RandomState returned here is irrelevant, as this function's - entire purpose is to return a newly allocated RandomState whose state pickle can set. - Consequently the RandomState returned by this function is a freshly allocated copy - with a seed=0. - - See https://github.com/numpy/numpy/issues/4763 for a detailed discussion - - """ - return RandomState(seed=0) - -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/info.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/info.py deleted file mode 100644 index 396e623815a83..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/info.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -======================== -Random Number Generation -======================== - -==================== ========================================================= -Utility functions -============================================================================== -random_sample Uniformly distributed floats over ``[0, 1)``. -random Alias for `random_sample`. -bytes Uniformly distributed random bytes. -random_integers Uniformly distributed integers in a given range. -permutation Randomly permute a sequence / generate a random sequence. -shuffle Randomly permute a sequence in place. -seed Seed the random number generator. -==================== ========================================================= - -==================== ========================================================= -Compatibility functions -============================================================================== -rand Uniformly distributed values. -randn Normally distributed values. -ranf Uniformly distributed floating point numbers. -randint Uniformly distributed integers in a given range. -==================== ========================================================= - -==================== ========================================================= -Univariate distributions -============================================================================== -beta Beta distribution over ``[0, 1]``. -binomial Binomial distribution. -chisquare :math:`\\chi^2` distribution. -exponential Exponential distribution. -f F (Fisher-Snedecor) distribution. -gamma Gamma distribution. -geometric Geometric distribution. -gumbel Gumbel distribution. -hypergeometric Hypergeometric distribution. -laplace Laplace distribution. -logistic Logistic distribution. -lognormal Log-normal distribution. -logseries Logarithmic series distribution. -negative_binomial Negative binomial distribution. -noncentral_chisquare Non-central chi-square distribution. -noncentral_f Non-central F distribution. -normal Normal / Gaussian distribution. -pareto Pareto distribution. -poisson Poisson distribution. -power Power distribution. -rayleigh Rayleigh distribution. -triangular Triangular distribution. -uniform Uniform distribution. 
-vonmises Von Mises circular distribution. -wald Wald (inverse Gaussian) distribution. -weibull Weibull distribution. -zipf Zipf's distribution over ranked data. -==================== ========================================================= - -==================== ========================================================= -Multivariate distributions -============================================================================== -dirichlet Multivariate generalization of Beta distribution. -multinomial Multivariate generalization of the binomial distribution. -multivariate_normal Multivariate generalization of the normal distribution. -==================== ========================================================= - -==================== ========================================================= -Standard distributions -============================================================================== -standard_cauchy Standard Cauchy-Lorentz distribution. -standard_exponential Standard exponential distribution. -standard_gamma Standard Gamma distribution. -standard_normal Standard normal distribution. -standard_t Standard Student's t-distribution. -==================== ========================================================= - -==================== ========================================================= -Internal functions -============================================================================== -get_state Get tuple representing internal state of generator. -set_state Set state of generator. -==================== ========================================================= - -""" -from __future__ import division, absolute_import, print_function - -depends = ['core'] - -__all__ = [ - 'beta', - 'binomial', - 'bytes', - 'chisquare', - 'exponential', - 'f', - 'gamma', - 'geometric', - 'get_state', - 'gumbel', - 'hypergeometric', - 'laplace', - 'logistic', - 'lognormal', - 'logseries', - 'multinomial', - 'multivariate_normal', - 'negative_binomial', - 'noncentral_chisquare', - 'noncentral_f', - 'normal', - 'pareto', - 'permutation', - 'poisson', - 'power', - 'rand', - 'randint', - 'randn', - 'random_integers', - 'random_sample', - 'rayleigh', - 'seed', - 'set_state', - 'shuffle', - 'standard_cauchy', - 'standard_exponential', - 'standard_gamma', - 'standard_normal', - 'standard_t', - 'triangular', - 'uniform', - 'vonmises', - 'wald', - 'weibull', - 'zipf' -] diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/mtrand.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/mtrand.py deleted file mode 100644 index 28939761af345..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/mtrand.py +++ /dev/null @@ -1,7 +0,0 @@ -def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, 'mtrand.cpython-34m.so') - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) -__bootstrap__() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/randomkit.h b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/randomkit.h deleted file mode 100644 index e049488eeb14a..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/randomkit.h +++ /dev/null @@ -1,189 +0,0 @@ -/* Random kit 1.3 */ - -/* - * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without 
restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* @(#) $Jeannot: randomkit.h,v 1.24 2005/07/21 22:14:09 js Exp $ */ - -/* - * Typical use: - * - * { - * rk_state state; - * unsigned long seed = 1, random_value; - * - * rk_seed(seed, &state); // Initialize the RNG - * ... - * random_value = rk_random(&state); // Generate random values in [0..RK_MAX] - * } - * - * Instead of rk_seed, you can use rk_randomseed which will get a random seed - * from /dev/urandom (or the clock, if /dev/urandom is unavailable): - * - * { - * rk_state state; - * unsigned long random_value; - * - * rk_randomseed(&state); // Initialize the RNG with a random seed - * ... - * random_value = rk_random(&state); // Generate random values in [0..RK_MAX] - * } - */ - -/* - * Useful macro: - * RK_DEV_RANDOM: the device used for random seeding. - * defaults to "/dev/urandom" - */ - -#include - -#ifndef _RANDOMKIT_ -#define _RANDOMKIT_ - -#define RK_STATE_LEN 624 - -typedef struct rk_state_ -{ - unsigned long key[RK_STATE_LEN]; - int pos; - int has_gauss; /* !=0: gauss contains a gaussian deviate */ - double gauss; - - /* The rk_state structure has been extended to store the following - * information for the binomial generator. If the input values of n or p - * are different than nsave and psave, then the other parameters will be - * recomputed. RTK 2005-09-02 */ - - int has_binomial; /* !=0: following parameters initialized for - binomial */ - double psave; - long nsave; - double r; - double q; - double fm; - long m; - double p1; - double xm; - double xl; - double xr; - double c; - double laml; - double lamr; - double p2; - double p3; - double p4; - -} -rk_state; - -typedef enum { - RK_NOERR = 0, /* no error */ - RK_ENODEV = 1, /* no RK_DEV_RANDOM device */ - RK_ERR_MAX = 2 -} rk_error; - -/* error strings */ -extern char *rk_strerror[RK_ERR_MAX]; - -/* Maximum generated random value */ -#define RK_MAX 0xFFFFFFFFUL - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Initialize the RNG state using the given seed. - */ -extern void rk_seed(unsigned long seed, rk_state *state); - -/* - * Initialize the RNG state using a random seed. - * Uses /dev/random or, when unavailable, the clock (see randomkit.c). - * Returns RK_NOERR when no errors occurs. - * Returns RK_ENODEV when the use of RK_DEV_RANDOM failed (for example because - * there is no such device). In this case, the RNG was initialized using the - * clock. 
- */ -extern rk_error rk_randomseed(rk_state *state); - -/* - * Returns a random unsigned long between 0 and RK_MAX inclusive - */ -extern unsigned long rk_random(rk_state *state); - -/* - * Returns a random long between 0 and LONG_MAX inclusive - */ -extern long rk_long(rk_state *state); - -/* - * Returns a random unsigned long between 0 and ULONG_MAX inclusive - */ -extern unsigned long rk_ulong(rk_state *state); - -/* - * Returns a random unsigned long between 0 and max inclusive. - */ -extern unsigned long rk_interval(unsigned long max, rk_state *state); - -/* - * Returns a random double between 0.0 and 1.0, 1.0 excluded. - */ -extern double rk_double(rk_state *state); - -/* - * fill the buffer with size random bytes - */ -extern void rk_fill(void *buffer, size_t size, rk_state *state); - -/* - * fill the buffer with randombytes from the random device - * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is - * On Unix, if strong is defined, RK_DEV_RANDOM is used. If not, RK_DEV_URANDOM - * is used instead. This parameter has no effect on Windows. - * Warning: on most unixes RK_DEV_RANDOM will wait for enough entropy to answer - * which can take a very long time on quiet systems. - */ -extern rk_error rk_devfill(void *buffer, size_t size, int strong); - -/* - * fill the buffer using rk_devfill if the random device is available and using - * rk_fill if is is not - * parameters have the same meaning as rk_fill and rk_devfill - * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is - */ -extern rk_error rk_altfill(void *buffer, size_t size, int strong, - rk_state *state); - -/* - * return a random gaussian deviate with variance unity and zero mean. - */ -extern double rk_gauss(rk_state *state); - -#ifdef __cplusplus -} -#endif - -#endif /* _RANDOMKIT_ */ diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/setup.py deleted file mode 100644 index 33c12975b662b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/setup.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import division, print_function - -from os.path import join, split, dirname -import os -import sys -from distutils.dep_util import newer -from distutils.msvccompiler import get_build_version as get_msvc_build_version - -def needs_mingw_ftime_workaround(): - # We need the mingw workaround for _ftime if the msvc runtime version is - # 7.1 or above and we build with mingw ... - # ... but we can't easily detect compiler version outside distutils command - # context, so we will need to detect in randomkit whether we build with gcc - msver = get_msvc_build_version() - if msver and msver >= 8: - return True - - return False - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration, get_mathlibs - config = Configuration('random', parent_package, top_path) - - def generate_libraries(ext, build_dir): - config_cmd = config.get_config_cmd() - libs = get_mathlibs() - tc = testcode_wincrypt() - if config_cmd.try_run(tc): - libs.append('Advapi32') - ext.libraries.extend(libs) - return None - - # enable unix large file support on 32 bit systems - # (64 bit off_t, lseek -> lseek64 etc.) 
- defs = [('_FILE_OFFSET_BITS', '64'), - ('_LARGEFILE_SOURCE', '1'), - ('_LARGEFILE64_SOURCE', '1')] - if needs_mingw_ftime_workaround(): - defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None)) - - libs = [] - # Configure mtrand - config.add_extension('mtrand', - sources=[join('mtrand', x) for x in - ['mtrand.c', 'randomkit.c', 'initarray.c', - 'distributions.c']]+[generate_libraries], - libraries=libs, - depends=[join('mtrand', '*.h'), - join('mtrand', '*.pyx'), - join('mtrand', '*.pxi'),], - define_macros=defs, - ) - - config.add_data_files(('.', join('mtrand', 'randomkit.h'))) - config.add_data_dir('tests') - - return config - -def testcode_wincrypt(): - return """\ -/* check to see if _WIN32 is defined */ -int main(int argc, char *argv[]) -{ -#ifdef _WIN32 - return 0; -#else - return 1; -#endif -} -""" - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_random.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_random.py deleted file mode 100644 index 1bf25a92613c5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_random.py +++ /dev/null @@ -1,707 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_raises, assert_equal, - assert_warns) -from numpy import random -from numpy.compat import asbytes -import sys - -class TestSeed(TestCase): - def test_scalar(self): - s = np.random.RandomState(0) - assert_equal(s.randint(1000), 684) - s = np.random.RandomState(4294967295) - assert_equal(s.randint(1000), 419) - - def test_array(self): - s = np.random.RandomState(range(10)) - assert_equal(s.randint(1000), 468) - s = np.random.RandomState(np.arange(10)) - assert_equal(s.randint(1000), 468) - s = np.random.RandomState([0]) - assert_equal(s.randint(1000), 973) - s = np.random.RandomState([4294967295]) - assert_equal(s.randint(1000), 265) - - def test_invalid_scalar(self): - # seed must be a unsigned 32 bit integers - assert_raises(TypeError, np.random.RandomState, -0.5) - assert_raises(ValueError, np.random.RandomState, -1) - - def test_invalid_array(self): - # seed must be a unsigned 32 bit integers - assert_raises(TypeError, np.random.RandomState, [-0.5]) - assert_raises(ValueError, np.random.RandomState, [-1]) - assert_raises(ValueError, np.random.RandomState, [4294967296]) - assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) - assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) - -class TestBinomial(TestCase): - def test_n_zero(self): - # Tests the corner case of n == 0 for the binomial distribution. - # binomial(0, p) should be zero for any p in [0, 1]. - # This test addresses issue #3480. - zeros = np.zeros(2, dtype='int') - for p in [0, .5, 1]: - assert_(random.binomial(0, p) == 0) - np.testing.assert_array_equal(random.binomial(zeros, p), zeros) - - def test_p_is_nan(self): - # Issue #4571. 
- assert_raises(ValueError, random.binomial, 1, np.nan) - - -class TestMultinomial(TestCase): - def test_basic(self): - random.multinomial(100, [0.2, 0.8]) - - def test_zero_probability(self): - random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) - - def test_int_negative_interval(self): - assert_(-5 <= random.randint(-5, -1) < -1) - x = random.randint(-5, -1, 5) - assert_(np.all(-5 <= x)) - assert_(np.all(x < -1)) - - def test_size(self): - # gh-3173 - p = [0.5, 0.5] - assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) - assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) - assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, - (2, 2, 2)) - - assert_raises(TypeError, np.random.multinomial, 1, p, - np.float(1)) - - -class TestSetState(TestCase): - def setUp(self): - self.seed = 1234567890 - self.prng = random.RandomState(self.seed) - self.state = self.prng.get_state() - - def test_basic(self): - old = self.prng.tomaxint(16) - self.prng.set_state(self.state) - new = self.prng.tomaxint(16) - assert_(np.all(old == new)) - - def test_gaussian_reset(self): - # Make sure the cached every-other-Gaussian is reset. - old = self.prng.standard_normal(size=3) - self.prng.set_state(self.state) - new = self.prng.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_gaussian_reset_in_media_res(self): - # When the state is saved with a cached Gaussian, make sure the - # cached Gaussian is restored. - - self.prng.standard_normal() - state = self.prng.get_state() - old = self.prng.standard_normal(size=3) - self.prng.set_state(state) - new = self.prng.standard_normal(size=3) - assert_(np.all(old == new)) - - def test_backwards_compatibility(self): - # Make sure we can accept old state tuples that do not have the - # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.prng.standard_normal(size=16) - self.prng.set_state(old_state) - x2 = self.prng.standard_normal(size=16) - self.prng.set_state(self.state) - x3 = self.prng.standard_normal(size=16) - assert_(np.all(x1 == x2)) - assert_(np.all(x1 == x3)) - - def test_negative_binomial(self): - # Ensure that the negative binomial results take floating point - # arguments without truncation. 
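
The TestSetState cases above share a single save-and-replay pattern; stripped of the test scaffolding it amounts to the following sketch (standard RandomState API; the seed is arbitrary)::

    import numpy as np

    prng = np.random.RandomState(1234567890)
    state = prng.get_state()              # snapshot, including any cached Gaussian
    first = prng.standard_normal(size=3)
    prng.set_state(state)                 # rewind the generator
    again = prng.standard_normal(size=3)
    assert (first == again).all()         # the restored stream replays exactly
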
- self.prng.negative_binomial(0.5, 0.5) - -class TestRandomDist(TestCase): - # Make sure the random distrobution return the correct value for a - # given seed - - def setUp(self): - self.seed = 1234567890 - - def test_rand(self): - np.random.seed(self.seed) - actual = np.random.rand(3, 2) - desired = np.array([[0.61879477158567997, 0.59162362775974664], - [0.88868358904449662, 0.89165480011560816], - [0.4575674820298663, 0.7781880808593471]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_randn(self): - np.random.seed(self.seed) - actual = np.random.randn(3, 2) - desired = np.array([[1.34016345771863121, 1.73759122771936081], - [1.498988344300628, -0.2286433324536169], - [2.031033998682787, 2.17032494605655257]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_randint(self): - np.random.seed(self.seed) - actual = np.random.randint(-99, 99, size=(3, 2)) - desired = np.array([[31, 3], - [-52, 41], - [-48, -66]]) - np.testing.assert_array_equal(actual, desired) - - def test_random_integers(self): - np.random.seed(self.seed) - actual = np.random.random_integers(-99, 99, size=(3, 2)) - desired = np.array([[31, 3], - [-52, 41], - [-48, -66]]) - np.testing.assert_array_equal(actual, desired) - - def test_random_sample(self): - np.random.seed(self.seed) - actual = np.random.random_sample((3, 2)) - desired = np.array([[0.61879477158567997, 0.59162362775974664], - [0.88868358904449662, 0.89165480011560816], - [0.4575674820298663, 0.7781880808593471]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_choice_uniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4) - desired = np.array([2, 3, 2, 3]) - np.testing.assert_array_equal(actual, desired) - - def test_choice_nonuniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) - desired = np.array([1, 1, 2, 2]) - np.testing.assert_array_equal(actual, desired) - - def test_choice_uniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False) - desired = np.array([0, 1, 3]) - np.testing.assert_array_equal(actual, desired) - - def test_choice_nonuniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False, - p=[0.1, 0.3, 0.5, 0.1]) - desired = np.array([2, 3, 1]) - np.testing.assert_array_equal(actual, desired) - - def test_choice_noninteger(self): - np.random.seed(self.seed) - actual = np.random.choice(['a', 'b', 'c', 'd'], 4) - desired = np.array(['c', 'd', 'c', 'd']) - np.testing.assert_array_equal(actual, desired) - - def test_choice_exceptions(self): - sample = np.random.choice - assert_raises(ValueError, sample, -1, 3) - assert_raises(ValueError, sample, 3., 3) - assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) - assert_raises(ValueError, sample, [], 3) - assert_raises(ValueError, sample, [1, 2, 3, 4], 3, - p=[[0.25, 0.25], [0.25, 0.25]]) - assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) - assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) - assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) - assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) - assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False, - p=[1, 0, 0]) - - def test_choice_return_shape(self): - p = [0.1, 0.9] - # Check scalar - assert_(np.isscalar(np.random.choice(2, replace=True))) - assert_(np.isscalar(np.random.choice(2, replace=False))) - 
assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) - assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) - assert_(np.isscalar(np.random.choice([1, 2], replace=True))) - assert_(np.random.choice([None], replace=True) is None) - a = np.array([1, 2]) - arr = np.empty(1, dtype=object) - arr[0] = a - assert_(np.random.choice(arr, replace=True) is a) - - # Check 0-d array - s = tuple() - assert_(not np.isscalar(np.random.choice(2, s, replace=True))) - assert_(not np.isscalar(np.random.choice(2, s, replace=False))) - assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) - assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) - assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True))) - assert_(np.random.choice([None], s, replace=True).ndim == 0) - a = np.array([1, 2]) - arr = np.empty(1, dtype=object) - arr[0] = a - assert_(np.random.choice(arr, s, replace=True).item() is a) - - # Check multi dimensional array - s = (2, 3) - p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] - assert_(np.random.choice(6, s, replace=True).shape, s) - assert_(np.random.choice(6, s, replace=False).shape, s) - assert_(np.random.choice(6, s, replace=True, p=p).shape, s) - assert_(np.random.choice(6, s, replace=False, p=p).shape, s) - assert_(np.random.choice(np.arange(6), s, replace=True).shape, s) - - def test_bytes(self): - np.random.seed(self.seed) - actual = np.random.bytes(10) - desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5') - np.testing.assert_equal(actual, desired) - - def test_shuffle(self): - # Test lists, arrays, and multidimensional versions of both: - for conv in [lambda x: x, - np.asarray, - lambda x: [(i, i) for i in x], - lambda x: np.asarray([(i, i) for i in x])]: - np.random.seed(self.seed) - alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - np.random.shuffle(alist) - actual = alist - desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) - np.testing.assert_array_equal(actual, desired) - - def test_shuffle_flexible(self): - # gh-4270 - arr = [(0, 1), (2, 3)] - dt = np.dtype([('a', np.int32, 1), ('b', np.int32, 1)]) - nparr = np.array(arr, dtype=dt) - a, b = nparr[0].copy(), nparr[1].copy() - for i in range(50): - np.random.shuffle(nparr) - assert_(a in nparr) - assert_(b in nparr) - - def test_shuffle_masked(self): - # gh-3263 - a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1) - b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) - ma = np.ma.count_masked(a) - mb = np.ma.count_masked(b) - for i in range(50): - np.random.shuffle(a) - self.assertEqual(ma, np.ma.count_masked(a)) - np.random.shuffle(b) - self.assertEqual(mb, np.ma.count_masked(b)) - - def test_beta(self): - np.random.seed(self.seed) - actual = np.random.beta(.1, .9, size=(3, 2)) - desired = np.array( - [[1.45341850513746058e-02, 5.31297615662868145e-04], - [1.85366619058432324e-06, 4.19214516800110563e-03], - [1.58405155108498093e-04, 1.26252891949397652e-04]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_binomial(self): - np.random.seed(self.seed) - actual = np.random.binomial(100.123, .456, size=(3, 2)) - desired = np.array([[37, 43], - [42, 48], - [46, 45]]) - np.testing.assert_array_equal(actual, desired) - - def test_chisquare(self): - np.random.seed(self.seed) - actual = np.random.chisquare(50, size=(3, 2)) - desired = np.array([[63.87858175501090585, 68.68407748911370447], - [65.77116116901505904, 47.09686762438974483], - [72.3828403199695174, 74.18408615260374006]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=13) - - def 
test_dirichlet(self): - np.random.seed(self.seed) - alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) - desired = np.array([[[0.54539444573611562, 0.45460555426388438], - [0.62345816822039413, 0.37654183177960598]], - [[0.55206000085785778, 0.44793999914214233], - [0.58964023305154301, 0.41035976694845688]], - [[0.59266909280647828, 0.40733090719352177], - [0.56974431743975207, 0.43025568256024799]]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_dirichlet_size(self): - # gh-3173 - p = np.array([51.72840233779265162, 39.74494232180943953]) - assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) - assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) - assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) - assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) - - assert_raises(TypeError, np.random.dirichlet, p, np.float(1)) - - def test_exponential(self): - np.random.seed(self.seed) - actual = np.random.exponential(1.1234, size=(3, 2)) - desired = np.array([[1.08342649775011624, 1.00607889924557314], - [2.46628830085216721, 2.49668106809923884], - [0.68717433461363442, 1.69175666993575979]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_f(self): - np.random.seed(self.seed) - actual = np.random.f(12, 77, size=(3, 2)) - desired = np.array([[1.21975394418575878, 1.75135759791559775], - [1.44803115017146489, 1.22108959480396262], - [1.02176975757740629, 1.34431827623300415]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_gamma(self): - np.random.seed(self.seed) - actual = np.random.gamma(5, 3, size=(3, 2)) - desired = np.array([[24.60509188649287182, 28.54993563207210627], - [26.13476110204064184, 12.56988482927716078], - [31.71863275789960568, 33.30143302795922011]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_geometric(self): - np.random.seed(self.seed) - actual = np.random.geometric(.123456789, size=(3, 2)) - desired = np.array([[8, 7], - [17, 17], - [5, 12]]) - np.testing.assert_array_equal(actual, desired) - - def test_gumbel(self): - np.random.seed(self.seed) - actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[0.19591898743416816, 0.34405539668096674], - [-1.4492522252274278, -1.47374816298446865], - [1.10651090478803416, -0.69535848626236174]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_hypergeometric(self): - np.random.seed(self.seed) - actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) - desired = np.array([[10, 10], - [10, 10], - [9, 9]]) - np.testing.assert_array_equal(actual, desired) - - # Test nbad = 0 - actual = np.random.hypergeometric(5, 0, 3, size=4) - desired = np.array([3, 3, 3, 3]) - np.testing.assert_array_equal(actual, desired) - - actual = np.random.hypergeometric(15, 0, 12, size=4) - desired = np.array([12, 12, 12, 12]) - np.testing.assert_array_equal(actual, desired) - - # Test ngood = 0 - actual = np.random.hypergeometric(0, 5, 3, size=4) - desired = np.array([0, 0, 0, 0]) - np.testing.assert_array_equal(actual, desired) - - actual = np.random.hypergeometric(0, 15, 12, size=4) - desired = np.array([0, 0, 0, 0]) - np.testing.assert_array_equal(actual, desired) - - def test_laplace(self): - 
np.random.seed(self.seed) - actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[0.66599721112760157, 0.52829452552221945], - [3.12791959514407125, 3.18202813572992005], - [-0.05391065675859356, 1.74901336242837324]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_logistic(self): - np.random.seed(self.seed) - actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[1.09232835305011444, 0.8648196662399954], - [4.27818590694950185, 4.33897006346929714], - [-0.21682183359214885, 2.63373365386060332]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_lognormal(self): - np.random.seed(self.seed) - actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) - desired = np.array([[16.50698631688883822, 36.54846706092654784], - [22.67886599981281748, 0.71617561058995771], - [65.72798501792723869, 86.84341601437161273]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=13) - - def test_logseries(self): - np.random.seed(self.seed) - actual = np.random.logseries(p=.923456789, size=(3, 2)) - desired = np.array([[2, 2], - [6, 17], - [3, 6]]) - np.testing.assert_array_equal(actual, desired) - - def test_multinomial(self): - np.random.seed(self.seed) - actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) - desired = np.array([[[4, 3, 5, 4, 2, 2], - [5, 2, 8, 2, 2, 1]], - [[3, 4, 3, 6, 0, 4], - [2, 1, 4, 3, 6, 4]], - [[4, 4, 2, 5, 2, 3], - [4, 3, 4, 2, 3, 4]]]) - np.testing.assert_array_equal(actual, desired) - - def test_multivariate_normal(self): - np.random.seed(self.seed) - mean = (.123456789, 10) - # Hmm... not even symmetric. - cov = [[1, 0], [1, 0]] - size = (3, 2) - actual = np.random.multivariate_normal(mean, cov, size) - desired = np.array([[[-1.47027513018564449, 10.], - [-1.65915081534845532, 10.]], - [[-2.29186329304599745, 10.], - [-1.77505606019580053, 10.]], - [[-0.54970369430044119, 10.], - [0.29768848031692957, 10.]]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - # Check for default size, was raising deprecation warning - actual = np.random.multivariate_normal(mean, cov) - desired = np.array([-0.79441224511977482, 10.]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - # Check that non positive-semidefinite covariance raises warning - mean = [0, 0] - cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]] - assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) - - def test_negative_binomial(self): - np.random.seed(self.seed) - actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) - desired = np.array([[848, 841], - [892, 611], - [779, 647]]) - np.testing.assert_array_equal(actual, desired) - - def test_noncentral_chisquare(self): - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) - desired = np.array([[23.91905354498517511, 13.35324692733826346], - [31.22452661329736401, 16.60047399466177254], - [5.03461598262724586, 17.94973089023519464]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_noncentral_f(self): - np.random.seed(self.seed) - actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, - size=(3, 2)) - desired = np.array([[1.40598099674926669, 0.34207973179285761], - [3.57715069265772545, 7.92632662577829805], - [0.43741599463544162, 1.1774208752428319]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_normal(self): - np.random.seed(self.seed) - 
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) - desired = np.array([[2.80378370443726244, 3.59863924443872163], - [3.121433477601256, -0.33382987590723379], - [4.18552478636557357, 4.46410668111310471]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_pareto(self): - np.random.seed(self.seed) - actual = np.random.pareto(a=.123456789, size=(3, 2)) - desired = np.array( - [[2.46852460439034849e+03, 1.41286880810518346e+03], - [5.28287797029485181e+07, 6.57720981047328785e+07], - [1.40840323350391515e+02, 1.98390255135251704e+05]]) - # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this - # matrix differs by 24 nulps. Discussion: - # http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html - # Consensus is that this is probably some gcc quirk that affects - # rounding but not in any important way, so we just use a looser - # tolerance on this test: - np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) - - def test_poisson(self): - np.random.seed(self.seed) - actual = np.random.poisson(lam=.123456789, size=(3, 2)) - desired = np.array([[0, 0], - [1, 0], - [0, 0]]) - np.testing.assert_array_equal(actual, desired) - - def test_poisson_exceptions(self): - lambig = np.iinfo('l').max - lamneg = -1 - assert_raises(ValueError, np.random.poisson, lamneg) - assert_raises(ValueError, np.random.poisson, [lamneg]*10) - assert_raises(ValueError, np.random.poisson, lambig) - assert_raises(ValueError, np.random.poisson, [lambig]*10) - - def test_power(self): - np.random.seed(self.seed) - actual = np.random.power(a=.123456789, size=(3, 2)) - desired = np.array([[0.02048932883240791, 0.01424192241128213], - [0.38446073748535298, 0.39499689943484395], - [0.00177699707563439, 0.13115505880863756]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_rayleigh(self): - np.random.seed(self.seed) - actual = np.random.rayleigh(scale=10, size=(3, 2)) - desired = np.array([[13.8882496494248393, 13.383318339044731], - [20.95413364294492098, 21.08285015800712614], - [11.06066537006854311, 17.35468505778271009]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_standard_cauchy(self): - np.random.seed(self.seed) - actual = np.random.standard_cauchy(size=(3, 2)) - desired = np.array([[0.77127660196445336, -6.55601161955910605], - [0.93582023391158309, -2.07479293013759447], - [-4.74601644297011926, 0.18338989290760804]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_exponential(self): - np.random.seed(self.seed) - actual = np.random.standard_exponential(size=(3, 2)) - desired = np.array([[0.96441739162374596, 0.89556604882105506], - [2.1953785836319808, 2.22243285392490542], - [0.6116915921431676, 1.50592546727413201]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_gamma(self): - np.random.seed(self.seed) - actual = np.random.standard_gamma(shape=3, size=(3, 2)) - desired = np.array([[5.50841531318455058, 6.62953470301903103], - [5.93988484943779227, 2.31044849402133989], - [7.54838614231317084, 8.012756093271868]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_standard_normal(self): - np.random.seed(self.seed) - actual = np.random.standard_normal(size=(3, 2)) - desired = np.array([[1.34016345771863121, 1.73759122771936081], - [1.498988344300628, -0.2286433324536169], - [2.031033998682787, 2.17032494605655257]]) - 
np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_standard_t(self): - np.random.seed(self.seed) - actual = np.random.standard_t(df=10, size=(3, 2)) - desired = np.array([[0.97140611862659965, -0.08830486548450577], - [1.36311143689505321, -0.55317463909867071], - [-0.18473749069684214, 0.61181537341755321]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_triangular(self): - np.random.seed(self.seed) - actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, - size=(3, 2)) - desired = np.array([[12.68117178949215784, 12.4129206149193152], - [16.20131377335158263, 16.25692138747600524], - [11.20400690911820263, 14.4978144835829923]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_uniform(self): - np.random.seed(self.seed) - actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) - desired = np.array([[6.99097932346268003, 6.73801597444323974], - [9.50364421400426274, 9.53130618907631089], - [5.48995325769805476, 8.47493103280052118]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_vonmises(self): - np.random.seed(self.seed) - actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) - desired = np.array([[2.28567572673902042, 2.89163838442285037], - [0.38198375564286025, 2.57638023113890746], - [1.19153771588353052, 1.83509849681825354]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_vonmises_small(self): - # check infinite loop, gh-4720 - np.random.seed(self.seed) - r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) - np.testing.assert_(np.isfinite(r).all()) - - def test_wald(self): - np.random.seed(self.seed) - actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) - desired = np.array([[3.82935265715889983, 5.13125249184285526], - [0.35045403618358717, 1.50832396872003538], - [0.24124319895843183, 0.22031101461955038]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) - - def test_weibull(self): - np.random.seed(self.seed) - actual = np.random.weibull(a=1.23, size=(3, 2)) - desired = np.array([[0.97097342648766727, 0.91422896443565516], - [1.89517770034962929, 1.91414357960479564], - [0.67057783752390987, 1.39494046635066793]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) - - def test_zipf(self): - np.random.seed(self.seed) - actual = np.random.zipf(a=1.23, size=(3, 2)) - desired = np.array([[66, 29], - [1, 1], - [3, 13]]) - np.testing.assert_array_equal(actual, desired) - - -class TestThread(object): - # make sure each state produces the same sequence even in threads - def setUp(self): - self.seeds = range(4) - - def check_function(self, function, sz): - from threading import Thread - - out1 = np.empty((len(self.seeds),) + sz) - out2 = np.empty((len(self.seeds),) + sz) - - # threaded generation - t = [Thread(target=function, args=(np.random.RandomState(s), o)) - for s, o in zip(self.seeds, out1)] - [x.start() for x in t] - [x.join() for x in t] - - # the same serial - for s, o in zip(self.seeds, out2): - function(np.random.RandomState(s), o) - - # these platforms change x87 fpu precision mode in threads - if (np.intp().dtype.itemsize == 4 and - (sys.platform == "win32" or - sys.platform.startswith("gnukfreebsd"))): - np.testing.assert_array_almost_equal(out1, out2) - else: - np.testing.assert_array_equal(out1, out2) - - def test_normal(self): - def gen_random(state, out): - out[...] 
= state.normal(size=10000) - self.check_function(gen_random, sz=(10000,)) - - def test_exp(self): - def gen_random(state, out): - out[...] = state.exponential(scale=np.ones((100, 1000))) - self.check_function(gen_random, sz=(100, 1000)) - - def test_multinomial(self): - def gen_random(state, out): - out[...] = state.multinomial(10, [1/6.]*6, size=10000) - self.check_function(gen_random, sz=(10000,6)) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_regression.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_regression.py deleted file mode 100644 index ccffd033e55c9..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/random/tests/test_regression.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import (TestCase, run_module_suite, assert_, - assert_array_equal) -from numpy import random -from numpy.compat import long -import numpy as np - - -class TestRegression(TestCase): - - def test_VonMises_range(self): - # Make sure generated random variables are in [-pi, pi]. - # Regression test for ticket #986. - for mu in np.linspace(-7., 7., 5): - r = random.mtrand.vonmises(mu, 1, 50) - assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) - - def test_hypergeometric_range(self): - # Test for ticket #921 - assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) - - def test_logseries_convergence(self): - # Test for ticket #923 - N = 1000 - np.random.seed(0) - rvsn = np.random.logseries(0.8, size=N) - # these two frequency counts should be close to theoretical - # numbers with this large sample - # theoretical large N result is 0.49706795 - freq = np.sum(rvsn == 1) / float(N) - msg = "Frequency was %f, should be > 0.45" % freq - assert_(freq > 0.45, msg) - # theoretical large N result is 0.19882718 - freq = np.sum(rvsn == 2) / float(N) - msg = "Frequency was %f, should be < 0.23" % freq - assert_(freq < 0.23, msg) - - def test_permutation_longs(self): - np.random.seed(1234) - a = np.random.permutation(12) - np.random.seed(1234) - b = np.random.permutation(long(12)) - assert_array_equal(a, b) - - def test_randint_range(self): - # Test for ticket #1690 - lmax = np.iinfo('l').max - lmin = np.iinfo('l').min - try: - random.randint(lmin, lmax) - except: - raise AssertionError - - def test_shuffle_mixed_dimension(self): - # Test for trac ticket #2074 - for t in [[1, 2, 3, None], - [(1, 1), (2, 2), (3, 3), None], - [1, (2, 2), (3, 3), None], - [(1, 1), 2, 3, None]]: - np.random.seed(12345) - shuffled = list(t) - random.shuffle(shuffled) - assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) - - def test_call_within_randomstate(self): - # Check that custom RandomState does not call into global state - m = np.random.RandomState() - res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) - for i in range(3): - np.random.seed(i) - m.seed(4321) - # If m.state is not honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) - - def test_multivariate_normal_size_types(self): - # Test for multivariate_normal issue with 'size' argument. - # Check that the multivariate_normal size argument can be a - # numpy integer. 
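
Most of the distribution tests above use one idiom: seed the global generator, draw, and compare against frozen values. The bare skeleton of that idiom, with an arbitrary seed and standard_normal standing in for any of the distributions::

    import numpy as np

    np.random.seed(1234567890)
    a = np.random.standard_normal(size=(3, 2))
    np.random.seed(1234567890)            # reseeding replays the identical stream
    b = np.random.standard_normal(size=(3, 2))
    np.testing.assert_array_almost_equal(a, b, decimal=15)
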
- np.random.multivariate_normal([0], [[0]], size=1) - np.random.multivariate_normal([0], [[0]], size=np.int_(1)) - np.random.multivariate_normal([0], [[0]], size=np.int64(1)) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/setup.py deleted file mode 100644 index 2c3846271b6e0..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('numpy', parent_package, top_path) - config.add_subpackage('distutils') - config.add_subpackage('testing') - config.add_subpackage('f2py') - config.add_subpackage('core') - config.add_subpackage('lib') - config.add_subpackage('fft') - config.add_subpackage('linalg') - config.add_subpackage('random') - config.add_subpackage('ma') - config.add_subpackage('matrixlib') - config.add_subpackage('compat') - config.add_subpackage('polynomial') - config.add_subpackage('doc') - config.add_data_dir('doc') - config.add_data_dir('tests') - config.make_config_py() # installs __config__.py - return config - -if __name__ == '__main__': - print('This is the wrong setup.py file to run') diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/__init__.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/__init__.py deleted file mode 100644 index 258cbe928b3ce..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Common test support for all numpy test scripts. - -This single module should provide all the common functionality for numpy tests -in a single location, so that test scripts can just import it and work right -away. - -""" -from __future__ import division, absolute_import, print_function - -from unittest import TestCase - -from . import decorators as dec -from .utils import * -from .nosetester import NoseTester as Tester -from .nosetester import run_module_suite -test = Tester().test diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/decorators.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/decorators.py deleted file mode 100644 index 8a4cfb4809cbd..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/decorators.py +++ /dev/null @@ -1,271 +0,0 @@ -""" -Decorators for labeling and modifying behavior of test objects. - -Decorators that merely return a modified version of the original -function object are straightforward. Decorators that return a new -function object need to use -:: - - nose.tools.make_decorator(original_function)(decorator) - -in returning the decorator, in order to preserve meta-data such as -function name, setup and teardown functions and so on - see -``nose.tools`` for more information. - -""" -from __future__ import division, absolute_import, print_function - -import warnings -import collections - - -def slow(t): - """ - Label a test as 'slow'. - - The exact definition of a slow test is obviously both subjective and - hardware-dependent, but in general any individual test that requires more - than a second or two should be labeled as slow (the whole suite consits of - thousands of tests, so even a second is significant). - - Parameters - ---------- - t : callable - The test to label as slow. - - Returns - ------- - t : callable - The decorated test `t`. 
- - Examples - -------- - The `numpy.testing` module includes ``import decorators as dec``. - A test can be decorated as slow like this:: - - from numpy.testing import * - - @dec.slow - def test_big(self): - print 'Big, slow test' - - """ - - t.slow = True - return t - -def setastest(tf=True): - """ - Signals to nose that this function is or is not a test. - - Parameters - ---------- - tf : bool - If True, specifies that the decorated callable is a test. - If False, specifies that the decorated callable is not a test. - Default is True. - - Notes - ----- - This decorator can't use the nose namespace, because it can be - called from a non-test module. See also ``istest`` and ``nottest`` in - ``nose.tools``. - - Examples - -------- - `setastest` can be used in the following way:: - - from numpy.testing.decorators import setastest - - @setastest(False) - def func_with_test_in_name(arg1, arg2): - pass - - """ - def set_test(t): - t.__test__ = tf - return t - return set_test - -def skipif(skip_condition, msg=None): - """ - Make function raise SkipTest exception if a given condition is true. - - If the condition is a callable, it is used at runtime to dynamically - make the decision. This is useful for tests that may require costly - imports, to delay the cost until the test suite is actually executed. - - Parameters - ---------- - skip_condition : bool or callable - Flag to determine whether to skip the decorated test. - msg : str, optional - Message to give on raising a SkipTest exception. Default is None. - - Returns - ------- - decorator : function - Decorator which, when applied to a function, causes SkipTest - to be raised when `skip_condition` is True, and the function - to be called normally otherwise. - - Notes - ----- - The decorator itself is decorated with the ``nose.tools.make_decorator`` - function in order to transmit function name, and various other metadata. - - """ - - def skip_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - - # Allow for both boolean or callable skip conditions. - if isinstance(skip_condition, collections.Callable): - skip_val = lambda : skip_condition() - else: - skip_val = lambda : skip_condition - - def get_msg(func,msg=None): - """Skip message with information about function being skipped.""" - if msg is None: - out = 'Test skipped due to test condition' - else: - out = msg - - return "Skipping test: %s: %s" % (func.__name__, out) - - # We need to define *two* skippers because Python doesn't allow both - # return with value and yield inside the same function. - def skipper_func(*args, **kwargs): - """Skipper for normal test functions.""" - if skip_val(): - raise nose.SkipTest(get_msg(f, msg)) - else: - return f(*args, **kwargs) - - def skipper_gen(*args, **kwargs): - """Skipper for test generators.""" - if skip_val(): - raise nose.SkipTest(get_msg(f, msg)) - else: - for x in f(*args, **kwargs): - yield x - - # Choose the right skipper to use when building the actual decorator. - if nose.util.isgenerator(f): - skipper = skipper_gen - else: - skipper = skipper_func - - return nose.tools.make_decorator(f)(skipper) - - return skip_decorator - - -def knownfailureif(fail_condition, msg=None): - """ - Make function raise KnownFailureTest exception if given condition is true. - - If the condition is a callable, it is used at runtime to dynamically - make the decision. 
This is useful for tests that may require costly - imports, to delay the cost until the test suite is actually executed. - - Parameters - ---------- - fail_condition : bool or callable - Flag to determine whether to mark the decorated test as a known - failure (if True) or not (if False). - msg : str, optional - Message to give on raising a KnownFailureTest exception. - Default is None. - - Returns - ------- - decorator : function - Decorator, which, when applied to a function, causes KnownFailureTest - to be raised when `fail_condition` is True, and the function - to be called normally otherwise. - - Notes - ----- - The decorator itself is decorated with the ``nose.tools.make_decorator`` - function in order to transmit function name, and various other metadata. - - """ - if msg is None: - msg = 'Test skipped due to known failure' - - # Allow for both boolean or callable known failure conditions. - if isinstance(fail_condition, collections.Callable): - fail_val = lambda : fail_condition() - else: - fail_val = lambda : fail_condition - - def knownfail_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - from .noseclasses import KnownFailureTest - def knownfailer(*args, **kwargs): - if fail_val(): - raise KnownFailureTest(msg) - else: - return f(*args, **kwargs) - return nose.tools.make_decorator(f)(knownfailer) - - return knownfail_decorator - -def deprecated(conditional=True): - """ - Filter deprecation warnings while running the test suite. - - This decorator can be used to filter DeprecationWarnings, to avoid - printing them during the test suite run, while checking that the test - actually raises a DeprecationWarning. - - Parameters - ---------- - conditional : bool or callable, optional - Flag to determine whether to mark test as deprecated or not. If the - condition is a callable, it is used at runtime to dynamically make the - decision. Default is True. - - Returns - ------- - decorator : function - The `deprecated` decorator itself. - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - def deprecate_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - from .noseclasses import KnownFailureTest - - def _deprecated_imp(*args, **kwargs): - # Poor man's replacement for the with statement - with warnings.catch_warnings(record=True) as l: - warnings.simplefilter('always') - f(*args, **kwargs) - if not len(l) > 0: - raise AssertionError("No warning raised when calling %s" - % f.__name__) - if not l[0].category is DeprecationWarning: - raise AssertionError("First warning for %s is not a " \ - "DeprecationWarning (is %s)" % (f.__name__, l[0])) - - if isinstance(conditional, collections.Callable): - cond = conditional() - else: - cond = conditional - if cond: - return nose.tools.make_decorator(f)(_deprecated_imp) - else: - return f - return deprecate_decorator diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/noseclasses.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/noseclasses.py deleted file mode 100644 index cb757a13f2071..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/noseclasses.py +++ /dev/null @@ -1,353 +0,0 @@ -# These classes implement a doctest runner plugin for nose, a "known failure" -# error class, and a customized TestProgram for NumPy.
- -# Because this module imports nose directly, it should not -# be used except by nosetester.py to avoid a general NumPy -# dependency on nose. -from __future__ import division, absolute_import, print_function - -import os -import doctest - -import nose -from nose.plugins import doctests as npd -from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin -from nose.plugins.base import Plugin -from nose.util import src -import numpy -from .nosetester import get_package_name -import inspect - -# Some of the classes in this module begin with 'Numpy' to clearly distinguish -# them from the plethora of very similar names from nose/unittest/doctest - -#----------------------------------------------------------------------------- -# Modified version of the one in the stdlib, that fixes a python bug (doctests -# not found in extension modules, http://bugs.python.org/issue3158) -class NumpyDocTestFinder(doctest.DocTestFinder): - - def _from_module(self, module, object): - """ - Return true if the given object is defined in the given - module. - """ - if module is None: - #print '_fm C1' # dbg - return True - elif inspect.isfunction(object): - #print '_fm C2' # dbg - return module.__dict__ is object.__globals__ - elif inspect.isbuiltin(object): - #print '_fm C2-1' # dbg - return module.__name__ == object.__module__ - elif inspect.isclass(object): - #print '_fm C3' # dbg - return module.__name__ == object.__module__ - elif inspect.ismethod(object): - # This one may be a bug in cython that fails to correctly set the - # __module__ attribute of methods, but since the same error is easy - # to make by extension code writers, having this safety in place - # isn't such a bad idea - #print '_fm C3-1' # dbg - return module.__name__ == object.__self__.__class__.__module__ - elif inspect.getmodule(object) is not None: - #print '_fm C4' # dbg - #print 'C4 mod',module,'obj',object # dbg - return module is inspect.getmodule(object) - elif hasattr(object, '__module__'): - #print '_fm C5' # dbg - return module.__name__ == object.__module__ - elif isinstance(object, property): - #print '_fm C6' # dbg - return True # [XX] no way not be sure. - else: - raise ValueError("object must be a class or function") - - def _find(self, tests, obj, name, module, source_lines, globs, seen): - """ - Find tests for the given object and any contained objects, and - add them to `tests`. - """ - - doctest.DocTestFinder._find(self, tests, obj, name, module, - source_lines, globs, seen) - - # Below we re-run pieces of the above method with manual modifications, - # because the original code is buggy and fails to correctly identify - # doctests in extension modules. - - # Local shorthands - from inspect import isroutine, isclass, ismodule, isfunction, \ - ismethod - - # Look for tests in a module's contained objects. - if ismodule(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - valname1 = '%s.%s' % (name, valname) - if ( (isroutine(val) or isclass(val)) - and self._from_module(module, val) ): - - self._find(tests, val, valname1, module, source_lines, - globs, seen) - - - # Look for tests in a class's contained objects. - if isclass(obj) and self._recurse: - #print 'RECURSE into class:',obj # dbg - for valname, val in obj.__dict__.items(): - #valname1 = '%s.%s' % (name, valname) # dbg - #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg - # Special handling for staticmethod/classmethod. 
- if isinstance(val, staticmethod): - val = getattr(obj, valname) - if isinstance(val, classmethod): - val = getattr(obj, valname).__func__ - - # Recurse to methods, properties, and nested classes. - if ((isfunction(val) or isclass(val) or - ismethod(val) or isinstance(val, property)) and - self._from_module(module, val)): - valname = '%s.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - -# second-chance checker; if the default comparison doesn't -# pass, then see if the expected output string contains flags that -# tell us to ignore the output -class NumpyOutputChecker(doctest.OutputChecker): - def check_output(self, want, got, optionflags): - ret = doctest.OutputChecker.check_output(self, want, got, - optionflags) - if not ret: - if "#random" in want: - return True - - # it would be useful to normalize endianness so that - # bigendian machines don't fail all the tests (and there are - # actually some bigendian examples in the doctests). Let's try - # making them all little endian - got = got.replace("'>", "'<") - want= want.replace("'>", "'<") - - # try to normalize out 32 and 64 bit default int sizes - for sz in [4, 8]: - got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') - 'numpy' - - """ - - fullpath = filepath[:] - pkg_name = [] - while 'site-packages' in filepath or 'dist-packages' in filepath: - filepath, p2 = os.path.split(filepath) - if p2 in ('site-packages', 'dist-packages'): - break - pkg_name.append(p2) - - # if package name determination failed, just default to numpy/scipy - if not pkg_name: - if 'scipy' in fullpath: - return 'scipy' - else: - return 'numpy' - - # otherwise, reverse to get correct order and return - pkg_name.reverse() - - # don't include the outer egg directory - if pkg_name[0].endswith('.egg'): - pkg_name.pop(0) - - return '.'.join(pkg_name) - -def import_nose(): - """ Import nose only when needed. - """ - fine_nose = True - minimum_nose_version = (0, 10, 0) - try: - import nose - except ImportError: - fine_nose = False - else: - if nose.__versioninfo__ < minimum_nose_version: - fine_nose = False - - if not fine_nose: - msg = 'Need nose >= %d.%d.%d for tests - see ' \ - 'http://somethingaboutorange.com/mrl/projects/nose' % \ - minimum_nose_version - - raise ImportError(msg) - - return nose - -def run_module_suite(file_to_run=None, argv=None): - """ - Run a test module. - - Equivalent to calling ``$ nosetests `` from - the command line - - Parameters - ---------- - file_to_run: str, optional - Path to test module, or None. - By default, run the module from which this function is called. - argv: list of strings - Arguments to be passed to the nose test runner. ``argv[0]`` is - ignored. All command line arguments accepted by ``nosetests`` - will work. - - .. versionadded:: 1.9.0 - - Examples - -------- - Adding the following:: - - if __name__ == "__main__" : - run_module_suite(argv=sys.argv) - - at the end of a test module will run the tests when that module is - called in the python interpreter. - - Alternatively, calling:: - - >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") - - from an interpreter will run all the test routine in 'test_matlib.py'. 
- """ - if file_to_run is None: - f = sys._getframe(1) - file_to_run = f.f_locals.get('__file__', None) - if file_to_run is None: - raise AssertionError - - if argv is None: - argv = ['', file_to_run] - else: - argv = argv + [file_to_run] - - nose = import_nose() - from .noseclasses import KnownFailure - nose.run(argv=argv, addplugins=[KnownFailure()]) - - -class NoseTester(object): - """ - Nose test runner. - - This class is made available as numpy.testing.Tester, and a test function - is typically added to a package's __init__.py like so:: - - from numpy.testing import Tester - test = Tester().test - - Calling this test function finds and runs all tests associated with the - package and all its sub-packages. - - Attributes - ---------- - package_path : str - Full path to the package to test. - package_name : str - Name of the package to test. - - Parameters - ---------- - package : module, str or None, optional - The package to test. If a string, this should be the full path to - the package. If None (default), `package` is set to the module from - which `NoseTester` is initialized. - raise_warnings : str or sequence of warnings, optional - This specifies which warnings to configure as 'raise' instead - of 'warn' during the test execution. Valid strings are: - - - "develop" : equals ``(DeprecationWarning, RuntimeWarning)`` - - "release" : equals ``()``, don't raise on any warnings. - - See Notes for more details. - - Notes - ----- - The default for `raise_warnings` is - ``(DeprecationWarning, RuntimeWarning)`` for the master branch of NumPy, - and ``()`` for maintenance branches and released versions. The purpose - of this switching behavior is to catch as many warnings as possible - during development, but not give problems for packaging of released - versions. - - """ - # Stuff to exclude from tests. These are from numpy.distutils - excludes = ['f2py_ext', - 'f2py_f90_ext', - 'gen_ext', - 'pyrex_ext', - 'swig_ext'] - - def __init__(self, package=None, raise_warnings="release"): - package_name = None - if package is None: - f = sys._getframe(1) - package_path = f.f_locals.get('__file__', None) - if package_path is None: - raise AssertionError - package_path = os.path.dirname(package_path) - package_name = f.f_locals.get('__name__', None) - elif isinstance(package, type(os)): - package_path = os.path.dirname(package.__file__) - package_name = getattr(package, '__name__', None) - else: - package_path = str(package) - - self.package_path = package_path - - # Find the package name under test; this name is used to limit coverage - # reporting (if enabled). - if package_name is None: - package_name = get_package_name(package_path) - self.package_name = package_name - - # Set to "release" in constructor in maintenance branches. - self.raise_warnings = raise_warnings - - def _test_argv(self, label, verbose, extra_argv): - ''' Generate argv for nosetest command - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - see ``test`` docstring - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. 
- - Returns - ------- - argv : list - command line arguments that will be passed to nose - ''' - argv = [__file__, self.package_path, '-s'] - if label and label != 'full': - if not isinstance(label, basestring): - raise TypeError('Selection label should be a string') - if label == 'fast': - label = 'not slow' - argv += ['-A', label] - argv += ['--verbosity', str(verbose)] - - # When installing with setuptools, and also in some other cases, the - # test_*.py files end up marked +x executable. Nose, by default, does - # not run files marked with +x as they might be scripts. However, in - # our case nose only looks for test_*.py files under the package - # directory, which should be safe. - argv += ['--exe'] - - if extra_argv: - argv += extra_argv - return argv - - def _show_system_info(self): - nose = import_nose() - - import numpy - print("NumPy version %s" % numpy.__version__) - npdir = os.path.dirname(numpy.__file__) - print("NumPy is installed in %s" % npdir) - - if 'scipy' in self.package_name: - import scipy - print("SciPy version %s" % scipy.__version__) - spdir = os.path.dirname(scipy.__file__) - print("SciPy is installed in %s" % spdir) - - pyversion = sys.version.replace('\n', '') - print("Python version %s" % pyversion) - print("nose version %d.%d.%d" % nose.__versioninfo__) - - def _get_custom_doctester(self): - """ Return instantiated plugin for doctests - - Allows subclassing of this class to override doctester - - A return value of None means use the nose builtin doctest plugin - """ - from .noseclasses import NumpyDoctest - return NumpyDoctest() - - def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False): - """ - Run tests for module using nose. - - This method does the heavy lifting for the `test` method. It takes all - the same arguments, for details see `test`. - - See Also - -------- - test - - """ - # fail with nice error message if nose is not present - import_nose() - # compile argv - argv = self._test_argv(label, verbose, extra_argv) - # bypass tests noted for exclude - for ename in self.excludes: - argv += ['--exclude', ename] - # our way of doing coverage - if coverage: - argv+=['--cover-package=%s' % self.package_name, '--with-coverage', - '--cover-tests', '--cover-erase'] - # construct list of plugins - import nose.plugins.builtin - from .noseclasses import KnownFailure, Unplugger - plugins = [KnownFailure()] - plugins += [p() for p in nose.plugins.builtin.plugins] - # add doctesting if required - doctest_argv = '--with-doctest' in argv - if doctests == False and doctest_argv: - doctests = True - plug = self._get_custom_doctester() - if plug is None: - # use standard doctesting - if doctests and not doctest_argv: - argv += ['--with-doctest'] - else: # custom doctesting - if doctest_argv: # in fact the unplugger would take care of this - argv.remove('--with-doctest') - plugins += [Unplugger('doctest'), plug] - if doctests: - argv += ['--with-' + plug.name] - return argv, plugins - - def test(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False, - raise_warnings=None): - """ - Run tests for module using nose. - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the tests to run. This can be a string to pass to - the nosetests executable with the '-A' option, or one of several - special values. Special values are: - * 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. 
- * 'full' - fast (as above) and slow tests as in the - 'no -A' option to nosetests - this is the same as ''. - * None or '' - run all tests. - attribute_identifier - string passed directly to nosetests as '-A'. - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - doctests : bool, optional - If True, run doctests in module. Default is False. - coverage : bool, optional - If True, report coverage of NumPy code. Default is False. - (This requires the `coverage module: - <http://nedbatchelder.com/code/modules/coverage.html>`_). - raise_warnings : str or sequence of warnings, optional - This specifies which warnings to configure as 'raise' instead - of 'warn' during the test execution. Valid strings are: - - - "develop" : equals ``(DeprecationWarning, RuntimeWarning)`` - - "release" : equals ``()``, don't raise on any warnings. - - Returns - ------- - result : object - Returns the result of running the tests as a - ``nose.result.TextTestResult`` object. - - Notes - ----- - Each NumPy module exposes `test` in its namespace to run all tests for it. - For example, to run all tests for numpy.lib: - - >>> np.lib.test() #doctest: +SKIP - - Examples - -------- - >>> result = np.lib.test() #doctest: +SKIP - Running unit tests for numpy.lib - ... - Ran 976 tests in 3.933s - - OK - - >>> result.errors #doctest: +SKIP - [] - >>> result.knownfail #doctest: +SKIP - [] - """ - - # cap verbosity at 3 because nose becomes *very* verbose beyond that - verbose = min(verbose, 3) - - from . import utils - utils.verbose = verbose - - if doctests: - print("Running unit tests and doctests for %s" % self.package_name) - else: - print("Running unit tests for %s" % self.package_name) - - self._show_system_info() - - # reset doctest state on every run - import doctest - doctest.master = None - - if raise_warnings is None: - raise_warnings = self.raise_warnings - - _warn_opts = dict(develop=(DeprecationWarning, RuntimeWarning), - release=()) - if raise_warnings in _warn_opts.keys(): - raise_warnings = _warn_opts[raise_warnings] - - with warnings.catch_warnings(): - # Reset the warning filters to the default state, - # so that running the tests is more repeatable. - warnings.resetwarnings() - # If deprecation warnings are not set to 'error' below, - # at least set them to 'warn'. - warnings.filterwarnings('always', category=DeprecationWarning) - # Force the requested warnings to raise - for warningtype in raise_warnings: - warnings.filterwarnings('error', category=warningtype) - # Filter out annoying import messages. - warnings.filterwarnings('ignore', message='Not importing directory') - warnings.filterwarnings("ignore", message="numpy.dtype size changed") - warnings.filterwarnings("ignore", message="numpy.ufunc size changed") - warnings.filterwarnings("ignore", category=ModuleDeprecationWarning) - warnings.filterwarnings("ignore", category=FutureWarning) - # Filter out boolean '-' deprecation messages. This allows - # older versions of scipy to test without a flood of messages. - warnings.filterwarnings("ignore", message=".*boolean negative.*") - warnings.filterwarnings("ignore", message=".*boolean subtract.*") - - from .noseclasses import NumpyTestProgram - - argv, plugins = self.prepare_test_args( - label, verbose, extra_argv, doctests, coverage) - t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) - - return t.result - - def bench(self, label='fast', verbose=1, extra_argv=None): - """ - Run benchmarks for module using nose.
- - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the benchmarks to run. This can be a string to pass to - the nosetests executable with the '-A' option, or one of several - special values. Special values are: - * 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. - * 'full' - fast (as above) and slow benchmarks as in the - 'no -A' option to nosetests - this is the same as ''. - * None or '' - run all tests. - attribute_identifier - string passed directly to nosetests as '-A'. - verbose : int, optional - Verbosity value for benchmark outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - - Returns - ------- - success : bool - Returns True if running the benchmarks works, False if an error - occurred. - - Notes - ----- - Benchmarks are like tests, but have names starting with "bench" instead - of "test", and can be found under the "benchmarks" sub-directory of the - module. - - Each NumPy module exposes `bench` in its namespace to run all benchmarks - for it. - - Examples - -------- - >>> success = np.lib.bench() #doctest: +SKIP - Running benchmarks for numpy.lib - ... - using 562341 items: - unique: - 0.11 - unique1d: - 0.11 - ratio: 1.0 - nUnique: 56230 == 56230 - ... - OK - - >>> success #doctest: +SKIP - True - - """ - - print("Running benchmarks for %s" % self.package_name) - self._show_system_info() - - argv = self._test_argv(label, verbose, extra_argv) - argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] - - # import nose or make informative error - nose = import_nose() - - # get plugin to disable doctests - from .noseclasses import Unplugger - add_plugins = [Unplugger('doctest')] - - return nose.run(argv=argv, addplugins=add_plugins) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/print_coercion_tables.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/print_coercion_tables.py deleted file mode 100644 index bde82a666fa8b..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/print_coercion_tables.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -"""Prints type-coercion tables for the built-in NumPy types - -""" -from __future__ import division, absolute_import, print_function - -import numpy as np - -# Generic object that can be added, but doesn't do anything else -class GenericObject(object): - def __init__(self, v): - self.v = v - - def __add__(self, other): - return self - - def __radd__(self, other): - return self - - dtype = np.dtype('O') - -def print_cancast_table(ntypes): - print('X', end=' ') - for char in ntypes: print(char, end=' ') - print() - for row in ntypes: - print(row, end=' ') - for col in ntypes: - print(int(np.can_cast(row, col)), end=' ') - print() - -def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): - print('+', end=' ') - for char in ntypes: print(char, end=' ') - print() - for row in ntypes: - if row == 'O': - rowtype = GenericObject - else: - rowtype = np.obj2sctype(row) - - print(row, end=' ') - for col in ntypes: - if col == 'O': - coltype = GenericObject - else: - coltype = np.obj2sctype(col) - try: - if firstarray: - rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) - else: - rowvalue = rowtype(inputfirstvalue) - colvalue = coltype(inputsecondvalue) - if use_promote_types: - char = np.promote_types(rowvalue.dtype, colvalue.dtype).char - else: - value = np.add(rowvalue, 
colvalue) - if isinstance(value, np.ndarray): - char = value.dtype.char - else: - char = np.dtype(type(value)).char - except ValueError: - char = '!' - except OverflowError: - char = '@' - except TypeError: - char = '#' - print(char, end=' ') - print() - -print("can cast") -print_cancast_table(np.typecodes['All']) -print() -print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") -print() -print("scalar + scalar") -print_coercion_table(np.typecodes['All'], 0, 0, False) -print() -print("scalar + neg scalar") -print_coercion_table(np.typecodes['All'], 0, -1, False) -print() -print("array + scalar") -print_coercion_table(np.typecodes['All'], 0, 0, True) -print() -print("array + neg scalar") -print_coercion_table(np.typecodes['All'], 0, -1, True) -print() -print("promote_types") -print_coercion_table(np.typecodes['All'], 0, 0, False, True) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/setup.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/setup.py deleted file mode 100644 index 595e48925fffd..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('testing', parent_package, top_path) - - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(maintainer = "NumPy Developers", - maintainer_email = "numpy-dev@numpy.org", - description = "NumPy test module", - url = "http://www.numpy.org", - license = "NumPy License (BSD Style)", - configuration = configuration, - ) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_decorators.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_decorators.py deleted file mode 100644 index 36c7cc7bb29cc..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_decorators.py +++ /dev/null @@ -1,185 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -from numpy.testing import * -from numpy.testing.noseclasses import KnownFailureTest -import nose - -def test_slow(): - @dec.slow - def slow_func(x, y, z): - pass - - assert_(slow_func.slow) - -def test_setastest(): - @dec.setastest() - def f_default(a): - pass - - @dec.setastest(True) - def f_istest(a): - pass - - @dec.setastest(False) - def f_isnottest(a): - pass - - assert_(f_default.__test__) - assert_(f_istest.__test__) - assert_(not f_isnottest.__test__) - -class DidntSkipException(Exception): - pass - -def test_skip_functions_hardcoded(): - @dec.skipif(True) - def f1(x): - raise DidntSkipException - - try: - f1('a') - except DidntSkipException: - raise Exception('Failed to skip') - except nose.SkipTest: - pass - - @dec.skipif(False) - def f2(x): - raise DidntSkipException - - try: - f2('a') - except DidntSkipException: - pass - except nose.SkipTest: - raise Exception('Skipped when not expected to') - - -def test_skip_functions_callable(): - def skip_tester(): - return skip_flag == 'skip me!' - - @dec.skipif(skip_tester) - def f1(x): - raise DidntSkipException - - try: - skip_flag = 'skip me!' - f1('a') - except DidntSkipException: - raise Exception('Failed to skip') - except nose.SkipTest: - pass - - @dec.skipif(skip_tester) - def f2(x): - raise DidntSkipException - - try: - skip_flag = 'five is right out!' 
- f2('a') - except DidntSkipException: - pass - except nose.SkipTest: - raise Exception('Skipped when not expected to') - - -def test_skip_generators_hardcoded(): - @dec.knownfailureif(True, "This test is known to fail") - def g1(x): - for i in range(x): - yield i - - try: - for j in g1(10): - pass - except KnownFailureTest: - pass - else: - raise Exception('Failed to mark as known failure') - - - @dec.knownfailureif(False, "This test is NOT known to fail") - def g2(x): - for i in range(x): - yield i - raise DidntSkipException('FAIL') - - try: - for j in g2(10): - pass - except KnownFailureTest: - raise Exception('Marked incorrectly as known failure') - except DidntSkipException: - pass - - -def test_skip_generators_callable(): - def skip_tester(): - return skip_flag == 'skip me!' - - @dec.knownfailureif(skip_tester, "This test is known to fail") - def g1(x): - for i in range(x): - yield i - - try: - skip_flag = 'skip me!' - for j in g1(10): - pass - except KnownFailureTest: - pass - else: - raise Exception('Failed to mark as known failure') - - - @dec.knownfailureif(skip_tester, "This test is NOT known to fail") - def g2(x): - for i in range(x): - yield i - raise DidntSkipException('FAIL') - - try: - skip_flag = 'do not skip' - for j in g2(10): - pass - except KnownFailureTest: - raise Exception('Marked incorrectly as known failure') - except DidntSkipException: - pass - - -def test_deprecated(): - @dec.deprecated(True) - def non_deprecated_func(): - pass - - @dec.deprecated() - def deprecated_func(): - import warnings - warnings.warn("TEST: deprecated func", DeprecationWarning) - - @dec.deprecated() - def deprecated_func2(): - import warnings - warnings.warn("AHHHH") - raise ValueError - - @dec.deprecated() - def deprecated_func3(): - import warnings - warnings.warn("AHHHH") - - # marked as deprecated, but does not raise DeprecationWarning - assert_raises(AssertionError, non_deprecated_func) - # should be silent - deprecated_func() - # fails if deprecated decorator just disables test. See #1453. - assert_raises(ValueError, deprecated_func2) - # first warning is not a DeprecationWarning - assert_raises(AssertionError, deprecated_func3) - - -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_doctesting.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_doctesting.py deleted file mode 100644 index 43f9fb6cebba5..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_doctesting.py +++ /dev/null @@ -1,56 +0,0 @@ -""" Doctests for NumPy-specific nose/doctest modifications - -""" -from __future__ import division, absolute_import, print_function - -# try the #random directive on the output line -def check_random_directive(): - ''' - >>> 2+2 - #random: may vary on your system - ''' - -# check the implicit "import numpy as np" -def check_implicit_np(): - ''' - >>> np.array([1,2,3]) - array([1, 2, 3]) - ''' - -# there's some extraneous whitespace around the correct responses -def check_whitespace_enabled(): - ''' - # whitespace after the 3 - >>> 1+2 - 3 - - # whitespace before the 7 - >>> 3+4 - 7 - ''' - -def check_empty_output(): - """ Check that no output does not cause an error.
- - This is related to nose bug 445; the numpy plugin changed the - doctest-result-variable default and therefore hit this bug: - http://code.google.com/p/python-nose/issues/detail?id=445 - - >>> a = 10 - """ - -def check_skip(): - """ Check skip directive - - The test below should not run - - >>> 1/0 #doctest: +SKIP - """ - - -if __name__ == '__main__': - # Run tests outside numpy test rig - import nose - from numpy.testing.noseclasses import NumpyDoctest - argv = ['', __file__, '--with-numpydoctest'] - nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()]) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_utils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_utils.py deleted file mode 100644 index 41a48ea65dd53..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/tests/test_utils.py +++ /dev/null @@ -1,558 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import warnings -import sys - -import numpy as np -from numpy.testing import * -import unittest - -class _GenericTest(object): - def _test_equal(self, a, b): - self._assert_func(a, b) - - def _test_not_equal(self, a, b): - try: - self._assert_func(a, b) - passed = True - except AssertionError: - pass - else: - raise AssertionError("a and b are found equal but are not") - - def test_array_rank1_eq(self): - """Test two equal array of rank 1 are found equal.""" - a = np.array([1, 2]) - b = np.array([1, 2]) - - self._test_equal(a, b) - - def test_array_rank1_noteq(self): - """Test two different array of rank 1 are found not equal.""" - a = np.array([1, 2]) - b = np.array([2, 2]) - - self._test_not_equal(a, b) - - def test_array_rank2_eq(self): - """Test two equal array of rank 2 are found equal.""" - a = np.array([[1, 2], [3, 4]]) - b = np.array([[1, 2], [3, 4]]) - - self._test_equal(a, b) - - def test_array_diffshape(self): - """Test two arrays with different shapes are found not equal.""" - a = np.array([1, 2]) - b = np.array([[1, 2], [1, 2]]) - - self._test_not_equal(a, b) - - def test_objarray(self): - """Test object arrays.""" - a = np.array([1, 1], dtype=np.object) - self._test_equal(a, 1) - - def test_array_likes(self): - self._test_equal([1, 2, 3], (1, 2, 3)) - -class TestArrayEqual(_GenericTest, unittest.TestCase): - def setUp(self): - self._assert_func = assert_array_equal - - def test_generic_rank1(self): - """Test rank 1 array for all dtypes.""" - def foo(t): - a = np.empty(2, t) - a.fill(1) - b = a.copy() - c = a.copy() - c.fill(0) - self._test_equal(a, b) - self._test_not_equal(c, b) - - # Test numeric types and object - for t in '?bhilqpBHILQPfdgFDG': - foo(t) - - # Test strings - for t in ['S1', 'U1']: - foo(t) - - def test_generic_rank3(self): - """Test rank 3 array for all dtypes.""" - def foo(t): - a = np.empty((4, 2, 3), t) - a.fill(1) - b = a.copy() - c = a.copy() - c.fill(0) - self._test_equal(a, b) - self._test_not_equal(c, b) - - # Test numeric types and object - for t in '?bhilqpBHILQPfdgFDG': - foo(t) - - # Test strings - for t in ['S1', 'U1']: - foo(t) - - def test_nan_array(self): - """Test arrays with nan values in them.""" - a = np.array([1, 2, np.nan]) - b = np.array([1, 2, np.nan]) - - self._test_equal(a, b) - - c = np.array([1, 2, 3]) - self._test_not_equal(c, b) - - def test_string_arrays(self): - """Test two arrays with different shapes are found not equal.""" - a = np.array(['floupi', 'floupa']) - b = np.array(['floupi', 'floupa']) - - self._test_equal(a, b) - - c = np.array(['floupipi', 'floupa']) - - 
self._test_not_equal(c, b) - - def test_recarrays(self): - """Test record arrays.""" - a = np.empty(2, [('floupi', np.float), ('floupa', np.float)]) - a['floupi'] = [1, 2] - a['floupa'] = [1, 2] - b = a.copy() - - self._test_equal(a, b) - - c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)]) - c['floupipi'] = a['floupi'].copy() - c['floupa'] = a['floupa'].copy() - - self._test_not_equal(c, b) - -class TestBuildErrorMessage(unittest.TestCase): - def test_build_err_msg_defaults(self): - x = np.array([1.00001, 2.00002, 3.00003]) - y = np.array([1.00002, 2.00003, 3.00004]) - err_msg = 'There is a mismatch' - - a = build_err_msg([x, y], err_msg) - b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ ' - '1.00001, 2.00002, 3.00003])\n DESIRED: array([ 1.00002, ' - '2.00003, 3.00004])') - self.assertEqual(a, b) - - def test_build_err_msg_no_verbose(self): - x = np.array([1.00001, 2.00002, 3.00003]) - y = np.array([1.00002, 2.00003, 3.00004]) - err_msg = 'There is a mismatch' - - a = build_err_msg([x, y], err_msg, verbose=False) - b = '\nItems are not equal: There is a mismatch' - self.assertEqual(a, b) - - def test_build_err_msg_custom_names(self): - x = np.array([1.00001, 2.00002, 3.00003]) - y = np.array([1.00002, 2.00003, 3.00004]) - err_msg = 'There is a mismatch' - - a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) - b = ('\nItems are not equal: There is a mismatch\n FOO: array([ ' - '1.00001, 2.00002, 3.00003])\n BAR: array([ 1.00002, 2.00003, ' - '3.00004])') - self.assertEqual(a, b) - - def test_build_err_msg_custom_precision(self): - x = np.array([1.000000001, 2.00002, 3.00003]) - y = np.array([1.000000002, 2.00003, 3.00004]) - err_msg = 'There is a mismatch' - - a = build_err_msg([x, y], err_msg, precision=10) - b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ ' - '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([ ' - '1.000000002, 2.00003 , 3.00004 ])') - self.assertEqual(a, b) - -class TestEqual(TestArrayEqual): - def setUp(self): - self._assert_func = assert_equal - - def test_nan_items(self): - self._assert_func(np.nan, np.nan) - self._assert_func([np.nan], [np.nan]) - self._test_not_equal(np.nan, [np.nan]) - self._test_not_equal(np.nan, 1) - - def test_inf_items(self): - self._assert_func(np.inf, np.inf) - self._assert_func([np.inf], [np.inf]) - self._test_not_equal(np.inf, [np.inf]) - - def test_non_numeric(self): - self._assert_func('ab', 'ab') - self._test_not_equal('ab', 'abb') - - def test_complex_item(self): - self._assert_func(complex(1, 2), complex(1, 2)) - self._assert_func(complex(1, np.nan), complex(1, np.nan)) - self._test_not_equal(complex(1, np.nan), complex(1, 2)) - self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) - self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) - - def test_negative_zero(self): - self._test_not_equal(np.PZERO, np.NZERO) - - def test_complex(self): - x = np.array([complex(1, 2), complex(1, np.nan)]) - y = np.array([complex(1, 2), complex(1, 2)]) - self._assert_func(x, x) - self._test_not_equal(x, y) - -class TestArrayAlmostEqual(_GenericTest, unittest.TestCase): - def setUp(self): - self._assert_func = assert_array_almost_equal - - def test_simple(self): - x = np.array([1234.2222]) - y = np.array([1234.2223]) - - self._assert_func(x, y, decimal=3) - self._assert_func(x, y, decimal=4) - self.assertRaises(AssertionError, - lambda: self._assert_func(x, y, decimal=5)) - - def test_nan(self): - anan = np.array([np.nan]) - aone = np.array([1]) - ainf = np.array([np.inf]) 
- self._assert_func(anan, anan) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, aone)) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, ainf)) - self.assertRaises(AssertionError, - lambda : self._assert_func(ainf, anan)) - - def test_inf(self): - a = np.array([[1., 2.], [3., 4.]]) - b = a.copy() - a[0, 0] = np.inf - self.assertRaises(AssertionError, - lambda : self._assert_func(a, b)) - - def test_subclass(self): - a = np.array([[1., 2.], [3., 4.]]) - b = np.ma.masked_array([[1., 2.], [0., 4.]], - [[False, False], [True, False]]) - assert_array_almost_equal(a, b) - assert_array_almost_equal(b, a) - assert_array_almost_equal(b, b) - -class TestAlmostEqual(_GenericTest, unittest.TestCase): - def setUp(self): - self._assert_func = assert_almost_equal - - def test_nan_item(self): - self._assert_func(np.nan, np.nan) - self.assertRaises(AssertionError, - lambda : self._assert_func(np.nan, 1)) - self.assertRaises(AssertionError, - lambda : self._assert_func(np.nan, np.inf)) - self.assertRaises(AssertionError, - lambda : self._assert_func(np.inf, np.nan)) - - def test_inf_item(self): - self._assert_func(np.inf, np.inf) - self._assert_func(-np.inf, -np.inf) - self.assertRaises(AssertionError, - lambda : self._assert_func(np.inf, 1)) - - def test_simple_item(self): - self._test_not_equal(1, 2) - - def test_complex_item(self): - self._assert_func(complex(1, 2), complex(1, 2)) - self._assert_func(complex(1, np.nan), complex(1, np.nan)) - self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) - self._test_not_equal(complex(1, np.nan), complex(1, 2)) - self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) - self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) - - def test_complex(self): - x = np.array([complex(1, 2), complex(1, np.nan)]) - z = np.array([complex(1, 2), complex(np.nan, 1)]) - y = np.array([complex(1, 2), complex(1, 2)]) - self._assert_func(x, x) - self._test_not_equal(x, y) - self._test_not_equal(x, z) - - def test_error_message(self): - """Check the message is formatted correctly for the decimal value""" - x = np.array([1.00000000001, 2.00000000002, 3.00003]) - y = np.array([1.00000000002, 2.00000000003, 3.00004]) - - # test with a different amount of decimal digits - # note that we only check for the formatting of the arrays themselves - b = ('x: array([ 1.00000000001, 2.00000000002, 3.00003 ' - ' ])\n y: array([ 1.00000000002, 2.00000000003, 3.00004 ])') - try: - self._assert_func(x, y, decimal=12) - except AssertionError as e: - # remove anything that's not the array string - self.assertEqual(str(e).split('%)\n ')[1], b) - - # with the default value of decimal digits, only the 3rd element differs - # note that we only check for the formatting of the arrays themselves - b = ('x: array([ 1. , 2. , 3.00003])\n y: array([ 1. , ' - '2. 
, 3.00004])') - try: - self._assert_func(x, y) - except AssertionError as e: - # remove anything that's not the array string - self.assertEqual(str(e).split('%)\n ')[1], b) - -class TestApproxEqual(unittest.TestCase): - def setUp(self): - self._assert_func = assert_approx_equal - - def test_simple_arrays(self): - x = np.array([1234.22]) - y = np.array([1234.23]) - - self._assert_func(x, y, significant=5) - self._assert_func(x, y, significant=6) - self.assertRaises(AssertionError, - lambda: self._assert_func(x, y, significant=7)) - - def test_simple_items(self): - x = 1234.22 - y = 1234.23 - - self._assert_func(x, y, significant=4) - self._assert_func(x, y, significant=5) - self._assert_func(x, y, significant=6) - self.assertRaises(AssertionError, - lambda: self._assert_func(x, y, significant=7)) - - def test_nan_array(self): - anan = np.array(np.nan) - aone = np.array(1) - ainf = np.array(np.inf) - self._assert_func(anan, anan) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, aone)) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, ainf)) - self.assertRaises(AssertionError, - lambda : self._assert_func(ainf, anan)) - - def test_nan_items(self): - anan = np.array(np.nan) - aone = np.array(1) - ainf = np.array(np.inf) - self._assert_func(anan, anan) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, aone)) - self.assertRaises(AssertionError, - lambda : self._assert_func(anan, ainf)) - self.assertRaises(AssertionError, - lambda : self._assert_func(ainf, anan)) - -class TestRaises(unittest.TestCase): - def setUp(self): - class MyException(Exception): - pass - - self.e = MyException - - def raises_exception(self, e): - raise e - - def does_not_raise_exception(self): - pass - - def test_correct_catch(self): - f = raises(self.e)(self.raises_exception)(self.e) - - def test_wrong_exception(self): - try: - f = raises(self.e)(self.raises_exception)(RuntimeError) - except RuntimeError: - return - else: - raise AssertionError("should have caught RuntimeError") - - def test_catch_no_raise(self): - try: - f = raises(self.e)(self.does_not_raise_exception)() - except AssertionError: - return - else: - raise AssertionError("should have raised an AssertionError") - -class TestWarns(unittest.TestCase): - def test_warn(self): - def f(): - warnings.warn("yo") - return 3 - - before_filters = sys.modules['warnings'].filters[:] - assert_equal(assert_warns(UserWarning, f), 3) - after_filters = sys.modules['warnings'].filters - - assert_raises(AssertionError, assert_no_warnings, f) - assert_equal(assert_no_warnings(lambda x: x, 1), 1) - - # Check that the warnings state is unchanged - assert_equal(before_filters, after_filters, - "assert_warns does not preserve warnings state") - - def test_warn_wrong_warning(self): - def f(): - warnings.warn("yo", DeprecationWarning) - - failed = False - filters = sys.modules['warnings'].filters[:] - try: - try: - # Should raise an AssertionError - assert_warns(UserWarning, f) - failed = True - except AssertionError: - pass - finally: - sys.modules['warnings'].filters = filters - - if failed: - raise AssertionError("wrong warning caught by assert_warns") - -class TestAssertAllclose(unittest.TestCase): - def test_simple(self): - x = 1e-3 - y = 1e-9 - - assert_allclose(x, y, atol=1) - self.assertRaises(AssertionError, assert_allclose, x, y) - - a = np.array([x, y, x, y]) - b = np.array([x, y, x, x]) - - assert_allclose(a, b, atol=1) - self.assertRaises(AssertionError, assert_allclose, a, b) - - b[-1] = y * (1 + 1e-8) -
assert_allclose(a, b) - self.assertRaises(AssertionError, assert_allclose, a, b, - rtol=1e-9) - - assert_allclose(6, 10, rtol=0.5) - self.assertRaises(AssertionError, assert_allclose, 10, 6, rtol=0.5) - - def test_min_int(self): - a = np.array([np.iinfo(np.int_).min], dtype=np.int_) - # Should not raise: - assert_allclose(a, a) - - -class TestArrayAlmostEqualNulp(unittest.TestCase): - @dec.knownfailureif(True, "Github issue #347") - def test_simple(self): - np.random.seed(12345) - for i in range(100): - dev = np.random.randn(10) - x = np.ones(10) - y = x + dev * np.finfo(np.float64).eps - assert_array_almost_equal_nulp(x, y, nulp=2 * np.max(dev)) - - def test_simple2(self): - x = np.random.randn(10) - y = 2 * x - def failure(): - return assert_array_almost_equal_nulp(x, y, - nulp=1000) - self.assertRaises(AssertionError, failure) - - def test_big_float32(self): - x = (1e10 * np.random.randn(10)).astype(np.float32) - y = x + 1 - assert_array_almost_equal_nulp(x, y, nulp=1000) - - def test_big_float64(self): - x = 1e10 * np.random.randn(10) - y = x + 1 - def failure(): - assert_array_almost_equal_nulp(x, y, nulp=1000) - self.assertRaises(AssertionError, failure) - - def test_complex(self): - x = np.random.randn(10) + 1j * np.random.randn(10) - y = x + 1 - def failure(): - assert_array_almost_equal_nulp(x, y, nulp=1000) - self.assertRaises(AssertionError, failure) - - def test_complex2(self): - x = np.random.randn(10) - y = np.array(x, np.complex) + 1e-16 * np.random.randn(10) - - assert_array_almost_equal_nulp(x, y, nulp=1000) - -class TestULP(unittest.TestCase): - def test_equal(self): - x = np.random.randn(10) - assert_array_max_ulp(x, x, maxulp=0) - - def test_single(self): - # Generate 1 + small deviation, check that adding eps gives a few ULP - x = np.ones(10).astype(np.float32) - x += 0.01 * np.random.randn(10).astype(np.float32) - eps = np.finfo(np.float32).eps - assert_array_max_ulp(x, x+eps, maxulp=20) - - def test_double(self): - # Generate 1 + small deviation, check that adding eps gives a few ULP - x = np.ones(10).astype(np.float64) - x += 0.01 * np.random.randn(10).astype(np.float64) - eps = np.finfo(np.float64).eps - assert_array_max_ulp(x, x+eps, maxulp=200) - - def test_inf(self): - for dt in [np.float32, np.float64]: - inf = np.array([np.inf]).astype(dt) - big = np.array([np.finfo(dt).max]) - assert_array_max_ulp(inf, big, maxulp=200) - - def test_nan(self): - # Test that nan is 'far' from small, tiny, inf, max and min - for dt in [np.float32, np.float64]: - if dt == np.float32: - maxulp = 1e6 - else: - maxulp = 1e12 - inf = np.array([np.inf]).astype(dt) - nan = np.array([np.nan]).astype(dt) - big = np.array([np.finfo(dt).max]) - tiny = np.array([np.finfo(dt).tiny]) - zero = np.array([np.PZERO]).astype(dt) - nzero = np.array([np.NZERO]).astype(dt) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, inf, - maxulp=maxulp)) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, big, - maxulp=maxulp)) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, tiny, - maxulp=maxulp)) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, zero, - maxulp=maxulp)) - self.assertRaises(AssertionError, - lambda: assert_array_max_ulp(nan, nzero, - maxulp=maxulp)) -if __name__ == '__main__': - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/utils.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/utils.py deleted file mode 100644 index 4f45f62f4b2be..0000000000000 ---
a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/testing/utils.py +++ /dev/null @@ -1,1715 +0,0 @@ -""" -Utility function to facilitate testing. - -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import re -import operator -import warnings -from functools import partial -import shutil -import contextlib -from tempfile import mkdtemp -from .nosetester import import_nose -from numpy.core import float32, empty, arange, array_repr, ndarray - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -__all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal', - 'assert_array_equal', 'assert_array_less', 'assert_string_equal', - 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', - 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', - 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', - 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', - 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', - 'assert_allclose', 'IgnoreException'] - - -verbose = 0 - - -def assert_(val, msg='') : - """ - Assert that works in release mode. - Accepts callable msg to allow deferring evaluation until failure. - - The Python built-in ``assert`` does not work when executing code in - optimized mode (the ``-O`` flag) - no byte-code is generated for it. - - For documentation on usage, refer to the Python documentation. - - """ - if not val : - try: - smsg = msg() - except TypeError: - smsg = msg - raise AssertionError(smsg) - -def gisnan(x): - """like isnan, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isnan and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isnan - st = isnan(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isnan not supported for this type") - return st - -def gisfinite(x): - """like isfinite, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isfinite and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isfinite, errstate - with errstate(invalid='ignore'): - st = isfinite(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isfinite not supported for this type") - return st - -def gisinf(x): - """like isinf, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isinf and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isinf, errstate - with errstate(invalid='ignore'): - st = isinf(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isinf not supported for this type") - return st - -def rand(*args): - """Returns an array of random numbers with the given shape. - - This only uses the standard library, so it is useful for testing purposes. 
- """ - import random - from numpy.core import zeros, float64 - results = zeros(args, float64) - f = results.flat - for i in range(len(f)): - f[i] = random.random() - return results - -if sys.platform[:5]=='linux': - def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()), - _load_time=[]): - """ Return number of jiffies (1/100ths of a second) that this - process has been scheduled in user mode. See man 5 proc. """ - import time - if not _load_time: - _load_time.append(time.time()) - try: - f=open(_proc_pid_stat, 'r') - l = f.readline().split(' ') - f.close() - return int(l[13]) - except: - return int(100*(time.time()-_load_time[0])) - - def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())): - """ Return virtual memory size in bytes of the running python. - """ - try: - f=open(_proc_pid_stat, 'r') - l = f.readline().split(' ') - f.close() - return int(l[22]) - except: - return -else: - # os.getpid is not in all platforms available. - # Using time is safe but inaccurate, especially when process - # was suspended or sleeping. - def jiffies(_load_time=[]): - """ Return number of jiffies (1/100ths of a second) that this - process has been scheduled in user mode. [Emulation with time.time]. """ - import time - if not _load_time: - _load_time.append(time.time()) - return int(100*(time.time()-_load_time[0])) - def memusage(): - """ Return memory usage of running python. [Not implemented]""" - raise NotImplementedError - -if os.name=='nt' and sys.version[:3] > '2.3': - # Code "stolen" from enthought/debug/memusage.py - def GetPerformanceAttributes(object, counter, instance = None, - inum=-1, format = None, machine=None): - # NOTE: Many counters require 2 samples to give accurate results, - # including "% Processor Time" (as by definition, at any instant, a - # thread's CPU usage is either 0 or 100). To read counters like this, - # you should copy this function, but keep the counter open, and call - # CollectQueryData() each time you need to know. - # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp - # My older explanation for this was that the "AddCounter" process forced - # the CPU to 100%, but the above makes more sense :) - import win32pdh - if format is None: format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter) ) - hq = win32pdh.OpenQuery() - try: - hc = win32pdh.AddCounter(hq, path) - try: - win32pdh.CollectQueryData(hq) - type, val = win32pdh.GetFormattedCounterValue(hc, format) - return val - finally: - win32pdh.RemoveCounter(hc) - finally: - win32pdh.CloseQuery(hq) - - def memusage(processName="python", instance=0): - # from win32pdhutil, part of the win32all package - import win32pdh - return GetPerformanceAttributes("Process", "Virtual Bytes", - processName, instance, - win32pdh.PDH_FMT_LONG, None) - -def build_err_msg(arrays, err_msg, header='Items are not equal:', - verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): - msg = ['\n' + header] - if err_msg: - if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): - msg = [msg[0] + ' ' + err_msg] - else: - msg.append(err_msg) - if verbose: - for i, a in enumerate(arrays): - - if isinstance(a, ndarray): - # precision argument is only needed if the objects are ndarrays - r_func = partial(array_repr, precision=precision) - else: - r_func = repr - - try: - r = r_func(a) - except: - r = '[repr failed]' - if r.count('\n') > 3: - r = '\n'.join(r.splitlines()[:3]) - r += '...' 
- msg.append(' %s: %s' % (names[i], r)) - return '\n'.join(msg) - -def assert_equal(actual,desired,err_msg='',verbose=True): - """ - Raises an AssertionError if two objects are not equal. - - Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), - check that all elements of these objects are equal. An exception is raised - at the first conflicting values. - - Parameters - ---------- - actual : array_like - The object to check. - desired : array_like - The expected object. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal. - - Examples - -------- - >>> np.testing.assert_equal([4,5], [4,6]) - ... - : - Items are not equal: - item=1 - ACTUAL: 5 - DESIRED: 6 - - """ - if isinstance(desired, dict): - if not isinstance(actual, dict) : - raise AssertionError(repr(type(actual))) - assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in desired.items(): - if k not in actual : - raise AssertionError(repr(k)) - assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) - return - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - assert_equal(len(actual), len(desired), err_msg, verbose) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) - return - from numpy.core import ndarray, isscalar, signbit - from numpy.lib import iscomplexobj, real, imag - if isinstance(actual, ndarray) or isinstance(desired, ndarray): - return assert_array_equal(actual, desired, err_msg, verbose) - msg = build_err_msg([actual, desired], err_msg, verbose=verbose) - - # Handle complex numbers: separate into real/imag to handle - # nan/inf/negative zero correctly - # XXX: catch ValueError for subclasses of ndarray where iscomplex fail - try: - usecomplex = iscomplexobj(actual) or iscomplexobj(desired) - except ValueError: - usecomplex = False - - if usecomplex: - if iscomplexobj(actual): - actualr = real(actual) - actuali = imag(actual) - else: - actualr = actual - actuali = 0 - if iscomplexobj(desired): - desiredr = real(desired) - desiredi = imag(desired) - else: - desiredr = desired - desiredi = 0 - try: - assert_equal(actualr, desiredr) - assert_equal(actuali, desiredi) - except AssertionError: - raise AssertionError(msg) - - # Inf/nan/negative zero handling - try: - # isscalar test to check cases such as [np.nan] != np.nan - if isscalar(desired) != isscalar(actual): - raise AssertionError(msg) - - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - isdesnan = gisnan(desired) - isactnan = gisnan(actual) - if isdesnan or isactnan: - if not (isdesnan and isactnan): - raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) - return - elif desired == 0 and actual == 0: - if not signbit(desired) == signbit(actual): - raise AssertionError(msg) - # If TypeError or ValueError raised while using isnan and co, just handle - # as before - except (TypeError, ValueError, NotImplementedError): - pass - - # Explicitly use __eq__ for comparison, ticket #2552 - if not (desired == actual): - raise AssertionError(msg) - -def print_assert_equal(test_string, actual, desired): - """ - Test if two objects are equal, and print an 
error message if test fails. - - The test is performed with ``actual == desired``. - - Parameters - ---------- - test_string : str - The message supplied to AssertionError. - actual : object - The object to test for equality against `desired`. - desired : object - The expected result. - - Examples - -------- - >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) - >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) - Traceback (most recent call last): - ... - AssertionError: Test XYZ of func xyz failed - ACTUAL: - [0, 1] - DESIRED: - [0, 2] - - """ - import pprint - - if not (actual == desired): - msg = StringIO() - msg.write(test_string) - msg.write(' failed\nACTUAL: \n') - pprint.pprint(actual, msg) - msg.write('DESIRED: \n') - pprint.pprint(desired, msg) - raise AssertionError(msg.getvalue()) - -def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): - """ - Raises an AssertionError if two items are not equal up to desired - precision. - - .. note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - The test is equivalent to ``abs(desired-actual) < 0.5 * 10**(-decimal)``. - - Given two objects (numbers or ndarrays), check that all elements of these - objects are almost equal. An exception is raised at conflicting values. - For ndarrays this delegates to assert_array_almost_equal - - Parameters - ---------- - actual : array_like - The object to check. - desired : array_like - The expected object. - decimal : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - >>> import numpy.testing as npt - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) - ... - : - Items are not equal: - ACTUAL: 2.3333333333333002 - DESIRED: 2.3333333399999998 - - >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), - ... np.array([1.0,2.33333334]), decimal=9) - ... - : - Arrays are not almost equal - - (mismatch 50.0%) - x: array([ 1. , 2.33333333]) - y: array([ 1. 
, 2.33333334]) - - """ - from numpy.core import ndarray - from numpy.lib import iscomplexobj, real, imag - - # Handle complex numbers: separate into real/imag to handle - # nan/inf/negative zero correctly - # XXX: catch ValueError for subclasses of ndarray where iscomplex fail - try: - usecomplex = iscomplexobj(actual) or iscomplexobj(desired) - except ValueError: - usecomplex = False - - def _build_err_msg(): - header = ('Arrays are not almost equal to %d decimals' % decimal) - return build_err_msg([actual, desired], err_msg, verbose=verbose, - header=header) - - if usecomplex: - if iscomplexobj(actual): - actualr = real(actual) - actuali = imag(actual) - else: - actualr = actual - actuali = 0 - if iscomplexobj(desired): - desiredr = real(desired) - desiredi = imag(desired) - else: - desiredr = desired - desiredi = 0 - try: - assert_almost_equal(actualr, desiredr, decimal=decimal) - assert_almost_equal(actuali, desiredi, decimal=decimal) - except AssertionError: - raise AssertionError(_build_err_msg()) - - if isinstance(actual, (ndarray, tuple, list)) \ - or isinstance(desired, (ndarray, tuple, list)): - return assert_array_almost_equal(actual, desired, decimal, err_msg) - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - if gisnan(desired) or gisnan(actual): - if not (gisnan(desired) and gisnan(actual)): - raise AssertionError(_build_err_msg()) - else: - if not desired == actual: - raise AssertionError(_build_err_msg()) - return - except (NotImplementedError, TypeError): - pass - if round(abs(desired - actual), decimal) != 0 : - raise AssertionError(_build_err_msg()) - - -def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): - """ - Raises an AssertionError if two items are not equal up to significant - digits. - - .. note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - Given two numbers, check that they are approximately equal. - Approximately equal is defined as the number of significant digits - that agree. - - Parameters - ---------- - actual : scalar - The object to check. - desired : scalar - The expected object. - significant : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) - >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, - significant=8) - >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, - significant=8) - ... 
- : - Items are not equal to 8 significant digits: - ACTUAL: 1.234567e-021 - DESIRED: 1.2345672000000001e-021 - - the evaluated condition that raises the exception is - - >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) - True - - """ - import numpy as np - - (actual, desired) = map(float, (actual, desired)) - if desired==actual: - return - # Normalized the numbers to be in range (-10.0,10.0) - # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) - with np.errstate(invalid='ignore'): - scale = 0.5*(np.abs(desired) + np.abs(actual)) - scale = np.power(10, np.floor(np.log10(scale))) - try: - sc_desired = desired/scale - except ZeroDivisionError: - sc_desired = 0.0 - try: - sc_actual = actual/scale - except ZeroDivisionError: - sc_actual = 0.0 - msg = build_err_msg([actual, desired], err_msg, - header='Items are not equal to %d significant digits:' % - significant, - verbose=verbose) - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - if gisnan(desired) or gisnan(actual): - if not (gisnan(desired) and gisnan(actual)): - raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) - return - except (TypeError, NotImplementedError): - pass - if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)) : - raise AssertionError(msg) - -def assert_array_compare(comparison, x, y, err_msg='', verbose=True, - header='', precision=6): - from numpy.core import array, isnan, isinf, any, all, inf - x = array(x, copy=False, subok=True) - y = array(y, copy=False, subok=True) - - def isnumber(x): - return x.dtype.char in '?bhilqpBHILQPefdgFDG' - - def chk_same_position(x_id, y_id, hasval='nan'): - """Handling nan/inf: check that x and y have the nan/inf at the same - locations.""" - try: - assert_array_equal(x_id, y_id) - except AssertionError: - msg = build_err_msg([x, y], - err_msg + '\nx and y %s location mismatch:' \ - % (hasval), verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise AssertionError(msg) - - try: - cond = (x.shape==() or y.shape==()) or x.shape == y.shape - if not cond: - msg = build_err_msg([x, y], - err_msg - + '\n(shapes %s, %s mismatch)' % (x.shape, - y.shape), - verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - if not cond : - raise AssertionError(msg) - - if isnumber(x) and isnumber(y): - x_isnan, y_isnan = isnan(x), isnan(y) - x_isinf, y_isinf = isinf(x), isinf(y) - - # Validate that the special values are in the same place - if any(x_isnan) or any(y_isnan): - chk_same_position(x_isnan, y_isnan, hasval='nan') - if any(x_isinf) or any(y_isinf): - # Check +inf and -inf separately, since they are different - chk_same_position(x == +inf, y == +inf, hasval='+inf') - chk_same_position(x == -inf, y == -inf, hasval='-inf') - - # Combine all the special values - x_id, y_id = x_isnan, y_isnan - x_id |= x_isinf - y_id |= y_isinf - - # Only do the comparison if actual values are left - if all(x_id): - return - - if any(x_id): - val = comparison(x[~x_id], y[~y_id]) - else: - val = comparison(x, y) - else: - val = comparison(x, y) - - if isinstance(val, bool): - cond = val - reduced = [0] - else: - reduced = val.ravel() - cond = reduced.all() - reduced = reduced.tolist() - if not cond: - match = 100-100.0*reduced.count(1)/len(reduced) - msg = build_err_msg([x, y], - err_msg - + '\n(mismatch %s%%)' % 
(match,), - verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - if not cond : - raise AssertionError(msg) - except ValueError as e: - import traceback - efmt = traceback.format_exc() - header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header) - - msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise ValueError(msg) - -def assert_array_equal(x, y, err_msg='', verbose=True): - """ - Raises an AssertionError if two array_like objects are not equal. - - Given two array_like objects, check that the shape is equal and all - elements of these objects are equal. An exception is raised at - shape mismatch or conflicting values. In contrast to the standard usage - in numpy, NaNs are compared like numbers, no assertion is raised if - both objects have NaNs in the same positions. - - The usual caution for verifying equality with floating point numbers is - advised. - - Parameters - ---------- - x : array_like - The actual object to check. - y : array_like - The desired, expected object. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired objects are not equal. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - The first assert does not raise an exception: - - >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], - ... [np.exp(0),2.33333, np.nan]) - - Assert fails with numerical inprecision with floats: - - >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], - ... [1, np.sqrt(np.pi)**2, np.nan]) - ... - : - AssertionError: - Arrays are not equal - - (mismatch 50.0%) - x: array([ 1. , 3.14159265, NaN]) - y: array([ 1. , 3.14159265, NaN]) - - Use `assert_allclose` or one of the nulp (number of floating point values) - functions for these cases instead: - - >>> np.testing.assert_allclose([1.0,np.pi,np.nan], - ... [1, np.sqrt(np.pi)**2, np.nan], - ... rtol=1e-10, atol=0) - - """ - assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, - verbose=verbose, header='Arrays are not equal') - -def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): - """ - Raises an AssertionError if two objects are not equal up to desired - precision. - - .. note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - The test verifies identical shapes and verifies values with - ``abs(desired-actual) < 0.5 * 10**(-decimal)``. - - Given two array_like objects, check that the shape is equal and all - elements of these objects are almost equal. An exception is raised at - shape mismatch or conflicting values. In contrast to the standard usage - in numpy, NaNs are compared like numbers, no assertion is raised if - both objects have NaNs in the same positions. - - Parameters - ---------- - x : array_like - The actual object to check. - y : array_like - The desired, expected object. - decimal : int, optional - Desired precision, default is 6. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. 
- - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - the first assert does not raise an exception - - >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], - [1.0,2.333,np.nan]) - - >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], - ... [1.0,2.33339,np.nan], decimal=5) - ... - : - AssertionError: - Arrays are not almost equal - - (mismatch 50.0%) - x: array([ 1. , 2.33333, NaN]) - y: array([ 1. , 2.33339, NaN]) - - >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], - ... [1.0,2.33333, 5], decimal=5) - : - ValueError: - Arrays are not almost equal - x: array([ 1. , 2.33333, NaN]) - y: array([ 1. , 2.33333, 5. ]) - - """ - from numpy.core import around, number, float_, result_type, array - from numpy.core.numerictypes import issubdtype - from numpy.core.fromnumeric import any as npany - def compare(x, y): - try: - if npany(gisinf(x)) or npany( gisinf(y)): - xinfid = gisinf(x) - yinfid = gisinf(y) - if not xinfid == yinfid: - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - - # make sure y is an inexact type to avoid abs(MIN_INT); will cause - # casting of x later. - dtype = result_type(y, 1.) - y = array(y, dtype=dtype, copy=False, subok=True) - z = abs(x-y) - - if not issubdtype(z.dtype, number): - z = z.astype(float_) # handle object arrays - - return around(z, decimal) <= 10.0**(-decimal) - - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header=('Arrays are not almost equal to %d decimals' % decimal), - precision=decimal) - - -def assert_array_less(x, y, err_msg='', verbose=True): - """ - Raises an AssertionError if two array_like objects are not ordered by less - than. - - Given two array_like objects, check that the shape is equal and all - elements of the first object are strictly smaller than those of the - second object. An exception is raised at shape mismatch or incorrectly - ordered values. Shape mismatch does not raise if an object has zero - dimension. In contrast to the standard usage in numpy, NaNs are - compared, no assertion is raised if both objects have NaNs in the same - positions. - - - - Parameters - ---------- - x : array_like - The smaller object to check. - y : array_like - The larger object to compare. - err_msg : string - The error message to be printed in case of failure. - verbose : bool - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired objects are not equal. - - See Also - -------- - assert_array_equal: tests objects for equality - assert_array_almost_equal: test objects for equality up to precision - - - - Examples - -------- - >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) - >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) - ... - : - Arrays are not less-ordered - (mismatch 50.0%) - x: array([ 1., 1., NaN]) - y: array([ 1., 2., NaN]) - - >>> np.testing.assert_array_less([1.0, 4.0], 3) - ... - : - Arrays are not less-ordered - (mismatch 50.0%) - x: array([ 1., 4.]) - y: array(3) - - >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) - ... 
- : - Arrays are not less-ordered - (shapes (3,), (1,) mismatch) - x: array([ 1., 2., 3.]) - y: array([4]) - - """ - assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, - verbose=verbose, - header='Arrays are not less-ordered') - -def runstring(astr, dict): - exec(astr, dict) - -def assert_string_equal(actual, desired): - """ - Test if two strings are equal. - - If the given strings are equal, `assert_string_equal` does nothing. - If they are not equal, an AssertionError is raised, and the diff - between the strings is shown. - - Parameters - ---------- - actual : str - The string to test for equality against the expected string. - desired : str - The expected string. - - Examples - -------- - >>> np.testing.assert_string_equal('abc', 'abc') - >>> np.testing.assert_string_equal('abc', 'abcd') - Traceback (most recent call last): - File "", line 1, in - ... - AssertionError: Differences in strings: - - abc+ abcd? + - - """ - # delay import of difflib to reduce startup time - import difflib - - if not isinstance(actual, str) : - raise AssertionError(repr(type(actual))) - if not isinstance(desired, str): - raise AssertionError(repr(type(desired))) - if re.match(r'\A'+desired+r'\Z', actual, re.M): - return - - diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1))) - diff_list = [] - while diff: - d1 = diff.pop(0) - if d1.startswith(' '): - continue - if d1.startswith('- '): - l = [d1] - d2 = diff.pop(0) - if d2.startswith('? '): - l.append(d2) - d2 = diff.pop(0) - if not d2.startswith('+ ') : - raise AssertionError(repr(d2)) - l.append(d2) - d3 = diff.pop(0) - if d3.startswith('? '): - l.append(d3) - else: - diff.insert(0, d3) - if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]): - continue - diff_list.extend(l) - continue - raise AssertionError(repr(d1)) - if not diff_list: - return - msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip() - if actual != desired : - raise AssertionError(msg) - - -def rundocs(filename=None, raise_on_error=True): - """ - Run doctests found in the given file. - - By default `rundocs` raises an AssertionError on failure. - - Parameters - ---------- - filename : str - The path to the file for which the doctests are run. - raise_on_error : bool - Whether to raise an AssertionError when a doctest fails. Default is - True. - - Notes - ----- - The doctests can be run by the user/developer by adding the ``doctests`` - argument to the ``test()`` call. 
For example, to run all tests (including - doctests) for `numpy.lib`: - - >>> np.lib.test(doctests=True) #doctest: +SKIP - """ - import doctest, imp - if filename is None: - f = sys._getframe(1) - filename = f.f_globals['__file__'] - name = os.path.splitext(os.path.basename(filename))[0] - path = [os.path.dirname(filename)] - file, pathname, description = imp.find_module(name, path) - try: - m = imp.load_module(name, file, pathname, description) - finally: - file.close() - - tests = doctest.DocTestFinder().find(m) - runner = doctest.DocTestRunner(verbose=False) - - msg = [] - if raise_on_error: - out = lambda s: msg.append(s) - else: - out = None - - for test in tests: - runner.run(test, out=out) - - if runner.failures > 0 and raise_on_error: - raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) - - -def raises(*args,**kwargs): - nose = import_nose() - return nose.tools.raises(*args,**kwargs) - - -def assert_raises(*args,**kwargs): - """ - assert_raises(exception_class, callable, *args, **kwargs) - - Fail unless an exception of class exception_class is thrown - by callable when invoked with arguments args and keyword - arguments kwargs. If a different type of exception is - thrown, it will not be caught, and the test case will be - deemed to have suffered an error, exactly as for an - unexpected exception. - - """ - nose = import_nose() - return nose.tools.assert_raises(*args,**kwargs) - - -assert_raises_regex_impl = None - - -def assert_raises_regex(exception_class, expected_regexp, - callable_obj=None, *args, **kwargs): - """ - Fail unless an exception of class exception_class and with message that - matches expected_regexp is thrown by callable when invoked with arguments - args and keyword arguments kwargs. - - Name of this function adheres to Python 3.2+ reference, but should work in - all versions down to 2.6. 
- - """ - nose = import_nose() - - global assert_raises_regex_impl - if assert_raises_regex_impl is None: - try: - # Python 3.2+ - assert_raises_regex_impl = nose.tools.assert_raises_regex - except AttributeError: - try: - # 2.7+ - assert_raises_regex_impl = nose.tools.assert_raises_regexp - except AttributeError: - # 2.6 - - # This class is copied from Python2.7 stdlib almost verbatim - class _AssertRaisesContext(object): - """A context manager used to implement TestCase.assertRaises* methods.""" - - def __init__(self, expected, expected_regexp=None): - self.expected = expected - self.expected_regexp = expected_regexp - - def failureException(self, msg): - return AssertionError(msg) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - if exc_type is None: - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - raise self.failureException( - "{0} not raised".format(exc_name)) - if not issubclass(exc_type, self.expected): - # let unexpected exceptions pass through - return False - self.exception = exc_value # store for later retrieval - if self.expected_regexp is None: - return True - - expected_regexp = self.expected_regexp - if isinstance(expected_regexp, basestring): - expected_regexp = re.compile(expected_regexp) - if not expected_regexp.search(str(exc_value)): - raise self.failureException( - '"%s" does not match "%s"' % - (expected_regexp.pattern, str(exc_value))) - return True - - def impl(cls, regex, callable_obj, *a, **kw): - mgr = _AssertRaisesContext(cls, regex) - if callable_obj is None: - return mgr - with mgr: - callable_obj(*a, **kw) - assert_raises_regex_impl = impl - - return assert_raises_regex_impl(exception_class, expected_regexp, - callable_obj, *args, **kwargs) - - -def decorate_methods(cls, decorator, testmatch=None): - """ - Apply a decorator to all methods in a class matching a regular expression. - - The given decorator is applied to all public methods of `cls` that are - matched by the regular expression `testmatch` - (``testmatch.search(methodname)``). Methods that are private, i.e. start - with an underscore, are ignored. - - Parameters - ---------- - cls : class - Class whose methods to decorate. - decorator : function - Decorator to apply to methods - testmatch : compiled regexp or str, optional - The regular expression. Default value is None, in which case the - nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) - is used. - If `testmatch` is a string, it is compiled to a regular expression - first. - - """ - if testmatch is None: - testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) - else: - testmatch = re.compile(testmatch) - cls_attr = cls.__dict__ - - # delayed import to reduce startup time - from inspect import isfunction - - methods = [_m for _m in cls_attr.values() if isfunction(_m)] - for function in methods: - try: - if hasattr(function, 'compat_func_name'): - funcname = function.compat_func_name - else: - funcname = function.__name__ - except AttributeError: - # not a function - continue - if testmatch.search(funcname) and not funcname.startswith('_'): - setattr(cls, funcname, decorator(function)) - return - - -def measure(code_str,times=1,label=None): - """ - Return elapsed time for executing code in the namespace of the caller. - - The supplied code string is compiled with the Python builtin ``compile``. - The precision of the timing is 10 milli-seconds. 
If the code will execute - fast on this timescale, it can be executed many times to get reasonable - timing accuracy. - - Parameters - ---------- - code_str : str - The code to be timed. - times : int, optional - The number of times the code is executed. Default is 1. The code is - only compiled once. - label : str, optional - A label to identify `code_str` with. This is passed into ``compile`` - as the second argument (for run-time error messages). - - Returns - ------- - elapsed : float - Total elapsed time in seconds for executing `code_str` `times` times. - - Examples - -------- - >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', - ... times=times) - >>> print "Time for a single execution : ", etime / times, "s" - Time for a single execution : 0.005 s - - """ - frame = sys._getframe(1) - locs, globs = frame.f_locals, frame.f_globals - - code = compile(code_str, - 'Test name: %s ' % label, - 'exec') - i = 0 - elapsed = jiffies() - while i < times: - i += 1 - exec(code, globs, locs) - elapsed = jiffies() - elapsed - return 0.01*elapsed - -def _assert_valid_refcount(op): - """ - Check that ufuncs don't mishandle refcount of object `1`. - Used in a few regression tests. - """ - import numpy as np - a = np.arange(100 * 100) - b = np.arange(100*100).reshape(100, 100) - c = b - - i = 1 - - rc = sys.getrefcount(i) - for j in range(15): - d = op(b, c) - - assert_(sys.getrefcount(i) >= rc) - -def assert_allclose(actual, desired, rtol=1e-7, atol=0, - err_msg='', verbose=True): - """ - Raises an AssertionError if two objects are not equal up to desired - tolerance. - - The test is equivalent to ``allclose(actual, desired, rtol, atol)``. - It compares the difference between `actual` and `desired` to - ``atol + rtol * abs(desired)``. - - .. versionadded:: 1.5.0 - - Parameters - ---------- - actual : array_like - Array obtained. - desired : array_like - Array desired. - rtol : float, optional - Relative tolerance. - atol : float, optional - Absolute tolerance. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_array_almost_equal_nulp, assert_array_max_ulp - - Examples - -------- - >>> x = [1e-5, 1e-3, 1e-1] - >>> y = np.arccos(np.cos(x)) - >>> assert_allclose(x, y, rtol=1e-5, atol=0) - - """ - import numpy as np - def compare(x, y): - return np.allclose(x, y, rtol=rtol, atol=atol) - - actual, desired = np.asanyarray(actual), np.asanyarray(desired) - header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) - assert_array_compare(compare, actual, desired, err_msg=str(err_msg), - verbose=verbose, header=header) - -def assert_array_almost_equal_nulp(x, y, nulp=1): - """ - Compare two arrays relatively to their spacing. - - This is a relatively robust method to compare two arrays whose amplitude - is variable. - - Parameters - ---------- - x, y : array_like - Input arrays. - nulp : int, optional - The maximum number of unit in the last place for tolerance (see Notes). - Default is 1. - - Returns - ------- - None - - Raises - ------ - AssertionError - If the spacing between `x` and `y` for one or more elements is larger - than `nulp`. - - See Also - -------- - assert_array_max_ulp : Check that all items of arrays differ in at most - N Units in the Last Place. 
- spacing : Return the distance between x and the nearest adjacent number. - - Notes - ----- - An assertion is raised if the following condition is not met:: - - abs(x - y) <= nulps * spacing(max(abs(x), abs(y))) - - Examples - -------- - >>> x = np.array([1., 1e-10, 1e-20]) - >>> eps = np.finfo(x.dtype).eps - >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) - - >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) - Traceback (most recent call last): - ... - AssertionError: X and Y are not equal to 1 ULP (max is 2) - - """ - import numpy as np - ax = np.abs(x) - ay = np.abs(y) - ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) - if not np.all(np.abs(x-y) <= ref): - if np.iscomplexobj(x) or np.iscomplexobj(y): - msg = "X and Y are not equal to %d ULP" % nulp - else: - max_nulp = np.max(nulp_diff(x, y)) - msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) - raise AssertionError(msg) - -def assert_array_max_ulp(a, b, maxulp=1, dtype=None): - """ - Check that all items of arrays differ in at most N Units in the Last Place. - - Parameters - ---------- - a, b : array_like - Input arrays to be compared. - maxulp : int, optional - The maximum number of units in the last place that elements of `a` and - `b` can differ. Default is 1. - dtype : dtype, optional - Data-type to convert `a` and `b` to if given. Default is None. - - Returns - ------- - ret : ndarray - Array containing number of representable floating point numbers between - items in `a` and `b`. - - Raises - ------ - AssertionError - If one or more elements differ by more than `maxulp`. - - See Also - -------- - assert_array_almost_equal_nulp : Compare two arrays relatively to their - spacing. - - Examples - -------- - >>> a = np.linspace(0., 1., 100) - >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) - - """ - import numpy as np - ret = nulp_diff(a, b, dtype) - if not np.all(ret <= maxulp): - raise AssertionError("Arrays are not almost equal up to %g ULP" % \ - maxulp) - return ret - -def nulp_diff(x, y, dtype=None): - """For each item in x and y, return the number of representable floating - points between them. - - Parameters - ---------- - x : array_like - first input array - y : array_like - second input array - - Returns - ------- - nulp : array_like - number of representable floating point numbers between each item in x - and y. 
- - Examples - -------- - # By definition, epsilon is the smallest number such as 1 + eps != 1, so - # there should be exactly one ULP between 1 and 1 + eps - >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) - 1.0 - """ - import numpy as np - if dtype: - x = np.array(x, dtype=dtype) - y = np.array(y, dtype=dtype) - else: - x = np.array(x) - y = np.array(y) - - t = np.common_type(x, y) - if np.iscomplexobj(x) or np.iscomplexobj(y): - raise NotImplementedError("_nulp not implemented for complex array") - - x = np.array(x, dtype=t) - y = np.array(y, dtype=t) - - if not x.shape == y.shape: - raise ValueError("x and y do not have the same shape: %s - %s" % \ - (x.shape, y.shape)) - - def _diff(rx, ry, vdt): - diff = np.array(rx-ry, dtype=vdt) - return np.abs(diff) - - rx = integer_repr(x) - ry = integer_repr(y) - return _diff(rx, ry, t) - -def _integer_repr(x, vdt, comp): - # Reinterpret binary representation of the float as sign-magnitude: - # take into account two-complement representation - # See also - # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm - rx = x.view(vdt) - if not (rx.size == 1): - rx[rx < 0] = comp - rx[rx<0] - else: - if rx < 0: - rx = comp - rx - - return rx - -def integer_repr(x): - """Return the signed-magnitude interpretation of the binary representation of - x.""" - import numpy as np - if x.dtype == np.float32: - return _integer_repr(x, np.int32, np.int32(-2**31)) - elif x.dtype == np.float64: - return _integer_repr(x, np.int64, np.int64(-2**63)) - else: - raise ValueError("Unsupported dtype %s" % x.dtype) - -# The following two classes are copied from python 2.6 warnings module (context -# manager) -class WarningMessage(object): - - """ - Holds the result of a single showwarning() call. - - Deprecated in 1.8.0 - - Notes - ----- - `WarningMessage` is copied from the Python 2.6 warnings module, - so it can be used in NumPy with older Python versions. - - """ - - _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", - "line") - - def __init__(self, message, category, filename, lineno, file=None, - line=None): - local_values = locals() - for attr in self._WARNING_DETAILS: - setattr(self, attr, local_values[attr]) - if category: - self._category_name = category.__name__ - else: - self._category_name = None - - def __str__(self): - return ("{message : %r, category : %r, filename : %r, lineno : %s, " - "line : %r}" % (self.message, self._category_name, - self.filename, self.lineno, self.line)) - -class WarningManager(object): - """ - A context manager that copies and restores the warnings filter upon - exiting the context. - - The 'record' argument specifies whether warnings should be captured by a - custom implementation of ``warnings.showwarning()`` and be appended to a - list returned by the context manager. Otherwise None is returned by the - context manager. The objects appended to the list are arguments whose - attributes mirror the arguments to ``showwarning()``. - - The 'module' argument is to specify an alternative module to the module - named 'warnings' and imported under that name. This argument is only useful - when testing the warnings module itself. - - Deprecated in 1.8.0 - - Notes - ----- - `WarningManager` is a copy of the ``catch_warnings`` context manager - from the Python 2.6 warnings module, with slight modifications. - It is copied so it can be used in NumPy with older Python versions. 
- - """ - def __init__(self, record=False, module=None): - self._record = record - if module is None: - self._module = sys.modules['warnings'] - else: - self._module = module - self._entered = False - - def __enter__(self): - if self._entered: - raise RuntimeError("Cannot enter %r twice" % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._showwarning = self._module.showwarning - if self._record: - log = [] - def showwarning(*args, **kwargs): - log.append(WarningMessage(*args, **kwargs)) - self._module.showwarning = showwarning - return log - else: - return None - - def __exit__(self): - if not self._entered: - raise RuntimeError("Cannot exit %r without entering first" % self) - self._module.filters = self._filters - self._module.showwarning = self._showwarning - - -def assert_warns(warning_class, func, *args, **kw): - """ - Fail unless the given callable throws the specified warning. - - A warning of class warning_class should be thrown by the callable when - invoked with arguments args and keyword arguments kwargs. - If a different type of warning is thrown, it will not be caught, and the - test case will be deemed to have suffered an error. - - .. versionadded:: 1.4.0 - - Parameters - ---------- - warning_class : class - The class defining the warning that `func` is expected to throw. - func : callable - The callable to test. - \\*args : Arguments - Arguments passed to `func`. - \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. - - Returns - ------- - The value returned by `func`. - - """ - with warnings.catch_warnings(record=True) as l: - warnings.simplefilter('always') - result = func(*args, **kw) - if not len(l) > 0: - raise AssertionError("No warning raised when calling %s" - % func.__name__) - if not l[0].category is warning_class: - raise AssertionError("First warning for %s is not a " \ - "%s( is %s)" % (func.__name__, warning_class, l[0])) - return result - -def assert_no_warnings(func, *args, **kw): - """ - Fail if the given callable produces any warnings. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - func : callable - The callable to test. - \\*args : Arguments - Arguments passed to `func`. - \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. - - Returns - ------- - The value returned by `func`. 
- - """ - with warnings.catch_warnings(record=True) as l: - warnings.simplefilter('always') - result = func(*args, **kw) - if len(l) > 0: - raise AssertionError("Got warnings when calling %s: %s" - % (func.__name__, l)) - return result - - -def _gen_alignment_data(dtype=float32, type='binary', max_size=24): - """ - generator producing data with different alignment and offsets - to test simd vectorization - - Parameters - ---------- - dtype : dtype - data type to produce - type : string - 'unary': create data for unary operations, creates one input - and output array - 'binary': create data for unary operations, creates two input - and output array - max_size : integer - maximum size of data to produce - - Returns - ------- - if type is 'unary' yields one output, one input array and a message - containing information on the data - if type is 'binary' yields one output array, two input array and a message - containing information on the data - - """ - ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' - bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' - for o in range(3): - for s in range(o + 2, max(o + 3, max_size)): - if type == 'unary': - inp = lambda : arange(s, dtype=dtype)[o:] - out = empty((s,), dtype=dtype)[o:] - yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') - yield inp(), inp(), ufmt % (o, o, s, dtype, 'in place') - yield out[1:], inp()[:-1], ufmt % \ - (o + 1, o, s - 1, dtype, 'out of place') - yield out[:-1], inp()[1:], ufmt % \ - (o, o + 1, s - 1, dtype, 'out of place') - yield inp()[:-1], inp()[1:], ufmt % \ - (o, o + 1, s - 1, dtype, 'aliased') - yield inp()[1:], inp()[:-1], ufmt % \ - (o + 1, o, s - 1, dtype, 'aliased') - if type == 'binary': - inp1 = lambda :arange(s, dtype=dtype)[o:] - inp2 = lambda :arange(s, dtype=dtype)[o:] - out = empty((s,), dtype=dtype)[o:] - yield out, inp1(), inp2(), bfmt % \ - (o, o, o, s, dtype, 'out of place') - yield inp1(), inp1(), inp2(), bfmt % \ - (o, o, o, s, dtype, 'in place1') - yield inp2(), inp1(), inp2(), bfmt % \ - (o, o, o, s, dtype, 'in place2') - yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ - (o + 1, o, o, s - 1, dtype, 'out of place') - yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ - (o, o + 1, o, s - 1, dtype, 'out of place') - yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ - (o, o, o + 1, s - 1, dtype, 'out of place') - yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ - (o + 1, o, o, s - 1, dtype, 'aliased') - yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ - (o, o + 1, o, s - 1, dtype, 'aliased') - yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ - (o, o, o + 1, s - 1, dtype, 'aliased') - - -class IgnoreException(Exception): - "Ignoring this exception due to disabled feature" - - -@contextlib.contextmanager -def tempdir(*args, **kwargs): - """Context manager to provide a temporary test folder. - - All arguments are passed as this to the underlying tempfile.mkdtemp - function. 
- - """ - tmpdir = mkdtemp(*args, **kwargs) - yield tmpdir - shutil.rmtree(tmpdir) diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_ctypeslib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_ctypeslib.py deleted file mode 100644 index 8e9c6c0bd9c76..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_ctypeslib.py +++ /dev/null @@ -1,102 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys - -import numpy as np -from numpy.ctypeslib import ndpointer, load_library -from numpy.distutils.misc_util import get_shared_lib_extension -from numpy.testing import * - -try: - cdll = load_library('multiarray', np.core.multiarray.__file__) - _HAS_CTYPE = True -except ImportError: - _HAS_CTYPE = False - -class TestLoadLibrary(TestCase): - @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation") - @dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin") - def test_basic(self): - try: - cdll = load_library('multiarray', - np.core.multiarray.__file__) - except ImportError as e: - msg = "ctypes is not available on this python: skipping the test" \ - " (import error was: %s)" % str(e) - print(msg) - - @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation") - @dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin") - def test_basic2(self): - """Regression for #801: load_library with a full library name - (including extension) does not work.""" - try: - try: - so = get_shared_lib_extension(is_python_ext=True) - cdll = load_library('multiarray%s' % so, - np.core.multiarray.__file__) - except ImportError: - print("No distutils available, skipping test.") - except ImportError as e: - msg = "ctypes is not available on this python: skipping the test" \ - " (import error was: %s)" % str(e) - print(msg) - -class TestNdpointer(TestCase): - def test_dtype(self): - dt = np.intc - p = ndpointer(dtype=dt) - self.assertTrue(p.from_param(np.array([1], dt))) - dt = 'i4') - p = ndpointer(dtype=dt) - p.from_param(np.array([1], dt)) - self.assertRaises(TypeError, p.from_param, - np.array([1], dt.newbyteorder('swap'))) - dtnames = ['x', 'y'] - dtformats = [np.intc, np.float64] - dtdescr = {'names' : dtnames, 'formats' : dtformats} - dt = np.dtype(dtdescr) - p = ndpointer(dtype=dt) - self.assertTrue(p.from_param(np.zeros((10,), dt))) - samedt = np.dtype(dtdescr) - p = ndpointer(dtype=samedt) - self.assertTrue(p.from_param(np.zeros((10,), dt))) - dt2 = np.dtype(dtdescr, align=True) - if dt.itemsize != dt2.itemsize: - self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2)) - else: - self.assertTrue(p.from_param(np.zeros((10,), dt2))) - - def test_ndim(self): - p = ndpointer(ndim=0) - self.assertTrue(p.from_param(np.array(1))) - self.assertRaises(TypeError, p.from_param, np.array([1])) - p = ndpointer(ndim=1) - self.assertRaises(TypeError, p.from_param, np.array(1)) - self.assertTrue(p.from_param(np.array([1]))) - p = ndpointer(ndim=2) - self.assertTrue(p.from_param(np.array([[1]]))) - - def test_shape(self): - p = ndpointer(shape=(1, 2)) - self.assertTrue(p.from_param(np.array([[1, 2]]))) - self.assertRaises(TypeError, p.from_param, np.array([[1], [2]])) - p = ndpointer(shape=()) - self.assertTrue(p.from_param(np.array(1))) - - def test_flags(self): - x = np.array([[1, 2], [3, 4]], order='F') - p = ndpointer(flags='FORTRAN') - self.assertTrue(p.from_param(x)) - p = ndpointer(flags='CONTIGUOUS') - self.assertRaises(TypeError, p.from_param, x) - p = 
ndpointer(flags=x.flags.num) - self.assertTrue(p.from_param(x)) - self.assertRaises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_matlib.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_matlib.py deleted file mode 100644 index 0bc8548baa7f9..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/tests/test_matlib.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpy as np -import numpy.matlib -from numpy.testing import assert_array_equal, assert_, run_module_suite - -def test_empty(): - x = np.matlib.empty((2,)) - assert_(isinstance(x, np.matrix)) - assert_(x.shape, (1, 2)) - -def test_ones(): - assert_array_equal(np.matlib.ones((2, 3)), - np.matrix([[ 1., 1., 1.], - [ 1., 1., 1.]])) - - assert_array_equal(np.matlib.ones(2), np.matrix([[ 1., 1.]])) - -def test_zeros(): - assert_array_equal(np.matlib.zeros((2, 3)), - np.matrix([[ 0., 0., 0.], - [ 0., 0., 0.]])) - - assert_array_equal(np.matlib.zeros(2), np.matrix([[ 0., 0.]])) - -def test_identity(): - x = np.matlib.identity(2, dtype=np.int) - assert_array_equal(x, np.matrix([[1, 0], [0, 1]])) - -def test_eye(): - x = np.matlib.eye(3, k=1, dtype=int) - assert_array_equal(x, np.matrix([[ 0, 1, 0], - [ 0, 0, 1], - [ 0, 0, 0]])) - -def test_rand(): - x = np.matlib.rand(3) - # check matrix type, array would have shape (3,) - assert_(x.ndim == 2) - -def test_randn(): - x = np.matlib.randn(3) - # check matrix type, array would have shape (3,) - assert_(x.ndim == 2) - -def test_repmat(): - a1 = np.arange(4) - x = np.matlib.repmat(a1, 2, 2) - y = np.array([[0, 1, 2, 3, 0, 1, 2, 3], - [0, 1, 2, 3, 0, 1, 2, 3]]) - assert_array_equal(x, y) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/version.py b/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/version.py deleted file mode 100644 index 2eebbacb5df6c..0000000000000 --- a/numpy-1.9.0-py3.4-linux-x86_64.egg/numpy/version.py +++ /dev/null @@ -1,10 +0,0 @@ - -# THIS FILE IS GENERATED FROM NUMPY SETUP.PY -short_version = '1.9.0' -version = '1.9.0' -full_version = '1.9.0' -git_revision = '07601a64cdfeb1c0247bde1294ad6380413cab66' -release = True - -if not release: - version = full_version From 4261ea58500e9aaaf8f89a28e547aa1942667e70 Mon Sep 17 00:00:00 2001 From: bertrandhaut Date: Thu, 9 Oct 2014 19:05:47 +0200 Subject: [PATCH 3/6] Reformatting internals.py --- pandas/core/internals.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index e2978bd75d4ff..7f091779ad56c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1171,17 +1171,22 @@ def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.'
values = np.array(values, dtype=object) mask = isnull(values) values[mask] = na_rep - if not float_format and decimal != '.': - float_format = '%f' - if float_format: - imask = (~mask).ravel() - values.flat[imask] = np.array( - [float_format % val for val in values.ravel()[imask]]) - if decimal != '.': + + + if float_format and decimal != '.': + formater = lambda v : (float_format % v).replace('.',decimal,1) + elif decimal != '.': + formater = lambda v : ('%g' % v).replace('.',decimal,1) + elif float_format: + formater = lambda v : float_format % v + else: + formater = None + + if formater: imask = (~mask).ravel() values.flat[imask] = np.array( - [val.replace('.',',',1) for val in values.ravel()[imask]]) - + [formater(val) for val in values.ravel()[imask]]) + return values.tolist() def should_store(self, value): From e4100657a57649ad445fc5f87e01a4d8879266a6 Mon Sep 17 00:00:00 2001 From: bertrandhaut Date: Mon, 23 Feb 2015 09:15:45 +0100 Subject: [PATCH 4/6] Test for to_csv decimal separator option --- pandas/tests/test_format.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 9216b7a286c54..cf8dceff90fae 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -2343,7 +2343,15 @@ def test_csv_to_string(self): df = DataFrame({'col' : [1,2]}) expected = ',col\n0,1\n1,2\n' self.assertEqual(df.to_csv(), expected) - + + def test_to_csv_decimal(self): + df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] }) + + expected_default = ',col1,col2,col3\n0,1,a,10.1\n' + self.assertEqual(df.to_csv(), expected_default) + + expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n' + self.assertEqual(df.to_csv(decimal=',',sep=';'), expected_european_excel) class TestSeriesFormatting(tm.TestCase): _multiprocess_can_split_ = True From f129b0cd752b79375281a5b07855d0e21d862ec7 Mon Sep 17 00:00:00 2001 From: bertrandhaut Date: Mon, 23 Feb 2015 11:04:46 +0100 Subject: [PATCH 5/6] Joris' comments --- pandas/core/frame.py | 2 +- pandas/core/internals.py | 12 ++++++------ pandas/core/series.py | 2 +- pandas/tests/test_format.py | 6 ++++++ 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4aa9e9a713955..b7350dfd5d77c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1127,7 +1127,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, Format string for datetime objects cols : kwarg only alias of columns [deprecated] decimal: string, default '.' - Character recognized as decimal separator. E.g. use ‘,’ for European data + Character recognized as decimal separator. E.g. use ',' for European data """ formatter = fmt.CSVFormatter(self, path_or_buf, diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 7f091779ad56c..65419e2c29d75 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1174,18 +1174,18 @@ def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.' 
if float_format and decimal != '.': - formater = lambda v : (float_format % v).replace('.',decimal,1) + formatter = lambda v : (float_format % v).replace('.',decimal,1) elif decimal != '.': - formater = lambda v : ('%g' % v).replace('.',decimal,1) + formatter = lambda v : ('%g' % v).replace('.',decimal,1) elif float_format: - formater = lambda v : float_format % v + formatter = lambda v : float_format % v else: - formater = None + formatter = None - if formater: + if formatter: imask = (~mask).ravel() values.flat[imask] = np.array( - [formater(val) for val in values.ravel()[imask]]) + [formatter(val) for val in values.ravel()[imask]]) return values.tolist() diff --git a/pandas/core/series.py b/pandas/core/series.py index ef9093c89a713..e19e51fb9c9e5 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2268,7 +2268,7 @@ def to_csv(self, path, index=True, sep=",", na_rep='', date_format: string, default None Format string for datetime objects. decimal: string, default '.' - Character recognized as decimal separator. E.g. use ‘,’ for European data + Character recognized as decimal separator. E.g. use ',' for European data """ from pandas.core.frame import DataFrame df = DataFrame(self) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index cf8dceff90fae..52e17d044c28b 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -2352,6 +2352,12 @@ def test_to_csv_decimal(self): expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n' self.assertEqual(df.to_csv(decimal=',',sep=';'), expected_european_excel) + + expected_float_format_default = ',col1,col2,col3\n0,1,a,10.10\n' + self.assertEqual(df.to_csv(float_format = '%.2f'), expected_float_format_default) + + expected_float_format = ';col1;col2;col3\n0;1;a;10,10\n' + self.assertEqual(df.to_csv(decimal=',',sep=';', float_format = '%.2f'), expected_float_format) class TestSeriesFormatting(tm.TestCase): _multiprocess_can_split_ = True From f9a3e452c343220b00691c28c8aac304800a33cf Mon Sep 17 00:00:00 2001 From: bertrandhaut Date: Tue, 3 Mar 2015 09:07:43 +0100 Subject: [PATCH 6/6] issue number as comment --- pandas/tests/test_format.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 52e17d044c28b..e2823571fe258 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -2345,6 +2345,7 @@ def test_csv_to_string(self): self.assertEqual(df.to_csv(), expected) def test_to_csv_decimal(self): + # GH 8448 df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] }) expected_default = ',col1,col2,col3\n0,1,a,10.1\n'
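For readers following the series: the float-rendering rule that patches 3/6 and 5/6 converge on inside FloatBlock.to_native_types can be read in isolation. The sketch below is illustrative only and not part of the patches; make_float_formatter is a hypothetical name (the patch inlines these lambdas directly), but the branch order and the replace('.', decimal, 1) substitution mirror the final diff above.

# Illustrative sketch, not pandas code: the formatter selection from the
# internals.py hunk, wrapped in a hypothetical helper for clarity.
def make_float_formatter(float_format=None, decimal='.'):
    if float_format and decimal != '.':
        # apply the explicit format string, then swap the first '.'
        # for the requested decimal separator
        return lambda v: (float_format % v).replace('.', decimal, 1)
    elif decimal != '.':
        # no format string given: '%g' yields a compact rendering to
        # substitute into (the earlier '%f' fallback, dropped in patch
        # 3/6, would have padded every value to six decimal places)
        return lambda v: ('%g' % v).replace('.', decimal, 1)
    elif float_format:
        return lambda v: float_format % v
    return None  # default path: values are written out unchanged

# The renderings match what test_to_csv_decimal asserts:
assert make_float_formatter(decimal=',')(10.1) == '10,1'
assert make_float_formatter(float_format='%.2f', decimal=',')(10.1) == '10,10'
assert make_float_formatter() is None

In to_csv terms: df.to_csv(decimal=',', sep=';') writes 10.1 as 10,1, and df.to_csv(decimal=',', sep=';', float_format='%.2f') writes it as 10,10, exactly the European-Excel expectations encoded in the test above.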